# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from typing import Any, Dict, Optional, Text, Tuple
from model.layers import get_activation
from model.blocks import conv2d_block
__all__ = ['mb_conv_block']
def mb_conv_block(inputs: tf.Tensor,
block: dict,
config: dict,
prefix: Optional[Text] = None):
"""Mobile Inverted Residual Bottleneck.
Args:
inputs: the Keras input to the block
block: BlockConfig, arguments to create a Block
config: ModelConfig, a set of model parameters
prefix: prefix for naming all layers
Returns:
the output of the block
"""
use_se = config.mparams.use_se if 'use_se' in config.mparams else block['se_ratio'] is not None
activation = get_activation(config.mparams.activation)
drop_connect_rate = config.mparams.drop_connect_rate
data_format = tf.keras.backend.image_data_format()
use_depthwise = block['conv_type'] != 'no_depthwise'
prefix = prefix or ''
filters = block['input_filters'] * block['expand_ratio']
x = inputs
if block['fused_conv']:
# If we use fused mbconv, skip expansion and use regular conv.
x = conv2d_block(x,
filters,
config,
kernel_size=block['kernel_size'],
strides=block['strides'],
activation=activation,
name=prefix + 'fused')
else:
if block['expand_ratio'] != 1:
# Expansion phase
kernel_size = (1, 1) if use_depthwise else (3, 3)
x = conv2d_block(x,
filters,
config,
kernel_size=kernel_size,
activation=activation,
name=prefix + 'expand')
# Depthwise Convolution
if use_depthwise:
x = conv2d_block(x,
conv_filters=None,
config=config,
kernel_size=block['kernel_size'],
strides=block['strides'],
activation=activation,
depthwise=True,
name=prefix + 'depthwise')
# Squeeze and Excitation phase
if use_se:
assert block['se_ratio'] is not None
assert 0 < block['se_ratio'] <= 1
num_reduced_filters = max(1, int(
block['input_filters'] * block['se_ratio']
))
if data_format == 'channels_first':
se_shape = (filters, 1, 1)
else:
se_shape = (1, 1, filters)
se = tf.keras.layers.GlobalAveragePooling2D(name=prefix + 'se_squeeze')(x)
se = tf.keras.layers.Reshape(se_shape, name=prefix + 'se_reshape')(se)
se = conv2d_block(se,
num_reduced_filters,
config,
use_bias=True,
use_batch_norm=False,
activation=activation,
name=prefix + 'se_reduce')
se = conv2d_block(se,
filters,
config,
use_bias=True,
use_batch_norm=False,
activation='sigmoid',
name=prefix + 'se_expand')
x = tf.keras.layers.multiply([x, se], name=prefix + 'se_excite')
# Output phase
x = conv2d_block(x,
block['output_filters'],
config,
activation=None,
name=prefix + 'project')
# Add identity so that quantization-aware training can insert quantization
# ops correctly.
x = tf.keras.layers.Activation(get_activation('identity'),
name=prefix + 'id')(x)
if (block['id_skip']
and all(s == 1 for s in block['strides'])
and block['input_filters'] == block['output_filters']):
if drop_connect_rate and drop_connect_rate > 0:
# Apply dropconnect
# The only difference between dropout and dropconnect in TF is scaling by
# drop_connect_rate during training. See:
# https://github.com/keras-team/keras/pull/9898#issuecomment-380577612
x = tf.keras.layers.Dropout(drop_connect_rate,
noise_shape=(None, 1, 1, 1),
name=prefix + 'drop')(x)
x = tf.keras.layers.add([x, inputs], name=prefix + 'add')
return x | DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/model/blocks/mb_conv_block.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Customized Swish activation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import math
import tensorflow as tf
__all__ = ['simple_swish', 'hard_swish', 'identity', 'gelu', 'get_activation']
@tf.keras.utils.register_keras_serializable(package='Text')
def simple_swish(features):
"""Computes the Swish activation function.
The tf.nn.swish operation uses a custom gradient to reduce memory usage.
Since saving custom gradients in SavedModel is currently not supported, and
one would not be able to use an exported TF-Hub module for fine-tuning, we
provide this wrapper that allows selecting between the native TensorFlow
swish operation and a customized operation that uses the default TensorFlow
gradient computation.
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
features = tf.convert_to_tensor(features)
return features * tf.nn.sigmoid(features)
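# A minimal sanity-check sketch (illustrative helper, not part of the exported
# API): simple_swish should agree numerically with the native tf.nn.swish; the
# two differ only in how their gradients are registered.
def _simple_swish_example():
    x = tf.constant([-2.0, 0.0, 2.0])
    tf.debugging.assert_near(simple_swish(x), tf.nn.swish(x))
    return simple_swish(x)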
@tf.keras.utils.register_keras_serializable(package='Text')
def hard_swish(features):
"""Computes a hard version of the swish function.
This operation can be used to reduce computational cost and improve
quantization for edge devices.
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
features = tf.convert_to_tensor(features)
return features * tf.nn.relu6(features + tf.constant(3.)) * (1. / 6.)
@tf.keras.utils.register_keras_serializable(package='Text')
def identity(features):
"""Computes the identity function.
Useful for helping in quantization.
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
features = tf.convert_to_tensor(features)
return tf.identity(features)
@tf.keras.utils.register_keras_serializable(package='Text')
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
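# Hedged example (illustrative helper only): this is the tanh-based approximate
# GELU, so on recent TF versions it should closely match
# tf.nn.gelu(x, approximate=True).
def _gelu_example():
    x = tf.constant([-1.0, 0.0, 1.0])
    return gelu(x)  # approximately [-0.1588, 0.0, 0.8412]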
# TODO(hongkuny): consider moving custom string-map lookup to keras api.
def get_activation(identifier):
"""Maps a identifier to a Python function, e.g., "relu" => `tf.nn.relu`.
It checks string first and if it is one of customized activation not in TF,
the corresponding activation will be returned. For non-customized activation
names and callable identifiers, always fallback to tf.keras.activations.get.
Args:
identifier: String name of the activation function or callable.
Returns:
A Python function corresponding to the activation function.
"""
if isinstance(identifier, six.string_types):
name_to_fn = {
"gelu": gelu,
"simple_swish": simple_swish,
"hard_swish": hard_swish,
"identity": identity,
}
identifier = str(identifier).lower()
if identifier in name_to_fn:
return tf.keras.activations.get(name_to_fn[identifier])
return tf.keras.activations.get(identifier)
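# Usage sketch (assumes nothing beyond this module): names of the customized
# activations above resolve to those functions, while other names fall back to
# the Keras built-ins.
def _get_activation_example():
    swish_fn = get_activation('simple_swish')  # customized function above
    relu_fn = get_activation('relu')           # Keras built-in fallback
    return swish_fn, relu_fn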
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/model/layers/activations.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model.layers.activations import simple_swish, hard_swish, identity, gelu, get_activation
from model.layers.normalization import get_batch_norm
__all__ = ['simple_swish', 'hard_swish', 'identity', 'gelu', 'get_activation', 'get_batch_norm'] | DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/model/layers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common modeling utilities."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf1
from typing import Text, Optional, Type
from tensorflow.python.tpu import tpu_function
__all__ = ['get_batch_norm']
@tf.keras.utils.register_keras_serializable(package='Vision')
class TpuBatchNormalization(tf.keras.layers.BatchNormalization):
"""Cross replica batch normalization."""
def __init__(self, fused: Optional[bool] = False, **kwargs):
if fused in (True, None):
raise ValueError('TpuBatchNormalization does not support fused=True.')
super(TpuBatchNormalization, self).__init__(fused=fused, **kwargs)
def _cross_replica_average(self, t: tf.Tensor, num_shards_per_group: int):
"""Calculates the average value of input tensor across TPU replicas."""
num_shards = tpu_function.get_tpu_context().number_of_shards
group_assignment = None
if num_shards_per_group > 1:
if num_shards % num_shards_per_group != 0:
raise ValueError(
'num_shards: %d mod shards_per_group: %d, should be 0' %
(num_shards, num_shards_per_group))
num_groups = num_shards // num_shards_per_group
group_assignment = [[
x for x in range(num_shards) if x // num_shards_per_group == y
] for y in range(num_groups)]
return tf1.tpu.cross_replica_sum(t, group_assignment) / tf.cast(
num_shards_per_group, t.dtype)
def _moments(self, inputs: tf.Tensor, reduction_axes, keep_dims: bool):
"""Compute the mean and variance: it overrides the original _moments."""
shard_mean, shard_variance = super(TpuBatchNormalization, self)._moments(
inputs, reduction_axes, keep_dims=keep_dims)
num_shards = tpu_function.get_tpu_context().number_of_shards or 1
if num_shards <= 8: # Skip cross_replica for 2x2 or smaller slices.
num_shards_per_group = 1
else:
num_shards_per_group = max(8, num_shards // 8)
if num_shards_per_group > 1:
# Compute variance using: Var[X]= E[X^2] - E[X]^2.
shard_square_of_mean = tf.math.square(shard_mean)
shard_mean_of_square = shard_variance + shard_square_of_mean
group_mean = self._cross_replica_average(shard_mean, num_shards_per_group)
group_mean_of_square = self._cross_replica_average(
shard_mean_of_square, num_shards_per_group)
group_variance = group_mean_of_square - tf.math.square(group_mean)
return (group_mean, group_variance)
else:
return (shard_mean, shard_variance)
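# A hedged numerical sketch of the identity used by _moments above: for equally
# sized shards, the group variance can be recovered from per-shard statistics
# via Var[X] = E[X^2] - E[X]^2.
def _group_variance_example():
    data = np.random.randn(4, 256).astype(np.float32)  # 4 equally sized shards
    shard_mean = data.mean(axis=1)
    shard_var = data.var(axis=1)
    group_mean = shard_mean.mean()
    group_mean_of_square = (shard_var + shard_mean ** 2).mean()
    group_variance = group_mean_of_square - group_mean ** 2
    np.testing.assert_allclose(group_variance, data.var(), rtol=1e-4)
    return group_variance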
@tf.keras.utils.register_keras_serializable(package='Vision')
class SyncBatchNormalization(tf.keras.layers.BatchNormalization):
"""Cross replica batch normalization."""
def __init__(self, **kwargs):
if not kwargs.get('name', None):
kwargs['name'] = 'tpu_batch_normalization'
super(SyncBatchNormalization, self).__init__(**kwargs)
def _moments(self, inputs, reduction_axes, keep_dims):
"""Compute the mean and variance: it overrides the original _moments."""
import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top
shard_mean, shard_variance = super(SyncBatchNormalization, self)._moments(
inputs, reduction_axes, keep_dims=keep_dims)
num_shards = hvd.size()
if num_shards > 1:
# Compute variance using: Var[X]= E[X^2] - E[X]^2.
shard_square_of_mean = tf.math.square(shard_mean)
shard_mean_of_square = shard_variance + shard_square_of_mean
shard_stack = tf.stack([shard_mean, shard_mean_of_square])
group_mean, group_mean_of_square = tf.unstack(hvd.allreduce(shard_stack))
group_variance = group_mean_of_square - tf.math.square(group_mean)
return (group_mean, group_variance)
else:
return (shard_mean, shard_variance)
def call(self, *args, **kwargs):
outputs = super(SyncBatchNormalization, self).call(*args, **kwargs)
# A temporary hack for tf1 compatibility with keras batch norm.
# for u in self.updates:
# tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u)
return outputs
def get_batch_norm(batch_norm_type: Text) -> Type[tf.keras.layers.BatchNormalization]:
"""A helper to select a batch normalization class.
Args:
batch_norm_type: The type of batch normalization layer implementation. `tpu`
will use `TpuBatchNormalization`; `syncbn` will use `SyncBatchNormalization`.
Returns:
A `tf.keras.layers.BatchNormalization` subclass (a class, not an instance).
"""
if batch_norm_type == 'tpu':
return TpuBatchNormalization
if batch_norm_type == 'syncbn':
return SyncBatchNormalization
return tf.keras.layers.BatchNormalization | DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/model/layers/normalization.py |
DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/dataloader/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoAugment and RandAugment policies for enhanced image preprocessing.
AutoAugment Reference: https://arxiv.org/abs/1805.09501
RandAugment Reference: https://arxiv.org/abs/1909.13719
"""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import math
import tensorflow as tf
from typing import Any, Dict, List, Optional, Text, Tuple
try:
from keras.layers.preprocessing import image_preprocessing as image_ops
except (ImportError, ModuleNotFoundError):
import keras.src.layers.preprocessing.image_preprocessing as image_ops
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
def to_4d(image: tf.Tensor) -> tf.Tensor:
"""Converts an input Tensor to 4 dimensions.
4D image => [N, H, W, C] or [N, C, H, W]
3D image => [1, H, W, C] or [1, C, H, W]
2D image => [1, H, W, 1]
Args:
image: The 2/3/4D input tensor.
Returns:
A 4D image tensor.
Raises:
`TypeError` if `image` is not a 2/3/4D tensor.
"""
shape = tf.shape(image)
original_rank = tf.rank(image)
left_pad = tf.cast(tf.less_equal(original_rank, 3), dtype=tf.int32)
right_pad = tf.cast(tf.equal(original_rank, 2), dtype=tf.int32)
new_shape = tf.concat(
[
tf.ones(shape=left_pad, dtype=tf.int32),
shape,
tf.ones(shape=right_pad, dtype=tf.int32),
],
axis=0,
)
return tf.reshape(image, new_shape)
def from_4d(image: tf.Tensor, ndims: tf.Tensor) -> tf.Tensor:
"""Converts a 4D image back to `ndims` rank."""
shape = tf.shape(image)
begin = tf.cast(tf.less_equal(ndims, 3), dtype=tf.int32)
end = 4 - tf.cast(tf.equal(ndims, 2), dtype=tf.int32)
new_shape = shape[begin:end]
return tf.reshape(image, new_shape)
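# Hedged round-trip sketch (illustrative helper only): to_4d/from_4d should
# restore the original rank for 2D, 3D and 4D inputs.
def _to_4d_round_trip_example():
    image = tf.zeros([8, 8, 3], dtype=tf.uint8)  # a 3D image
    four_d = to_4d(image)                        # shape [1, 8, 8, 3]
    return from_4d(four_d, tf.rank(image))       # shape [8, 8, 3] again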
def _convert_translation_to_transform(translations: tf.Tensor) -> tf.Tensor:
"""Converts translations to a projective transform.
The translation matrix looks like this:
[[1 0 -dx]
[0 1 -dy]
[0 0 1]]
Args:
translations: The 2-element list representing [dx, dy], or a matrix of
2-element lists representing [dx dy] to translate for each image. The
shape must be static.
Returns:
The transformation matrix of shape (num_images, 8).
Raises:
`TypeError` if
- the shape of `translations` is not known or
- the shape of `translations` is not rank 1 or 2.
"""
translations = tf.convert_to_tensor(translations, dtype=tf.float32)
if translations.get_shape().ndims is None:
raise TypeError('translations rank must be statically known')
elif len(translations.get_shape()) == 1:
translations = translations[None]
elif len(translations.get_shape()) != 2:
raise TypeError('translations should have rank 1 or 2.')
num_translations = tf.shape(translations)[0]
return tf.concat(
values=[
tf.ones((num_translations, 1), tf.dtypes.float32),
tf.zeros((num_translations, 1), tf.dtypes.float32),
-translations[:, 0, None],
tf.zeros((num_translations, 1), tf.dtypes.float32),
tf.ones((num_translations, 1), tf.dtypes.float32),
-translations[:, 1, None],
tf.zeros((num_translations, 2), tf.dtypes.float32),
],
axis=1,
)
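# Hedged example of the flattened 8-element projective transform built above:
# a translation [dx, dy] becomes [1, 0, -dx, 0, 1, -dy, 0, 0].
def _translation_transform_example():
    transforms = _convert_translation_to_transform([2.0, 3.0])
    # transforms == [[1., 0., -2., 0., 1., -3., 0., 0.]]
    return transforms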
def _convert_angles_to_transform(
angles: tf.Tensor,
image_width: tf.Tensor,
image_height: tf.Tensor) -> tf.Tensor:
"""Converts an angle or angles to a projective transform.
Args:
angles: A scalar angle to rotate all images, or a vector of angles to
rotate a batch of images.
image_width: The width of the image(s) to be transformed.
image_height: The height of the image(s) to be transformed.
Returns:
A tensor of shape (num_images, 8).
Raises:
`TypeError` if `angles` is not rank 0 or 1.
"""
angles = tf.convert_to_tensor(angles, dtype=tf.float32)
if len(angles.get_shape()) == 0: # pylint:disable=g-explicit-length-test
angles = angles[None]
elif len(angles.get_shape()) != 1:
raise TypeError('Angles should have a rank 0 or 1.')
x_offset = ((image_width - 1) -
(tf.math.cos(angles) * (image_width - 1) - tf.math.sin(angles) *
(image_height - 1))) / 2.0
y_offset = ((image_height - 1) -
(tf.math.sin(angles) * (image_width - 1) + tf.math.cos(angles) *
(image_height - 1))) / 2.0
num_angles = tf.shape(angles)[0]
return tf.concat(
values=[
tf.math.cos(angles)[:, None],
-tf.math.sin(angles)[:, None],
x_offset[:, None],
tf.math.sin(angles)[:, None],
tf.math.cos(angles)[:, None],
y_offset[:, None],
tf.zeros((num_angles, 2), tf.dtypes.float32),
],
axis=1,
)
def transform(image: tf.Tensor, transforms) -> tf.Tensor:
"""Prepares input data for `image_ops.transform`."""
original_ndims = tf.rank(image)
transforms = tf.convert_to_tensor(transforms, dtype=tf.float32)
if transforms.shape.rank == 1:
transforms = transforms[None]
image = to_4d(image)
image = image_ops.transform(
images=image,
transforms=transforms,
interpolation='nearest')
return from_4d(image, original_ndims)
def translate(image: tf.Tensor, translations) -> tf.Tensor:
"""Translates image(s) by provided vectors.
Args:
image: An image Tensor of type uint8.
translations: A vector or matrix representing [dx dy].
Returns:
The translated version of the image.
"""
transforms = _convert_translation_to_transform(translations)
return transform(image, transforms=transforms)
def rotate(image: tf.Tensor, degrees: float) -> tf.Tensor:
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = tf.cast(degrees * degrees_to_radians, tf.float32)
original_ndims = tf.rank(image)
image = to_4d(image)
image_height = tf.cast(tf.shape(image)[1], tf.float32)
image_width = tf.cast(tf.shape(image)[2], tf.float32)
transforms = _convert_angles_to_transform(angles=radians,
image_width=image_width,
image_height=image_height)
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = transform(image, transforms=transforms)
return from_4d(image, original_ndims)
def blend(image1: tf.Tensor, image2: tf.Tensor, factor: float) -> tf.Tensor:
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.cast(image1, tf.float32)
image2 = tf.cast(image2, tf.float32)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.cast(image1, tf.float32) + scaled
# Interpolate
if factor > 0.0 and factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
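# Hedged example (illustrative helper only): factor 0.5 gives the pixelwise
# average; factors outside [0, 1] extrapolate and are clipped to [0, 255].
def _blend_example():
    image1 = tf.fill([2, 2, 3], tf.constant(0, tf.uint8))
    image2 = tf.fill([2, 2, 3], tf.constant(200, tf.uint8))
    return blend(image1, image2, 0.5)  # every pixel equals 100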
def cutout(image: tf.Tensor, pad_size: int, replace: int = 0) -> tf.Tensor:
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
a random location within `image`. The pixel values filled in will be of
the value `replace`. The location where the mask is applied is chosen
uniformly at random over the whole image.
Args:
image: An image Tensor of type uint8.
pad_size: Specifies the size of the zero mask applied to the image; the
mask will be of size (2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random.uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random.uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace,
image)
return image
def solarize(image: tf.Tensor, threshold: int = 128) -> tf.Tensor:
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image: tf.Tensor,
addition: int = 0,
threshold: int = 128) -> tf.Tensor:
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
def color(image: tf.Tensor, factor: float) -> tf.Tensor:
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image: tf.Tensor, factor: float) -> tf.Tensor:
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image: tf.Tensor, factor: float) -> tf.Tensor:
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image: tf.Tensor, bits: int) -> tf.Tensor:
"""Equivalent of PIL Posterize."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
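# Hedged example: posterize keeps only the top `bits` bits of each channel, so
# bits=2 maps the 0..255 range onto the four values {0, 64, 128, 192}.
def _posterize_example():
    image = tf.constant([[0, 63, 64, 255]], dtype=tf.uint8)
    return posterize(image, bits=2)  # -> [[0, 0, 64, 192]]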
def wrapped_rotate(image: tf.Tensor, degrees: float, replace: int) -> tf.Tensor:
"""Applies rotation with wrap/unwrap."""
image = rotate(wrap(image), degrees=degrees)
return unwrap(image, replace)
def translate_x(image: tf.Tensor, pixels: int, replace: int) -> tf.Tensor:
"""Equivalent of PIL Translate in X dimension."""
image = translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_y(image: tf.Tensor, pixels: int, replace: int) -> tf.Tensor:
"""Equivalent of PIL Translate in Y dimension."""
image = translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def shear_x(image: tf.Tensor, level: float, replace: int) -> tf.Tensor:
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = transform(image=wrap(image),
transforms=[1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
def shear_y(image: tf.Tensor, level: float, replace: int) -> tf.Tensor:
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = transform(image=wrap(image),
transforms=[1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
def autocontrast(image: tf.Tensor) -> tf.Tensor:
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image: tf.Tensor) -> tf.Tensor:
"""Scale the 2D image using the autocontrast rule."""
# A possibly cheaper version can be done using cumsum/unique_with_counts
# over the histogram values, rather than iterating over the entire image
# to compute mins and maxes.
lo = tf.cast(tf.reduce_min(image), tf.float32)
hi = tf.cast(tf.reduce_max(image), tf.float32)
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.cast(im, tf.float32) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
def sharpness(image: tf.Tensor, factor: float) -> tf.Tensor:
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', dilations=[1, 1])
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image: tf.Tensor) -> tf.Tensor:
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def invert(image: tf.Tensor) -> tf.Tensor:
"""Inverts the image pixels."""
image = tf.convert_to_tensor(image)
return 255 - image
def wrap(image: tf.Tensor) -> tf.Tensor:
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], axis=2)
return extended
def unwrap(image: tf.Tensor, replace: int) -> tf.Tensor:
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel at a spatial position, the other
three channels at that position are filled with the `replace` value
(typically gray, 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = tf.expand_dims(flattened_image[:, 3], axis=-1)
replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random.uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
return final_tensor
def _rotate_level_to_arg(level: float):
level = (level/_MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _shrink_level_to_arg(level: float):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level: float):
return ((level/_MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level: float):
level = (level/_MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level: float, translate_const: float):
level = (level/_MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _mult_to_arg(level: float, multiplier: float = 1.):
return (int((level / _MAX_LEVEL) * multiplier),)
def _apply_func_with_prob(func: Any,
image: tf.Tensor,
args: Any,
prob: float):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random.uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image = tf.cond(
should_apply_op,
lambda: func(image, *args),
lambda: image)
return augmented_image
def select_and_apply_random_policy(policies: Any, image: tf.Tensor):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random.uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image),
lambda: image)
return image
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': wrapped_rotate,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Cutout': cutout,
}
# Functions that have a 'replace' parameter
REPLACE_FUNCS = frozenset({
'Rotate',
'TranslateX',
'ShearX',
'ShearY',
'TranslateY',
'Cutout',
})
def level_to_arg(cutout_const: float, translate_const: float):
"""Creates a dict mapping image operation names to their arguments."""
no_arg = lambda level: ()
posterize_arg = lambda level: _mult_to_arg(level, 4)
solarize_arg = lambda level: _mult_to_arg(level, 256)
solarize_add_arg = lambda level: _mult_to_arg(level, 110)
cutout_arg = lambda level: _mult_to_arg(level, cutout_const)
translate_arg = lambda level: _translate_level_to_arg(level, translate_const)
args = {
'AutoContrast': no_arg,
'Equalize': no_arg,
'Invert': no_arg,
'Rotate': _rotate_level_to_arg,
'Posterize': posterize_arg,
'Solarize': solarize_arg,
'SolarizeAdd': solarize_add_arg,
'Color': _enhance_level_to_arg,
'Contrast': _enhance_level_to_arg,
'Brightness': _enhance_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'ShearX': _shear_level_to_arg,
'ShearY': _shear_level_to_arg,
'Cutout': cutout_arg,
'TranslateX': translate_arg,
'TranslateY': translate_arg,
}
return args
def _parse_policy_info(name: Text,
prob: float,
level: float,
replace_value: List[int],
cutout_const: float,
translate_const: float) -> Tuple[Any, float, Any]:
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(cutout_const, translate_const)[name](level)
if name in REPLACE_FUNCS:
# Add in replace arg if it is required for the function that is called.
args = tuple(list(args) + [replace_value])
return func, prob, args
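# Hedged example of resolving a policy entry into (callable, probability, args).
# 'Rotate' is in REPLACE_FUNCS, so the gray replace value is appended to args.
def _parse_policy_info_example():
    func, prob, args = _parse_policy_info(
        name='Rotate', prob=0.8, level=5.,
        replace_value=[128] * 3, cutout_const=100., translate_const=250.)
    # func is wrapped_rotate; args is (degrees, [128, 128, 128]) where
    # degrees = (5 / 10) * 30 = 15, randomly negated.
    return func, prob, args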
class ImageAugment(object):
"""Image augmentation class for applying image distortions."""
def distort(self, image: tf.Tensor) -> tf.Tensor:
"""Given an image tensor, returns a distorted image with the same shape.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
Returns:
The augmented version of `image`.
"""
raise NotImplementedError()
class AutoAugment(ImageAugment):
"""Applies the AutoAugment policy to images.
AutoAugment is from the paper: https://arxiv.org/abs/1805.09501.
"""
def __init__(self,
augmentation_name: Text = 'v0',
policies: Optional[Dict[Text, Any]] = None,
cutout_const: float = 100,
translate_const: float = 250):
"""Applies the AutoAugment policy to images.
Args:
augmentation_name: The name of the AutoAugment policy to use. The
available options are `v0`, `simple` and `test`. `v0` is the policy used
for all of the results in the paper; `simple` is the same policy with the
operations that rely on custom ops removed, and `test` is a single
sub-policy intended for debugging.
policies: optional mapping from policy names to policies. Each policy is
a list of sub-policies, and each sub-policy is a list of tuples in the
form `(func, prob, level)`: `func` is the string name of the augmentation
function, `prob` is the probability of applying it, and `level` is its
magnitude argument.
cutout_const: multiplier for applying cutout.
translate_const: multiplier for applying translation.
"""
super(AutoAugment, self).__init__()
if policies is None:
  policies = {
      'v0': self.policy_v0(),
      'test': self.policy_test(),
      'simple': self.policy_simple(),
  }
self.available_policies = policies
if augmentation_name not in self.available_policies:
raise ValueError(
'Invalid augmentation_name: {}'.format(augmentation_name))
self.augmentation_name = augmentation_name
self.policies = self.available_policies[augmentation_name]
self.cutout_const = float(cutout_const)
self.translate_const = float(translate_const)
def distort(self, image: tf.Tensor) -> tf.Tensor:
"""Applies the AutoAugment policy to `image`.
AutoAugment is from the paper: https://arxiv.org/abs/1805.09501.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
Returns:
A version of image that now has data augmentation applied to it based on
the `policies` passed into the function.
"""
input_image_type = image.dtype
if input_image_type != tf.uint8:
image = tf.clip_by_value(image, 0.0, 255.0)
image = tf.cast(image, dtype=tf.uint8)
replace_value = [128] * 3
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter
# associated with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in self.policies:
tf_policy = []
# Link string name to the correct python function and make sure the
# correct argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [
replace_value, self.cutout_const, self.translate_const
]
tf_policy.append(_parse_policy_info(*policy_info))
# Now build the tf policy that will apply the augmentation procedure
# on image.
def make_final_policy(tf_policy_):
def final_policy(image_):
for func, prob, args in tf_policy_:
image_ = _apply_func_with_prob(func, image_, args, prob)
return image_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
image = select_and_apply_random_policy(tf_policies, image)
image = tf.cast(image, dtype=input_image_type)
return image
@staticmethod
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Paper.
Each tuple is an augmentation operation of the form
(operation, probability, magnitude). Each element in the policy is a
sub-policy; one sub-policy is picked at random and its operations are
applied to the image in sequence.
Returns:
the policy.
"""
# TODO(dankondratyuk): tensorflow_addons defines custom ops, which
# for some reason are not included when building/linking
# This results in the error, "Op type not registered
# 'Addons>ImageProjectiveTransformV2' in binary" when running on borg TPUs
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
return policy
@staticmethod
def policy_simple():
"""Same as `policy_v0`, except with custom ops removed."""
policy = [
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
]
return policy
@staticmethod
def policy_test():
"""Autoaugment test policy for debugging."""
policy = [
[('TranslateX', 1.0, 4), ('Equalize', 1.0, 10)],
]
return policy
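# Hedged end-to-end sketch (illustrative helper only): applying the paper's
# `v0` policy to a uint8 image; it should run eagerly and preserve shape/dtype.
def _auto_augment_example():
    augmenter = AutoAugment(augmentation_name='v0')
    image = tf.zeros([224, 224, 3], dtype=tf.uint8)
    return augmenter.distort(image)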
class RandAugment(ImageAugment):
"""Applies the RandAugment policy to images.
RandAugment is from the paper https://arxiv.org/abs/1909.13719.
"""
def __init__(self,
num_layers: int = 2,
magnitude: float = 10.,
cutout_const: float = 40.,
translate_const: float = 100.):
"""Applies the RandAugment policy to images.
Args:
num_layers: Integer, the number of augmentation transformations to apply
sequentially to an image. Represented as (N) in the paper. Usually best
values will be in the range [1, 3].
magnitude: Float, shared magnitude across all augmentation operations.
Represented as (M) in the paper. Usually best values are in the range
[5, 10].
cutout_const: multiplier for applying cutout.
translate_const: multiplier for applying translation.
"""
super(RandAugment, self).__init__()
self.num_layers = num_layers
self.magnitude = float(magnitude)
self.cutout_const = float(cutout_const)
self.translate_const = float(translate_const)
self.available_ops = [
'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize', 'Solarize',
'Color', 'Contrast', 'Brightness', 'Sharpness', 'ShearX', 'ShearY',
'TranslateX', 'TranslateY', 'Cutout', 'SolarizeAdd'
]
def distort(self, image: tf.Tensor) -> tf.Tensor:
"""Applies the RandAugment policy to `image`.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
Returns:
The augmented version of `image`.
"""
input_image_type = image.dtype
if input_image_type != tf.uint8:
image = tf.clip_by_value(image, 0.0, 255.0)
image = tf.cast(image, dtype=tf.uint8)
replace_value = [128] * 3
min_prob, max_prob = 0.2, 0.8
for _ in range(self.num_layers):
op_to_select = tf.random.uniform(
[], maxval=len(self.available_ops) + 1, dtype=tf.int32)
branch_fns = []
for (i, op_name) in enumerate(self.available_ops):
prob = tf.random.uniform([],
minval=min_prob,
maxval=max_prob,
dtype=tf.float32)
func, _, args = _parse_policy_info(op_name,
prob,
self.magnitude,
replace_value,
self.cutout_const,
self.translate_const)
branch_fns.append((
i,
# pylint:disable=g-long-lambda
lambda selected_func=func, selected_args=args: selected_func(
image, *selected_args)))
# pylint:enable=g-long-lambda
image = tf.switch_case(branch_index=op_to_select,
branch_fns=branch_fns,
default=lambda: tf.identity(image))
image = tf.cast(image, dtype=input_image_type)
return image
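# Hedged end-to-end sketch (illustrative helper only): distorting a random
# uint8 image with RandAugment; shape and dtype are preserved by design.
def _rand_augment_example():
    augmenter = RandAugment(num_layers=2, magnitude=10.)
    image = tf.random.uniform([224, 224, 3], maxval=256, dtype=tf.int32)
    image = tf.cast(image, tf.uint8)
    return augmenter.distort(image)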
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/dataloader/augment.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tensorflow as tf
import horovod.tensorflow.keras as hvd
from nvidia import dali
import nvidia.dali.plugin.tf as dali_tf
import numpy as np
class DaliPipeline(dali.pipeline.Pipeline):
def __init__(
self,
tfrec_filenames,
tfrec_idx_filenames,
height,
width,
batch_size,
num_threads,
device_id,
shard_id,
num_gpus,
num_classes,
deterministic=False,
dali_cpu=True,
training=True
):
kwargs = dict()
if deterministic:
kwargs['seed'] = 7 * (1 + hvd.rank())
super(DaliPipeline, self).__init__(batch_size, num_threads, device_id, **kwargs)
self.training = training
self.input = dali.ops.TFRecordReader(
path=tfrec_filenames,
index_path=tfrec_idx_filenames,
random_shuffle=True,
shard_id=shard_id,
num_shards=num_gpus,
initial_fill=10000,
features={
'image/encoded': dali.tfrecord.FixedLenFeature((), dali.tfrecord.string, ""),
'image/class/label': dali.tfrecord.FixedLenFeature([1], dali.tfrecord.int64, -1),
'image/class/text': dali.tfrecord.FixedLenFeature([], dali.tfrecord.string, ''),
'image/object/bbox/xmin': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/ymin': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/xmax': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/ymax': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0)
}
)
if self.training:
self.decode = dali.ops.ImageDecoderRandomCrop(
device="cpu" if dali_cpu else "mixed",
output_type=dali.types.RGB,
random_aspect_ratio=[0.75, 1.33],
random_area=[0.05, 1.0],
num_attempts=100
)
self.resize = dali.ops.Resize(device="cpu" if dali_cpu else "gpu", resize_x=width, resize_y=height)
else:
self.decode = dali.ops.ImageDecoder(
device="cpu",
output_type=dali.types.RGB
)
# Make sure that every image > 224 for CropMirrorNormalize
self.resize = dali.ops.Resize(device="cpu" if dali_cpu else "gpu", resize_x=width, resize_y=height)
self.normalize = dali.ops.CropMirrorNormalize(
device="gpu",
output_dtype=dali.types.FLOAT,
image_type=dali.types.RGB,
output_layout=dali.types.NHWC,
mirror=1 if self.training else 0
)
self.one_hot = dali.ops.OneHot(num_classes=num_classes)
self.shapes = dali.ops.Shapes(type=dali.types.INT32)
self.crop = dali.ops.Crop(device="gpu")
self.cast_float = dali.ops.Cast(dtype=dali.types.FLOAT)
self.extract_h = dali.ops.Slice(normalized_anchor=False, normalized_shape=False, axes=[0])
self.extract_w = dali.ops.Slice(normalized_anchor=False, normalized_shape=False, axes=[0])
def define_graph(self):
# Read images and labels
inputs = self.input(name="Reader")
images = inputs["image/encoded"]
labels = inputs["image/class/label"]
labels -= 1
labels = self.one_hot(labels).gpu()
# Decode and augmentation
images = self.decode(images)
if not self.training:
shapes = self.shapes(images)
h = self.extract_h(shapes, dali.types.Constant(np.array([0], dtype=np.float32)), dali.types.Constant(np.array([1], dtype=np.float32)))
w = self.extract_w(shapes, dali.types.Constant(np.array([1], dtype=np.float32)), dali.types.Constant(np.array([1], dtype=np.float32)))
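# Padded center crop for eval: keep a fraction size / (size + CROP_PADDING) of
# each spatial dimension before resizing, similar in spirit to the EfficientNet
# eval preprocessing.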
CROP_PADDING = 32
CROP_H = h * h / (h + CROP_PADDING)
CROP_W = w * w / (w + CROP_PADDING)
CROP_H = self.cast_float(CROP_H)
CROP_W = self.cast_float(CROP_W)
images = images.gpu()
images = self.crop(images, crop_h = CROP_H, crop_w = CROP_W)
images = self.resize(images)
images = self.normalize(images)
return (images, labels)
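# Hedged construction sketch (paths are placeholders, not real files): the
# pipeline reads sharded TFRecords with DALI index files and emits NHWC float
# images together with one-hot labels.
def _dali_pipeline_example():
    return DaliPipeline(
        tfrec_filenames=['train-00000-of-01024'],           # placeholder
        tfrec_idx_filenames=['train-00000-of-01024.idx'],   # placeholder
        height=224, width=224, batch_size=32, num_threads=4,
        device_id=0, shard_id=0, num_gpus=1, num_classes=1000,
        training=True)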
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/dataloader/Dali.py |
# Lint as: python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dataset utilities for vision tasks using TFDS and tf.data.Dataset."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import os
from typing import Any, List, Optional, Tuple, Mapping, Union
import functools
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras
from dataloader import augment
from dataloader import preprocessing
from dataloader import Dali
import horovod.tensorflow.keras as hvd
import nvidia.dali.plugin.tf as dali_tf
AUGMENTERS = {
'autoaugment': augment.AutoAugment,
'randaugment': augment.RandAugment,
}
def cutmix_mask(alpha, h, w):
"""[summary]
Returns image mask of size wxh for CutMix where the masked region is one
and bakground is zero. To create the mask, we first sample the top-left
corner of the masked region and then determine its width and height by
sampling a scale ratio from the beta distribution parameterized by alpha.
The masked region determined above is painted white and then zero-padded
to have width w and height h.
Args:
alpha ([float]): used to sample a scale ratio
h ([integer]): width of the mask image
w ([integer]): height of the mask image
Returns:
[type]: [description]
"""
if alpha == 0:
return tf.zeros((h,w,1))
r_x = tf.random.uniform([], 0, w, tf.int32)
r_y = tf.random.uniform([], 0, h, tf.int32)
area = tf.compat.v1.distributions.Beta(alpha, alpha).sample()
patch_ratio = tf.cast(tf.math.sqrt(1 - area), tf.float32)
r_w = tf.cast(patch_ratio * tf.cast(w, tf.float32), tf.int32)
r_h = tf.cast(patch_ratio * tf.cast(h, tf.float32), tf.int32)
bbx1 = tf.clip_by_value(tf.cast(r_x - r_w // 2, tf.int32), 0, w)
bby1 = tf.clip_by_value(tf.cast(r_y - r_h // 2, tf.int32), 0, h)
bbx2 = tf.clip_by_value(tf.cast(r_x + r_w // 2, tf.int32), 0, w)
bby2 = tf.clip_by_value(tf.cast(r_y + r_h // 2, tf.int32), 0, h)
# Create the binary mask.
pad_left = bbx1
pad_top = bby1
pad_right = tf.maximum(w - bbx2, 0)
pad_bottom = tf.maximum(h - bby2, 0)
r_h = bby2 - bby1
r_w = bbx2 - bbx1
mask = tf.pad(
tf.ones((r_h, r_w)),
paddings=[[pad_top, pad_bottom], [pad_left, pad_right]],
mode='CONSTANT',
constant_values=0)
mask.set_shape((h, w))
return mask[..., None] # Add channel dim.
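# Hedged sketch (illustrative helper only): the mask has shape (h, w, 1), zero
# outside the sampled patch and one inside it, so reduce_sum gives the patch
# area in pixels.
def _cutmix_mask_example():
    mask = cutmix_mask(alpha=1.0, h=224, w=224)
    patch_area = tf.reduce_sum(mask)
    return mask, patch_area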
def mixup(batch_size, alpha, images, labels, defer_img_mixing):
"""Applies Mixup regularization to a batch of images and labels.
[1] Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz
Mixup: Beyond Empirical Risk Minimization.
ICLR'18, https://arxiv.org/abs/1710.09412
Arguments:
batch_size: The input batch size for images and labels.
alpha: Float that controls the strength of Mixup regularization.
images: A batch of images of shape [batch_size, ...]
labels: A batch of labels of shape [batch_size, num_classes]
defer_img_mixing: If true, labels are mixed in this function but image
mixing is postponed until the data arrives on the compute device. This
can accelerate the data pipeline. Note that it is the user's responsibility
to implement image mixing in the module that defines the forward pass of the
network. To ensure that the subsequent on-device image mixing is consistent
with label mixing performed here, this function returns the mixing weights
as well.
Returns:
A tuple of ((images, mix_weights), labels) with the same dimensions as the input with
Mixup regularization applied.
"""
if alpha == 0.0:
# returning 1s as mixing weights means no mixup
return (images, tf.ones((batch_size,1,1,1))), labels
mix_weight = tf.compat.v1.distributions.Beta(alpha, alpha).sample([batch_size, 1])
mix_weight = tf.maximum(mix_weight, 1. - mix_weight)
img_weight = tf.cast(tf.reshape(mix_weight, [batch_size, 1, 1, 1]), images.dtype)
labels_weight = tf.cast(mix_weight, labels.dtype)
# Mixup: taking a weighted sum with the same batch in reverse.
labels_mix = labels * labels_weight + labels[::-1] * (1. - labels_weight)
if not defer_img_mixing:
images_mix = images * img_weight + images[::-1] * (1. - img_weight)
else:
# postpone image mixing
images_mix = images
return (images_mix, img_weight), labels_mix
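# Hedged numerical sketch of the mixing above: with weight w (folded to >= 0.5),
# mixed labels are w * labels + (1 - w) * labels[::-1], and with
# defer_img_mixing=False the images are mixed with the same weights.
def _mixup_example():
    labels = tf.constant([[1., 0.], [0., 1.]])
    images = tf.zeros([2, 8, 8, 3])
    (mixed_images, weights), mixed_labels = mixup(
        batch_size=2, alpha=0.2, images=images, labels=labels,
        defer_img_mixing=False)
    return mixed_images, weights, mixed_labels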
def cutmix(images, labels, masks, defer_img_mixing):
"""[summary]
Applies CutMix regularization to a batch of images and labels.
Reference: https://arxiv.org/pdf/1905.04899.pdf
Args:
images: a Tensor of batched images
labels: a Tensor of batched labels.
masks: a Tensor of batched masks.
defer_img_mixing: If true, labels are mixed in this function but image
mixing is postponed until the data arrives on the compute device. This
can accelerate the data pipeline. Note that it is the user's responsibility
to implement image mixing in the module that defines the forward pass of the
network. To ensure that the subsequent on-device image mixing is consistent
with label mixing performed here, this function returns the mixing masks
as well.
Returns:
A tuple of ((images, mix_masks), labels)
"""
mix_area = tf.reduce_sum(masks) / tf.cast(tf.size(masks), masks.dtype)
mix_area = tf.cast(mix_area, labels.dtype)
mixed_label = (1. - mix_area) * labels + mix_area * labels[::-1]
masks = tf.cast(masks, images.dtype)
if not defer_img_mixing:
mixed_images = (1. - masks) * images + masks * images[::-1]
else:
# postpone image mixing
mixed_images = images
return (mixed_images, masks), mixed_label
def mixing(batch_size, mixup_alpha, cutmix_alpha, defer_img_mixing, features, labels):
"""Applies mixing regularization to a batch of images and labels. If both
mixup and cutmix requested, the batch is halved followed by applying
mixup on one half and cutmix on the other half.
Arguments:
batch_size: The input batch size for images and labels.
mixup_alpha: Float that controls the strength of Mixup regularization.
cutmix_alpha: Float that controls the strength of CutMix regularization.
defer_img_mixing: If true, the image mixing ops will be postponed.
features: a dict of batched features, including the images and cutmix masks.
labels: a dict of batched labels.
Returns:
A new dict of features with updated images and labels with the same
dimensions as the input.
"""
image = features['image']
label = labels['label']
mix_masks = features['cutmix_mask']
if mixup_alpha and cutmix_alpha:
# split the batch half-half, and apply mixup and cutmix for each half.
bs = batch_size // 2
(img1, mix_weights), lab1 = mixup(bs, mixup_alpha, image[:bs], label[:bs], defer_img_mixing)
(img2, mix_masks), lab2 = cutmix(image[bs:], label[bs:], mix_masks[bs:], defer_img_mixing)
image = tf.concat([img1, img2], axis=0)
label = tf.concat([lab1, lab2], axis=0)
elif mixup_alpha:
# only mixup
(image, mix_weights), label = mixup(batch_size, mixup_alpha, image, label, defer_img_mixing)
# mix_masks = tf.zeros_like(mix_masks) -> mix_masks is already all 0s (see cutmix fn)
elif cutmix_alpha:
# only cutmix
(image, mix_masks), label = cutmix(image, label, mix_masks, defer_img_mixing)
mix_weights = tf.ones((batch_size,1,1,1)) # 1s mean no mixup
else:
# mix_masks = tf.zeros_like(mix_masks) -> mix_masks is already all 0s (see cutmix fn)
mix_weights = tf.ones((batch_size,1,1,1)) # 1s mean no mixup
features['image'] = image
features['mixup_weight'] = mix_weights
features['cutmix_mask'] = mix_masks
return features, label
def mixing_lite(images, mixup_weights, cutmix_masks, batch_size, do_mixup, do_cutmix):
"""[summary]
This function, which is a simplified version of the mixing function (see above),
will be called in the model module when the user wishes to perform image mixing
on-device (defer_image_mixing=True).
Note: the logic here must be identical to that of the mixing fn above.
Args:
images: a Tensor of batched images.
mixup_weights: a Tensor of batched mixup weights.
cutmix_masks: a Tensor of batched cutmix masks.
batch_size: static batch size.
do_mixup: boolean, to determine if mixup is needed
do_cutmix: boolean, to determine if cutmix is needed
Returns:
a Tensor of batched MIXED images
"""
if do_mixup and do_cutmix:
# split the batch half-half, and apply mixup and cutmix for each half.
bs = batch_size // 2
images_mixup = images[:bs] * mixup_weights + images[:bs][::-1] * (1. - mixup_weights)
        images_cutmix = images[bs:] * (1. - cutmix_masks) + images[bs:][::-1] * cutmix_masks
# concat order must be consistent with mixing fn
return tf.concat([images_mixup, images_cutmix], axis=0)
elif do_mixup:
return images * mixup_weights + images[::-1] * (1. - mixup_weights)
elif do_cutmix:
return images * (1. - cutmix_masks) + images[::-1] * cutmix_masks
else:
return images
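# --- Hedged usage sketch (not part of the original module). Shows how a
# forward pass might invoke `mixing_lite` when image mixing was deferred in
# the data pipeline; the feature keys match those populated by `mixing`
# above, and the tensors here are toy stand-ins.
def _deferred_mixing_example(batch_size=4):
    features = {
        'image': tf.random.uniform([batch_size, 8, 8, 3]),
        'mixup_weight': tf.ones([batch_size, 1, 1, 1]),  # 1s mean no mixup
        'cutmix_mask': tf.zeros([batch_size, 8, 8, 1]),  # 0s mean no cutmix
    }
    return mixing_lite(features['image'], features['mixup_weight'],
                       features['cutmix_mask'], batch_size,
                       do_mixup=True, do_cutmix=False)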
class Dataset:
"""An object for building datasets.
Allows building various pipelines fetching examples, preprocessing, etc.
Maintains additional state information calculated from the dataset, i.e.,
training set split, batch size, and number of steps (batches).
"""
def __init__(self,
data_dir,
index_file_dir,
split='train',
num_classes=None,
image_size=224,
num_channels=3,
batch_size=128,
dtype='float32',
one_hot=False,
use_dali=False,
augmenter=None,
shuffle_buffer_size=10000,
file_shuffle_buffer_size=1024,
cache=False,
mean_subtract=False,
standardize=False,
augmenter_params=None,
cutmix_alpha=0.0,
mixup_alpha=0.0,
defer_img_mixing=True,
hvd_size=None,
disable_map_parallelization=False
):
"""Initialize the builder from the config."""
if not os.path.exists(data_dir):
raise FileNotFoundError('Cannot find data dir: {}'.format(data_dir))
if one_hot and num_classes is None:
            raise ValueError('Number of classes is required for one_hot')
self._data_dir = data_dir
self._split = split
self._image_size = image_size
self._num_classes = num_classes
self._num_channels = num_channels
self._batch_size = batch_size
self._dtype = dtype
self._one_hot = one_hot
self._augmenter_name = augmenter
self._shuffle_buffer_size = shuffle_buffer_size
self._file_shuffle_buffer_size = file_shuffle_buffer_size
self._cache = cache
self._mean_subtract = mean_subtract
self._standardize = standardize
self._index_file = index_file_dir
self._use_dali = use_dali
self.mixup_alpha = mixup_alpha
self.cutmix_alpha = cutmix_alpha
self.defer_img_mixing = defer_img_mixing
self.disable_map_parallelization = disable_map_parallelization
self._num_gpus = hvd.size() if not hvd_size else hvd_size
if self._augmenter_name is not None:
augmenter = AUGMENTERS.get(self._augmenter_name, None)
params = augmenter_params or {}
self._augmenter = augmenter(**params) if augmenter is not None else None
else:
self._augmenter = None
@property
def is_training(self) -> bool:
"""Whether this is the training set."""
return self._split == 'train'
@property
def global_batch_size(self) -> int:
"""The batch size, multiplied by the number of replicas (if configured)."""
return self._batch_size * self._num_gpus
@property
def local_batch_size(self):
"""The base unscaled batch size."""
return self._batch_size
@property
def dtype(self) -> tf.dtypes.DType:
"""Converts the config's dtype string to a tf dtype.
Returns:
A mapping from string representation of a dtype to the `tf.dtypes.DType`.
Raises:
ValueError if the config's dtype is not supported.
"""
dtype_map = {
'float32': tf.float32,
'bfloat16': tf.bfloat16,
'float16': tf.float16,
'fp32': tf.float32,
'bf16': tf.bfloat16,
}
        try:
            return dtype_map[self._dtype]
        except KeyError:
            raise ValueError('Invalid dtype provided: {}. Supported types: {}'.format(
                self._dtype, list(dtype_map.keys())))
@property
def image_size(self) -> int:
"""The size of each image (can be inferred from the dataset)."""
return int(self._image_size)
@property
def num_channels(self) -> int:
"""The number of image channels (can be inferred from the dataset)."""
return int(self._num_channels)
@property
def num_classes(self) -> int:
"""The number of classes (can be inferred from the dataset)."""
return int(self._num_classes)
@property
def num_steps(self) -> int:
"""The number of classes (can be inferred from the dataset)."""
return int(self._num_steps)
def set_shapes(self, batch_size, features, labels):
"""Statically set the batch_size dimension."""
features['image'].set_shape(features['image'].get_shape().merge_with(
tf.TensorShape([batch_size, None, None, None])))
labels['label'].set_shape(labels['label'].get_shape().merge_with(
tf.TensorShape([batch_size, None])))
return features, labels
def build(self) -> tf.data.Dataset:
"""Construct a dataset end-to-end and return it.
Args:
input_context: An optional context provided by `tf.distribute` for
cross-replica training.
Returns:
A TensorFlow dataset outputting batched images and labels.
"""
if self._use_dali:
print("Using dali for {train} dataloading".format(train = "training" if self.is_training else "validation"))
tfrec_filenames = sorted(tf.io.gfile.glob(os.path.join(self._data_dir, '%s-*' % self._split)))
tfrec_idx_filenames = sorted(tf.io.gfile.glob(os.path.join(self._index_file, '%s-*' % self._split)))
            # Create DALI pipeline
dali_pipeline = Dali.DaliPipeline(tfrec_filenames=tfrec_filenames,
tfrec_idx_filenames=tfrec_idx_filenames,
height=self._image_size,
width=self._image_size,
batch_size=self.local_batch_size,
num_threads=1,
device_id=hvd.local_rank(),
shard_id=hvd.rank(),
num_gpus=hvd.size(),
num_classes=self.num_classes,
deterministic=False,
dali_cpu=False,
training=self.is_training)
# Define shapes and types of the outputs
shapes = (
(self.local_batch_size, self._image_size, self._image_size, 3),
(self.local_batch_size, self._num_classes))
dtypes = (
tf.float32,
tf.float32)
# Create dataset
dataset = dali_tf.DALIDataset(
pipeline=dali_pipeline,
batch_size=self.local_batch_size,
output_shapes=shapes,
output_dtypes=dtypes,
device_id=hvd.local_rank())
# if self.is_training and self._augmenter:
# print('Augmenting with {}'.format(self._augmenter))
# dataset.unbatch().map(self.augment_pipeline, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(self.local_batch_size)
return dataset
else:
print("Using tf native pipeline for {train} dataloading".format(train = "training" if self.is_training else "validation"))
dataset = self.load_records()
dataset = self.pipeline(dataset)
return dataset
# def augment_pipeline(self, image, label) -> Tuple[tf.Tensor, tf.Tensor]:
# image = self._augmenter.distort(image)
# return image, label
def load_records(self) -> tf.data.Dataset:
"""Return a dataset loading files with TFRecords."""
if self._data_dir is None:
raise ValueError('Dataset must specify a path for the data files.')
file_pattern = os.path.join(self._data_dir,
'{}*'.format(self._split))
dataset = tf.data.Dataset.list_files(file_pattern, shuffle=False)
return dataset
def pipeline(self, dataset: tf.data.Dataset) -> tf.data.Dataset:
"""Build a pipeline fetching, shuffling, and preprocessing the dataset.
Args:
dataset: A `tf.data.Dataset` that loads raw files.
Returns:
A TensorFlow dataset outputting batched images and labels.
"""
# This can help resolve OOM issues when using only 1 GPU for training
options = tf.data.Options()
options.experimental_optimization.map_parallelization = (not self.disable_map_parallelization)
dataset = dataset.with_options(options)
if self._num_gpus > 1:
            # For multi-host training, we want each host to always process the
            # same subset of files. Each host only sees a subset of the entire
            # dataset, allowing us to cache larger datasets in memory.
dataset = dataset.shard(self._num_gpus, hvd.rank())
if self.is_training:
# Shuffle the input files.
            dataset = dataset.shuffle(buffer_size=self._file_shuffle_buffer_size)
if self.is_training and not self._cache:
dataset = dataset.repeat()
# Read the data from disk in parallel
dataset = dataset.interleave(
tf.data.TFRecordDataset,
cycle_length=10,
block_length=1,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
if self._cache:
dataset = dataset.cache()
if self.is_training:
dataset = dataset.shuffle(self._shuffle_buffer_size)
dataset = dataset.repeat()
# Parse, pre-process, and batch the data in parallel
preprocess = self.parse_record
dataset = dataset.map(preprocess,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
if self._num_gpus > 1:
# The batch size of the dataset will be multiplied by the number of
# replicas automatically when strategy.distribute_datasets_from_function
# is called, so we use local batch size here.
dataset = dataset.batch(self.local_batch_size,
drop_remainder=self.is_training)
else:
dataset = dataset.batch(self.global_batch_size,
drop_remainder=self.is_training)
        # Apply Mixup/CutMix only during training. If image mixing is deferred,
        # the image blending itself happens later, on device, in the model module.
mixup_alpha = self.mixup_alpha if self.is_training else 0.0
cutmix_alpha = self.cutmix_alpha if self.is_training else 0.0
dataset = dataset.map(
functools.partial(mixing, self.local_batch_size, mixup_alpha, cutmix_alpha, self.defer_img_mixing),
num_parallel_calls=64)
# Assign static batch size dimension
# dataset = dataset.map(
# functools.partial(self.set_shapes, batch_size),
# num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Prefetch overlaps in-feed with training
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def parse_record(self, record: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Parse an ImageNet record from a serialized string Tensor."""
keys_to_features = {
'image/encoded':
tf.io.FixedLenFeature((), tf.string, ''),
'image/format':
tf.io.FixedLenFeature((), tf.string, 'jpeg'),
'image/class/label':
tf.io.FixedLenFeature([], tf.int64, -1),
'image/class/text':
tf.io.FixedLenFeature([], tf.string, ''),
'image/object/bbox/xmin':
tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin':
tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax':
tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax':
tf.io.VarLenFeature(dtype=tf.float32),
'image/object/class/label':
tf.io.VarLenFeature(dtype=tf.int64),
}
parsed = tf.io.parse_single_example(record, keys_to_features)
label = tf.reshape(parsed['image/class/label'], shape=[1])
label = tf.cast(label, dtype=tf.int32)
# Subtract one so that labels are in [0, 1000)
label -= 1
image_bytes = tf.reshape(parsed['image/encoded'], shape=[])
image, label = self.preprocess(image_bytes, label)
# populate features and labels dict
features = dict()
labels = dict()
features['image'] = image
features['is_tr_split'] = self.is_training
if self.cutmix_alpha:
features['cutmix_mask'] = cutmix_mask(self.cutmix_alpha, self._image_size, self._image_size)
else:
            features['cutmix_mask'] = tf.zeros((self._image_size, self._image_size, 1))
labels['label'] = label
return features, labels
def preprocess(self, image: tf.Tensor, label: tf.Tensor
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Apply image preprocessing and augmentation to the image and label."""
if self.is_training:
image = preprocessing.preprocess_for_train(
image,
image_size=self._image_size,
mean_subtract=self._mean_subtract,
standardize=self._standardize,
dtype=self.dtype,
augmenter=self._augmenter)
else:
image = preprocessing.preprocess_for_eval(
image,
image_size=self._image_size,
num_channels=self._num_channels,
mean_subtract=self._mean_subtract,
standardize=self._standardize,
dtype=self.dtype)
label = tf.cast(label, tf.int32)
if self._one_hot:
label = tf.one_hot(label, self.num_classes)
label = tf.reshape(label, [self.num_classes])
return image, label
# @classmethod
# def from_params(cls, *args, **kwargs):
# """Construct a dataset builder from a default config and any overrides."""
# config = DatasetConfig.from_args(*args, **kwargs)
# return cls(config)
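# --- Hedged usage sketch (not part of the original module). The paths below
# are placeholders, and hvd_size=1 sidesteps the Horovod size query for a
# single-process run.
def _dataset_builder_example():
    builder = Dataset(data_dir='/data/tfrecords',
                      index_file_dir='/data/tfrecords_idx',
                      split='train', num_classes=1000, one_hot=True,
                      batch_size=64, hvd_size=1)
    ds = builder.build()  # tf.data pipeline yielding (features, labels) batches
    return ds.take(1)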
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/dataloader/dataset_factory.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocessing functions for images."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
from typing import List, Optional, Text, Tuple
from dataloader import augment
# Calculated from the ImageNet training set
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
IMAGE_SIZE = 224
CROP_PADDING = 32
def mean_image_subtraction(
image_bytes: tf.Tensor,
means: Tuple[float, ...],
num_channels: int = 3,
dtype: tf.dtypes.DType = tf.float32,
) -> tf.Tensor:
"""Subtracts the given means from each image channel.
For example:
means = [123.68, 116.779, 103.939]
image_bytes = mean_image_subtraction(image_bytes, means)
Note that the rank of `image` must be known.
Args:
image_bytes: a tensor of size [height, width, C].
means: a C-vector of values to subtract from each channel.
num_channels: number of color channels in the image that will be distorted.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
Returns:
the centered image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `means`.
"""
if image_bytes.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
# We have a 1-D tensor of means; convert to 3-D.
# Note(b/130245863): we explicitly call `broadcast` instead of simply
# expanding dimensions for better performance.
means = tf.broadcast_to(means, tf.shape(image_bytes))
if dtype is not None:
means = tf.cast(means, dtype=dtype)
return image_bytes - means
def standardize_image(
image_bytes: tf.Tensor,
stddev: Tuple[float, ...],
num_channels: int = 3,
dtype: tf.dtypes.DType = tf.float32,
) -> tf.Tensor:
"""Divides the given stddev from each image channel.
For example:
stddev = [123.68, 116.779, 103.939]
image_bytes = standardize_image(image_bytes, stddev)
Note that the rank of `image` must be known.
Args:
image_bytes: a tensor of size [height, width, C].
stddev: a C-vector of values to divide from each channel.
num_channels: number of color channels in the image that will be distorted.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
Returns:
the centered image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `stddev`.
"""
if image_bytes.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
if len(stddev) != num_channels:
raise ValueError('len(stddev) must match the number of channels')
# We have a 1-D tensor of stddev; convert to 3-D.
# Note(b/130245863): we explicitly call `broadcast` instead of simply
# expanding dimensions for better performance.
stddev = tf.broadcast_to(stddev, tf.shape(image_bytes))
if dtype is not None:
stddev = tf.cast(stddev, dtype=dtype)
return image_bytes / stddev
def normalize_images(features: tf.Tensor,
mean_rgb: Tuple[float, ...] = MEAN_RGB,
stddev_rgb: Tuple[float, ...] = STDDEV_RGB,
num_channels: int = 3,
dtype: tf.dtypes.DType = tf.float32,
data_format: Text = 'channels_last') -> tf.Tensor:
"""Normalizes the input image channels with the given mean and stddev.
Args:
features: `Tensor` representing decoded images in float format.
mean_rgb: the mean of the channels to subtract.
stddev_rgb: the stddev of the channels to divide.
num_channels: the number of channels in the input image tensor.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
data_format: the format of the input image tensor
['channels_first', 'channels_last'].
Returns:
A normalized image `Tensor`.
"""
# TODO(allencwang) - figure out how to use mean_image_subtraction and
# standardize_image on batches of images and replace the following.
if data_format == 'channels_first':
stats_shape = [num_channels, 1, 1]
else:
stats_shape = [1, 1, num_channels]
if dtype is not None:
features = tf.image.convert_image_dtype(features, dtype=dtype)
if mean_rgb is not None:
mean_rgb = tf.constant(mean_rgb,
shape=stats_shape,
dtype=features.dtype)
mean_rgb = tf.broadcast_to(mean_rgb, tf.shape(features))
features = features - mean_rgb
if stddev_rgb is not None:
stddev_rgb = tf.constant(stddev_rgb,
shape=stats_shape,
dtype=features.dtype)
stddev_rgb = tf.broadcast_to(stddev_rgb, tf.shape(features))
features = features / stddev_rgb
return features
def decode_and_center_crop(image_bytes: tf.Tensor,
image_size: int = IMAGE_SIZE,
crop_padding: int = CROP_PADDING) -> tf.Tensor:
"""Crops to center of image with padding then scales image_size.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
image_size: image height/width dimension.
crop_padding: the padding size to use when centering the crop.
Returns:
A decoded and cropped image `Tensor`.
"""
decoded = image_bytes.dtype != tf.string
shape = (tf.shape(image_bytes) if decoded
else tf.image.extract_jpeg_shape(image_bytes))
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / (image_size + crop_padding)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)),
tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size])
if decoded:
image = tf.image.crop_to_bounding_box(
image_bytes,
offset_height=offset_height,
offset_width=offset_width,
target_height=padded_center_crop_size,
target_width=padded_center_crop_size)
else:
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
image = resize_image(image_bytes=image,
height=image_size,
width=image_size)
return image
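# --- Worked example (not part of the original module). For the default
# image_size=224 and crop_padding=32, the crop keeps 224 / (224 + 32) = 87.5%
# of the shorter side: a 500x375 JPEG yields a 328x328 center crop, which is
# then bilinearly resized to 224x224.
def _center_crop_size_example(short_side=375,
                              image_size=IMAGE_SIZE,
                              crop_padding=CROP_PADDING):
    crop = int((image_size / (image_size + crop_padding)) * short_side)
    assert crop == 328  # holds for the defaults above with short_side=375
    return crop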
def decode_crop_and_flip(image_bytes: tf.Tensor) -> tf.Tensor:
"""Crops an image to a random part of the image, then randomly flips.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
Returns:
A decoded and cropped image `Tensor`.
"""
decoded = image_bytes.dtype != tf.string
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
shape = (tf.shape(image_bytes) if decoded
else tf.image.extract_jpeg_shape(image_bytes))
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=0.1,
aspect_ratio_range=[0.75, 1.33],
area_range=[0.05, 1.0],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Reassemble the bounding box in the format the crop op requires.
offset_height, offset_width, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_height, offset_width,
target_height, target_width])
if decoded:
cropped = tf.image.crop_to_bounding_box(
image_bytes,
offset_height=offset_height,
offset_width=offset_width,
target_height=target_height,
target_width=target_width)
else:
cropped = tf.image.decode_and_crop_jpeg(image_bytes,
crop_window,
channels=3)
# Flip to add a little more random distortion in.
cropped = tf.image.random_flip_left_right(cropped)
return cropped
def resize_image(image_bytes: tf.Tensor,
height: int = IMAGE_SIZE,
width: int = IMAGE_SIZE) -> tf.Tensor:
"""Resizes an image to a given height and width.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
height: image height dimension.
width: image width dimension.
Returns:
A tensor containing the resized image.
"""
return tf.compat.v1.image.resize(
image_bytes, [height, width], method=tf.image.ResizeMethod.BILINEAR,
align_corners=False)
def preprocess_for_predict(
images: tf.Tensor,
image_size: int = IMAGE_SIZE,
num_channels: int = 3,
dtype: tf.dtypes.DType = tf.float32
) -> tf.Tensor:
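    """Reshapes an already-decoded image to the expected [H, W, C] layout and
    optionally converts its dtype; intended for predict/serving inputs."""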
images = tf.reshape(images, [image_size, image_size, num_channels])
if dtype is not None:
images = tf.image.convert_image_dtype(images, dtype=dtype)
return images
def preprocess_for_eval(
image_bytes: tf.Tensor,
image_size: int = IMAGE_SIZE,
num_channels: int = 3,
mean_subtract: bool = False,
standardize: bool = False,
dtype: tf.dtypes.DType = tf.float32
) -> tf.Tensor:
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
image_size: image height/width dimension.
num_channels: number of image input channels.
mean_subtract: whether or not to apply mean subtraction.
standardize: whether or not to apply standardization.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
Returns:
A preprocessed and normalized image `Tensor`.
"""
images = decode_and_center_crop(image_bytes, image_size)
images = tf.reshape(images, [image_size, image_size, num_channels])
if mean_subtract:
images = mean_image_subtraction(image_bytes=images, means=MEAN_RGB)
if standardize:
images = standardize_image(image_bytes=images, stddev=STDDEV_RGB)
if dtype is not None:
images = tf.image.convert_image_dtype(images, dtype=dtype)
return images
def load_eval_image(filename: Text, image_size: int = IMAGE_SIZE) -> tf.Tensor:
"""Reads an image from the filesystem and applies image preprocessing.
Args:
filename: a filename path of an image.
image_size: image height/width dimension.
Returns:
A preprocessed and normalized image `Tensor`.
"""
image_bytes = tf.io.read_file(filename)
image = preprocess_for_eval(image_bytes, image_size)
return image
def build_eval_dataset(filenames: List[Text],
labels: List[int] = None,
image_size: int = IMAGE_SIZE,
batch_size: int = 1) -> tf.Tensor:
"""Builds a tf.data.Dataset from a list of filenames and labels.
Args:
filenames: a list of filename paths of images.
labels: a list of labels corresponding to each image.
image_size: image height/width dimension.
batch_size: the batch size used by the dataset
Returns:
A preprocessed and normalized image `Tensor`.
"""
if labels is None:
labels = [0] * len(filenames)
filenames = tf.constant(filenames)
labels = tf.constant(labels)
dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
dataset = dataset.map(
lambda filename, label: (load_eval_image(filename, image_size), label))
dataset = dataset.batch(batch_size)
return dataset
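# --- Hedged usage sketch (not part of the original module). The file names
# are placeholders; each element of the returned dataset is an
# (images, labels) pair with images of shape (batch, 224, 224, 3).
def _eval_dataset_example():
    return build_eval_dataset(['/data/val/img0.jpg', '/data/val/img1.jpg'],
                              labels=[3, 7], image_size=224, batch_size=2)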
def preprocess_for_train(image_bytes: tf.Tensor,
image_size: int = IMAGE_SIZE,
augmenter: Optional[augment.ImageAugment] = None,
mean_subtract: bool = False,
standardize: bool = False,
dtype: tf.dtypes.DType = tf.float32) -> tf.Tensor:
"""Preprocesses the given image for training.
Args:
image_bytes: `Tensor` representing an image binary of
arbitrary size of dtype tf.uint8.
image_size: image height/width dimension.
augmenter: the image augmenter to apply.
mean_subtract: whether or not to apply mean subtraction.
standardize: whether or not to apply standardization.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
Returns:
A preprocessed and normalized image `Tensor`.
"""
images = decode_crop_and_flip(image_bytes=image_bytes)
images = resize_image(images, height=image_size, width=image_size)
if mean_subtract:
images = mean_image_subtraction(image_bytes=images, means=MEAN_RGB)
if standardize:
images = standardize_image(image_bytes=images, stddev=STDDEV_RGB)
if augmenter is not None:
images = augmenter.distort(images)
if dtype is not None:
images = tf.image.convert_image_dtype(images, dtype)
return images
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/dataloader/preprocessing.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import os
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from data.feature_spec import FeatureSpec, FEATURES_SELECTOR, TYPE_SELECTOR, FILES_SELECTOR
from data.outbrain.defaults import MULTIHOT_CHANNEL, PARQUET_TYPE
def parse_args():
parser = ArgumentParser()
parser.add_argument('--input', type=str, default='',
help='Path to input data directory')
parser.add_argument('--feature_spec_in', type=str, default='feature_spec.yaml',
help='Name of the input feature specification file')
parser.add_argument('--output', type=str, default='/data',
help='Path to output data directory')
parser.add_argument('--feature_spec_out', type=str, default='feature_spec.yaml',
help='Name of the output feature specification file')
parser.add_argument('--chunk_size', type=int, default=65536,
help='Number of rows to write out per partition')
parser.add_argument('--minimum_partition_number', type=int, default=8,
                        help='Raise an error if any mapping produces fewer than this many partitions')
return parser.parse_args()
def check_only_one_file_per_chunk(feature_spec):
for mapping in feature_spec.source_spec.values():
for chunk in mapping:
chunk_files = chunk[FILES_SELECTOR]
assert len(chunk_files) == 1
assert chunk[TYPE_SELECTOR] == 'csv'
def main():
args = parse_args()
args_output = args.output
args_input = args.input
args_feature_spec_in = args.feature_spec_in
args_feature_spec_out = args.feature_spec_out
batch_size = args.chunk_size
fspec_in_path = os.path.join(args_input, args_feature_spec_in)
fspec_in = FeatureSpec.from_yaml(fspec_in_path)
os.makedirs(args.output, exist_ok=True)
paths_per_mapping = dict()
check_only_one_file_per_chunk(fspec_in)
for mapping_name, mapping in fspec_in.source_spec.items():
paths_per_mapping[mapping_name]=[]
df_iterators = []
for chunk in mapping:
# We checked earlier it's a single file chunk
path_to_load = os.path.join(fspec_in.base_directory, chunk[FILES_SELECTOR][0])
chunk_iterator = pd.read_csv(path_to_load, header=None, chunksize=batch_size, names=chunk[FEATURES_SELECTOR])
df_iterators.append(chunk_iterator)
zipped = zip(*df_iterators)
# writer = None
for chunk_id, chunks in enumerate(zipped):
# chunks is now a list of the chunk_id-th segment of each dataframe iterator and contains all columns
mapping_df = pd.concat(chunks, axis=1) # This takes care of making sure feature names are unique
            # transform multihots from strings to arrays  # TODO: find a better way to do this
multihot_features = fspec_in.get_names_by_channel(MULTIHOT_CHANNEL)
for feature in multihot_features:
mapping_df[feature] = mapping_df[feature].apply(lambda x: np.fromstring(x[1:-1], sep=' ,'))
# prepare path
partition_path = f"{mapping_name}_{chunk_id}.parquet"
paths_per_mapping[mapping_name].append(partition_path)
partition_path_abs = os.path.join(args.output, partition_path)
#write to parquet
mapping_table = pa.Table.from_pandas(mapping_df)
pq.write_table(mapping_table, partition_path_abs)
# Prepare the new feature spec
new_source_spec = {}
old_source_spec = fspec_in.source_spec
for mapping_name in old_source_spec.keys():
        # check whether we met the required number of partitions
        min_partitions = args.minimum_partition_number
        got_partitions = len(paths_per_mapping[mapping_name])
        assert got_partitions >= min_partitions, f"Not enough partitions generated for mapping: {mapping_name}. Expected at least {min_partitions}, got {got_partitions}"
all_features = []
for chunk in old_source_spec[mapping_name]:
all_features = all_features + chunk[FEATURES_SELECTOR]
new_source_spec[mapping_name] = []
new_source_spec[mapping_name].append({TYPE_SELECTOR: PARQUET_TYPE,
FEATURES_SELECTOR: all_features,
FILES_SELECTOR: paths_per_mapping[mapping_name]})
fspec_out = FeatureSpec(feature_spec=fspec_in.feature_spec, source_spec=new_source_spec,
channel_spec=fspec_in.channel_spec, metadata=fspec_in.metadata)
fspec_out.base_directory = args.output
feature_spec_save_path = os.path.join(args_output, args_feature_spec_out)
fspec_out.to_yaml(output_path=feature_spec_save_path)
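# --- Hedged usage sketch (not part of the original module). A typical
# invocation, with placeholder paths, that rewrites a csv dataset described
# by feature_spec.yaml into chunked parquet partitions:
#
#   python transcode.py --input /data/csv_dataset --output /data/parquet \
#       --chunk_size 65536 --minimum_partition_number 8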
if __name__ == '__main__':
main() | DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/transcode.py |
from data.feature_spec import FeatureSpec
from data.outbrain.defaults import ONEHOT_CHANNEL, MULTIHOT_CHANNEL
from argparse import ArgumentParser
import random
import json
def parse_args():
parser = ArgumentParser()
parser.add_argument('--feature_spec_in', type=str, default='feature_spec.yaml',
help='Name of the input feature specification file')
parser.add_argument('--output', type=str)
parser.add_argument('--max_size', type=int, default=256,
help='Max embedding size to pick')
return parser.parse_args()
def main():
    # this generator supports the following feature types:
    #   onehot categorical
    #   numerical
    #   label
    #   multihot categorical
args = parse_args()
fspec_in = FeatureSpec.from_yaml(args.feature_spec_in)
max_size = args.max_size
onehot_features = fspec_in.get_names_by_channel(ONEHOT_CHANNEL)
multihot_features = fspec_in.get_names_by_channel(MULTIHOT_CHANNEL)
    sizes = {feature: random.randint(1, max_size) for feature in onehot_features + multihot_features}
with open(args.output, "w") as opened:
json.dump(sizes, opened)
if __name__ == "__main__":
main() | DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/gen_embedding_sizes.py |
from data.feature_spec import FeatureSpec
from data.outbrain.defaults import ONEHOT_CHANNEL, MULTIHOT_CHANNEL, LABEL_CHANNEL, NUMERICAL_CHANNEL, \
MAP_FEATURE_CHANNEL
from argparse import ArgumentParser
import pandas as pd
import os
import numpy as np
def parse_args():
parser = ArgumentParser()
parser.add_argument('--feature_spec_in', type=str, default='feature_spec.yaml',
help='Name of the input feature specification file')
parser.add_argument('--output', type=str, default='/data')
parser.add_argument('--size', type=int, default=1000,
help='The desired number of rows in the output csv file')
return parser.parse_args()
def main():
    # this generator supports the following feature types:
    #   onehot categorical
    #   numerical
    #   label
    #   multihot categorical
args = parse_args()
dataset_size = args.size
fspec_in = FeatureSpec.from_yaml(args.feature_spec_in)
fspec_in.base_directory = args.output
    # look up cardinalities for the one-hot categorical features
onehot_features = fspec_in.get_names_by_channel(ONEHOT_CHANNEL)
onehot_cardinalities: dict = fspec_in.get_cardinalities(onehot_features)
multihot_features = fspec_in.get_names_by_channel(MULTIHOT_CHANNEL)
multihot_cardinalities: dict = fspec_in.get_cardinalities(multihot_features)
multihot_hotnesses: dict = fspec_in.get_multihot_hotnesses(multihot_features)
input_label_feature_name = fspec_in.get_names_by_channel(LABEL_CHANNEL)[0]
numerical_names_set = set(fspec_in.get_names_by_channel(NUMERICAL_CHANNEL))
map_channel_features = fspec_in.get_names_by_channel(MAP_FEATURE_CHANNEL)
map_feature = None
if len(map_channel_features)>0:
map_feature=map_channel_features[0]
for mapping_name, mapping in fspec_in.source_spec.items():
for chunk in mapping:
assert chunk['type'] == 'csv', "Only csv files supported in this generator"
assert len(chunk['files']) == 1, "Only one file per chunk supported in this generator"
path_to_save = os.path.join(fspec_in.base_directory, chunk['files'][0])
data = {}
for name in chunk['features']:
if name == input_label_feature_name:
data[name]=np.random.randint(0, 2, size=dataset_size)
elif name in numerical_names_set:
data[name]=np.random.rand(dataset_size)
elif name in set(onehot_features):
local_cardinality = onehot_cardinalities[name]
data[name]=np.random.randint(0, local_cardinality, size=dataset_size)
elif name in set(multihot_features):
local_cardinality = multihot_cardinalities[name]
local_hotness = multihot_hotnesses[name]
data[name]=np.random.randint(0, local_cardinality, size=(dataset_size, local_hotness)).tolist()
elif name == map_feature:
raise NotImplementedError("Cannot generate datasets with map feature enabled")
# TODO add a parameter that specifies max repeats and generate
else:
raise ValueError(f"Cannot generate for unused features. Unknown feature: {name}")
# Columns in the csv appear in the order they are listed in the source spec for a given chunk
            column_order = chunk['features']
df = pd.DataFrame(data)
os.makedirs(os.path.dirname(path_to_save), exist_ok=True)
df.to_csv(path_to_save, columns=column_order, index=False, header=False)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/gen_csv.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT REMOVE THIS IMPORT
# It is here to initialize nvtabular before tensorflow is initialized.
# Removing it leads to a drop in nvtabular dataloader performance
# Do not put other imports before this without running performance validation
import nvtabular # noqa # pylint: disable=unused-import
# See above
import os
os.environ["TF_GPU_ALLOCATOR"]="cuda_malloc_async"
from trainer.model.widedeep import wide_deep_model
from trainer.run import run
from trainer.utils.arguments import parse_args
from trainer.utils.setup import create_config
def main():
args = parse_args()
config = create_config(args)
model, _ = wide_deep_model(args, config["feature_spec"], config["embedding_dimensions"])
run(args, model, config)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/main.py |
#!/usr/bin/env python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
from pathlib import Path
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1"
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file
from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file
DATALOADER_FN_NAME,
BaseLoader,
BaseSaver,
ExportFormat,
ModelInputType,
TorchJit,
load_from_file,
)
from .deployment_toolkit.extensions import loaders, savers # noqa: E402 module level import not at top of file
LOGGER = logging.getLogger("export_model")
INPUT_MODEL_TYPES = [
ModelInputType.TF_ESTIMATOR,
ModelInputType.TF_KERAS,
ModelInputType.PYT,
]
OUTPUT_MODEL_TYPES = [
ExportFormat.TF_SAVEDMODEL,
ExportFormat.TORCHSCRIPT,
ExportFormat.ONNX,
]
TORCH_JIT_TYPES = [
TorchJit.NONE,
TorchJit.TRACE,
TorchJit.SCRIPT,
]
def _get_args():
parser = argparse.ArgumentParser(
description="Script for exporting models from supported frameworks.", allow_abbrev=False
)
parser.add_argument("--input-path", help="Path to input python module", required=True)
parser.add_argument(
"--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True
)
parser.add_argument("--output-path", help="Path to output model file", required=True)
parser.add_argument(
"--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True
)
parser.add_argument(
"--torch-jit",
help="Torch Jit",
choices=[f.value for f in TORCH_JIT_TYPES],
required=False,
default=None,
)
parser.add_argument("--dataloader", help="Path to python module containing data loader")
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument(
"--ignore-unknown-parameters",
help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)",
action="store_true",
default=False,
)
args, unparsed_args = parser.parse_known_args()
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
if args.input_type == ModelInputType.PYT.value and args.output_type == ExportFormat.ONNX.value:
saver_type = f"{ModelInputType.PYT.value}--{ExportFormat.ONNX.value}"
else:
saver_type = args.output_type
Saver: BaseSaver = savers.get(saver_type)
ArgParserGenerator(Saver).update_argparser(parser)
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
if args.ignore_unknown_parameters:
args, unknown_args = parser.parse_known_args()
LOGGER.warning(f"Got additional args {unknown_args}")
else:
args = parser.parse_args()
return args
def main():
args = _get_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
dataloader_fn = None
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
Loader: BaseLoader = loaders.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
    LOGGER.debug("input path: %s (exists: %s), output type: %s",
                 args.input_path, os.path.isfile(args.input_path), args.output_type)
model = loader.load(
args.input_path,
dataloader_fn=dataloader_fn,
output_type=args.output_type,
torch_jit=args.torch_jit,
)
LOGGER.info("inputs: %s", model.inputs)
LOGGER.info("outputs: %s", model.outputs)
if args.input_type == ModelInputType.PYT.value and args.output_type == ExportFormat.ONNX.value:
saver_type = f"{ModelInputType.PYT.value}--{ExportFormat.ONNX.value}"
else:
saver_type = args.output_type
Saver: BaseSaver = savers.get(saver_type)
saver = ArgParserGenerator(Saver).from_args(args)
saver.save(model, args.output_path, dataloader_fn)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/export_model.py |
#!/usr/bin/env python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional
import numpy as np
import tensorflow as tf
from triton.deployment_toolkit.core import BaseMetricsCalculator
class MetricsCalculator(BaseMetricsCalculator):
def __init__(self, *, output_used_for_metrics: str):
self.output_used_for_metrics = output_used_for_metrics
self._ids = None
self._y_pred = None
self._y_real = None
def update(
self,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
):
y_real = y_real[self.output_used_for_metrics]
y_pred = y_pred[self.output_used_for_metrics]
def _concat_batches(b1, b2):
if b1 is None:
return b2
else:
return np.concatenate([b1, b2], axis=0)
self._ids = _concat_batches(self._ids, ids)
self._y_real = _concat_batches(self._y_real, y_real)
self._y_pred = _concat_batches(self._y_pred, y_pred)
@property
def metrics(self) -> Dict[str, Any]:
metrics = {"map12": self.get_map12(self._ids, self._y_pred, self._y_real)}
return metrics
def get_map12(self, ids, y_pred, y_real):
with tf.device("/cpu:0"):
predictions = tf.reshape(y_pred, [-1])
predictions = tf.cast(predictions, tf.float64)
display_ids = tf.reshape(ids, [-1])
labels = tf.reshape(y_real, [-1])
sorted_ids = tf.argsort(display_ids)
display_ids = tf.gather(display_ids, indices=sorted_ids)
predictions = tf.gather(predictions, indices=sorted_ids)
labels = tf.gather(labels, indices=sorted_ids)
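            # Group by display_id: each ragged row below collects every
            # candidate ad shown for one display_id (the padding to width 30
            # suggests at most 30 candidate ads per display in this dataset).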
_, display_ids_idx, display_ids_ads_count = tf.unique_with_counts(display_ids, out_idx=tf.int64)
pad_length = 30 - tf.reduce_max(display_ids_ads_count)
preds = tf.RaggedTensor.from_value_rowids(predictions, display_ids_idx).to_tensor()
labels = tf.RaggedTensor.from_value_rowids(labels, display_ids_idx).to_tensor()
labels_mask = tf.math.reduce_max(labels, 1)
preds_masked = tf.boolean_mask(preds, labels_mask)
labels_masked = tf.boolean_mask(labels, labels_mask)
labels_masked = tf.argmax(labels_masked, axis=1, output_type=tf.int32)
labels_masked = tf.reshape(labels_masked, [-1, 1])
preds_masked = tf.pad(preds_masked, [(0, 0), (0, pad_length)])
_, predictions_idx = tf.math.top_k(preds_masked, 12)
indices = tf.math.equal(predictions_idx, labels_masked)
indices_mask = tf.math.reduce_any(indices, 1)
masked_indices = tf.boolean_mask(indices, indices_mask)
res = tf.argmax(masked_indices, axis=1)
ap_matrix = tf.divide(1, tf.add(res, 1))
ap_sum = tf.reduce_sum(ap_matrix)
shape = tf.cast(tf.shape(indices)[0], tf.float64)
return (ap_sum / shape).numpy()
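# --- Worked example (not part of the original module). MAP@12 averages
# 1/rank of the clicked ad over display_ids, counting only the top 12
# predictions. With the clicked ad ranked 1st for one display_id and 3rd for
# another, MAP@12 = (1/1 + 1/3) / 2 = 2/3.
def _map12_toy_example():
    ranks = [1, 3]  # 1-indexed rank of the clicked ad per display_id
    ap = sum(1. / r for r in ranks) / len(ranks)
    assert abs(ap - 2. / 3.) < 1e-9
    return ap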
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/metrics.py |
#!/usr/bin/env python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Using the `calculate_metrics.py` script, you can obtain model accuracy/error metrics via a user-defined `MetricsCalculator` class.
Data provided to the `MetricsCalculator` is obtained from dump files
stored in the directory pointed to by the `--dump-dir` argument.
The above files are prepared by the `run_inference_on_fw.py` and `run_inference_on_triton.py` scripts.
Output data is stored in the csv file pointed to by the `--csv` argument.
Example call:
```shell script
python ./triton/calculate_metrics.py \
--dump-dir /results/dump_triton \
--csv /results/accuracy_results.csv \
--metrics metrics.py \
--metric-class-param1 value
```
"""
import argparse
import csv
import logging
import string
from pathlib import Path
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import BaseMetricsCalculator, load_from_file
from .deployment_toolkit.dump import JsonDumpReader
LOGGER = logging.getLogger("calculate_metrics")
TOTAL_COLUMN_NAME = "_total_"
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Run models with given dataloader", allow_abbrev=False)
parser.add_argument("--metrics", help="Path to python module containing metrics calculator", required=True)
parser.add_argument("--csv", help="Path to csv file", required=True)
parser.add_argument("--dump-dir", help="Path to directory with dumped outputs (and labels)", required=True)
args, *_ = parser.parse_known_args()
MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator")
ArgParserGenerator(MetricsCalculator).update_argparser(parser)
args = parser.parse_args()
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator")
metrics_calculator: BaseMetricsCalculator = ArgParserGenerator(MetricsCalculator).from_args(args)
reader = JsonDumpReader(args.dump_dir)
for ids, x, y_true, y_pred in reader.iterate_over(["ids", "inputs", "labels", "outputs"]):
ids = list(ids["ids"]) if ids is not None else None
metrics_calculator.update(ids=ids, x=x, y_pred=y_pred, y_real=y_true)
metrics = metrics_calculator.metrics
metric_names_with_space = [name for name in metrics if any([c in string.whitespace for c in name])]
if metric_names_with_space:
raise ValueError(f"Metric names shall have no spaces; Incorrect names: {', '.join(metric_names_with_space)}")
csv_path = Path(args.csv)
csv_path.parent.mkdir(parents=True, exist_ok=True)
with csv_path.open("w") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=list(metrics.keys()))
writer.writeheader()
writer.writerow(metrics)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/calculate_metrics.py |
#!/usr/bin/env python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To infer the model deployed on Triton, you can use the `run_inference_on_triton.py` script.
It sends a request with data obtained from the pointed data loader and dumps the received data into dump files.
Those files are stored in the directory pointed to by the `--output-dir` argument.
Currently, the client communicates with the Triton server asynchronously using the GRPC protocol.
Example call:
```shell script
python ./triton/run_inference_on_triton.py \
--server-url localhost:8001 \
--model-name ResNet50 \
--model-version 1 \
--dump-labels \
--output-dir /results/dump_triton
```
"""
import argparse
import logging
import time
import traceback
from pathlib import Path
from tqdm import tqdm
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import DATALOADER_FN_NAME, load_from_file
from .deployment_toolkit.dump import JsonDumpWriter
from .deployment_toolkit.triton_inference_runner import TritonInferenceRunner
LOGGER = logging.getLogger("run_inference_on_triton")
def _parse_args():
parser = argparse.ArgumentParser(description="Infer model on Triton server", allow_abbrev=False)
parser.add_argument(
"--server-url", type=str, default="localhost:8001", help="Inference server URL (default localhost:8001)"
)
parser.add_argument("--model-name", help="The name of the model used for inference.", required=True)
parser.add_argument("--model-version", help="The version of the model used for inference.", required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=True)
parser.add_argument("--output-dir", required=True, help="Path to directory where outputs will be saved")
parser.add_argument(
"--response-wait-time", required=False, help="Maximal time to wait for response", default=120, type=float
)
parser.add_argument(
"--max-unresponded-requests",
required=False,
help="Maximal number of unresponded requests",
default=128,
type=int,
)
parser.add_argument(
"--synchronous", help="Enable synchronous calls to Triton Server", action="store_true", default=False
)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
args = parser.parse_args()
return args
def main():
args = _parse_args()
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
log_level = logging.INFO if not args.verbose else logging.DEBUG
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
try:
runner = TritonInferenceRunner(
server_url=args.server_url,
model_name=args.model_name,
model_version=args.model_version,
dataloader_fn=dataloader_fn,
verbose=False,
response_wait_time=args.response_wait_time,
max_unresponded_requests=args.max_unresponded_requests,
synchronous=args.synchronous,
)
except Exception as e:
message = traceback.format_exc()
LOGGER.error(f"Encountered exception \n{message}")
raise e
with JsonDumpWriter(output_dir=args.output_dir) as writer:
start = time.time()
for ids, x, y_pred, y_real in tqdm(runner, unit="batch", mininterval=10):
data = _verify_and_format_dump(args, ids, x, y_pred, y_real)
writer.write(**data)
stop = time.time()
LOGGER.info(f"\nThe inference took {stop - start:0.3f}s")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/run_inference_on_triton.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pandas as pd
import tensorflow as tf
from data.outbrain.features import get_features_keys, MULTIHOT_COLUMNS
def prepare_df(df):
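    """Expands each multi-hot column into one column per slot (padded with -1)
    plus an `<name>_nnzs` count column, and downcasts integer/float columns
    to 32-bit dtypes."""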
for multihot_key, value in MULTIHOT_COLUMNS.items():
multihot_col = df.pop(multihot_key)
for i in range(value):
df[f"{multihot_key}_{i}"] = multihot_col.apply(
lambda x: x[i] if len(x) > i else -1
)
df[f"{multihot_key}_nnzs"] = multihot_col.apply(lambda x: len(x))
for col in df.columns:
if np.issubdtype(df[col].dtype, np.integer):
df[col] = df[col].astype(np.int32)
if np.issubdtype(df[col].dtype, np.floating):
df[col] = df[col].astype(np.float32)
return df
def _merge_multihots(*multihots, axis=1):
expanded = [tf.expand_dims(multihot, axis) for multihot in multihots]
concatenated = tf.concat(expanded, axis)
reshaped = tf.reshape(concatenated, [-1])
mask = tf.math.not_equal(reshaped, -1)
filtered = tf.boolean_mask(reshaped, mask)
return tf.reshape(filtered, [-1, 1])
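# --- Illustrative sketch (not part of the original module). Two padded
# multi-hot columns are interleaved row by row and the -1 padding is dropped,
# mirroring `_merge_multihots` above: row 0 contributes two values and row 1
# contributes one, so the result is [[10], [11], [20]].
def _merge_multihots_example():
    col0 = tf.constant([10, 20])  # first value of each row
    col1 = tf.constant([11, -1])  # second value; -1 marks absence
    return _merge_multihots(col0, col1)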
def _filter_batch(elem):
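    """Splits a batch dict into (features, label, display_id), re-packing the
    expanded multi-hot slot columns back into dense [batch, hotness] tensors
    padded with -1."""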
label = elem.pop("clicked")
label = tf.reshape(label, [-1, 1])
disp_id = elem.pop("display_id")
for multihot_key, value in MULTIHOT_COLUMNS.items():
multihot_values = [elem.pop(f"{multihot_key}_{i}") for i in range(value)]
multihot_nnzs = elem.pop(f"{multihot_key}_nnzs")
values = _merge_multihots(*multihot_values)
row_lengths = multihot_nnzs
values = tf.reshape(values, [-1])
row_lengths = tf.reshape(row_lengths, [-1])
x = tf.RaggedTensor.from_row_lengths(
values, row_lengths, validate=False
).to_tensor(default_value=-1, shape=[None, value])
elem[f"{multihot_key}"] = x
features = get_features_keys()
elem = {
key: (
tf.reshape(value, [-1, 1])
if "list" not in key
else tf.reshape(value, [-1, MULTIHOT_COLUMNS[key]])
)
for key, value in elem.items()
if key in features or "list" in key
}
return elem, label, disp_id
def eval_input_fn(files_path, records_batch_size):
frames = []
for file in files_path:
frames.append(pd.read_parquet(file))
if len(frames) > 1:
df = pd.concat(frames)
else:
df = frames[0]
full_df = prepare_df(df)
dataset = tf.data.Dataset.from_tensor_slices(dict(full_df))
dataset = dataset.batch(batch_size=records_batch_size, drop_remainder=False)
dataset = dataset.map(map_func=partial(_filter_batch), num_parallel_calls=None)
dataset = dataset.prefetch(buffer_size=2)
return dataset
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/tf_dataloader.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from types import SimpleNamespace
from typing import List
import tensorflow as tf
from data.outbrain.features import get_outbrain_feature_spec, EMBEDDING_DIMENSIONS
from trainer.model.widedeep import wide_deep_model
def update_argparser(parser):
parser.add_argument('--deep-hidden-units', type=int, default=[1024, 1024, 1024, 1024, 1024], nargs='+',
help='Hidden units per layer for deep model, separated by spaces')
parser.add_argument('--deep-dropout', type=float, default=0.1,
help='Dropout regularization for deep model')
parser.add_argument('--combiner', type=str, default='sum', choices=['mean', 'sum'],
help='Type of aggregation used for multi hot categorical features')
parser.add_argument('--precision', type=str, default="fp16", choices=['fp32', 'fp16'],
help='Precision of the ops. AMP will be used in case of fp16')
parser.add_argument('--checkpoint-dir', type=str, required=True,
help='Path to directory containing checkpoint')
def get_model(
*,
deep_hidden_units: List[int],
deep_dropout: float,
combiner: str,
checkpoint_dir: str,
precision: str = "fp32",
batch_size: int = 131072
):
args = {
'deep_hidden_units': deep_hidden_units,
'deep_dropout': deep_dropout,
'combiner': combiner
}
args = SimpleNamespace(**args)
    # This will be changed in the future when feature spec support for Triton is added
feature_spec = get_outbrain_feature_spec("")
embedding_dimensions = EMBEDDING_DIMENSIONS
model, features = wide_deep_model(args, feature_spec, embedding_dimensions)
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir)).expect_partial()
inputs = features.values()
outputs = model(features, training=False)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
@tf.function
def call_fn(*model_inputs):
return model(model_inputs, training=False)
return model, call_fn
if __name__ == '__main__':
get_model(deep_hidden_units=[1024, 1024, 1024, 1024, 1024], deep_dropout=0.1, combiner='sum',
checkpoint_dir='/tmp/wd2/checkpoint')
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/model.py |
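As a side note on the checkpoint restore used in `get_model` above, here is a minimal sketch of the `tf.train.Checkpoint` / `expect_partial()` pattern, using a hypothetical toy model in place of the Wide & Deep network:

```python
import tensorflow as tf

# Hypothetical toy model standing in for the Wide & Deep network.
model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.build(input_shape=(None, 4))

# Save a checkpoint the same way the trainer would.
tf.train.Checkpoint(model=model).save("/tmp/toy_ckpt/ckpt")

# Restore for inference; expect_partial() silences warnings about
# checkpoint values (e.g. optimizer slots) that are not used here.
restored = tf.keras.Sequential([tf.keras.layers.Dense(1)])
restored.build(input_shape=(None, 4))
tf.train.Checkpoint(model=restored).restore(
    tf.train.latest_checkpoint("/tmp/toy_ckpt")
).expect_partial()
```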
#!/usr/bin/env python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To run inference with the model on the framework runtime, use the `run_inference_on_fw.py` script.
It runs inference locally on data obtained from the pointed data loader and saves the received outputs into dump files.
Those files are stored in the directory pointed to by the `--output-dir` argument.
Example call:
```shell script
python ./triton/run_inference_on_fw.py \
--input-path /models/exported/model.onnx \
--input-type onnx \
--dataloader triton/dataloader.py \
--data-dir /data/imagenet \
--batch-size 32 \
--output-dir /results/dump_local \
--dump-labels
```
"""
import argparse
import logging
import os
from pathlib import Path
from tqdm import tqdm
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0"
from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file
from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file
DATALOADER_FN_NAME,
BaseLoader,
BaseRunner,
load_from_file,
)
from .deployment_toolkit.dump import JsonDumpWriter # noqa: E402 module level import not at top of file
from .deployment_toolkit.extensions import loaders, runners # noqa: E402 module level import not at top of file
LOGGER = logging.getLogger("run_inference_on_fw")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
def _parse_and_validate_args():
supported_inputs = set(runners.supported_extensions) & set(loaders.supported_extensions)
parser = argparse.ArgumentParser(description="Dump local inference output of given model", allow_abbrev=False)
parser.add_argument("--input-path", help="Path to input model", required=True)
parser.add_argument("--input-type", help="Input model type", choices=supported_inputs, required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--output-dir", help="Path to dir where output files will be stored", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
Runner: BaseRunner = runners.get(args.input_type)
ArgParserGenerator(Runner).update_argparser(parser)
args = parser.parse_args()
types_requiring_io_params = []
if args.input_type in types_requiring_io_params and not all([args.inputs, args.outputs]):
parser.error(f"For {args.input_type} input provide --inputs and --outputs parameters")
return args
def main():
args = _parse_and_validate_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
Loader: BaseLoader = loaders.get(args.input_type)
Runner: BaseRunner = runners.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
runner = ArgParserGenerator(Runner).from_args(args)
LOGGER.info(f"Loading {args.input_path}")
model = loader.load(args.input_path)
with runner.init_inference(model=model) as runner_session, JsonDumpWriter(args.output_dir) as writer:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
LOGGER.info("Data loader initialized; Running inference")
for ids, x, y_real in tqdm(dataloader_fn(), unit="batch", mininterval=10):
y_pred = runner_session(x)
data = _verify_and_format_dump(args, ids=ids, x=x, y_pred=y_pred, y_real=y_real)
writer.write(**data)
LOGGER.info("Inference finished")
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/run_inference_on_fw.py |
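The script above only assumes that the dataloader file exposes a `get_dataloader_fn` returning an iterator of `(ids, x, y_real)` batches. A minimal stand-in satisfying that contract (all names and shapes below are hypothetical):

```python
import numpy as np

def get_dataloader_fn(*, batch_size: int = 4, n_batches: int = 2):
    def _dataloader():
        for i in range(n_batches):
            ids = np.arange(i * batch_size, (i + 1) * batch_size)
            x = {"input_1": np.random.rand(batch_size, 8).astype(np.float32)}
            y_real = {"output_1": np.zeros((batch_size, 1), dtype=np.float32)}
            yield ids, x, y_real
    return _dataloader
```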
#!/usr/bin/env python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import tensorflow as tf
from triton.tf_dataloader import eval_input_fn
def get_dataloader_fn(
*,
data_pattern: str,
batch_size: int,
):
files_path = glob.glob(data_pattern)
assert len(files_path), "Expected at least 1 parquet file, found 0"
with tf.device('/cpu:0'):
input_fn = eval_input_fn(
files_path=files_path,
records_batch_size=batch_size,
)
def _get_dataloader():
for x, y, ids in input_fn:
ids = ids.numpy()
x = {name: tensor.numpy() for name, tensor in x.items()}
y = {'wide_deep_model': y.numpy()}
yield ids, x, y
return _get_dataloader
def main():
import argparse
parser = argparse.ArgumentParser(description="short_description")
parser.add_argument("--data_pattern", required=True)
parser.add_argument("--batch_size", type=int, required=True)
args = parser.parse_args()
dataloader_fn = get_dataloader_fn(data_pattern=args.data_pattern,
batch_size=args.batch_size)
for i, (ids, x, y) in enumerate(dataloader_fn()):
print(x, y)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/dataloader.py |
#!/usr/bin/env python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import pathlib
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .deployment_toolkit.core import EvaluationMode, MeasurementMode, OfflineMode, PerformanceTool
from .deployment_toolkit.triton_performance_runner import TritonPerformanceRunner
LOGGER = logging.getLogger("run_performance_on_triton")
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model-name",
type=str,
required=True,
help="Name of the model to test",
)
parser.add_argument(
"--result-path",
type=pathlib.Path,
required=True,
help="Path where results files is stored.",
)
parser.add_argument(
"--server-url",
type=str,
default="http://127.0.0.1:8000",
help="Url to Triton server",
)
parser.add_argument(
"--model-version",
type=str,
default="1",
help="Version of the model",
)
parser.add_argument(
"--input-data",
type=str,
default="random",
help="Input data to perform profiling.",
)
parser.add_argument(
"--input-shapes",
action="append",
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument(
"--batch-sizes",
type=int,
default=[1],
help="List of batch sizes to tests.",
nargs="*",
)
parser.add_argument(
"--concurrency",
type=int,
default=[1],
help="List of concurrency modes.",
nargs="*",
)
parser.add_argument(
"--measurement-mode",
choices=[item.value for item in MeasurementMode],
default=MeasurementMode.COUNT_WINDOWS.value,
type=str,
help="Select measurement mode "
"'time_windows' stabilize performance on measurement window. "
"'count_windows' stabilize performance on number of samples.",
)
parser.add_argument(
"--measurement-interval",
help="Time window perf_analyzer will wait to stabilize the measurement",
default=5000,
type=int,
)
parser.add_argument(
"--measurement-request-count",
help="Number of samples on which perf_analyzer will stabilize the measurement",
default=50,
type=int,
)
parser.add_argument(
"--evaluation-mode",
choices=[item.value for item in EvaluationMode],
default=EvaluationMode.OFFLINE.value,
type=str,
help="Select evaluation mode "
"'offline' run offline analysis and use GPU memory to pass tensors. "
"'online' run online analysis and use HTTP protocol.",
)
parser.add_argument(
"--offline-mode",
choices=[item.value for item in OfflineMode],
default=OfflineMode.SYSTEM.value,
type=str,
help="Select offline mode "
"'system' pass tensors through CPU RAM memory. "
"'cuda' pass tensors through GPU RAM memory.",
)
parser.add_argument(
"--output-shared-memory-size",
default=102400,
type=int,
help="Size of memory buffer allocated for output with dynamic shapes in bytes. "
"Has to be equal to maximal size of output tensor.",
)
parser.add_argument(
"--performance-tool",
choices=[item.value for item in PerformanceTool],
default=PerformanceTool.MODEL_ANALYZER.value,
type=str,
help="Select performance tool for measurement mode "
"'model_analyzer' use Model Analyzer "
"'perf_analyzer' use Perf Analyzer",
)
parser.add_argument(
"--model-repository",
default=None,
type=str,
help="Path to model repository. Valid when using Model Analyzer",
)
parser.add_argument(
"--warmup",
help="Enable model warmup before performance test",
action="store_true",
default=False,
)
parser.add_argument(
"--timeout",
help="Timeout for performance analysis",
type=int,
default=None,
required=False,
)
parser.add_argument(
"-v",
"--verbose",
help="Verbose logs",
action="store_true",
default=False,
)
args = parser.parse_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
runner = TritonPerformanceRunner(
server_url=args.server_url,
model_name=args.model_name,
input_data=args.input_data,
input_shapes=args.input_shapes or [],
batch_sizes=args.batch_sizes,
measurement_mode=MeasurementMode(args.measurement_mode),
measurement_interval=args.measurement_interval,
measurement_request_count=args.measurement_request_count,
concurrency=args.concurrency,
evaluation_mode=EvaluationMode(args.evaluation_mode),
offline_mode=OfflineMode(args.offline_mode),
output_shared_memory_size=args.output_shared_memory_size,
performance_tool=PerformanceTool(args.performance_tool),
model_repository=args.model_repository,
result_path=args.result_path,
warmup=args.warmup,
timeout=args.timeout,
verbose=args.verbose,
)
runner.run()
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/run_performance_on_triton.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import importlib
import logging
import os
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
LOGGER = logging.getLogger(__name__)
DATALOADER_FN_NAME = "get_dataloader_fn"
GET_MODEL_FN_NAME = "get_model"
GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn"
GET_ARGPARSER_FN_NAME = "update_argparser"
class TensorSpec(NamedTuple):
name: str
dtype: str
shape: Tuple
class Parameter(Enum):
def __lt__(self, other: "Parameter") -> bool:
return self.value < other.value
def __str__(self):
return self.value
class BackendAccelerator(Parameter):
NONE = "none"
AMP = "amp"
TRT = "trt"
class ExportPrecision(Parameter):
FP16 = "fp16"
FP32 = "fp32"
class Precision(Parameter):
INT8 = "int8"
FP16 = "fp16"
FP32 = "fp32"
class DeviceKind(Parameter):
CPU = "cpu"
GPU = "gpu"
class ModelInputType(Parameter):
TF_GRAPHDEF = "tf-graphdef"
TF_ESTIMATOR = "tf-estimator"
TF_KERAS = "tf-keras"
PYT = "pyt"
class Format(Parameter):
TF_SAVEDMODEL = "tf-savedmodel"
TF_TRT = "tf-trt"
ONNX = "onnx"
TORCHSCRIPT = "torchscript"
TRT = "trt"
FASTERTRANSFORMER = "fastertransformer"
# deprecated, backward compatibility only
TS_TRACE = "ts-trace"
TS_SCRIPT = "ts-script"
class ExportFormat(Parameter):
TF_SAVEDMODEL = "tf-savedmodel"
TORCHSCRIPT = "torchscript"
ONNX = "onnx"
# deprecated, backward compatibility only
TS_TRACE = "ts-trace"
TS_SCRIPT = "ts-script"
class TorchJit(Parameter):
NONE = "none"
TRACE = "trace"
SCRIPT = "script"
class Model(NamedTuple):
handle: object
# TODO: precision should be removed
precision: Optional[Precision]
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def load_from_file(file_path, label, target):
spec = importlib.util.spec_from_file_location(name=label, location=file_path)
my_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
return getattr(my_module, target, None)
class BaseLoader(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
"""
Loads and process model from file based on given set of args
"""
pass
class BaseSaver(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
"""
Save model to file
"""
pass
class BaseRunner(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def init_inference(self, model: Model):
raise NotImplementedError
class BaseRunnerSession(abc.ABC):
def __init__(self, model: Model):
self._model = model
@abc.abstractmethod
def __enter__(self):
raise NotImplementedError()
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError()
@abc.abstractmethod
def __call__(self, x: Dict[str, object]):
raise NotImplementedError()
def _set_env_variables(self) -> Dict[str, object]:
"""this method not remove values; fix it if needed"""
to_set = {}
old_values = {k: os.environ.pop(k, None) for k in to_set}
os.environ.update(to_set)
return old_values
def _recover_env_variables(self, old_envs: Dict[str, object]):
for name, value in old_envs.items():
if value is None:
del os.environ[name]
else:
os.environ[name] = str(value)
class BaseConverter(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def convert(self, model: Model, dataloader_fn) -> Model:
raise NotImplementedError()
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
return requested_model_precision
class BaseMetricsCalculator(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
"""
Calculates error/accuracy metrics
Args:
ids: List of ids identifying each sample in the batch
y_pred: model output as dict where key is output name and value is output value
x: model input as dict where key is input name and value is input value
y_real: input ground truth as dict where key is output name and value is output value
Returns:
dictionary where key is metric name and value is its value
"""
pass
@abc.abstractmethod
def update(
self,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
):
pass
@property
@abc.abstractmethod
def metrics(self) -> Dict[str, Any]:
pass
class ShapeSpec(NamedTuple):
min: Tuple
opt: Tuple
max: Tuple
class MeasurementMode(Enum):
"""
Available measurement stabilization modes
"""
COUNT_WINDOWS = "count_windows"
TIME_WINDOWS = "time_windows"
class PerformanceTool(Enum):
"""
Available performance evaluation tools
"""
MODEL_ANALYZER = "model_analyzer"
PERF_ANALYZER = "perf_analyzer"
class EvaluationMode(Enum):
"""
Available evaluation modes
"""
OFFLINE = "offline"
ONLINE = "online"
class OfflineMode(Enum):
"""
Available offline mode for memory
"""
SYSTEM = "system"
CUDA = "cuda"
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/core.py |
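A short sketch of the plugin-loading helper defined above: `load_from_file` imports an arbitrary Python file and fetches a named attribute from it, returning `None` when the attribute is missing. The path below is hypothetical and assumes the repository root as the working directory:

```python
from triton.deployment_toolkit.core import DATALOADER_FN_NAME, load_from_file

get_dataloader_fn = load_from_file(
    "triton/dataloader.py",     # any python file
    label="dataloader",         # module name used for the dynamic import
    target=DATALOADER_FN_NAME,  # attribute to fetch ("get_dataloader_fn")
)
if get_dataloader_fn is None:
    raise RuntimeError("dataloader file does not define get_dataloader_fn")
```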
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import json
import pickle
import threading
from pathlib import Path
from typing import Dict, Iterator, List, Union
import numpy as np
MB2B = 2 ** 20
B2MB = 1 / MB2B
FLUSH_THRESHOLD_B = 256 * MB2B
def _validate_batch(name: str, value: Union[list, np.ndarray]):
if not isinstance(value, (list, np.ndarray)):
raise ValueError(f"Values shall be lists or np.ndarrays; current type {type(value)}")
def _validate_prefix_data(prefix_data: Dict[str, List[np.ndarray]]):
batch_sizes_per_io_name = {name: [len(batch) for batch in batches] for name, batches in prefix_data.items()}
names = list(batch_sizes_per_io_name)
for io_name in names:
for batch_idx, batch_size in enumerate(batch_sizes_per_io_name[io_name]):
if not all([batch_sizes_per_io_name[other_name][batch_idx] == batch_size for other_name in names]):
non_equal_batch_sizes = {
other_name: batch_sizes_per_io_name[other_name][batch_idx] for other_name in names
}
non_equal_batch_sizes_str = ", ".join(
[f"{name}={batch_size}" for name, batch_size in non_equal_batch_sizes.items()]
)
raise ValueError(
"All inputs/outputs should have same number of batches with equal batch_size. "
f"At batch_idx={batch_idx} there are batch_sizes: {non_equal_batch_sizes_str}"
)
# ensures that each io has the same number of batches with equal batch sizes
def _get_nitems_and_batches(prefix_data: Dict[str, List[np.ndarray]]):
nitems = 0
nbatches = 0
if prefix_data:
nitems_per_io_name = {name: sum(len(batch) for batch in batches) for name, batches in prefix_data.items()}
nbatches_per_io_name = {name: len(batches) for name, batches in prefix_data.items()}
nitems = list(nitems_per_io_name.values())[0]
nbatches = list(nbatches_per_io_name.values())[0]
return nitems, nbatches
class BaseDumpWriter(abc.ABC):
FILE_SUFFIX = ".abstract"
def __init__(self, output_dir: Union[str, Path]):
self._output_dir = Path(output_dir)
# outer dict key is prefix (i.e. input/output/labels/...), inner dict key is input/output name
# list is list of batches
self._items_cache: Dict[str, Dict[str, List[np.ndarray]]] = {}
# key is prefix
self._items_counters: Dict[str, int] = {}
self._cache_lock = threading.RLock()
self._flush_threshold_b = FLUSH_THRESHOLD_B
@property
def cache_size(self):
def _get_bytes_size(name, batch):
_validate_batch(name, batch)
if not isinstance(batch, np.ndarray):
batch = np.array(batch)
return batch.nbytes
with self._cache_lock:
return {
prefix: sum(_get_bytes_size(name, batch) for name, batches in data.items() for batch in batches)
for prefix, data in self._items_cache.items()
}
def _append_to_cache(self, prefix, prefix_data):
if prefix_data is None:
return
if not isinstance(prefix_data, dict):
raise ValueError(f"{prefix} data to store shall be dict")
with self._cache_lock:
cached_prefix_data = self._items_cache.setdefault(prefix, {})
for name, batch in prefix_data.items():
_validate_batch(name, batch)
if not isinstance(batch, np.ndarray):
batch = np.array(batch)
cached_batches = cached_prefix_data.setdefault(name, [])
cached_batches += [batch]
def write(self, **kwargs):
with self._cache_lock:
for prefix, prefix_data in kwargs.items():
self._append_to_cache(prefix, prefix_data)
biggest_prefix_data_size = max(self.cache_size.values())
if biggest_prefix_data_size > self._flush_threshold_b:
self.flush()
def flush(self):
with self._cache_lock:
for prefix, prefix_data in self._items_cache.items():
_validate_prefix_data(prefix_data)
output_path = self._output_dir / self._get_filename(prefix)
self._dump(prefix_data, output_path)
nitems, nbatches = _get_nitems_and_batches(prefix_data)
self._items_counters[prefix] += nitems
self._items_cache = {}
def _get_filename(self, prefix):
idx = self._items_counters.setdefault(prefix, 0)
return f"{prefix}-{idx:012d}{self.FILE_SUFFIX}"
@abc.abstractmethod
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
pass
def __enter__(self):
if self._output_dir.exists() and len(list(self._output_dir.iterdir())):
raise ValueError(f"{self._output_dir.as_posix()} is not empty")
self._output_dir.mkdir(parents=True, exist_ok=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush()
class PickleDumpWriter(BaseDumpWriter):
FILE_SUFFIX = ".pkl"
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("wb") as pickle_file:
pickle.dump(prefix_data, pickle_file)
class JsonDumpWriter(BaseDumpWriter):
FILE_SUFFIX = ".json"
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
repacked_prefix_data = self._format_data(prefix_data)
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("w") as json_file:
json.dump(repacked_prefix_data, json_file)
def _format_data(self, prefix_data: Dict[str, List[np.ndarray]]) -> Dict:
def _format_batch_for_perf_analyzer_json_format(batch: np.ndarray):
return {
"content": batch.flatten().tolist(),
"shape": list(batch.shape),
"dtype": str(batch.dtype),
}
_, nbatches = _get_nitems_and_batches(prefix_data)
batches = [{} for _ in range(nbatches)]
for io_name, batches_per_io in prefix_data.items():
for batch_idx, batch in enumerate(batches_per_io):
batches[batch_idx][io_name] = _format_batch_for_perf_analyzer_json_format(batch)
return {"data": batches}
class BaseDumpReader(abc.ABC):
FILE_SUFFIX = ".abstract"
def __init__(self, dump_dir: Union[Path, str]):
self._dump_dir = Path(dump_dir)
def get(self, prefix: str) -> Iterator[Dict[str, np.ndarray]]:
dump_files_paths = sorted(self._dump_dir.glob(f"{prefix}*{self.FILE_SUFFIX}"))
for dump_file_path in dump_files_paths:
prefix_data = self._load_file(dump_file_path)
nitems, nbatches = _get_nitems_and_batches(prefix_data)
for batch_idx in range(nbatches):
yield {io_name: prefix_data[io_name][batch_idx] for io_name in prefix_data}
@abc.abstractmethod
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
pass
def iterate_over(self, prefix_list: List[str]) -> Iterator:
iterators = [self.get(prefix) for prefix in prefix_list]
empty_iterators = [False] * len(iterators)
while not all(empty_iterators):
values = [None] * len(iterators)
for idx, iterator in enumerate(iterators):
if empty_iterators[idx]:
continue
try:
values[idx] = next(iterator)
except StopIteration:
empty_iterators[idx] = True
if all(empty_iterators):
break
if not all(empty_iterators):
yield values
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class PickleDumpReader(BaseDumpReader):
FILE_SUFFIX = ".pkl"
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
with dump_file_path.open("rb") as pickle_file:
return pickle.load(pickle_file)
class JsonDumpReader(BaseDumpReader):
FILE_SUFFIX = ".json"
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
with dump_file_path.open("rb") as json_file:
data = json.load(json_file)
return self._repack_data(data)
def _repack_data(self, data: Dict) -> Dict[str, List[np.ndarray]]:
result: Dict[str, List[np.ndarray]] = {}
batches = data["data"]
for batch in batches:
for io_name, batch_as_dict in batch.items():
io_batches = result.setdefault(io_name, [])
flat_array = batch_as_dict["content"]
shape = batch_as_dict["shape"]
dtype = batch_as_dict["dtype"]
batch_as_array = np.array(flat_array).reshape(shape).astype(dtype)
io_batches.append(batch_as_array)
return result
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/dump.py |
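A round-trip sketch for the writer/reader pair above; the output directory is hypothetical and must be empty or nonexistent, since `BaseDumpWriter.__enter__` refuses to reuse a non-empty directory:

```python
import numpy as np

from triton.deployment_toolkit.dump import JsonDumpReader, JsonDumpWriter

with JsonDumpWriter("/tmp/dump_example") as writer:
    writer.write(
        outputs={"scores": np.random.rand(4, 1).astype(np.float32)},
        ids={"ids": np.arange(4)},
    )
# flush() runs on exit and produces e.g. outputs-000000000000.json

with JsonDumpReader("/tmp/dump_example") as reader:
    for batch in reader.get("outputs"):
        print({name: arr.shape for name, arr in batch.items()})  # {'scores': (4, 1)}
```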
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import os
import re
from pathlib import Path
from typing import List
LOGGER = logging.getLogger(__name__)
class ExtensionManager:
def __init__(self, name: str):
self._name = name
self._registry = {}
def register_extension(self, extension: str, clazz):
already_registered_class = self._registry.get(extension, None)
if already_registered_class and already_registered_class.__module__ != clazz.__module__:
raise RuntimeError(
f"Conflicting extension {self._name}/{extension}; "
f"{already_registered_class.__module__}.{already_registered_class.__name} "
f"and "
f"{clazz.__module__}.{clazz.__name__}"
)
elif already_registered_class is None:
clazz_full_name = f"{clazz.__module__}.{clazz.__name__}" if clazz is not None else "None"
LOGGER.debug(f"Registering extension {self._name}/{extension}: {clazz_full_name}")
self._registry[extension] = clazz
def get(self, extension):
if extension not in self._registry:
raise RuntimeError(f"Missing extension {self._name}/{extension}")
return self._registry[extension]
@property
def supported_extensions(self):
return list(self._registry)
@staticmethod
def scan_for_extensions(extension_dirs: List[Path]):
register_pattern = r".*\.register_extension\(.*"
for extension_dir in extension_dirs:
for python_path in extension_dir.rglob("*.py"):
if not python_path.is_file():
continue
payload = python_path.read_text()
if re.findall(register_pattern, payload):
import_path = python_path.relative_to(toolkit_root_dir.parent)
package = import_path.parent.as_posix().replace(os.sep, ".")
package_with_module = f"{package}.{import_path.stem}"
spec = importlib.util.spec_from_file_location(name=package_with_module, location=python_path)
my_module = importlib.util.module_from_spec(spec)
my_module.__package__ = package
try:
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
except ModuleNotFoundError as e:
LOGGER.error(
f"Could not load extensions from {import_path} due to missing python packages; {e}"
)
runners = ExtensionManager("runners")
loaders = ExtensionManager("loaders")
savers = ExtensionManager("savers")
toolkit_root_dir = (Path(__file__).parent / "..").resolve()
ExtensionManager.scan_for_extensions([toolkit_root_dir])
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/extensions.py |
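A sketch of how a new backend hooks into the registries above: a module only needs a `register_extension(...)` call at import time for `scan_for_extensions` to pick it up. The no-op loader below is hypothetical, and importing `extensions` assumes the toolkit's optional dependencies are installed, since the scan imports the library modules:

```python
from pathlib import Path
from typing import Union

from triton.deployment_toolkit.core import BaseLoader, Model
from triton.deployment_toolkit.extensions import loaders


class NoopLoader(BaseLoader):
    def load(self, model_path: Union[str, Path], **kwargs) -> Model:
        # Returns an empty Model; a real loader would parse the file.
        return Model(handle=None, precision=None, inputs={}, outputs={})


loaders.register_extension("noop", NoopLoader)
print(loaders.supported_extensions)  # [..., 'noop']
```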
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from enum import Enum
from typing import Any, Dict, Tuple
LOGGER = logging.getLogger(__name__)
class TritonClientProtocol(Enum):
"""Describe protocol with which client communicates with Triton"""
GRPC = "grpc"
HTTP = "http"
def parse_server_url(server_url: str) -> Tuple[TritonClientProtocol, str, int]:
DEFAULT_PORTS = {
TritonClientProtocol.HTTP: 8000,
TritonClientProtocol.GRPC: 8001,
}
# extract protocol
server_url_items = server_url.split("://")
if len(server_url_items) != 2:
raise ValueError("Prefix server_url with protocol ex.: grpc://127.0.0.1:8001")
requested_protocol, server_url = server_url_items
requested_protocol = TritonClientProtocol(requested_protocol.lower())
if requested_protocol not in DEFAULT_PORTS:
raise ValueError(f"Unsupported protocol: {requested_protocol}")
# extract host and port
default_port = DEFAULT_PORTS[requested_protocol]
server_url_items = server_url.split(":")
if len(server_url_items) == 1:
host, port = server_url, default_port
elif len(server_url_items) == 2:
host, port = server_url_items
port = int(port)
if port != default_port:
LOGGER.warning(
f"Current server URL is {server_url} while default {requested_protocol} port is {default_port}"
)
else:
raise ValueError(f"Could not parse {server_url}. Example of correct server URL: grpc://127.0.0.1:8001")
return requested_protocol, host, port
def log_dict(title: str, dict_: Dict[str, Any]):
LOGGER.info(title)
for key, value in dict_.items():
LOGGER.info(f"\t{key} = {value}")
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/utils.py |
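For example, `parse_server_url` splits the scheme, host and port, falling back to the protocol's default port (8000 for HTTP, 8001 for gRPC) when none is given:

```python
from triton.deployment_toolkit.utils import parse_server_url

protocol, host, port = parse_server_url("grpc://127.0.0.1")
print(protocol, host, port)  # TritonClientProtocol.GRPC 127.0.0.1 8001

protocol, host, port = parse_server_url("http://localhost:8000")
print(protocol, host, port)  # TritonClientProtocol.HTTP localhost 8000
```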
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import logging
from typing import Callable, Dict, Optional, Union
from model_navigator.utils.cli import is_dict_generic, is_list_generic, is_optional_generic
from .core import GET_ARGPARSER_FN_NAME, load_from_file
LOGGER = logging.getLogger(__name__)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def filter_fn_args(args: Union[dict, argparse.Namespace], fn: Callable) -> dict:
signature = inspect.signature(fn)
parameters_names = list(signature.parameters)
if isinstance(args, argparse.Namespace):
args = vars(args)
args = {k: v for k, v in args.items() if k in parameters_names}
return args
def add_args_for_fn_signature(parser, fn) -> argparse.ArgumentParser:
parser.conflict_handler = "resolve"
signature = inspect.signature(fn)
for parameter in signature.parameters.values():
if parameter.name in ["self", "args", "kwargs"]:
continue
argument_kwargs = {}
if parameter.annotation != inspect.Parameter.empty:
is_optional = is_optional_generic(parameter.annotation)
if is_optional:
annotation = parameter.annotation.__args__[0] # Optional[cls] will be changed into Union[cls, None]
else:
annotation = parameter.annotation
is_list = is_list_generic(annotation)
is_dict = is_dict_generic(annotation)
if parameter.annotation == bool:
argument_kwargs["type"] = str2bool
argument_kwargs["choices"] = [0, 1]
elif is_list:
argument_kwargs["type"] = annotation.__args__[0] # List[cls] -> cls
elif is_dict:
raise RuntimeError(
f"Could not prepare argument parser for {parameter.name}: {parameter.annotation} in {fn}"
)
else:
argument_kwargs["type"] = annotation
if parameter.default != inspect.Parameter.empty:
if parameter.annotation == bool:
argument_kwargs["default"] = str2bool(parameter.default)
else:
argument_kwargs["default"] = parameter.default
else:
argument_kwargs["required"] = True
name = parameter.name.replace("_", "-")
LOGGER.debug(f"Adding argument {name} with {argument_kwargs}")
parser.add_argument(f"--{name}", **argument_kwargs)
return parser
class ArgParserGenerator:
def __init__(self, cls_or_fn, module_path: Optional[str] = None):
self._cls_or_fn = cls_or_fn
init_method_name = "__init__"
self._handle = cls_or_fn if inspect.isfunction(cls_or_fn) else getattr(cls_or_fn, init_method_name, None)
input_is_python_file = module_path and module_path.endswith(".py")
self._input_path = module_path if input_is_python_file else None
self._required_fn_name_for_signature_parsing = getattr(
cls_or_fn, "required_fn_name_for_signature_parsing", None
)
def update_argparser(self, parser):
name = self._handle.__name__
group_parser = parser.add_argument_group(name)
add_args_for_fn_signature(group_parser, fn=self._handle)
self._update_argparser(group_parser)
def get_args(self, args: argparse.Namespace):
filtered_args = filter_fn_args(args, fn=self._handle)
tmp_parser = argparse.ArgumentParser(allow_abbrev=False)
self._update_argparser(tmp_parser)
custom_names = [
p.dest.replace("-", "_") for p in tmp_parser._actions if not isinstance(p, argparse._HelpAction)
]
custom_params = {n: getattr(args, n) for n in custom_names}
filtered_args = {**filtered_args, **custom_params}
return filtered_args
def from_args(self, args: Union[argparse.Namespace, Dict]):
args = self.get_args(args)
LOGGER.info(f"Initializing {self._cls_or_fn.__name__}({args})")
return self._cls_or_fn(**args)
def _update_argparser(self, parser):
label = "argparser_update"
if self._input_path:
update_argparser_handle = load_from_file(self._input_path, label=label, target=GET_ARGPARSER_FN_NAME)
if update_argparser_handle:
update_argparser_handle(parser)
elif self._required_fn_name_for_signature_parsing:
fn_handle = load_from_file(
self._input_path, label=label, target=self._required_fn_name_for_signature_parsing
)
if fn_handle:
add_args_for_fn_signature(parser, fn_handle)
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/args.py |
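A minimal sketch of `ArgParserGenerator` in action, assuming the toolkit's `model_navigator` dependency is installed: it derives CLI flags from a function's keyword-only signature and later calls the function with the parsed values. The `make_widget` function below is hypothetical:

```python
import argparse

from triton.deployment_toolkit.args import ArgParserGenerator


def make_widget(*, name: str, size: int = 8, verbose: bool = False):
    return {"name": name, "size": size, "verbose": verbose}


parser = argparse.ArgumentParser(allow_abbrev=False)
ArgParserGenerator(make_widget).update_argparser(parser)

args = parser.parse_args(["--name", "demo", "--size", "4"])
widget = ArgParserGenerator(make_widget).from_args(args)
print(widget)  # {'name': 'demo', 'size': 4, 'verbose': False}
```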
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import re
from typing import Dict, List
from natsort import natsorted
from tabulate import tabulate
def sort_results(results: List):
results = natsorted(results, key=lambda item: list(item.values()))
return results
def save_results(filename: str, data: List, formatted: bool = False):
data = format_data(data=data) if formatted else data
with open(filename, "a") as csvfile:
fieldnames = data[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
def format_data(data: List[Dict]) -> List[Dict]:
formatted_data = list()
for item in data:
formatted_item = format_keys(data=item)
formatted_data.append(formatted_item)
return formatted_data
def format_keys(data: Dict) -> Dict:
keys = {format_key(key=key): value for key, value in data.items()}
return keys
def format_key(key: str) -> str:
key = " ".join([k.capitalize() for k in re.split("_| ", key)])
return key
def show_results(results: List[Dict]):
headers = list(results[0].keys())
summary = [list(item.values()) for item in results]
print(tabulate(summary, headers=headers))
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/report.py |
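For example, `format_key` title-cases snake_case metric names before rows are written or printed; this sketch assumes the `tabulate` and `natsort` dependencies are installed:

```python
from triton.deployment_toolkit.report import format_key, show_results

print(format_key("batch_size"))  # Batch Size

show_results([
    {"batch_size": 1, "throughput": 120.0},
    {"batch_size": 8, "throughput": 640.0},
])
```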
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import queue
import threading
from pathlib import Path
from typing import Optional
# pytype: disable=import-error
try:
from tritonclient import utils as client_utils # noqa: F401
except ImportError:
import tritonclientutils as client_utils # noqa: F401
try:
import tritonclient.grpc as grpc_client
except ImportError:
import tritongrpcclient as grpc_client
# pytype: enable=import-error
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .base import BaseRunner
LOGGER = logging.getLogger("triton_inference_runner.grpc")
class SyncInferenceRunner(BaseRunner):
def __iter__(self):
LOGGER.debug(f"Connecting to {self._server_url}")
client = grpc_client.InferenceServerClient(url=self._server_url, verbose=self._verbose)
error = self._verify_triton_state(client)
if error:
raise RuntimeError(f"Could not communicate to Triton Server: {error}")
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {model_config}")
LOGGER.info(f"Model metadata {model_metadata}")
inputs = {tm.name: tm for tm in model_metadata.inputs}
outputs = {tm.name: tm for tm in model_metadata.outputs}
output_names = list(outputs)
outputs_req = [grpc_client.InferRequestedOutput(name) for name in outputs]
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
datatype = inputs[name].datatype
infer_input = grpc_client.InferInput(name, data.shape, datatype)
target_np_dtype = client_utils.triton_to_np_dtype(datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
results = client.infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
timeout=self._response_wait_t,
)
y_pred = {name: results.as_numpy(name) for name in output_names}
yield ids, x, y_pred, y_real
class AsyncInferenceRunner(BaseRunner):
DEFAULT_MAX_UNRESP_REQS = 128
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
dataloader,
verbose=False,
response_wait_time: Optional[float] = None,
max_unresponded_requests: Optional[int] = None,
):
super().__init__(
server_url,
model_name,
model_version,
dataloader=dataloader,
verbose=verbose,
response_wait_time=response_wait_time,
)
self._max_unresp_reqs = (
self.DEFAULT_MAX_UNRESP_REQS if max_unresponded_requests is None else max_unresponded_requests
)
self._results = queue.Queue()
self._processed_all = False
self._errors = []
self._num_waiting_for = 0
self._sync = threading.Condition()
self._req_thread = threading.Thread(target=self.req_loop, daemon=True)
def __iter__(self):
self._req_thread.start()
timeout_s = 0.050  # check the processed_all and error flags every 50 ms
while True:
try:
ids, x, y_pred, y_real = self._results.get(timeout=timeout_s)
yield ids, x, y_pred, y_real
except queue.Empty:
shall_stop = self._processed_all or self._errors
if shall_stop:
break
LOGGER.debug("Waiting for request thread to stop")
self._req_thread.join()
if self._errors:
error_msg = "\n".join(map(str, self._errors))
raise RuntimeError(error_msg)
def _on_result(self, ids, x, y_real, output_names, result, error):
with self._sync:
request_id = str(ids[0])
NOT_MATCHING_REQUEST_ID_MSG = (
"Error during processing result - request_id doesn't match. This shouldn't have happened."
)
if error:
response_id = error.get_response().id
if response_id != request_id:
raise RuntimeError(NOT_MATCHING_REQUEST_ID_MSG)
self._errors.append(error)
else:
response_id = result.get_response().id
if response_id != request_id:
raise RuntimeError(NOT_MATCHING_REQUEST_ID_MSG)
y_pred = {name: result.as_numpy(name) for name in output_names}
self._results.put((ids, x, y_pred, y_real))
self._num_waiting_for -= 1
self._sync.notify_all()
def req_loop(self):
LOGGER.debug(f"Connecting to {self._server_url}")
client = grpc_client.InferenceServerClient(url=self._server_url, verbose=self._verbose)
self._errors = self._verify_triton_state(client)
if self._errors:
return
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {model_config}")
LOGGER.info(f"Model metadata {model_metadata}")
inputs = {tm.name: tm for tm in model_metadata.inputs}
outputs = {tm.name: tm for tm in model_metadata.outputs}
output_names = list(outputs)
self._num_waiting_for = 0
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
datatype = inputs[name].datatype
infer_input = grpc_client.InferInput(name, data.shape, datatype)
target_np_dtype = client_utils.triton_to_np_dtype(datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
outputs_req = [grpc_client.InferRequestedOutput(name) for name in outputs]
with self._sync:
def _check_can_send():
return self._num_waiting_for < self._max_unresp_reqs
can_send = self._sync.wait_for(_check_can_send, timeout=self._response_wait_t)
if not can_send:
error_msg = f"Runner could not send new requests for {self._response_wait_t}s"
self._errors.append(error_msg)
self._sync.notify_all()
break
request_id = str(ids[0])
callback = functools.partial(AsyncInferenceRunner._on_result, self, ids, x, y_real, output_names)
client.async_infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
callback=callback,
request_id=request_id,
)
self._num_waiting_for += 1
self._sync.notify_all()
# wait till receive all requested data
with self._sync:
def _all_processed():
LOGGER.debug(f"wait for {self._num_waiting_for} unprocessed jobs")
return self._num_waiting_for == 0
self._processed_all = self._sync.wait_for(_all_processed, self._max_wait_time)
if not self._processed_all:
error_msg = f"Runner {self._response_wait_t}s timeout received while waiting for results from server"
self._errors.append(error_msg)
self._sync.notify_all()
LOGGER.debug("Finished request thread")
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_inference_runner/grpc.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from ..utils import TritonClientProtocol, parse_server_url
from .grpc import AsyncInferenceRunner as AsyncGRPCRunner
from .grpc import SyncInferenceRunner as SyncGRPCRunner
from .http import AsyncInferenceRunner as AsyncHTTPRunner
from .http import SyncInferenceRunner as SyncHTTPRunner
class TritonInferenceRunner:
async_runners = {
TritonClientProtocol.GRPC: AsyncGRPCRunner,
TritonClientProtocol.HTTP: AsyncHTTPRunner,
}
sync_runners = {
TritonClientProtocol.GRPC: SyncGRPCRunner,
TritonClientProtocol.HTTP: SyncHTTPRunner,
}
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
dataloader_fn,
verbose: bool = False,
response_wait_time: Optional[float] = None,
max_unresponded_requests: int = 128,
synchronous: bool = False,
):
protocol, host, port = parse_server_url(server_url)
server_url = f"{host}:{port}"
if synchronous:
sync_runner_cls = TritonInferenceRunner.sync_runners[protocol]
self._runner = sync_runner_cls(
server_url,
model_name,
model_version,
dataloader=dataloader_fn(),
verbose=verbose,
response_wait_time=response_wait_time,
)
else:
async_runner_cls = TritonInferenceRunner.async_runners[protocol]
self._runner = async_runner_cls(
server_url,
model_name,
model_version,
dataloader=dataloader_fn(),
verbose=verbose,
response_wait_time=response_wait_time,
max_unresponded_requests=max_unresponded_requests,
)
def __iter__(self):
return self._runner.__iter__()
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_inference_runner/runner.py |
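A sketch wiring a dataloader into the facade above. The server URL and model name are hypothetical, `tritonclient` must be installed, a Triton server must already be serving the model, and the single batch below is synthetic:

```python
import numpy as np

from triton.deployment_toolkit.triton_inference_runner import TritonInferenceRunner


def dataloader_fn():
    def _batches():
        ids = np.arange(4)
        x = {"input_1": np.random.rand(4, 8).astype(np.float32)}
        y_real = {"output_1": np.zeros((4, 1), dtype=np.float32)}
        yield ids, x, y_real
    return _batches()


runner = TritonInferenceRunner(
    server_url="grpc://127.0.0.1:8001",
    model_name="widedeep",
    model_version="1",
    dataloader_fn=dataloader_fn,
    synchronous=True,
)
for ids, x, y_pred, y_real in runner:
    print(ids, {name: arr.shape for name, arr in y_pred.items()})
```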
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .runner import TritonInferenceRunner # noqa: F401
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_inference_runner/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from pathlib import Path
from typing import Optional
# pytype: disable=import-error
try:
from tritonclient import utils as client_utils # noqa: F401
except ImportError:
import tritonclientutils as client_utils # noqa: F401
try:
import tritonclient.http as http_client
except (ImportError, RuntimeError):
import tritonhttpclient as http_client
# pytype: enable=import-error
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .base import BaseRunner
LOGGER = logging.getLogger("triton_inference_runner.http")
class HTTPInferenceRunner(BaseRunner):
def _parse_content(self, response):
return json.dumps(response, indent=4)
class SyncInferenceRunner(HTTPInferenceRunner):
def __iter__(self):
LOGGER.debug(f"Connecting to {self._server_url}")
client = http_client.InferenceServerClient(
url=self._server_url,
verbose=self._verbose,
connection_timeout=self._response_wait_t,
network_timeout=self._response_wait_t,
)
error = self._verify_triton_state(client)
if error:
raise RuntimeError(f"Could not communicate to Triton Server: {error}")
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {self._parse_content(model_config)}")
LOGGER.info(f"Model metadata {self._parse_content(model_metadata)}")
inputs = {tm["name"]: tm for tm in model_metadata["inputs"]}
outputs = {tm["name"]: tm for tm in model_metadata["outputs"]}
output_names = list(outputs)
outputs_req = [http_client.InferRequestedOutput(name) for name in outputs]
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
datatype = inputs[name]["datatype"]
infer_input = http_client.InferInput(name, data.shape, datatype)
target_np_dtype = client_utils.triton_to_np_dtype(datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
results = client.infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
timeout=self._response_wait_t_ms,
)
y_pred = {name: results.as_numpy(name) for name in output_names}
yield ids, x, y_pred, y_real
class AsyncInferenceRunner(HTTPInferenceRunner):
DEFAULT_MAX_UNRESP_REQS = 128
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
dataloader,
verbose=False,
response_wait_time: Optional[float] = None,
max_unresponded_requests: Optional[int] = None,
):
super().__init__(
server_url,
model_name,
model_version,
dataloader=dataloader,
verbose=verbose,
response_wait_time=response_wait_time,
)
self._max_unresp_reqs = (
self.DEFAULT_MAX_UNRESP_REQS if max_unresponded_requests is None else max_unresponded_requests
)
def __iter__(self):
client = http_client.InferenceServerClient(
url=self._server_url,
verbose=self._verbose,
concurrency=self._max_unresp_reqs,
connection_timeout=self._response_wait_t,
network_timeout=self._response_wait_t,
)
self._errors = self._verify_triton_state(client)
if self._errors:
return
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {self._parse_content(model_config)}")
LOGGER.info(f"Model metadata {self._parse_content(model_metadata)}")
inputs = {tm["name"]: tm for tm in model_metadata["inputs"]}
outputs = {tm["name"]: tm for tm in model_metadata["outputs"]}
output_names = list(outputs)
async_requests = []
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
datatype = inputs[name]["datatype"]
infer_input = http_client.InferInput(name, data.shape, datatype)
target_np_dtype = client_utils.triton_to_np_dtype(datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
outputs_req = [http_client.InferRequestedOutput(name) for name in outputs]
request_id = str(ids[0])
async_request = client.async_infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
request_id=request_id,
timeout=self._response_wait_t_ms,
)
async_requests.append((ids, x, y_real, async_request))
if len(async_requests) > self._max_unresp_reqs:
yield from self._yield_response(async_requests, output_names)
async_requests = []
yield from self._yield_response(async_requests, output_names)
LOGGER.debug("Finished request thread")
def _yield_response(self, async_requests, output_names):
for ids, x, y_real, async_response in async_requests:
result = async_response.get_result()
y_pred = {name: result.as_numpy(name) for name in output_names}
yield ids, x, y_pred, y_real
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_inference_runner/http.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional
LOGGER = logging.getLogger("triton_inference_runner.base")
class BaseRunner:
DEFAULT_MAX_RESP_WAIT_S = 120
DEFAULT_MAX_FINISH_WAIT_S = 900 # 15min
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
dataloader,
verbose=False,
response_wait_time: Optional[float] = None,
):
self._model_name = model_name
self._model_version = model_version
self._dataloader = dataloader
self._verbose = verbose
self._response_wait_t = int(self.DEFAULT_MAX_RESP_WAIT_S if response_wait_time is None else response_wait_time)
# the factor of 1000 * 1000 converts seconds to microseconds (note: the _ms suffix is misleading)
self._response_wait_t_ms = self._response_wait_t * 1000 * 1000
self._max_wait_time = max(self._response_wait_t, self.DEFAULT_MAX_FINISH_WAIT_S)
self._server_url = server_url
def _verify_triton_state(self, triton_client):
errors = []
if not triton_client.is_server_live():
errors.append(f"Triton server {self._server_url} is not live")
elif not triton_client.is_server_ready():
errors.append(f"Triton server {self._server_url} is not ready")
elif not triton_client.is_model_ready(self._model_name, self._model_version):
errors.append(f"Model {self._model_name}:{self._model_version} is not ready")
return errors
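
# A minimal sketch of how subclasses are expected to use _verify_triton_state
# before sending requests (tritonclient.http.InferenceServerClient is one valid
# client type; any object exposing the three is_* probes works):
#
#   client = tritonclient.http.InferenceServerClient(url=self._server_url)
#   errors = self._verify_triton_state(client)
#   if errors:
#       raise RuntimeError(f"Triton server is not ready: {errors}")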
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_inference_runner/base.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
# pytype: disable=import-error
import tensorflow as tf
from tensorflow.python.eager import wrap_function
from tf2onnx.shape_inference import infer_shape
from tf2onnx.tf_loader import freeze_session, inputs_without_resource, is_function, remove_redundant_inputs, tf_optimize
from ..args import filter_fn_args
from ..core import (
GET_MODEL_FN_NAME,
GET_SERVING_INPUT_RECEIVER_FN,
BaseLoader,
BaseRunner,
BaseRunnerSession,
BaseSaver,
ExportFormat,
Format,
Model,
ModelInputType,
TensorSpec,
load_from_file,
)
from ..extensions import loaders, runners, savers
# pytype: enable=import-error
LOGGER = logging.getLogger(__name__)
def is_tf2():
return tf.__version__.startswith("2.")
def create_session_config(*, allow_growth=False, use_xla=False, gpu_memory_fraction=1.0):
gpu_options = tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction=gpu_memory_fraction, allow_growth=allow_growth
)
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
if use_xla:
        config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
LOGGER.debug(
f"Using gpu memory fraction: allow_growth={allow_growth} "
f"gpu_memory_fraction={gpu_memory_fraction} "
f"use_xla={use_xla}"
)
return config
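
# Example (a sketch): cap the process at half of the GPU memory and enable XLA
# JIT for sessions created with this config.
#
#   session_config = create_session_config(use_xla=True, gpu_memory_fraction=0.5)
#   with tf.compat.v1.Session(config=session_config) as sess:
#       ...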
def _from_saved_model_v1(sess, model_path, tag, signatures):
"""
Load tensorflow graph from saved_model.
NOTICE: Modified version from tf2onnx project
"""
wrn_no_tag = "'--tag' not specified for saved_model. Using --tag serve"
wrn_empty_tag = "'--tag' value is empty string. Using tag =[[]]"
if tag is None:
tag = [tf.saved_model.SERVING]
LOGGER.warning(wrn_no_tag)
if tag == "":
tag = [[]]
LOGGER.warning(wrn_empty_tag)
if not isinstance(tag, list):
tag = [tag]
imported = tf.compat.v1.saved_model.loader.load(sess, tag, model_path)
for k in imported.signature_def.keys():
if k.startswith("_"):
# consider signatures starting with '_' private
continue
signatures.append(k)
try:
from tensorflow.contrib.saved_model.python.saved_model import ( # pytype: disable=import-error
signature_def_utils,
)
def get_signature_def(meta_graph_def, k):
return signature_def_utils.get_signature_def_by_key(meta_graph_def, k)
except ImportError:
# TF1.12 changed the api
def get_signature_def(meta_graph_def, k):
return meta_graph_def.signature_def[k]
inputs = {}
outputs = {}
for k in signatures:
inputs_tensor_info = get_signature_def(imported, k).inputs
for name, input_tensor in inputs_tensor_info.items():
inputs[name] = input_tensor.name
outputs_tensor_info = get_signature_def(imported, k).outputs
for name, output_tensor in outputs_tensor_info.items():
outputs[name] = output_tensor.name
frozen_graph = freeze_session(sess, input_names=list(inputs.values()), output_names=list(outputs.values()))
return frozen_graph, inputs, outputs
class TFEstimatorLoader(BaseLoader):
required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME
def __init__(self, **kwargs):
self._model_args = kwargs
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME)
get_serving_input_receiver_fn = load_from_file(model_path, "model", GET_SERVING_INPUT_RECEIVER_FN)
if get_model is None:
raise RuntimeError(f"Could not find {GET_MODEL_FN_NAME} in {model_path}")
if get_serving_input_receiver_fn is None:
raise RuntimeError(f"Could not find {GET_SERVING_INPUT_RECEIVER_FN} in {model_path}")
model_args = filter_fn_args(self._model_args, fn=get_model)
serving_input_receiver_args = filter_fn_args(self._model_args, fn=get_serving_input_receiver_fn)
session_config = create_session_config(allow_growth=True)
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session(config=session_config) as sess:
estimator = get_model(**model_args)
serving_input_receiver_fn = get_serving_input_receiver_fn(**serving_input_receiver_args)
input_receiver = serving_input_receiver_fn()
estimator_spec = estimator.model_fn(
features=input_receiver.features,
labels=None,
mode=tf.estimator.ModeKeys.PREDICT,
config=estimator.config,
)
input_tensors_dict = input_receiver.receiver_tensors
output_tensors_dict = estimator_spec.predictions
inputs_dict = {k: tensor2tensor_spec(tensor) for k, tensor in input_tensors_dict.items()}
outputs_dict = {k: tensor2tensor_spec(tensor) for k, tensor in output_tensors_dict.items()}
input_tensor_names = [t.name for t in inputs_dict.values()]
output_tensor_names = [t.name for t in outputs_dict.values()]
graph_saver = estimator_spec.scaffold.saver or tf.compat.v1.train.Saver(sharded=True)
graph_saver.restore(sess, estimator.latest_checkpoint())
input_tensor_names = inputs_without_resource(sess, input_tensor_names)
frozen_graph = freeze_session(sess, input_names=input_tensor_names, output_names=output_tensor_names)
input_tensor_names = remove_redundant_inputs(frozen_graph, input_tensor_names)
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session(config=estimator.config.session_config):
frozen_graph = tf_optimize(input_tensor_names, output_tensor_names, frozen_graph)
tf.compat.v1.reset_default_graph()
return Model(frozen_graph, None, inputs_dict, outputs_dict)
class TFKerasLoader(BaseLoader):
"""
    Loads a Keras model from source code.

    The tf-allow-growth flag controls the GPU memory growth limiting feature
(https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth). By default it is disabled.
"""
required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME
def __init__(self, tf_allow_growth: bool = False, **kwargs):
self._allow_growth = tf_allow_growth
self._model_args = kwargs
def load(self, model_path: Union[str, Path], **_) -> Model:
# TODO fix: RuntimeError: Physical devices cannot be modified after being initialized
# if self._allow_growth:
# physical_devices = tf.config.experimental.list_physical_devices("GPU")
# for device in physical_devices:
# tf.config.experimental.set_memory_growth(device, True)
tf.keras.backend.clear_session()
tf.keras.backend.set_learning_phase(False)
if isinstance(model_path, Path):
model_path = model_path.as_posix()
get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME)
if get_model is None:
raise RuntimeError(f"Could not find {GET_MODEL_FN_NAME} in {model_path}")
model_args = filter_fn_args(self._model_args, fn=get_model)
model, call_fn = get_model(**model_args)
inputs_dict: Dict[str, TensorSpec] = {
input_name: TensorSpec(t.name, t.dtype.name, tuple(t.shape.as_list()))
for input_name, t in zip(model.input_names, model.inputs)
}
concrete_func = call_fn.get_concrete_function(
*(tf.TensorSpec(shape=spec.shape, dtype=spec.dtype, name=name) for name, spec in inputs_dict.items())
)
output_tensors_names = [tensor.name for tensor in concrete_func.outputs]
outputs_dict: Dict[str, TensorSpec] = {
output_name: TensorSpec(output_tensor_name, t.dtype.name, tuple(t.shape.as_list()))
for output_name, output_tensor_name, t in zip(model.output_names, output_tensors_names, model.outputs)
}
tf.keras.backend.clear_session()
tf.keras.backend.set_learning_phase(False)
def _add_suffix_as_quickfix_for_tf24_func_refactor(spec):
if not spec.name.endswith(":0"):
spec = spec._replace(name=spec.name + ":0")
return spec
inputs_dict = {name: _add_suffix_as_quickfix_for_tf24_func_refactor(spec) for name, spec in inputs_dict.items()}
return Model(model, None, inputs_dict, outputs_dict)
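
# Usage sketch (the path and any extra kwargs are hypothetical; the pointed-to
# file must expose the GET_MODEL_FN_NAME function returning (model, call_fn),
# as loaded above):
#
#   loader = TFKerasLoader(tf_allow_growth=True)
#   model = loader.load("triton/model.py")
#   LOGGER.info(f"inputs={model.inputs} outputs={model.outputs}")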
class TFSavedModelLoader(BaseLoader):
def __init__(self, tf_allow_growth: bool = False):
self._allow_growth = tf_allow_growth
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
tf.compat.v1.reset_default_graph()
if self._allow_growth:
physical_devices = tf.config.experimental.list_physical_devices("GPU")
for device in physical_devices:
tf.config.experimental.set_memory_growth(device, True)
if is_tf2():
from tf2onnx.tf_loader import _from_saved_model_v2 # pytype: disable=import-error
(
graph_def,
input_names,
output_names,
concrete_func,
imported,
initialized_tables,
tensors_to_rename,
) = _from_saved_model_v2(
model_path=model_path,
input_names=None,
output_names=None,
tag=None,
signature_def=[],
concrete_function_index=None,
large_model=False,
use_graph_names=False,
)
# inspired by
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/saved_model_cli.py#L205
if concrete_func.structured_input_signature:
input_args, input_kwargs = concrete_func.structured_input_signature
input_names = list(input_kwargs)
assert (
not input_args
), f"Not supported args in concrete function signature args={input_args}, kwargs={input_kwargs}"
elif concrete_func._arg_keywords: # pylint: disable=protected-access
# For pure ConcreteFunctions we might have nothing better than _arg_keywords.
assert concrete_func._num_positional_args in [0, 1]
input_names = concrete_func._arg_keywords
input_tensors = [tensor for tensor in concrete_func.inputs if tensor.dtype != tf.dtypes.resource]
inputs = {name: tensor.name for name, tensor in zip(input_names, input_tensors)}
# they are already flattened
output_tensors = [tensor for tensor in concrete_func.outputs if tensor.dtype != tf.dtypes.resource]
            output_names = sorted(concrete_func.structured_outputs)  # outputs arrive in flattened form
outputs = {name: tensor.name for name, tensor in zip(output_names, output_tensors)}
else:
session_config = create_session_config(allow_growth=True)
with tf.compat.v1.Session(config=session_config) as sess:
graph_def, inputs, outputs = _from_saved_model_v1(sess, model_path, tag=None, signatures=[])
inputs, outputs = handle_tensor_specs(graph_def, inputs, outputs)
return Model(graph_def, None, inputs, outputs)
class TFRunner(BaseRunner):
def __init__(self):
pass
def init_inference(self, model: Model):
if is_tf2():
return TF2RunnerSession(model=model)
else:
return TF1RunnerSession(model=model)
class TF1RunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, tf.compat.v1.GraphDef)
self._inputs = None
self._outputs = None
self._session = None
self._old_env_values = {}
def __enter__(self):
self._old_env_values = self._set_env_variables()
tf.compat.v1.reset_default_graph()
session_config = create_session_config(allow_growth=True)
self._session = tf.compat.v1.Session(config=session_config)
self._session.__enter__()
tf.import_graph_def(self._model.handle, name="")
self._inputs = {
name: self._session.graph.get_tensor_by_name(spec.name) for name, spec in self._model.inputs.items()
}
self._outputs = {
name: self._session.graph.get_tensor_by_name(spec.name) for name, spec in self._model.outputs.items()
}
return self
def __exit__(self, exc_type, exc_value, traceback):
self._session.__exit__(exc_type, exc_value, traceback)
tf.compat.v1.reset_default_graph()
self._inputs = None
self._outputs = None
self._session = None
self._recover_env_variables(self._old_env_values)
def __call__(self, x: Dict[str, object]):
feed_dict = {placeholder: x[name] for name, placeholder in self._inputs.items()}
return self._session.run(self._outputs, feed_dict=feed_dict)
class TF2RunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, tf.compat.v1.GraphDef)
self._concrete_func = None
def __enter__(self):
tf.compat.v1.reset_default_graph()
input_tensor_names = [spec.name for spec in self._model.inputs.values()]
output_tensor_names = [spec.name for spec in self._model.outputs.values()]
self._concrete_func = wrap_function.function_from_graph_def(
self._model.handle, input_tensor_names, output_tensor_names
)
self._concrete_func._signature = [
tf.TensorSpec(shape=spec.shape, dtype=spec.dtype, name=name) for name, spec in self._model.inputs.items()
]
return self
def __exit__(self, exc_type, exc_value, traceback):
self._concrete_func = None
tf.compat.v1.reset_default_graph()
def __call__(self, x: Dict[str, object]):
x = tf.nest.map_structure(tf.convert_to_tensor, x)
y_pred = self._concrete_func(**x)
output_struct = {name: spec.name for name, spec in self._model.outputs.items()}
y_pred = tf.nest.map_structure(lambda t: t.numpy(), y_pred)
y_pred = tf.nest.pack_sequence_as(output_struct, y_pred)
return y_pred
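
# Inference sketch shared by both session types (assumes `model` was produced
# by one of the loaders above and `x` is a dict of numpy arrays keyed by input
# name):
#
#   runner = TFRunner()
#   with runner.init_inference(model=model) as session:
#       y_pred = session(x)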
class TFSavedModelSaver(BaseSaver):
def save(self, model, model_path: Union[str, Path], dataloader_fn) -> None:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
if is_tf2():
tf.keras.models.save_model(model=model.handle, filepath=model_path, overwrite=True)
else:
session_config = create_session_config(allow_growth=True)
with tf.compat.v1.Session(config=session_config) as sess:
tf.import_graph_def(model.handle, name="")
is_func = is_function(sess.graph)
if not is_func:
infer_shape(sess.graph, {})
inputs = {name: sess.graph.get_tensor_by_name(spec.name) for name, spec in model.inputs.items()}
outputs = {name: sess.graph.get_tensor_by_name(spec.name) for name, spec in model.outputs.items()}
def _ensure_shape(tensors_dict, tensors_specs):
for name, tensor in tensors_dict.items():
if tensor.shape.rank is None:
tensor.set_shape(tensors_specs[name].shape)
return tensors_dict
inputs = _ensure_shape(inputs, model.inputs)
outputs = _ensure_shape(outputs, model.outputs)
LOGGER.info(inputs)
LOGGER.info(outputs)
tf.compat.v1.saved_model.simple_save(sess, model_path, inputs, outputs, legacy_init_op=None)
def handle_tensor_specs(
graph_def, inputs: Dict[str, str], outputs: Dict[str, str]
) -> Tuple[Dict[str, TensorSpec], Dict[str, TensorSpec]]:
session_config = tf.compat.v1.ConfigProto(graph_options=tf.compat.v1.GraphOptions(infer_shapes=True))
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session(config=session_config) as sess:
tf.import_graph_def(graph_def, name="")
def _get_spec(tensors_dict):
tensors_dict = {name: sess.graph.get_tensor_by_name(tname) for name, tname in tensors_dict.items()}
return {name: tensor2tensor_spec(tensor) for name, tensor in tensors_dict.items()}
inputs = _get_spec(inputs)
outputs = _get_spec(outputs)
tf.compat.v1.reset_default_graph()
return inputs, outputs
def tensor2tensor_spec(tensor):
shape = tuple(s.value if hasattr(s, "value") else s for s in tensor.shape)
return TensorSpec(tensor.name, tensor.dtype.name, shape)
loaders.register_extension(ModelInputType.TF_ESTIMATOR.value, TFEstimatorLoader)
loaders.register_extension(ModelInputType.TF_KERAS.value, TFKerasLoader)
loaders.register_extension(Format.TF_SAVEDMODEL.value, TFSavedModelLoader)
loaders.register_extension(Format.TF_TRT.value, TFSavedModelLoader)
savers.register_extension(Format.TF_SAVEDMODEL.value, TFSavedModelSaver)
savers.register_extension(Format.TF_TRT.value, TFSavedModelSaver)
runners.register_extension(ModelInputType.TF_ESTIMATOR.value, TFRunner)
runners.register_extension(ModelInputType.TF_KERAS.value, TFRunner)
runners.register_extension(Format.TF_SAVEDMODEL.value, TFRunner)
runners.register_extension(Format.TF_TRT.value, TFRunner)
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/library/tf.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
# pytype: disable=import-error
import onnx
import onnx.shape_inference
import onnxruntime
from google.protobuf import text_format
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
from .utils import infer_precision
# pytype: enable=import-error
LOGGER = logging.getLogger(__name__)
def _value_info2tensor_spec(value_info: onnx.ValueInfoProto):
onnx_data_type_map = {"float": "float32", "double": "float64"}
elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower()
dtype = onnx_data_type_map.get(elem_type_name, elem_type_name)
def _get_dim(dim):
which = dim.WhichOneof("value")
if which is not None: # which is None when dim is None
dim = getattr(dim, which)
return None if isinstance(dim, (str, bytes)) else dim
shape = value_info.type.tensor_type.shape
shape = tuple(_get_dim(d) for d in shape.dim)
return TensorSpec(value_info.name, dtype=dtype, shape=shape)
def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]:
import networkx as nx
# build directed graph
nx_graph = nx.DiGraph()
def _get_dtype(vi):
t = vi.type
if hasattr(t, "tensor_type"):
type_id = t.tensor_type.elem_type
else:
raise NotImplementedError("Not implemented yet")
return TENSOR_TYPE_TO_NP_TYPE[type_id]
node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info}
node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output}
node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input}
for node in onnx_graph.node:
node_dtype = node_output2type.get("+".join(node.output), None)
nx_graph.add_node(
node.name,
op=node.op_type,
attr={a.name: a for a in node.attribute},
dtype=node_dtype,
)
for input_name in node.input:
prev_node = node_outputs2node.get(input_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, node.name)
for input_node in onnx_graph.input:
input_name = input_node.name
nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node))
next_node = node_inputs2node.get(input_name, None)
if next_node:
nx_graph.add_edge(input_name, next_node.name)
for output in onnx_graph.output:
output_name = output.name
nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output))
prev_node = node_outputs2node.get(output_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, output_name)
else:
LOGGER.warning(f"Could not find previous node for {output_name}")
input_names = [n.name for n in onnx_graph.input]
output_names = [n.name for n in onnx_graph.output]
most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None))
if most_common_dtype is not None:
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype]
else:
precision = None
return precision
class OnnxLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
model = onnx.load(model_path)
onnx.checker.check_model(model)
onnx.helper.strip_doc_string(model)
model = onnx.shape_inference.infer_shapes(model)
        # TODO: modifying the ONNX model's IOs probably causes an error in optimize
# from onnx.utils import polish_model
# model = polish_model(model) # run checker, docs strip, optimizer and shape inference
inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input}
outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output}
precision = _infer_graph_precision(model.graph)
return Model(model, precision, inputs, outputs)
class OnnxSaver(BaseSaver):
def __init__(self, as_text: bool = False):
self._as_text = as_text
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
onnx_model: onnx.ModelProto = model.handle
if self._as_text:
with model_path.open("w") as f:
f.write(text_format.MessageToString(onnx_model))
else:
with model_path.open("wb") as f:
f.write(onnx_model.SerializeToString())
"""
ExecutionProviders on onnxruntime 1.4.0
['TensorrtExecutionProvider',
'CUDAExecutionProvider',
'MIGraphXExecutionProvider',
'NGRAPHExecutionProvider',
'OpenVINOExecutionProvider',
'DnnlExecutionProvider',
'NupharExecutionProvider',
'VitisAIExecutionProvider',
'ArmNNExecutionProvider',
'ACLExecutionProvider',
'CPUExecutionProvider']
"""
def _check_providers(providers):
providers = providers or []
if not isinstance(providers, (list, tuple)):
providers = [providers]
available_providers = onnxruntime.get_available_providers()
unavailable = set(providers) - set(available_providers)
if unavailable:
raise RuntimeError(f"Unavailable providers {unavailable}")
return providers
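
# For example, _check_providers("CUDAExecutionProvider") normalizes the single
# string into ["CUDAExecutionProvider"] and raises only when that provider is
# absent from onnxruntime.get_available_providers() for the installed build.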
class OnnxRunner(BaseRunner):
def __init__(self, verbose_runtime_logs: bool = False):
self._providers = None
self._verbose_runtime_logs = verbose_runtime_logs
def init_inference(self, model: Model):
assert isinstance(model.handle, onnx.ModelProto)
return OnnxRunnerSession(
model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs
)
class OnnxRunnerSession(BaseRunnerSession):
def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False):
super().__init__(model)
self._input_names = None
self._output_names = None
self._session = None
self._providers = providers
self._verbose_runtime_logs = verbose_runtime_logs
self._old_env_values = {}
def __enter__(self):
self._old_env_values = self._set_env_variables()
sess_options = onnxruntime.SessionOptions() # default session options
if self._verbose_runtime_logs:
sess_options.log_severity_level = 0
sess_options.log_verbosity_level = 1
LOGGER.info(
f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}"
)
self._input_names = list(self._model.inputs)
self._output_names = list(self._model.outputs)
model_payload = self._model.handle.SerializeToString()
self._session = onnxruntime.InferenceSession(
model_payload, providers=self._providers, sess_options=sess_options
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._input_names = None
self._output_names = None
self._session = None
self._recover_env_variables(self._old_env_values)
def __call__(self, x: Dict[str, object]):
feed_dict = {k: x[k] for k in self._input_names}
y_pred = self._session.run(self._output_names, feed_dict)
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
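
# End-to-end sketch (the path is hypothetical; provider availability depends on
# the installed onnxruntime build):
#
#   model = OnnxLoader().load("model.onnx")
#   with OnnxRunner().init_inference(model=model) as session:
#       y_pred = session({name: batch[name] for name in model.inputs})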
loaders.register_extension(Format.ONNX.value, OnnxLoader)
runners.register_extension(Format.ONNX.value, OnnxRunner)
savers.register_extension(Format.ONNX.value, OnnxSaver)
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/library/onnx.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/library/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from typing import Callable, Dict, List, Optional
import networkx as nx
from ..core import ShapeSpec
def infer_precision(
nx_graph: nx.Graph,
input_names: List[str],
output_names: List[str],
get_node_dtype_fn: Callable,
):
    # delegate dtype extraction to the caller-provided accessor so graph
    # backends with different node attributes can reuse this helper
    node_dtypes = [get_node_dtype_fn(nx_graph.nodes[node_name]) for node_name in nx_graph.nodes]
    node_dtypes = [dt for dt in node_dtypes if dt is None or dt.kind not in ["i", "b"]]
    dtypes_counter = Counter(node_dtypes)
    return dtypes_counter.most_common()[0][0] if dtypes_counter else None
def get_shapes_with_dynamic_axes(dataloader, batch_size_dim: Optional[int] = None):
def _set_dynamic_shapes(t, shapes):
for k, v in t.items():
shape = list(v.shape)
for dim, s in enumerate(shape):
if shapes[k][dim] != -1 and shapes[k][dim] != s:
shapes[k][dim] = -1
def _mark_batch_axis(shape, batch_axis: int):
shape = list(shape)
shape[batch_axis] = -1
return tuple(shape)
    # get all shapes from input and output tensors
input_shapes = {}
output_shapes = {}
for batch in dataloader:
_, x, y = batch
for k, v in x.items():
input_shapes[k] = list(v.shape)
for k, v in y.items():
output_shapes[k] = list(v.shape)
break
# based on max <max_num_iters> iterations, check which
# dimensions differ to determine dynamic_axes
max_num_iters = 100
for idx, batch in enumerate(dataloader):
if idx >= max_num_iters:
break
_, x, y = batch
_set_dynamic_shapes(x, input_shapes)
_set_dynamic_shapes(y, output_shapes)
if batch_size_dim is not None:
input_shapes = {name: _mark_batch_axis(shape, batch_size_dim) for name, shape in input_shapes.items()}
output_shapes = {name: _mark_batch_axis(shape, batch_size_dim) for name, shape in output_shapes.items()}
return input_shapes, output_shapes
def get_dynamic_axes(dataloader, batch_size_dim: Optional[int] = None):
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim=batch_size_dim)
all_shapes = {**input_shapes, **output_shapes}
dynamic_axes = {}
for k, shape in all_shapes.items():
for idx, s in enumerate(shape):
if s == -1:
                # accumulate per-axis entries instead of overwriting the dict,
                # so tensors with more than one dynamic axis keep all of them
                dynamic_axes.setdefault(k, {})[idx] = k + "_" + str(idx)
for k in all_shapes:
if k in dynamic_axes:
dynamic_axes[k].update({batch_size_dim: "batch_size_" + str(batch_size_dim)})
else:
dynamic_axes[k] = {batch_size_dim: "batch_size_" + str(batch_size_dim)}
return dynamic_axes
def get_input_shapes(dataloader, max_batch_size=1) -> Dict[str, ShapeSpec]:
def init_counters_and_shapes(x, counters, min_shapes, max_shapes):
for k, v in x.items():
counters[k] = Counter()
min_shapes[k] = [float("inf")] * v.ndim
max_shapes[k] = [float("-inf")] * v.ndim
counters = {}
min_shapes: Dict[str, tuple] = {}
max_shapes: Dict[str, tuple] = {}
for idx, batch in enumerate(dataloader):
ids, x, y = batch
if idx == 0:
init_counters_and_shapes(x, counters, min_shapes, max_shapes)
for k, v in x.items():
shape = v.shape
counters[k][shape] += 1
min_shapes[k] = tuple(min(a, b) for a, b in zip(min_shapes[k], shape))
max_shapes[k] = tuple(max(a, b) for a, b in zip(max_shapes[k], shape))
opt_shapes: Dict[str, tuple] = {}
for k, v in counters.items():
opt_shapes[k] = v.most_common(1)[0][0]
shapes = {}
for k in opt_shapes.keys(): # same keys in min_shapes and max_shapes
shapes[k] = ShapeSpec(
min=(1,) + min_shapes[k][1:],
max=(max_batch_size,) + max_shapes[k][1:],
opt=(max_batch_size,) + opt_shapes[k][1:],
)
return shapes
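
if __name__ == "__main__":
    # Smoke test for the helpers above (illustrative only; run as a module from
    # the package root so the relative ..core import resolves). A fake
    # dataloader with a varying second axis should be reported as dynamic.
    import numpy as np

    def _fake_dataloader():
        for seq_len in (8, 16, 16):
            ids = np.arange(4)
            x = {"input__0": np.zeros((4, seq_len), dtype=np.float32)}
            y = {"output__0": np.zeros((4, 1), dtype=np.float32)}
            yield ids, x, y

    print(get_dynamic_axes(_fake_dataloader(), batch_size_dim=0))
    print(get_input_shapes(_fake_dataloader(), max_batch_size=32))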
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/library/utils.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from pathlib import Path
from typing import Dict, NamedTuple, Optional, Union
import numpy as np
# pytype: disable=import-error
try:
import pycuda.autoinit
import pycuda.driver as cuda
except Exception as e:
logging.getLogger(__name__).warning(f"Problems with importing pycuda package; {e}")
# pytype: enable=import-error
import tensorrt as trt # pytype: disable=import-error
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, Format, Model, TensorSpec
from ..extensions import loaders, runners
LOGGER = logging.getLogger(__name__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
# documentation:
# https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/index.html
# https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#python_samples_section
_NP_DTYPE2TRT_DTYPE = {
np.dtype("float32"): trt.DataType.FLOAT,
np.dtype("float16"): trt.DataType.HALF,
np.dtype("int8"): trt.DataType.INT8,
np.dtype("int32"): trt.DataType.INT32,
np.dtype("bool"): trt.DataType.BOOL,
}
class TensorRTLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
model_path = Path(model_path)
LOGGER.debug(f"Loading TensorRT engine from {model_path}")
engine = self._load_engine(model_path)
if engine is None:
LOGGER.debug("Unable to load engine without plugins. Loading plugins.")
trt.init_libnvinfer_plugins(logger=TRT_LOGGER, namespace="")
LOGGER.debug(f"Loading TensorRT engine with plugins from {model_path}")
engine = self._load_engine(model_path)
if engine is None:
raise RuntimeError(f"Could not load ICudaEngine from {model_path}")
inputs = {}
outputs = {}
for binding_idx in range(engine.num_bindings):
name = engine.get_binding_name(binding_idx)
is_input = engine.binding_is_input(binding_idx)
dtype = np.dtype(trt.nptype(engine.get_binding_dtype(binding_idx))).name
shape = engine.get_binding_shape(binding_idx)
if is_input:
inputs[name] = TensorSpec(name, dtype, shape)
else:
outputs[name] = TensorSpec(name, dtype, shape)
return Model(engine, None, inputs, outputs)
def _load_engine(self, model_path: Path):
with model_path.open("rb") as fh, trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(fh.read())
return engine
class TRTBuffers(NamedTuple):
x_host: Optional[Dict[str, object]]
x_dev: Dict[str, object]
y_pred_host: Dict[str, object]
y_pred_dev: Dict[str, object]
class TensorRTRunner(BaseRunner):
def __init__(self):
pass
def init_inference(self, model: Model):
return TensorRTRunnerSession(model=model)
class TensorRTRunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, trt.ICudaEngine)
self._model = model
self._has_dynamic_shapes = None
self._context = None
self._engine: trt.ICudaEngine = self._model.handle
self._cuda_context = pycuda.autoinit.context
self._input_names = None
self._output_names = None
self._buffers = None
def __enter__(self):
self._context = self._engine.create_execution_context()
self._context.__enter__()
self._input_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if self._engine.binding_is_input(idx)
]
self._output_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if not self._engine.binding_is_input(idx)
]
# all_binding_shapes_specified is True for models without dynamic shapes
# so initially this variable is False for models with dynamic shapes
self._has_dynamic_shapes = not self._context.all_binding_shapes_specified
return self
def __exit__(self, exc_type, exc_value, traceback):
self._context.__exit__(exc_type, exc_value, traceback)
self._input_names = None
self._output_names = None
        # TODO: are CUDA buffers deallocated automatically?
self._buffers = None
def __call__(self, x):
buffers = self._prepare_buffers_if_needed(x)
bindings = self._update_bindings(buffers)
for name in self._input_names:
cuda.memcpy_htod(buffers.x_dev[name], buffers.x_host[name])
self._cuda_context.push()
self._context.execute_v2(bindings=bindings)
self._cuda_context.pop()
for name in self._output_names:
cuda.memcpy_dtoh(buffers.y_pred_host[name], buffers.y_pred_dev[name])
return buffers.y_pred_host
def _update_bindings(self, buffers: TRTBuffers):
bindings = [None] * self._engine.num_bindings
for name in buffers.y_pred_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.y_pred_dev[name]
for name in buffers.x_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.x_dev[name]
return bindings
def _set_dynamic_input_shapes(self, x_host):
def _is_shape_dynamic(input_shape):
            return any(dim is None or dim == -1 for dim in input_shape)
for name in self._input_names:
bindings_idx = self._engine[name]
data_shape = x_host[name].shape # pytype: disable=attribute-error
if self._engine.is_shape_binding(bindings_idx):
input_shape = self._context.get_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_shape_input(bindings_idx, data_shape)
else:
input_shape = self._engine.get_binding_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_binding_shape(bindings_idx, data_shape)
assert self._context.all_binding_shapes_specified and self._context.all_shape_inputs_specified
def _prepare_buffers_if_needed(self, x_host: Dict[str, object]):
# pytype: disable=attribute-error
new_batch_size = list(x_host.values())[0].shape[0]
current_batch_size = list(self._buffers.y_pred_host.values())[0].shape[0] if self._buffers else 0
# pytype: enable=attribute-error
if self._has_dynamic_shapes or new_batch_size != current_batch_size:
            # TODO: are CUDA buffers deallocated automatically?
self._set_dynamic_input_shapes(x_host)
y_pred_host = {}
for name in self._output_names:
shape = self._context.get_binding_shape(self._engine[name])
binding_idx: int = self._engine[name]
dtype_from_trt_binding = np.dtype(trt.nptype(self._engine.get_binding_dtype(binding_idx)))
dtype_from_model_spec = np.dtype(self._model.outputs[name].dtype)
assert dtype_from_model_spec == dtype_from_trt_binding
y_pred_host[name] = np.zeros(shape, dtype=dtype_from_model_spec)
y_pred_dev = {name: cuda.mem_alloc(data.nbytes) for name, data in y_pred_host.items()}
# cast host input into binding dtype
def _cast_input(name, data):
binding_idx: int = self._engine[name]
np_dtype = trt.nptype(self._engine.get_binding_dtype(binding_idx))
return data.astype(np_dtype)
x_host = {name: _cast_input(name, host_input) for name, host_input in x_host.items()}
x_dev = {
name: cuda.mem_alloc(host_input.nbytes)
for name, host_input in x_host.items()
if name in self._input_names # pytype: disable=attribute-error
}
self._buffers = TRTBuffers(None, x_dev, y_pred_host, y_pred_dev)
return self._buffers._replace(x_host=x_host)
if "pycuda.driver" in sys.modules:
loaders.register_extension(Format.TRT.value, TensorRTLoader)
runners.register_extension(Format.TRT.value, TensorRTRunner)
else:
    LOGGER.warning("TensorRT extension not registered due to problems with importing the pycuda.driver package.")
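
# Usage sketch (requires a GPU, a working pycuda install and a serialized
# engine; the path is hypothetical):
#
#   model = TensorRTLoader().load("model.plan")
#   with TensorRTRunner().init_inference(model=model) as session:
#       y_pred = session(x)  # x: Dict[str, np.ndarray] keyed by input binding name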
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/library/tensorrt.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# method from PEP-366 to support relative import in executed modules
import logging
import pathlib
from typing import List, Optional
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..core import EvaluationMode, MeasurementMode, OfflineMode, PerformanceTool
from .model_analyzer import ModelAnalyzerRunner
from .perf_analyzer import PerfAnalyzerRunner, PerfAnalyzerWarmupRunner
LOGGER = logging.getLogger("triton_performance_runner")
class TritonPerformanceRunner:
def __init__(
self,
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
concurrency: List[int],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
output_shared_memory_size: int,
performance_tool: PerformanceTool,
model_repository: str,
result_path: pathlib.Path,
warmup: bool,
timeout: Optional[int],
verbose: bool,
):
self._warmup_runner = None
if warmup:
LOGGER.info("Running warmup before the main test")
self._warmup_runner = PerfAnalyzerWarmupRunner(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
concurrency=concurrency,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
timeout=timeout,
)
if performance_tool == PerformanceTool.MODEL_ANALYZER:
LOGGER.info("Using Model Analyzer for performance evaluation")
self._runner = ModelAnalyzerRunner(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
concurrency=concurrency,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
model_repository=model_repository,
result_path=result_path,
timeout=timeout,
verbose=verbose,
)
elif performance_tool == PerformanceTool.PERF_ANALYZER:
LOGGER.info("Using Perf Analyzer for performance evaluation")
self._runner = PerfAnalyzerRunner(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
concurrency=concurrency,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
result_path=result_path,
timeout=timeout,
verbose=verbose,
)
else:
raise ValueError(f"Unsupported performance tool {performance_tool}")
def run(self):
if self._warmup_runner:
self._warmup_runner.run()
self._runner.run()
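
# Construction sketch (all values are illustrative; enum members such as
# MeasurementMode.COUNT_WINDOWS and OfflineMode.SYSTEM are assumed to exist in
# ..core):
#
#   runner = TritonPerformanceRunner(
#       server_url="http://localhost:8000",
#       model_name="WideAndDeep",
#       input_data="random",
#       input_shapes=[],
#       batch_sizes=[1, 8],
#       concurrency=[1],
#       measurement_mode=MeasurementMode.COUNT_WINDOWS,
#       measurement_interval=5000,
#       measurement_request_count=50,
#       evaluation_mode=EvaluationMode.ONLINE,
#       offline_mode=OfflineMode.SYSTEM,
#       output_shared_memory_size=102400,
#       performance_tool=PerformanceTool.PERF_ANALYZER,
#       model_repository="/models",
#       result_path=pathlib.Path("results.csv"),
#       warmup=True,
#       timeout=None,
#       verbose=False,
#   )
#   runner.run()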
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/runner.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .runner import TritonPerformanceRunner # noqa: F401
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
import shutil
import sys
from distutils.version import LooseVersion
from typing import List, Optional
import yaml
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...core import EvaluationMode, MeasurementMode, OfflineMode
from ...utils import log_dict, parse_server_url
from .model_analyzer import ModelAnalyzer, ModelAnalyzerMode
from .model_analyzer_config import ModelAnalyzerConfig
if LooseVersion(sys.version) >= LooseVersion("3.8.0"):
from importlib.metadata import version
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
TRITON_MODEL_ANALYZER_VERSION = LooseVersion(version("triton-model-analyzer"))
else:
import pkg_resources
TRITON_CLIENT_VERSION = LooseVersion(pkg_resources.get_distribution("tritonclient").version)
TRITON_MODEL_ANALYZER_VERSION = LooseVersion(pkg_resources.get_distribution("triton-model-analyzer").version)
LOGGER = logging.getLogger("triton_performance_runner.model_analyzer")
class ModelAnalyzerRunner:
def __init__(
self,
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
concurrency: List[int],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
model_repository: str,
result_path: pathlib.Path,
output_shared_memory_size: int = 102400,
timeout: Optional[int] = None,
verbose: bool = False,
):
log_dict(
"Selected configuration",
{
"server_url": server_url,
"model_name": model_name,
"input_data": input_data,
"input_shapes": input_shapes,
"batch_sizes": batch_sizes,
"concurrency": concurrency,
"measurement_mode": measurement_mode,
"measurement_interval": measurement_interval,
"measurement_request_count": measurement_request_count,
"evaluation_mode": evaluation_mode,
"offline_mode": offline_mode,
"output_shared_memory_size": output_shared_memory_size,
"model_repository": model_repository,
"result_path": result_path,
"verbose": verbose,
},
)
if result_path.suffix:
raise ValueError(
"Results path for Model Analyzer is invalid. Please, provide the directory name. Example: results"
)
self._checkpoints = pathlib.Path("./checkpoints")
self._result_path = result_path
self._verbose = verbose
self._filename_model_inference = "metrics-model-inference.csv"
self._filename_model_gpu = "metrics-model-gpu.csv"
self._profile_config = self._prepare_profile_config(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
concurrency=concurrency,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
model_repository=model_repository,
output_shared_memory_size=output_shared_memory_size,
checkpoints=self._checkpoints,
verbose=verbose,
)
self._analyze_config = self._prepare_analyze_config(
model_name=model_name,
result_path=result_path,
verbose=verbose,
filename_model_inference=self._filename_model_inference,
filename_model_gpu=self._filename_model_gpu,
)
def run(self):
self._result_path.mkdir(parents=True, exist_ok=True)
if self._checkpoints.is_dir():
shutil.rmtree(self._checkpoints.as_posix())
self._checkpoints.mkdir(parents=True, exist_ok=True)
model_analyzer = ModelAnalyzer(config=self._profile_config)
model_analyzer.run(mode=ModelAnalyzerMode.PROFILE, verbose=self._verbose)
for file in self._checkpoints.iterdir():
if not file.is_file() or file.suffix != ".ckpt":
continue
LOGGER.info(f"Moving checkpoint {file.name} to {self._result_path}")
shutil.move(file, self._result_path / file.name)
model_analyzer = ModelAnalyzer(config=self._analyze_config)
model_analyzer.run(mode=ModelAnalyzerMode.ANALYZE, verbose=self._verbose)
inference_metrics_file = pathlib.Path("/tmp") / "results" / self._filename_model_inference
gpu_metrics_file = pathlib.Path("/tmp") / "results" / self._filename_model_gpu
for file in [inference_metrics_file, gpu_metrics_file]:
LOGGER.info(f"Moving metrics {file.name} to {self._result_path}")
shutil.move(file, self._result_path / file.name)
def _prepare_profile_config(
self,
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
concurrency: List[int],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
model_repository: str,
checkpoints: pathlib.Path,
output_shared_memory_size: int = 102400,
verbose: bool = False,
):
protocol, host, port = parse_server_url(server_url)
perf_analyzer_config = self._perf_analyzer_config(
input_data,
input_shapes,
measurement_mode,
measurement_interval,
measurement_request_count,
evaluation_mode,
offline_mode,
output_shared_memory_size,
)
config = {
"model_repository": model_repository,
"triton_launch_mode": "remote",
"run_config_search_disable": True,
"perf_analyzer_flags": perf_analyzer_config,
"perf_analyzer_timeout": 3600, # Workaround for Perf Analyzer timeout - use 1h
"profile_models": [model_name],
"batch_sizes": batch_sizes,
"concurrency": concurrency,
"verbose": verbose,
"checkpoint_directory": checkpoints.as_posix(),
"override_output_model_repository": True,
"client_protocol": protocol.value,
f"triton_{protocol.value}_endpoint": f"{host}:{port}",
}
if verbose:
log_dict("Model Analyzer profiling configuration", config)
with open("config_profile.yaml", "w") as file:
yaml.safe_dump(config, file)
config = ModelAnalyzerConfig()
config["config-file"] = "config_profile.yaml"
return config
def _prepare_analyze_config(
self,
model_name: str,
result_path: pathlib.Path,
filename_model_inference: str,
filename_model_gpu: str,
verbose: bool,
):
inference_output_fields = [
"batch_size",
"concurrency",
"perf_throughput",
"perf_latency",
"perf_client_send_recv",
"perf_client_response_wait",
"perf_server_queue",
"perf_server_compute_input",
"perf_server_compute_infer",
"perf_server_compute_output",
]
gpu_output_fields = [
"gpu_uuid",
"batch_size",
"concurrency",
"gpu_used_memory",
"gpu_free_memory",
"gpu_utilization",
"gpu_power_usage",
]
config = {
"analysis_models": model_name,
"checkpoint_directory": result_path.as_posix(),
"export_path": "/tmp",
"inference_output_fields": inference_output_fields,
"gpu_output_fields": gpu_output_fields,
"filename_model_inference": filename_model_inference,
"filename_model_gpu": filename_model_gpu,
"summarize": False,
}
if verbose:
log_dict("Model Analyzer analysis configuration", config)
with open("config_analyze.yaml", "w") as file:
yaml.safe_dump(config, file)
config = ModelAnalyzerConfig()
config["config-file"] = "config_analyze.yaml"
return config
def _perf_analyzer_config(
self,
input_data: str,
input_shapes: List[str],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
output_shared_memory_size: int = 102400,
):
perf_analyzer_config = {
"measurement-interval": measurement_interval,
}
if TRITON_MODEL_ANALYZER_VERSION >= LooseVersion("1.8.0"):
perf_analyzer_config["input-data"] = [input_data]
else:
perf_analyzer_config["input-data"] = input_data
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
perf_analyzer_config["measurement-mode"] = measurement_mode.value
perf_analyzer_config["measurement-request-count"] = measurement_request_count
if evaluation_mode == EvaluationMode.OFFLINE:
perf_analyzer_config["shared-memory"] = offline_mode.value
perf_analyzer_config["output-shared-memory-size"] = output_shared_memory_size
if input_shapes:
if TRITON_MODEL_ANALYZER_VERSION > LooseVersion("1.8.0"):
perf_analyzer_config["shape"] = input_shapes
else:
perf_analyzer_config["shape"] = input_shapes[0]
                LOGGER.warning("Model Analyzer <= 1.8.0 supports only a single shape param for Perf Analyzer.")
return perf_analyzer_config
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/model_analyzer/runner.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .runner import ModelAnalyzerRunner # noqa: F401
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/model_analyzer/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .exceptions import ModelAnalyzerException
class ModelAnalyzerConfig:
"""
A config class to set arguments to the Model Analyzer.
An argument set to None will use the default.
"""
model_analyzer_args = [
"config-file",
]
input_to_options = [
"config-file",
]
def __init__(self):
# Args will be a dict with the string representation as key
self._args = {k: None for k in self.model_analyzer_args}
self._options = {
"-f": "config.yaml",
}
self._input_to_options = {
"config-file": "-f",
}
def to_cli_string(self):
"""
Utility function to convert a config into a
string of arguments to the server with CLI.
Returns
-------
str
the command consisting of all set arguments to
the model analyzer.
e.g. '--model-repository=/models --verbose=True'
"""
        # single-dashed options first, then long-form arguments
args = [f"{k} {v}" for k, v in self._options.items() if v]
args += [f"--{k}={v}" for k, v in self._args.items() if v]
return " ".join(args)
@classmethod
def allowed_keys(cls):
"""
Returns
-------
list of str
The keys that are allowed to be
passed into model_analyzer
"""
return list(cls.model_analyzer_args) + list(cls.input_to_options)
def __getitem__(self, key):
"""
Gets an arguments value in config
Parameters
----------
key : str
The name of the argument to the model analyzer
Returns
-------
The value that the argument is set to in this config
"""
if key in self._args:
return self._args[key]
elif key in self._input_to_options:
return self._options[self._input_to_options[key]]
else:
raise ModelAnalyzerException(f"'{key}' Key not found in config")
def __setitem__(self, key, value):
"""
Sets an arguments value in config
after checking if defined/supported.
Parameters
----------
key : str
The name of the argument to the model analyzer
value : (any)
The value to which the argument is being set
Raises
------
        ModelAnalyzerException
If key is unsupported or undefined in the
config class
"""
if key in self._args:
self._args[key] = value
elif key in self._input_to_options:
self._options[self._input_to_options[key]] = value
else:
raise ModelAnalyzerException(f"The argument '{key}' to the Model Analyzer is not supported.")
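
if __name__ == "__main__":
    # Quick self-check (illustrative only; run as a module so the relative
    # import resolves). "config-file" matches model_analyzer_args before its
    # "-f" alias, so the explicit value is emitted as a long-form argument
    # while the default "-f config.yaml" option is kept as-is.
    config = ModelAnalyzerConfig()
    config["config-file"] = "config_profile.yaml"
    print(config.to_cli_string())  # -f config.yaml --config-file=config_profile.yaml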
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/model_analyzer/model_analyzer_config.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ModelAnalyzerException(Exception):
def __init__(self, message: str):
self._message = message
def __str__(self):
"""
Get the exception string representation.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
@property
def message(self):
"""
Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/model_analyzer/exceptions.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import subprocess
from subprocess import CalledProcessError
from typing import Optional
from .exceptions import ModelAnalyzerException
SERVER_OUTPUT_TIMEOUT_SECS = 5
LOGGER = logging.getLogger(__name__)
class ModelAnalyzerMode:
PROFILE = "profile"
ANALYZE = "analyze"
REPORT = "report"
class ModelAnalyzerReportMode:
OFFLINE = "offline"
ONLINE = "online"
class ModelAnalyzer:
"""
    Concrete implementation of the Model Analyzer interface that runs
    the analyzer locally as a subprocess.
"""
_analyzer_path = "model-analyzer"
    def __init__(self, config, timeout: Optional[int] = None):
"""
Parameters
----------
        config : ModelAnalyzerConfig
            the config object containing arguments for this analyzer instance
"""
self._analyzer_process = None
self._analyzer_config = config
self._log = None
self._timeout = timeout
def run(self, mode: str, verbose: bool = False, quiet: bool = False, report_mode: str = None):
"""
Starts the model analyzer locally
"""
if self._analyzer_path:
cmd = []
if self._timeout:
cmd = ["timeout", str(self._timeout)]
cmd += [self._analyzer_path]
if verbose:
cmd += ["--verbose"]
if quiet:
cmd += ["--quiet"]
if report_mode:
cmd += ["-m"]
cmd += [report_mode]
cmd += [mode]
cmd += self._analyzer_config.to_cli_string().split()
            LOGGER.debug(f"Model Analyzer command: {cmd}")
try:
subprocess.run(cmd, check=True, start_new_session=True)
except CalledProcessError as e:
raise ModelAnalyzerException(
f"Running {self._analyzer_path} with {e.cmd} failed with"
f" exit status {e.returncode} : {e.output}"
)
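
# Invocation sketch mirroring how runner.py drives this class:
#
#   analyzer = ModelAnalyzer(config=profile_config)
#   analyzer.run(mode=ModelAnalyzerMode.PROFILE, verbose=True)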
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/model_analyzer/model_analyzer.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import logging
import os
import pathlib
import sys
from distutils.version import LooseVersion
from typing import Dict, List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...core import EvaluationMode, MeasurementMode, OfflineMode
from ...report import save_results, show_results, sort_results
from ...utils import log_dict, parse_server_url
from .perf_analyzer import PerfAnalyzer
from .perf_config import PerfAnalyzerConfig
if LooseVersion(sys.version) >= LooseVersion("3.8.0"):
from importlib.metadata import version
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
else:
import pkg_resources
TRITON_CLIENT_VERSION = LooseVersion(pkg_resources.get_distribution("tritonclient").version)
LOGGER = logging.getLogger("triton_performance_runner.perf_analyzer")
class PerfAnalyzerRunner:
def __init__(
self,
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
concurrency: List[int],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
result_path: pathlib.Path,
output_shared_memory_size: int = 102400,
timeout: Optional[int] = None,
verbose: bool = False,
):
log_dict(
"Selected configuration",
{
"server_url": server_url,
"model_name": model_name,
"input_data": input_data,
"input_shapes": input_shapes,
"batch_sizes": batch_sizes,
"concurrency": concurrency,
"measurement_mode": measurement_mode,
"measurement_interval": measurement_interval,
"measurement_request_count": measurement_request_count,
"evaluation_mode": evaluation_mode,
"offline_mode": offline_mode,
"output_shared_memory_size": output_shared_memory_size,
"result_path": result_path,
"timeout": timeout,
"verbose": verbose,
},
)
if result_path.suffix != ".csv":
raise ValueError(
"Results path for Perf Analyzer is invalid. Please, provide the CSV file name. Example: results.csv"
)
self._server_url = server_url
self._model_name = model_name
self._input_data = input_data
self._input_shapes = input_shapes
self._batch_sizes = batch_sizes
self._concurrency = concurrency
self._measurement_mode = measurement_mode
self._measurement_interval = measurement_interval
self._measurement_request_count = measurement_request_count
self._evaluation_mode = evaluation_mode
self._offline_mode = offline_mode
self._result_path = result_path
self._output_shared_memory_size = output_shared_memory_size
self._timeout = timeout
self._verbose = verbose
self._protocol, self._host, self._port = parse_server_url(server_url)
def run(self):
results: List[Dict] = []
for batch_size in self._batch_sizes:
for concurrency in self._concurrency:
performance_partial_file = (
f"{self._evaluation_mode.value.lower()}_partial_{batch_size}_{concurrency}.csv"
)
params = {
"model-name": self._model_name,
"model-version": 1,
"batch-size": batch_size,
"url": f"{self._host}:{self._port}",
"protocol": self._protocol.value,
"input-data": self._input_data,
"measurement-interval": self._measurement_interval,
"concurrency-range": f"{concurrency}:{concurrency}:1",
"latency-report-file": performance_partial_file,
}
if self._verbose:
params["extra-verbose"] = True
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
params["measurement-mode"] = self._measurement_mode.value
params["measurement-request-count"] = self._measurement_request_count
if self._evaluation_mode == EvaluationMode.OFFLINE:
params["shared-memory"] = self._offline_mode.value
params["output-shared-memory-size"] = self._output_shared_memory_size
if self._verbose:
log_dict(
f"Perf Analyzer config for batch_size: {batch_size} and concurrency: {concurrency}", params
)
config = PerfAnalyzerConfig()
for param, value in params.items():
config[param] = value
for shape in self._input_shapes:
config["shape"] = shape
perf_analyzer = PerfAnalyzer(config=config, timeout=self._timeout)
perf_analyzer.run()
self._update_performance_data(results, batch_size, performance_partial_file)
os.remove(performance_partial_file)
results = sort_results(results=results)
save_results(filename=self._result_path.as_posix(), data=results)
show_results(results=results)
def _calculate_average_latency(self, r):
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum(int(r.get(f, 0)) for f in avg_sum_fields)
return avg_latency
def _update_performance_data(self, results: List, batch_size: int, performance_partial_file: str):
row: Dict = {"Batch": batch_size}
with open(performance_partial_file) as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
avg_latency = self._calculate_average_latency(r)
row = {**row, **r, "avg latency": avg_latency}
results.append(row)
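# Usage sketch (editor's illustration, not part of the original module).
# EvaluationMode.OFFLINE is referenced by this module itself; the
# MeasurementMode / OfflineMode values below ("count_windows", "system") and
# the server URL format accepted by parse_server_url are assumptions about
# the sibling `core` and `utils` modules, matching the strings this package
# passes to perf_analyzer. A running Triton server is required.
if __name__ == "__main__":
    runner = PerfAnalyzerRunner(
        server_url="http://localhost:8000",
        model_name="WideAndDeep",
        input_data="random",
        input_shapes=[],
        batch_sizes=[1, 8],
        concurrency=[1, 4],
        measurement_mode=MeasurementMode("count_windows"),  # assumed enum value
        measurement_interval=5000,
        measurement_request_count=50,
        evaluation_mode=EvaluationMode.OFFLINE,
        offline_mode=OfflineMode("system"),  # assumed enum value
        result_path=pathlib.Path("results.csv"),
    )
    runner.run()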
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/perf_analyzer/runner.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .runner import PerfAnalyzerRunner # noqa: F401
from .warmup import PerfAnalyzerWarmupRunner # noqa: F401
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/perf_analyzer/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
from distutils.version import LooseVersion
from importlib.metadata import version
from typing import List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...core import EvaluationMode, MeasurementMode, OfflineMode
from ...utils import parse_server_url
from .perf_analyzer import PerfAnalyzer
from .perf_config import PerfAnalyzerConfig
LOGGER = logging.getLogger("warmup")
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
class PerfAnalyzerWarmupRunner:
def __init__(
self,
server_url: str,
model_name: str,
batch_sizes: List[int],
concurrency: List[int],
input_data: str,
input_shapes: List[str],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
offline_mode: OfflineMode,
evaluation_mode: EvaluationMode,
output_shared_memory_size: int,
timeout: Optional[int],
):
self._model_name = model_name
self._input_data = input_data
self._input_shapes = input_shapes
self._measurement_mode = measurement_mode
self._offline_mode = offline_mode
self._evaluation_mode = evaluation_mode
self._output_shared_memory_size = output_shared_memory_size
self._protocol, self._host, self._port = parse_server_url(server_url)
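# Warmup deliberately stresses the server harder than the measurement run:
# double the measurement window and request count, then probe with the
# smallest batch size and the highest concurrency from the sweep.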
self._measurement_interval = 2 * measurement_interval
self._measurement_request_count = 2 * measurement_request_count
self._batch_sizes = [min(batch_sizes)]
self._concurrency = [max(concurrency)]
self._timeout = timeout
def run(self):
for batch_size in self._batch_sizes:
for concurrency in self._concurrency:
params = {
"model-name": self._model_name,
"model-version": 1,
"batch-size": batch_size,
"url": f"{self._host}:{self._port}",
"protocol": self._protocol.value,
"input-data": self._input_data,
"measurement-interval": self._measurement_interval,
"concurrency-range": f"{concurrency}:{concurrency}:1",
"verbose": True,
}
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
params["measurement-mode"] = self._measurement_mode.value
params["measurement-request-count"] = self._measurement_request_count
if self._evaluation_mode == EvaluationMode.OFFLINE:
params["shared-memory"] = self._offline_mode.value
params["output-shared-memory-size"] = self._output_shared_memory_size
config = PerfAnalyzerConfig()
for param, value in params.items():
config[param] = value
for shape in self._input_shapes:
config["shape"] = shape
perf_analyzer = PerfAnalyzer(config=config, timeout=self._timeout)
perf_analyzer.run()
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/perf_analyzer/warmup.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from .exceptions import PerfAnalyzerException
class PerfAnalyzerConfig:
"""
A config class to set arguments to the perf_analyzer.
An argument set to None will use the perf_analyzer's default.
"""
perf_analyzer_args = [
"async",
"sync",
"measurement-interval",
"measurement-mode",
"measurement-request-count",
"concurrency-range",
"request-rate-range",
"request-distribution",
"request-intervals",
"binary-search",
"num-of-sequence",
"latency-threshold",
"max-threads",
"stability-percentage",
"max-trials",
"percentile",
"input-data",
"shared-memory",
"output-shared-memory-size",
"sequence-length",
"string-length",
"string-data",
]
perf_analyzer_multiple_args = [
"shape",
]
input_to_options = [
"model-name",
"model-version",
"batch-size",
"url",
"protocol",
"latency-report-file",
"streaming",
]
input_to_verbose = ["verbose", "extra-verbose"]
def __init__(self):
"""
Construct a PerfAnalyzerConfig
"""
self._args = {k: None for k in self.perf_analyzer_args}
self._multiple_args = {k: [] for k in self.perf_analyzer_multiple_args}
self._options = {
"-m": None,
"-x": None,
"-b": None,
"-u": None,
"-i": None,
"-f": None,
"-H": None,
"-c": None,
"-t": None,
}
self._verbose = {"-v": None, "-v -v": None}
self._input_to_options = {
"model-name": "-m",
"model-version": "-x",
"batch-size": "-b",
"url": "-u",
"protocol": "-i",
"latency-report-file": "-f",
"streaming": "-H",
"concurrency": "-c",
"threads": "-t",
}
self._input_to_verbose = {"verbose": "-v", "extra-verbose": "-v -v"}
@classmethod
def allowed_keys(cls):
"""
Returns
-------
list of str
The keys that are allowed to be
passed into perf_analyzer
"""
return (
list(cls.perf_analyzer_args)
+ list(cls.perf_analyzer_multiple_args)
+ list(cls.input_to_options)
+ list(cls.input_to_verbose)
)
def update_config(self, params=None):
"""
Allows setting values from a
params dict
Parameters
----------
params: dict
keys are allowed args to perf_analyzer
"""
if params:
for key in params:
self[key] = params[key]
def to_cli_string(self):
"""
Utility function to convert a config into a
string of CLI arguments for perf_analyzer.
Returns
-------
str
cli command string consisting of all arguments
to the perf_analyzer set in the config, without
the executable name.
"""
# single dashed options, then verbose flags, then main args
args = [f"{k} {v}" for k, v in self._options.items() if v]
args += [k for k, v in self._verbose.items() if v]
args += [f"--{k}={v}" for k, v in self._args.items() if v]
for k, v in self._multiple_args.items():
for item in v:
args.append(f"--{k}={item}")
return " ".join(args)
def __getitem__(self, key: str):
"""
Gets an arguments value in config
Parameters
----------
key : str
The name of the argument to the perf_analyzer
Returns
-------
The value that the argument is set to in this config
Raises
------
PerfAnalyzerException
If argument not found in the config
"""
if key in self._args:
return self._args[key]
elif key in self._multiple_args:
return self._multiple_args[key]
elif key in self._input_to_options:
return self._options[self._input_to_options[key]]
elif key in self._input_to_verbose:
return self._verbose[self._input_to_verbose[key]]
else:
raise PerfAnalyzerException(f"'{key}' Key not found in config")
def __setitem__(self, key: str, value: Any):
"""
Sets an arguments value in config
after checking if defined/supported.
Parameters
----------
key : str
The name of the argument to the perf_analyzer
value : (any)
The value to which the argument is being set
Raises
------
PerfAnalyzerException
If key is unsupported or undefined in the
config class
"""
if key in self._args:
self._args[key] = value
elif key in self._multiple_args:
self._multiple_args[key].append(value)
elif key in self._input_to_options:
self._options[self._input_to_options[key]] = value
elif key in self._input_to_verbose:
self._verbose[self._input_to_verbose[key]] = value
else:
raise PerfAnalyzerException(
f"The argument '{key}' to the perf_analyzer " "is not supported by the model analyzer."
)
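# Usage sketch (editor's illustration, not part of the original module):
# keys map either to the long options in `perf_analyzer_args`, to repeatable
# options such as `shape`, or to the short flags in `input_to_options`. The
# model name and shape strings are placeholders.
if __name__ == "__main__":
    config = PerfAnalyzerConfig()
    config["model-name"] = "WideAndDeep"  # rendered as: -m WideAndDeep
    config["batch-size"] = 8  # rendered as: -b 8
    config["measurement-interval"] = 5000  # rendered as: --measurement-interval=5000
    config["shape"] = "input__0:128"  # repeatable: --shape=input__0:128
    config["shape"] = "input__1:128"
    print(config.to_cli_string())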
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/perf_analyzer/perf_config.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class PerfAnalyzerException(Exception):
def __init__(self, message: str):
self._message = message
def __str__(self):
"""
Get the exception string representation.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
@property
def message(self):
"""
Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/perf_analyzer/exceptions.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
from subprocess import PIPE, CalledProcessError, Popen
# method from PEP-366 to support relative import in executed modules
from typing import List, Optional
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .exceptions import PerfAnalyzerException
MAX_INTERVAL_CHANGES = 10
COUNT_INTERVAL_DELTA = 50
TIME_INTERVAL_DELTA = 2000
LOGGER = logging.getLogger(__name__)
class PerfAnalyzer:
"""
This class provides an interface for running workloads
with perf_analyzer.
"""
def __init__(self, config, timeout: Optional[int]):
"""
Parameters
----------
config : PerfAnalyzerConfig
keys are names of arguments to perf_analyzer,
values are their values.
timeout : Optional[int]
maximum time in seconds a single perf_analyzer run is allowed to take
"""
self.bin_path = "perf_analyzer"
self._config = config
self._output = ""
self._timeout = timeout
def run(self):
"""
Runs the perf analyzer with the
initialized configuration
Returns
-------
List of Records
List of the metrics obtained from this
run of perf_analyzer
Raises
------
PerfAnalyzerException
If subprocess throws CalledProcessError
"""
self._output = ""
for _ in range(MAX_INTERVAL_CHANGES):
command = [self.bin_path]
command += self._config.to_cli_string().replace("=", " ").split()
LOGGER.debug(f"Perf Analyze command: {command}")
if not self._timeout:
LOGGER.debug("Perf Analyze command timeout not set")
else:
LOGGER.debug(f"Perf Analyze command timeout: {self._timeout} [s]")
try:
self._run_with_stream(command=command)
return
except CalledProcessError as e:
if self._failed_with_measurement_interval(e.output):
if self._config["measurement-mode"] is None or self._config["measurement-mode"] == "count_windows":
self._increase_request_count()
else:
self._increase_time_interval()
else:
raise PerfAnalyzerException(
f"Running perf_analyzer with {e.cmd} failed with" f" exit status {e.returncode} : {e.output}"
)
raise PerfAnalyzerException(f"Ran perf_analyzer {MAX_INTERVAL_CHANGES} times, but no valid requests recorded.")
def output(self):
"""
Returns
-------
The stdout output of the
last perf_analyzer run
"""
if self._output:
return self._output
raise PerfAnalyzerException("Attempted to get perf_analyzer output" "without calling run first.")
def _run_with_stream(self, command: List[str]):
commands_lst = []
if self._timeout:
commands_lst = ["timeout", str(self._timeout)]
commands_lst.extend(command)
LOGGER.debug(f"Run with stream: {commands_lst}")
process = Popen(commands_lst, start_new_session=True, stdout=PIPE, encoding="utf-8")
streamed_output = ""
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
streamed_output += output
print(output.rstrip())
self._output += streamed_output
result = process.poll()
LOGGER.debug(f"Perf Analyzer process exited with result: {result}")
# WAR for Perf Analyzer exit code 0 when stabilization failed
if result == 0 and self._failed_with_measurement_interval(streamed_output):
LOGGER.debug("Perf Analyzer finished with exit status 0, however measurement stabilization failed.")
result = 1
if result != 0:
raise CalledProcessError(returncode=result, cmd=commands_lst, output=streamed_output)
def _failed_with_measurement_interval(self, output: str):
checks = [
output.find("Failed to obtain stable measurement"),
output.find("Please use a larger time window"),
]
result = any(status != -1 for status in checks)
LOGGER.debug(f"Measurement stability message validation: {checks}. Result: {result}.")
return result
def _increase_request_count(self):
self._config["measurement-request-count"] += COUNT_INTERVAL_DELTA
LOGGER.debug(
"perf_analyzer's measurement request count is too small, "
f"increased to {self._config['measurement-request-count']}."
)
def _increase_time_interval(self):
self._config["measurement-interval"] += TIME_INTERVAL_DELTA
LOGGER.debug(
"perf_analyzer's measurement window is too small, "
f"increased to {self._config['measurement-interval']} ms."
)
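# Usage sketch (editor's illustration, not part of the original module).
# The real caller passes a PerfAnalyzerConfig; the dict-based stub below shows
# the minimal contract PerfAnalyzer relies on: item access for the measurement
# options plus to_cli_string(). The run shells out to the `perf_analyzer`
# binary, which must be on PATH, and the CLI arguments are placeholders.
if __name__ == "__main__":

    class _StubPerfConfig(dict):
        def to_cli_string(self) -> str:
            return "-m WideAndDeep -b 1 --measurement-mode=count_windows"

    stub = _StubPerfConfig(
        {"measurement-mode": "count_windows", "measurement-request-count": 50, "measurement-interval": 5000}
    )
    perf_analyzer = PerfAnalyzer(config=stub, timeout=600)
    perf_analyzer.run()
    print(perf_analyzer.output())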
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/perf_analyzer/perf_analyzer.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Any, Dict, Optional
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .task import DataObject
class Configuration(DataObject):
"""
Configuration object - handle single experiment data
"""
def __init__(
self,
parameters: Dict,
checkpoint: Optional[str],
):
"""
Args:
parameters: Configuration parameters
checkpoint: Checkpoint used for experiment
"""
self.parameters = parameters
self.checkpoint = checkpoint
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/configuration.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from datetime import datetime
from typing import Dict, List
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .configuration import Configuration
from .downloader import download
from .experiment import Experiment, Stage
from .logger import LOGGER
from .maintainer import Maintainer
from .pipeline import Pipeline
from .stages import ResultsType, TritonPerformanceOfflineStage, TritonPerformanceOnlineStage
from .task import Checkpoint, Dataset, SystemInfo, Task
from .triton import Triton
from .utils import clean_directory
class Preparer(abc.ABC):
"""
Runner preparer object.
"""
@abc.abstractmethod
def exec(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
maintainer: Maintainer,
triton: Triton,
logs_dir: pathlib.Path,
):
pass
class ExperimentPreparer(Preparer):
"""
Experiment runner preparer object.
"""
def exec(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
maintainer: Maintainer,
triton: Triton,
logs_dir: pathlib.Path,
):
LOGGER.info("Preparing Triton container image")
triton_container_image = self._prepare_triton_container_image(config, maintainer, triton)
LOGGER.info("Initialize task")
task = self._initialize_task(
workspace=workspace,
config=config,
pipeline=pipeline,
triton_container_image=triton_container_image,
logs_dir=logs_dir,
)
LOGGER.info("Preparing directories")
self._create_dirs(workspace, task)
LOGGER.info("Clean previous run artifacts directories")
self._clean_previous_run_artifacts(workspace, task)
LOGGER.info("Downloading checkpoints")
self._download_checkpoints(task)
return task
def _create_dirs(self, workspace: pathlib.Path, task: Task) -> None:
"""
Create directories used to store artifacts and final results
Returns:
None
"""
for directory in [task.results_dir, task.logs_dir, task.checkpoints_dir]:
directory_path = workspace / directory
directory_path.mkdir(parents=True, exist_ok=True)
LOGGER.info(f"Directory {directory} created.")
def _clean_previous_run_artifacts(self, workspace: pathlib.Path, task: Task) -> None:
"""
Clean logs and results from the previous run
Returns:
None
"""
for directory in [
task.logs_dir,
task.results_dir,
]:
directory_path = workspace / directory
clean_directory(directory_path)
LOGGER.info(f"Location {directory} cleaned.")
def _prepare_triton_container_image(self, config: Config, maintainer: Maintainer, triton: Triton) -> str:
"""
Prepare Triton Container Image based on provided configuration
Returns:
Name of container image to use in process
"""
if not config.triton_dockerfile:
image_name = triton.container_image(config.container_version)
LOGGER.info(f"Using official Triton container image: {image_name}.")
return image_name
if config.triton_container_image:
LOGGER.info(f"Using provided Triton Container Image: {config.triton_container_image}")
return config.triton_container_image
normalized_model_name = config.model_name.lower().replace("_", "-")
image_name = f"tritonserver-{normalized_model_name}:latest"
LOGGER.info(f"Building Triton Container Image: {image_name}")
maintainer.build_image(
image_name=image_name,
image_file_path=pathlib.Path(config.triton_dockerfile),
build_args={"FROM_IMAGE": triton.container_image(container_version=config.container_version)},
)
return image_name
def _download_checkpoints(self, task: Task) -> None:
"""
Download checkpoints
"""
for variant, checkpoint in task.checkpoints.items():
checkpoint_url = checkpoint.url
download_path = checkpoint.path
if download_path.is_dir():
LOGGER.info(f"Checkpoint {download_path.name} already downloaded.")
continue
if not checkpoint_url:
LOGGER.warning(
f"Checkpoint {variant} url is not provided."
"\nIf you want to use that checkpoint please train the model locally"
f"\nand copy to {download_path} directory"
)
continue
download(checkpoint_url, download_path)
def _initialize_task(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
triton_container_image: str,
logs_dir: pathlib.Path,
) -> Task:
"""
Initialize task object
Args:
workspace: Path to workspace where artifacts are stored
config: Config object
pipeline: Pipeline object
triton_container_image: Triton Inference Server container image used for tests
logs_dir: Directory where task logs are stored
Returns:
Task object
"""
datasets = {}
for dataset in config.datasets:
datasets[dataset.name] = Dataset(name=dataset.name)
checkpoints = {}
for checkpoint in config.checkpoints:
download_path = workspace / Task.checkpoints_dir / checkpoint.name
checkpoints[checkpoint.name] = Checkpoint(name=checkpoint.name, url=checkpoint.url, path=download_path)
results_types = self._task_results_types(pipeline=pipeline)
stages = {}
for stage in pipeline.stages():
stages[stage.label] = {"result_path": stage.result_path, "result_type": stage.result_type}
experiments = []
for idx, configuration in enumerate(config.configurations, start=1):
experiment = self._prepare_experiment(
idx=idx,
configuration=configuration,
results_types=results_types,
stages=stages,
)
experiments.append(experiment)
system_info = SystemInfo.from_host()
task = Task(
model_name=config.model_name,
ensemble_model_name=config.ensemble_model_name,
framework=config.framework,
checkpoints=checkpoints,
datasets=datasets,
datasets_dir=config.datasets_dir,
experiments=experiments,
container_version=config.container_version,
system_info=system_info,
triton_container_image=triton_container_image,
triton_custom_operations=config.triton_custom_operations,
triton_load_model_method=config.triton_load_model_method,
started_at=int(datetime.utcnow().timestamp()),
logs_dir=logs_dir,
batching=config.batching,
measurement_steps_offline=config.measurement_steps_offline,
measurement_steps_online=config.measurement_steps_online,
performance_tool=config.performance_tool,
)
return task
def _task_results_types(self, pipeline: Pipeline) -> List[str]:
"""
Types of results generated as part of task
Returns:
List of result types
"""
results = []
for stage in pipeline.stages():
if TritonPerformanceOfflineStage.label == stage.label:
results.append(ResultsType.TRITON_PERFORMANCE_OFFLINE)
continue
if TritonPerformanceOnlineStage.label == stage.label:
results.append(ResultsType.TRITON_PERFORMANCE_ONLINE)
continue
return results
def _prepare_experiment(
self,
idx: int,
configuration: Configuration,
results_types: List[str],
stages: Dict,
) -> Experiment:
"""
Prepare experiment data
Args:
idx: Experiment index
configuration: Configuration object
results_types: Results types stored in experiment
stages: Stages executed as part of experiment
Returns:
Experiment object
"""
results_mapped = {}
for result_type in results_types:
results_mapped[result_type] = result_type
stages_mapped = {}
for name, stage_data in stages.items():
stages_mapped[name] = Stage(name=name, **stage_data)
experiment = Experiment(
experiment_id=idx,
parameters=configuration.parameters,
stages=stages_mapped,
results=results_mapped,
checkpoint=configuration.checkpoint,
)
return experiment
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/preparer.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import platform
import subprocess
from datetime import datetime
from typing import Dict, List, Optional, Union
import cpuinfo
import psutil
import yaml
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..deployment_toolkit.core import PerformanceTool
from .core import CustomDumper, DataObject
from .experiment import Experiment
from .triton import Triton
class GPU(DataObject):
"""
GPU information data object
"""
name: str
driver_version: str
cuda_version: str
memory: str
tdp: str
def __init__(self, name: str, driver_version: str, cuda_version: str, memory: str, tdp: str):
"""
Args:
name: name of GPU
driver_version: version of driver
cuda_version: version of CUDA
memory: size of memory available on GPU [MB]
tdp: Max TDP of GPU unit
"""
self.name = name
self.driver_version = driver_version
self.cuda_version = cuda_version
self.memory = memory
self.tdp = tdp
@staticmethod
def from_dict(data: Dict):
"""
Create GPU object from dictionary
Args:
data: dictionary with GPU data
Returns:
GPU object
"""
return GPU(
name=data["name"],
driver_version=data["driver_version"],
cuda_version=data["cuda_version"],
memory=data["memory"],
tdp=data["tdp"],
)
@staticmethod
def from_host():
"""
Create GPU object from host data
Returns:
GPU object
"""
data = subprocess.check_output(
["nvidia-smi", "--query-gpu=name,driver_version,memory.total,power.max_limit", "--format=csv"]
).decode()
lines = data.split(sep="\n")
device_details = lines[1].split(",")
name = device_details[0].strip()
driver_version = device_details[1].strip()
memory = device_details[2].strip()
tdp = device_details[3].strip()
cuda_version = None
data = subprocess.check_output(["nvidia-smi", "--query"]).decode()
lines = data.split(sep="\n")
for line in lines:
if line.startswith("CUDA Version"):
cuda_version = line.split(":")[1].strip()
break
return GPU(
name=name,
driver_version=driver_version,
cuda_version=cuda_version,
memory=memory,
tdp=tdp,
)
class CPU(DataObject):
"""
CPU details
"""
name: str
physical_cores: int
logical_cores: int
min_frequency: float
max_frequency: float
def __init__(self, name: str, physical_cores: int, logical_cores: int, min_frequency: float, max_frequency: float):
"""
Args:
name: name of CPU unit
physical_cores: number of physical cores available on CPU
logical_cores: number of logical cores available on CPU
min_frequency: minimal clock frequency
max_frequency: maximal clock frequency
"""
self.name = name
self.physical_cores = physical_cores
self.logical_cores = logical_cores
self.min_frequency = min_frequency
self.max_frequency = max_frequency
@staticmethod
def from_host():
"""
Create CPU object from host data
Returns:
CPU object
"""
return CPU(
name=cpuinfo.get_cpu_info()["brand_raw"],
physical_cores=psutil.cpu_count(logical=False),
logical_cores=psutil.cpu_count(logical=True),
min_frequency=psutil.cpu_freq().min,
max_frequency=psutil.cpu_freq().max,
)
class Memory(DataObject):
"""
Memory data object
"""
size: float
def __init__(self, size: float):
"""
Args:
size: RAM memory size in MB
"""
self.size = size
@staticmethod
def from_host():
"""
Create Memory object from host data
Returns:
Memory object
"""
svm = psutil.virtual_memory()
return Memory(size=svm.total)
class SystemInfo(DataObject):
"""
System Information data object
"""
system: str
cpu: CPU
memory: Memory
gpu: GPU
def __init__(self, system: str, cpu: CPU, memory: Memory, gpu: GPU):
"""
Args:
system: name of operating system
cpu: CPU info
memory: Memory info
gpu: GPU info
"""
self.system = system
self.cpu = cpu
self.memory = memory
self.gpu = gpu
@staticmethod
def from_host():
"""
Create SystemInfo object from host data
Returns:
SystemInfo object
"""
system = platform.platform()
gpu = GPU.from_host()
memory = Memory.from_host()
cpu = CPU.from_host()
return SystemInfo(system=system, cpu=cpu, gpu=gpu, memory=memory)
class Checkpoint(DataObject):
"""
Checkpoint data object
"""
def __init__(self, name: str, url: str, path: Union[str, pathlib.Path]):
"""
Args:
name: Name of checkpoint
url: Url from which checkpoint can be downloaded
path: Location of checkpoint on local hardware
"""
self.name = name
self.url = url
self.path = pathlib.Path(path)
class Dataset(DataObject):
"""
Dataset data object
"""
def __init__(self, name: str):
"""
Args:
name: Name of dataset
"""
self.name = name
class Task(DataObject):
"""
Task data object to store task execution information
"""
model_name: str
framework: str
batching: str
started_at: int
ended_at: Optional[int]
container_version: str
checkpoints: Dict[str, Checkpoint]
datasets: Dict[str, Dataset]
datasets_dir: Optional[Union[str, pathlib.Path]]
experiments: List[Experiment]
system_info: SystemInfo
triton_container_image: Optional[str]
triton_custom_operations: Optional[str]
performance_tool: PerformanceTool
filename: str = "task.yaml"
results_dir: str = "results"
checkpoints_dir: str = "checkpoints"
def __init__(
self,
model_name: str,
ensemble_model_name: Optional[str],
framework: str,
batching: str,
container_version: str,
checkpoints: Dict,
datasets: Dict,
experiments: List,
system_info: SystemInfo,
started_at: int,
logs_dir: pathlib.Path,
datasets_dir: Optional[Union[str, pathlib.Path]] = None,
ended_at: Optional[int] = None,
triton_container_image: Optional[str] = None,
triton_custom_operations: Optional[str] = None,
triton_load_model_method: str = Triton.LOAD_MODE.EXPLICIT,
measurement_steps_offline: int = 8,
measurement_steps_online: int = 32,
performance_tool: PerformanceTool = PerformanceTool.MODEL_ANALYZER,
):
"""
Args:
model_name: Name of model
framework: Model framework
container_version: Container version used in task
checkpoints: List of checkpoints
datasets: List of datasets
datasets_dir: Directory where datasets are stored
experiments: List of experiments run as part of task
system_info: information about node on which experiment was executed
started_at: Time when task has started
ended_at: Time when task has ended
triton_container_image: Custom Triton Container Image used for task
triton_custom_operations: Custom operation library path
triton_load_model_method: Method how models are loaded on Triton
measurement_steps_offline: Number of measurement steps in offline performance stage
measurement_steps_online: Number of measurement steps in online performance stage
performance_tool: Performance Tool used for generating results
logs_dir: place where logs for task are stored
"""
self.started_at = started_at
self.ended_at = ended_at
self.model_name = model_name
self.ensemble_model_name = ensemble_model_name
self.framework = framework
self.container_version = container_version
self.checkpoints = checkpoints
self.datasets = datasets
self.datasets_dir = pathlib.Path(datasets_dir)
self.experiments = experiments
self.system_info = system_info
self.triton_container_image = triton_container_image
self.triton_custom_operations = triton_custom_operations
self.triton_load_model_method = triton_load_model_method
self.measurement_steps_offline = measurement_steps_offline
self.measurement_steps_online = measurement_steps_online
self.logs_dir = logs_dir
self.batching = batching
self.performance_tool = performance_tool
def start(self) -> None:
"""
Update task execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update task execution info at end
Returns:
None
"""
self.ended_at = int(datetime.utcnow().timestamp())
def to_file(self, file_path: Union[pathlib.Path, str]):
"""
Store task data to YAML file
Args:
file_path: path to file where task data has to be saved
Returns:
None
"""
task_data = self.to_dict()
with open(file_path, "w") as f:
yaml.dump(task_data, f, Dumper=CustomDumper, width=240, sort_keys=False)
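# Usage sketch (editor's illustration, not part of the original module):
# collect host metadata and dump it in the same YAML style Task.to_file uses.
# GPU.from_host() requires `nvidia-smi` on PATH; to_dict() is assumed to be
# provided by DataObject in the sibling `core` module (Task.to_file relies on it).
if __name__ == "__main__":
    system_info = SystemInfo.from_host()
    print(yaml.dump(system_info.to_dict(), Dumper=CustomDumper, sort_keys=False))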
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/task.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
import signal
import sys
from typing import List, Type
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .exceptions import RunnerException
from .executor import Executor
from .finalizer import Finalizer
from .logger import LOGGER, log_format
from .maintainer import Maintainer
from .pipeline import Pipeline
from .preparer import Preparer
from .triton import Triton
class Runner:
"""
Runner class. Main entrypoint for performing the task and its experiments
"""
WORKSPACE = pathlib.Path.cwd()
EXECUTOR_WORKSPACE = WORKSPACE / "runner_workspace"
def __init__(
self,
pipeline: Pipeline,
config: Config,
executor_cls: Type[Executor],
maintainer_cls: Type[Maintainer],
preparer_cls: Type[Preparer],
finalizer_cls: Type[Finalizer],
devices: List[str] = None,
log_level: int = logging.INFO,
):
self._pipeline = pipeline
self._config = config
self._preparer = preparer_cls()
self._finalizer = finalizer_cls()
self._devices = devices or ["0"]
self._log_level = log_level
self._logs_dir = self.EXECUTOR_WORKSPACE / "logs"
self._log_file_path = self._logs_dir / "runner.log"
self._maintainer = maintainer_cls()
self._executor = executor_cls(
workspace=self.EXECUTOR_WORKSPACE,
maintainer=self._maintainer,
pipeline=pipeline,
devices=devices,
)
signal.signal(signal.SIGINT, self._catch)
self._logs_dir.mkdir(parents=True, exist_ok=True)
def start(self) -> None:
"""
Start runner
Returns:
None
"""
self._setup_logger()
task = self._preparer.exec(
workspace=self.EXECUTOR_WORKSPACE,
config=self._config,
pipeline=self._pipeline,
logs_dir=self._logs_dir,
maintainer=self._maintainer,
triton=Triton(),
)
results = []
try:
for result in self._executor.start(task):
results.append(result)
except RunnerException as e:
LOGGER.error(f"Error running task: {str(e)}")
finally:
self._executor.stop()
self._finalizer.exec(workspace=self.EXECUTOR_WORKSPACE, task=task, results=results)
def _catch(self, signum, frame):
"""
SIGINT handler. Stops the executor and exits when interrupted.
Args:
signum: signal id
frame: signal frame
"""
self._executor.stop()
sys.exit(0)
def _setup_logger(self) -> None:
"""
Add file handle for logger
Returns:
None
"""
file = logging.FileHandler(self._log_file_path)
formatter = logging.Formatter(log_format)
file.setFormatter(formatter)
LOGGER.addHandler(file)
LOGGER.setLevel(level=self._log_level)
LOGGER.initialize(file_path=self._log_file_path)
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/runner.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Framework, Paths
class Triton:
"""
Triton Inference Server helper class
"""
image = "nvcr.io/nvidia/tritonserver"
tag = "py3"
class LOAD_MODE:
"""
Loading mode available in Triton
"""
POLL = "poll"
EXPLICIT = "explicit"
@staticmethod
def container_image(container_version: str):
"""
Container image based on version
Args:
container_version: Version of container to be used
Returns:
Image name with tag
"""
return f"{Triton.image}:{container_version}-{Triton.tag}"
@staticmethod
def command(
framework: str,
repository_path: str,
strict_mode: bool = False,
poll_model: bool = False,
metrics: bool = False,
verbose: bool = False,
):
"""
Command to run Triton Inference Server inside container
Args:
framework: Framework used for model
repository_path: Path to model repository
strict_mode: Flag to use strict model config
poll_model: Poll model
metrics: Enable GPU metrics (disable for MIG)
verbose: Use verbose mode logging
Returns:
Command string used to start Triton Inference Server
"""
triton_command = f"tritonserver --model-store={repository_path}"
if poll_model:
triton_command += " --model-control-mode=poll --repository-poll-secs 5"
else:
triton_command += " --model-control-mode=explicit"
if not strict_mode:
triton_command += " --strict-model-config=false"
if not metrics:
triton_command += " --allow-metrics=false --allow-gpu-metrics=false"
if verbose:
triton_command += " --log-verbose 1"
if framework in (Framework.TensorFlow1, Framework.TensorFlow2):
version = 1 if framework == Framework.TensorFlow1 else 2
triton_command += f" --backend-config=tensorflow,version={version}"
return triton_command
@staticmethod
def library_path(framework: str):
"""
Obtain custom library path for framework
Args:
framework: Framework used for model
Returns:
Path to additional libraries needed by framework
"""
paths = {
Framework.PyTorch.name: "/opt/tritonserver/backends/pytorch",
Framework.TensorFlow1.name: "/opt/tritonserver/backends/tensorflow1",
Framework.TensorFlow2.name: "/opt/tritonserver/backends/tensorflow2",
}
return paths[framework]
@staticmethod
def custom_library_path_remote() -> str:
"""
Path to custom library mounted in Triton container
Returns:
Path to shared library with custom operations
"""
return f"{Paths.LIBRARIES_PATH}/libcustomops.so"
@staticmethod
def custom_library_path_local(libs_dir: pathlib.Path) -> pathlib.Path:
"""
Path to custom library in local path
Args:
libs_dir: path to libraries directory
Returns:
Path to shared library with custom operations
"""
return libs_dir / "libcustomops.so"
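# Usage sketch (editor's illustration, not part of the original module):
# render the container image tag and the server command for a TensorFlow2
# model repository; the container version string is a placeholder.
if __name__ == "__main__":
    print(Triton.container_image(container_version="22.02"))
    print(
        Triton.command(
            framework=Framework.TensorFlow2,
            repository_path=Paths.MODEL_REPOSITORY_PATH,
            metrics=True,
            verbose=True,
        )
    )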
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/triton.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Dict, List, Optional, Union
import yaml
from ..deployment_toolkit.core import PerformanceTool
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .configuration import Configuration
from .core import DataObject
from .triton import Triton
class Checkpoint(DataObject):
"""
Checkpoint data placeholder
"""
name: str
url: str
def __init__(self, name: str, url: str):
self.name = name
self.url = url
class Dataset(DataObject):
"""
Dataset data placeholder
"""
name: str
def __init__(self, name: str):
self.name = name
class Config(DataObject):
"""
Configuration object for runner experiments
"""
def __init__(
self,
model_name: str,
framework: str,
container_version: str,
batching: str,
configurations: List[Configuration],
ensemble_model_name: Optional[str] = None,
datasets_dir: str = "datasets",
datasets: List[Dataset] = None,
checkpoints: List[Checkpoint] = None,
triton_dockerfile: Optional[str] = None,
triton_container_image: Optional[str] = None,
triton_custom_operations: Optional[str] = None,
triton_load_model_method: Optional[str] = Triton.LOAD_MODE.EXPLICIT,
measurement_steps_offline: int = 8,
measurement_steps_online: int = 32,
performance_tool: PerformanceTool = PerformanceTool.MODEL_ANALYZER,
):
"""
Args:
model_name: Name of model
framework: Framework used to create model
container_version: Version of Triton Inference Server container used for evaluation
batching: Mark if model supports batching
configurations: List of experiments configurations
datasets_dir: Directory where datasets are stored
datasets: Datasets used for conversion/export
checkpoints: Checkpoints with trained model
triton_load_model_method: Triton Inference Server model loading mode
triton_dockerfile: Dockerfile for Triton to build custom image
triton_container_image: Custom image used for Triton Server - leave empty to use the default image or the one built from the Dockerfile
triton_custom_operations: Path where custom operation library is stored
measurement_steps_offline: Number of measurement steps in offline performance stage
measurement_steps_online: Number of measurement steps in online performance stage
performance_tool: Performance Tool used for generating results
"""
self.model_name = model_name
self.ensemble_model_name = ensemble_model_name
self.framework = framework
self.container_version = container_version
self.batching = batching
self.configurations = configurations
self.datasets_dir = datasets_dir
self.datasets = datasets
self.checkpoints = checkpoints
self.triton_load_model_method = triton_load_model_method
self.triton_dockerfile = triton_dockerfile
self.triton_container_image = triton_container_image
self.triton_custom_operations = triton_custom_operations
self.measurement_steps_offline = measurement_steps_offline
self.measurement_steps_online = measurement_steps_online
self.performance_tool = performance_tool
def to_file(self, file_path: Union[pathlib.Path, str]) -> None:
"""
Save config data to file
Args:
file_path: path to the file where config data should be stored
Returns:
None
"""
data = self.to_dict()
with open(file_path, "w") as f:
yaml.safe_dump(data, f)
@staticmethod
def from_dict(config_data: Dict):
"""
Create configuration object from data stored in dictionary
Args:
config_data: dictionary with config data
Returns:
Config object
"""
configurations = []
for configuration_data in config_data["configurations"]:
configuration = Configuration(**configuration_data)
configurations.append(configuration)
checkpoints = []
for checkpoint_data in config_data.get("checkpoints", []):
checkpoint = Checkpoint(
name=checkpoint_data["name"],
url=checkpoint_data["url"],
)
checkpoints.append(checkpoint)
datasets = []
for dataset_data in config_data.get("datasets", []):
dataset = Dataset(name=dataset_data["name"])
datasets.append(dataset)
return Config(
model_name=config_data["model_name"],
framework=config_data["framework"],
container_version=config_data["container_version"],
batching=config_data["batching"],
configurations=configurations,
checkpoints=checkpoints,
datasets=datasets,
datasets_dir=config_data.get("datasets_dir"),
triton_load_model_method=config_data["triton_load_model_method"],
triton_dockerfile=config_data.get("triton_dockerfile"),
triton_custom_operations=config_data.get("triton_custom_operations"),
measurement_steps_offline=config_data["measurement_steps_offline"],
measurement_steps_online=config_data["measurement_steps_online"],
performance_tool=PerformanceTool(config_data["performance_tool"]),
)
@staticmethod
def from_file(file_path: Union[pathlib.Path, str]):
"""
Load config data from file
Args:
file_path: path to file where config data is stored
Returns:
Config object
"""
with open(file_path) as f:
config_data = yaml.safe_load(f)
return Config.from_dict(config_data)
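# Usage sketch (editor's illustration, not part of the original module).
# The keys below mirror what Config.from_dict() reads; the values are
# placeholders, and "model_analyzer" as the PerformanceTool enum value is an
# assumption about the deployment toolkit.
if __name__ == "__main__":
    config = Config.from_dict(
        {
            "model_name": "WideAndDeep",
            "framework": "TensorFlow2",
            "container_version": "22.02",
            "batching": "dynamic",
            "configurations": [{"parameters": {"precision": "fp16"}, "checkpoint": None}],
            "triton_load_model_method": Triton.LOAD_MODE.EXPLICIT,
            "measurement_steps_offline": 8,
            "measurement_steps_online": 32,
            "performance_tool": "model_analyzer",  # assumed enum value
        }
    )
    print(config.model_name, config.performance_tool)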
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/config.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import shutil
import urllib.request
from typing import Any, Callable
from zipfile import ZipFile
from retrying import retry
from tqdm.auto import tqdm
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .logger import LOGGER
from .exceptions import RunnerException
def unzip(checkpoint_path: pathlib.Path, archive_path: pathlib.Path) -> None:
"""
Unzip archive to provided path
Args:
checkpoint_path: Path where archive has to be unpacked
archive_path: Path to the archive file
Returns:
None
"""
LOGGER.info(f"Creating directory for checkpoint: {checkpoint_path.name}")
checkpoint_path.mkdir(parents=True, exist_ok=True)
LOGGER.info(f"Unpacking checkpoint files {checkpoint_path}")
with ZipFile(archive_path, "r") as zf:
zf.extractall(path=checkpoint_path)
LOGGER.info("done")
LOGGER.info(f"Removing zip file: {archive_path}")
archive_path.unlink()
LOGGER.info("done")
def download_progress(t: Any) -> Callable:
"""
Progress bar
Args:
t: tqdm progress bar instance
Returns:
Callable
"""
last_b = [0]
def update_to(b: int = 1, bsize: int = 1, tsize: int = None):
if tsize not in (None, -1):
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return update_to
@retry(stop_max_attempt_number=3)
def download(checkpoint_url: str, checkpoint_path: pathlib.Path) -> None:
"""
Download checkpoint from given url to provided path
Args:
checkpoint_url: Url from which checkpoint has to be downloaded
checkpoint_path: Path where checkpoint has to be stored
Returns:
None
"""
LOGGER.info(f"Downloading checkpoint from {checkpoint_url}")
with tqdm(unit="B") as t:
reporthook = download_progress(t)
result = urllib.request.urlretrieve(checkpoint_url, reporthook=reporthook)
filename = result[0]
LOGGER.info(f"Checkpoint saved in {filename}")
file_path = pathlib.Path(filename)
if not file_path.is_file() and not file_path.is_dir():
raise RunnerException(f"Checkpoint {filename} does not exist")
LOGGER.info(f"Moving checkpoint to {checkpoint_path.parent}")
shutil.move(file_path, checkpoint_path.parent / file_path.name)
LOGGER.info("done")
archive_path = checkpoint_path.parent / file_path.name
unzip(checkpoint_path, archive_path)
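# Usage sketch (editor's illustration, not part of the original module):
# fetch a zipped checkpoint and unpack it into the target directory. The URL
# below is a placeholder, not a real artifact location.
if __name__ == "__main__":
    download(
        checkpoint_url="https://example.com/checkpoint.zip",
        checkpoint_path=pathlib.Path("runner_workspace") / "checkpoints" / "example",
    )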
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/downloader.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from typing import Dict, List
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .experiment import ExperimentResult
from .logger import LOGGER
from .stages import ResultsType
from .summary import load_results, save_summary
from .task import Task
class Finalizer(abc.ABC):
@abc.abstractmethod
def exec(self, workspace: pathlib.Path, task: Task, results: List[ExperimentResult]):
pass
class ExperimentFinalizer(Finalizer):
"""
Experiment runner finalizer object.
"""
def exec(self, workspace: pathlib.Path, task: Task, results: List[ExperimentResult]):
results_path = workspace / task.results_dir
self._generate_summary(results_path, results)
self._finalize_task(results_path, task)
def _finalize_task(self, results_path: pathlib.Path, task: Task) -> None:
"""
Finalize task information
Args:
task: Task object
Returns:
None
"""
task.end()
file_path = results_path / task.filename
LOGGER.debug(f"Saving task details to file {file_path}")
task.to_file(file_path)
LOGGER.debug("Done")
LOGGER.info(f"Task details and results stored in {results_path}")
def _generate_summary(self, results_path: pathlib.Path, experiment_results: List[ExperimentResult]):
"""
Generate summary for results collected in all experiments
Args:
results_path: Path where results should be stored
experiment_results: Results collected from experiments
Returns:
None
"""
performance_offline_results = list()
performance_online_results = list()
results_mapping = {
ResultsType.TRITON_PERFORMANCE_OFFLINE: performance_offline_results,
ResultsType.TRITON_PERFORMANCE_ONLINE: performance_online_results,
}
self._collect_summary_results(experiment_results, results_mapping)
self._prepare_final_results(results_path, results_mapping)
def _collect_summary_results(self, experiment_results: List[ExperimentResult], results_mapping: Dict):
for experiment_result in experiment_results:
experiment = experiment_result.experiment
for result_type, result_path in experiment_result.results.items():
if not result_path.is_file() and not result_path.is_dir():
raise FileNotFoundError(f"Expected file {result_path} not found")
LOGGER.debug(f"Found {result_type} in {result_path} file.")
if result_type not in results_mapping:
LOGGER.debug(f"Results {result_type} for {experiment.experiment_id} are ignored in final summary.")
# Skip this result type only; returning here would silently drop all remaining results.
continue
LOGGER.debug(f"Collecting {result_type} results from {result_path} for summary")
result = load_results(
results_path=result_path,
parameters=experiment.parameters,
result_type=result_type,
)
results_mapping[result_type].extend(result)
LOGGER.debug("Done.")
def _prepare_final_results(self, results_path: pathlib.Path, results_mapping: Dict) -> None:
"""
Prepare summary files for offline and online performance
Args:
results_path: Path where results should be stored
results_mapping: Mapping with results type and collected results for given stage
Returns:
None
"""
for results_type, results in results_mapping.items():
save_summary(
result_type=results_type,
results=results,
summary_dir=results_path,
)
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/finalizer.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Command
from .exceptions import RunnerException
from .stages import Stage
class CommandsExporter:
"""
Exports stage commands to Bash scripts
"""
def __init__(self, scripts_dir: pathlib.Path):
"""
Args:
scripts_dir: Paths where scripts should be stored
"""
self._scripts_dir = scripts_dir
def export(self, stage: Stage) -> Command:
"""
Export stage commands to script and return new command to execute
Args:
stage: Stage object with commands
Returns:
Command object with script execution command
"""
filename = self._get_filename(stage.label)
file_path = self._scripts_dir / filename
with open(file_path, "w+") as stagefile:
stagefile.write("set -x\n")
stagefile.write("set -e\n")
stagefile.write("export PYTHONUNBUFFERED=1\n")
stagefile.write("export PYTHONPATH=`pwd`\n")
for command in stage.commands:
for line in str(command).split("\n"):
stagefile.write(line.rstrip())
stagefile.write("\n")
stagefile.write("\n")
result = os.system(f'ex +"set syn=sh" +"norm gg=G" -cwq {file_path}')
if result != 0:
raise RunnerException(f"Failed running {filename} script formatting. Exit code {result}")
command = Command(f"bash -xe {file_path.as_posix()}")
return command
def _get_filename(self, label: str):
"""
Generate filename for script based on label
Args:
label: String with stage label
Returns:
String with script filename
"""
filename = label.replace(" ", "_").lower()
filename = f"{filename}.sh"
return filename
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/exporter.py |
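A short sketch of the exporter in use; note that `export` shells out to the `ex` editor for script formatting, so `ex` must be available on PATH (an environment assumption):
import pathlib

scripts_dir = pathlib.Path("./scripts")   # illustrative location
scripts_dir.mkdir(parents=True, exist_ok=True)
exporter = CommandsExporter(scripts_dir)
# `stage` is assumed to be a Stage with label "Deploy Model" and a commands list;
# the call below would return Command("bash -xe scripts/deploy_model.sh").
# command = exporter.export(stage)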
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import pathlib
from enum import Enum
from typing import Any, Dict, List
import yaml
class CustomDumper(yaml.Dumper):
"""
Custom YAML dumper that avoids creating aliases
"""
def ignore_aliases(self, data: Dict) -> bool:
return True
class Paths:
"""
Paths mapping inside Triton Container
"""
MODEL_REPOSITORY_PATH = "/mnt/triton-models"
LIBRARIES_PATH = "/mnt/libs"
class Framework(Enum):
"""
Supported frameworks
"""
TensorFlow1 = "TensorFlow1"
TensorFlow2 = "TensorFlow2"
PyTorch = "PyTorch"
class Command:
"""Represents wrapper of raw string command"""
def __init__(self, data: str):
"""
Store command data
Args:
data: string with bash commands to execute
"""
self._data = data
def __str__(self) -> str:
"""
String object representation
Returns:
String
"""
return self._data
@dataclasses.dataclass
class Measurement:
offline_batch_sizes: List[int]
offline_concurrency: List[int]
online_batch_sizes: List[int]
online_concurrency: List[int]
min_shapes_batch: int
max_shapes_batch: int
opt_shapes_batch: int
class DataObject:
"""
Data object representation handling recursive transformation from object to dict
"""
READ_ONLY = set()
def to_dict(self) -> Dict:
"""
Represent object as dictionary
Returns:
Dict
"""
data = {}
filtered_data = {key: value for key, value in self.__dict__.items() if key not in self.READ_ONLY}
for key, value in filtered_data.items():
data[key] = self._convert_value(value)
return data
def _convert_value(self, value: Any) -> Any:
"""
Convert value based on its type
Args:
value: variable to convert
Returns:
Converted object
"""
if isinstance(value, DataObject):
value = value.to_dict()
elif isinstance(value, dict):
value = self._from_dict(value)
elif isinstance(value, list):
value = self._from_list(value)
elif isinstance(value, Enum):
value = value.value
elif isinstance(value, pathlib.Path):
value = value.as_posix()
return value
def _from_dict(self, values: Dict) -> Any:
"""
Convert dictionary values
Args:
values: dictionary with values
Returns:
Any
"""
data = {}
for key, value in values.items():
data[key] = self._convert_value(value)
return data
def _from_list(self, values: List) -> Any:
"""
Convert list of values
Args:
values: list with values
Returns:
Any
"""
items = []
for value in values:
item = self._convert_value(value)
items.append(item)
return items
AVAILABLE_FRAMEWORKS = [f.value for f in Framework]
class Batching(Enum):
DISABLED = "disabled"
STATIC = "static"
DYNAMIC = "dynamic"
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/core.py |
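A small illustration of `DataObject.to_dict`, using a hypothetical subclass to show how enums and paths are converted recursively:
import pathlib

class ExampleObject(DataObject):      # hypothetical subclass for illustration only
    def __init__(self):
        self.framework = Framework.TensorFlow2
        self.model_path = pathlib.Path("/mnt/triton-models/model")
        self.batching = Batching.DYNAMIC

# ExampleObject().to_dict() ==
# {"framework": "TensorFlow2", "model_path": "/mnt/triton-models/model", "batching": "dynamic"}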
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
import coloredlogs
class Logger(logging.Logger):
def __init__(self, name, level=logging.NOTSET):
super().__init__(name, level=level)
self._file_path = None
def initialize(self, file_path: pathlib.Path):
self._file_path = file_path
def write(self, log: str):
if not self._file_path:
return
with open(self._file_path, "+a") as file:
file.write(log)
LOGGER = Logger("runner")
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(format=log_format)
coloredlogs.install(
level=logging.INFO,
fmt=log_format,
logger=LOGGER,
field_styles={
"asctime": {"color": "green"},
"hostname": {"color": "magenta"},
"levelname": {"bold": True, "color": "blue"},
"name": {"color": "blue"},
"programname": {"color": "cyan"},
"username": {"color": "yellow"},
},
reconfigure=True,
)
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/logger.py |
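Usage sketch for the logger above: console logging works immediately via coloredlogs, while `write` mirrors raw output to a file only after `initialize` is called (the path below is illustrative):
import pathlib

LOGGER.initialize(file_path=pathlib.Path("runner_workspace/runner.log"))  # illustrative
LOGGER.info("Runner started")            # formatted console output
LOGGER.write("raw subprocess line\n")    # appended verbatim to the log file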
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import List, Type
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .executor import Executor
from .finalizer import Finalizer
from .maintainer import Maintainer
from .pipeline import Pipeline
from .preparer import Preparer
from .runner import Runner
class RunnerProxy:
"""
Runner proxy to configure original runner
"""
maintainer_cls: Type[Maintainer] = None
executor_cls: Type[Executor] = None
preparer_cls: Type[Preparer] = None
finalizer_cls: Type[Finalizer] = None
def __init__(self, config: Config, pipeline: Pipeline, devices: List[str]):
"""
RunnerProxy constructor
Args:
config: Config object
pipeline: Pipeline to evaluate
devices: List of devices to use for tests
"""
self._runner = Runner(
config=config,
pipeline=pipeline,
devices=devices,
maintainer_cls=self.maintainer_cls,
executor_cls=self.executor_cls,
preparer_cls=self.preparer_cls,
finalizer_cls=self.finalizer_cls,
)
def start(self) -> None:
"""
Runner interface
"""
self._runner.start()
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/runner_proxy.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import List, Optional, Tuple, Union
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Command
class ResultsType:
"""
Results types generated by runner
"""
TRITON_PERFORMANCE_OFFLINE = "triton_performance_offline"
TRITON_PERFORMANCE_ONLINE = "triton_performance_online"
class Stage:
"""
Stage definition
"""
label: str
commands: List[Command]
result_path: Optional[str]
result_type: Optional[str]
def __init__(
self,
commands: Union[Tuple[str, ...], List[str]],
result_path: Optional[str] = None,
result_type: Optional[str] = None,
):
"""
Args:
commands: List or tuple of commands provided as raw strings
result_path: Path to results file generated by stage
result_type: Type of results generated by stage
"""
if type(commands) not in [tuple, list]:
raise ValueError("""Incorrect type of commands list. Please, provide list of commands as tuple.""")
self.commands = list(map(lambda command: Command(data=command), commands))
self.result_path = result_path
self.result_type = result_type
class ExportStage(Stage):
label = "Export Model"
class ConversionStage(Stage):
label = "Convert Model"
class DeployStage(Stage):
label = "Deploy Model"
class CorrectnessStage(Stage):
label = "Model Correctness"
class TritonPreparePerformanceProfilingDataStage(Stage):
label = "Prepare Triton Profiling Data"
class TritonPerformanceOfflineStage(Stage):
label = "Triton Performance Offline"
class TritonPerformanceOnlineStage(Stage):
label = "Triton Performance Online"
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/stages.py |
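A sketch of constructing a stage; `commands` must be a list or tuple of raw strings, otherwise the constructor raises ValueError:
stage = DeployStage(commands=["echo deploying model"])
assert stage.label == "Deploy Model"
assert str(stage.commands[0]) == "echo deploying model"
assert stage.result_path is None         # no results file for this stage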
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import pathlib
from datetime import datetime
from typing import Any, Dict, Optional
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import DataObject
class ExperimentStatus:
"""
Experiment status flags object
"""
SUCCEED = "Succeed"
FAILED = "Failed"
class StageStatus:
"""
Stage status flags object
"""
SUCCEED = "Succeed"
FAILED = "Failed"
class Stage(DataObject):
"""
Stage data object
"""
name: str
status: str
started_at: Optional[int]
ended_at: Optional[int]
result_path: Optional[str]
result_type: Optional[str]
def __init__(
self,
name: str,
result_path: Optional[str],
result_type: Optional[str],
status: str = StageStatus.FAILED,
started_at: Optional[int] = None,
ended_at: Optional[int] = None,
):
"""
Args:
name: name of stage
result_path: path where results file is stored
result_type: type of results
status: success/fail status
started_at: time when the stage started
ended_at: time when the stage ended
"""
self.name = name
self.status = status
self.started_at = started_at
self.ended_at = ended_at
self.result_path = result_path
self.result_type = result_type
def start(self) -> None:
"""
Update stage execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update stage execution info at end
Returns:
None
"""
self.status = StageStatus.SUCCEED
self.ended_at = int(datetime.utcnow().timestamp())
class Experiment(DataObject):
"""
Experiment data object
"""
experiment_id: int
parameters: Dict
stages: Dict[str, Stage]
results: Dict[str, str]
status: str
checkpoint: str
started_at: Optional[int]
ended_at: Optional[int]
def __init__(
self,
experiment_id: int,
parameters: Dict,
stages: Dict[str, Stage],
results: Dict[str, str],
checkpoint: str,
started_at: Optional[int] = None,
ended_at: Optional[int] = None,
status: str = ExperimentStatus.FAILED,
):
"""
Args:
experiment_id: experiment identifier
parameters: dictionary with experiment configuration
stages: dictionary with stages run in experiment
results: mapping between result types and the locations where they are stored
started_at: time when the experiment started
ended_at: time when the experiment ended
status: experiment success/fail information
checkpoint: checkpoint used for the experiment
"""
self.experiment_id = experiment_id
self.started_at = started_at
self.ended_at = ended_at
self.parameters = parameters
self.stages = stages
self.status = status
self.checkpoint = checkpoint
self.results = results
self.results_dir = f"experiment_{experiment_id}"
def start(self) -> None:
"""
Update experiment execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update experiment execution info at end
Returns:
None
"""
self.status = ExperimentStatus.SUCCEED
self.ended_at = int(datetime.utcnow().timestamp())
@dataclasses.dataclass
class Status:
state: ExperimentStatus
message: str
@dataclasses.dataclass
class ExperimentResult:
"""
Experiment result object
"""
status: Status
experiment: Experiment
results: Dict[str, pathlib.Path]
payload: Dict[str, Any] = dataclasses.field(default_factory=dict)
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/experiment.py |
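A lifecycle sketch for the experiment object; `start()` stamps `started_at`, and `end()` flips the status to Succeed (parameter values are illustrative):
experiment = Experiment(
    experiment_id=1,
    parameters={"precision": "fp16"},   # illustrative parameters
    stages={},                          # normally populated with Stage objects
    results={},
    checkpoint="default",               # illustrative checkpoint variant
)
experiment.start()                      # sets started_at; status is still "Failed"
experiment.end()                        # sets ended_at and status "Succeed"
assert experiment.results_dir == "experiment_1"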
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import pathlib
from typing import Dict, List, Union
# method from PEP-366 to support relative import in executed modules
import yaml
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..deployment_toolkit.report import save_results, sort_results
from .logger import LOGGER
def save_summary(result_type: str, results: List, summary_dir: pathlib.Path) -> None:
"""
Create file with summary for results of given type
Args:
result_type: Type of results to dump
results: Results data
summary_dir: Path where results should be stored
Returns:
None
"""
if len(results) == 0:
LOGGER.warning(f"No {result_type} results found.")
return
results = sort_results(results=results)
kind_file = summary_dir / f"{result_type}_summary.csv"
save_results(filename=kind_file.as_posix(), data=results, formatted=True)
LOGGER.info(f"Summary for {result_type} stored in {kind_file}")
def load_results(*, results_path: Union[pathlib.Path, str], result_type: str, parameters: Dict) -> List:
"""
Load result rows from a file or directory and merge experiment parameters into each row
Args:
results_path: Path to file or directory from which data should be read
result_type: type of results
parameters: Parameters used in experiment which generated results
Returns:
List of result rows
"""
LOGGER.debug(f"Loading {result_type} from {results_path} for summary")
results_path = pathlib.Path(results_path)
if results_path.is_file():
files = [results_path]
elif results_path.is_dir():
files = list(results_path.iterdir())
else:
LOGGER.debug(f"Unable to load file: {results_path}. Generating empty rows.")
data = [{}]
return data
if any([file.name.endswith(".ckpt") for file in files]):
model_analyzer_metrics = results_path / "metrics-model-inference.csv"
files = [model_analyzer_metrics]
else:
files = [file for file in files if file.name.endswith(".csv")]
results = list()
parameters_cpy = {key: value for key, value in parameters.items() if key != "batch"}
for file in files:
if file.suffix == ".csv":
data = _generate_data_from_csv(file=file)
elif file.suffix == ".json":
data = _generate_data_from_json(file=file)
elif file.suffix == ".yaml":
data = _generate_data_from_yaml(file=file)
else:
raise ValueError(f"Unsupported file extension: {file.suffix}")
for item in data:
result = {**parameters_cpy, **item}
results.append(result)
LOGGER.debug(f"Loading done. Collected {len(results)} results.")
return results
def _normalize_key(*, key: str) -> str:
"""
Normalize key
Args:
key: Key to normalize
Returns:
Normalized string
"""
key = "_".join(key.split(sep=" "))
key = key.lower()
return key
def _normalize_keys(*, data: Dict) -> Dict:
"""
Normalize keys in dictionary
Args:
data: Dictionary to normalize
Returns:
Normalized dictionary
"""
keys = {_normalize_key(key=key): value for key, value in data.items()}
return keys
def _generate_data_from_csv(*, file: Union[pathlib.Path, str]) -> List[Dict]:
"""
Generate result rows from CSV file
Args:
file: CSV file path
Returns:
List of rows
"""
LOGGER.debug(f"Reading data from {file}")
filtered_rows: List[Dict] = []
with open(file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
r = _normalize_keys(data=r)
filtered_row = {k: v for k, v in r.items()}
filtered_rows.append(filtered_row)
LOGGER.debug("done")
return filtered_rows
def _generate_data_from_json(file: pathlib.Path) -> List[Dict]:
LOGGER.info(f"Reading data from {file}")
filtered_rows: List[Dict] = list()
with open(file, "r") as json_file:
file_data = json.load(json_file)
if not isinstance(file_data, list):
file_data = [file_data]
for r in file_data:
r = _normalize_keys(data=r)
filtered_row = {k: v for k, v in r.items()}
filtered_rows.append(filtered_row)
LOGGER.info("done")
return filtered_rows
def _generate_data_from_yaml(file: pathlib.Path) -> List[Dict]:
LOGGER.info(f"Reading data from {file}")
filtered_rows: List[Dict] = list()
with open(file, "r") as yaml_file:
file_data = yaml.safe_load(yaml_file)
if not isinstance(file_data, list):
file_data = [file_data]
for r in file_data:
r = _normalize_keys(data=r)
filtered_row = {k: v for k, v in r.items()}
filtered_rows.append(filtered_row)
LOGGER.info("done")
return filtered_rows
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/summary.py |
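A sketch of the key normalization used while loading rows, plus a hedged example of `load_results` (the CSV path and parameter values are illustrative):
assert _normalize_key(key="Inferences Per Second") == "inferences_per_second"
assert _normalize_keys(data={"Batch Size": 64}) == {"batch_size": 64}

# rows = load_results(
#     results_path="triton_performance_offline.csv",     # illustrative path
#     result_type="triton_performance_offline",
#     parameters={"precision": "fp16", "batch": 64},     # "batch" is dropped from each row
# )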
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .pipeline import Pipeline
pipeline = Pipeline()
pipeline.model_export(
commands=(
r"""
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type tf-keras \
--output-path ${SHARED_DIR}/exported_model.savedmodel \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
\
--checkpoint-dir ${CHECKPOINT_DIR}/checkpoint \
--batch-size ${MAX_BATCH_SIZE} \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--batch-size ${MAX_BATCH_SIZE} \
--data-pattern "${DATASETS_DIR}/outbrain/valid/*.parquet"
""",
)
)
pipeline.model_conversion(
commands=(
r"""
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.savedmodel \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size ${MAX_BATCH_SIZE} \
--max-workspace-size 8589934592 \
--atol wide_deep_model=0.015 \
--rtol wide_deep_model=12.0
""",
)
)
pipeline.model_deploy(
commands=(
r"""
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${FORMAT} \
--model-control-mode explicit \
--load-model \
--load-model-timeout-s 120 \
--verbose \
\
--batching ${MODEL_BATCHING} \
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--max-batch-size ${MAX_BATCH_SIZE} \
--preferred-batch-sizes ${MAX_BATCH_SIZE} \
--engine-count-per-device ${DEVICE_KIND}=${NUMBER_OF_MODEL_INSTANCES}
""",
)
)
pipeline.triton_performance_offline_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes ${MEASUREMENT_OFFLINE_BATCH_SIZES} \
--concurrency ${MEASUREMENT_OFFLINE_CONCURRENCY} \
--performance-tool ${PERFORMANCE_TOOL} \
--measurement-request-count 100 \
--evaluation-mode offline \
--warmup \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_offline.csv",
)
pipeline.triton_performance_online_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes ${MEASUREMENT_ONLINE_BATCH_SIZES} \
--concurrency ${MEASUREMENT_ONLINE_CONCURRENCY} \
--performance-tool ${PERFORMANCE_TOOL} \
--measurement-request-count 500 \
--evaluation-mode online \
--warmup \
--result-path ${SHARED_DIR}/triton_performance_online.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_online.csv",
)
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/pipeline_impl.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import shutil
import subprocess
from enum import Enum
from typing import Any
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Command
from .exceptions import RunnerException
from .logger import LOGGER
def format_env_key(s: str):
"""
Format an environment variable key
Args:
s: String to format
Returns:
Upper cased string
"""
return s.upper()
def format_env_value(value: Any) -> str:
"""
Format environment variable value
Args:
value: value to be formatted
Returns:
Formatted value as a string
"""
value = value if not isinstance(value, Enum) else value.value
value = value if type(value) not in [list, tuple] else ",".join(map(str, value))
value = str(value)
return value
def get_result_path(result_path: str) -> str:
"""
Resolve a result path that may contain environment variable references (e.g. ${SHARED_DIR})
Args:
result_path: Path to result file
Returns:
str
"""
for env_var, val in os.environ.items():
result_path = result_path.replace(f"${{{env_var}}}", val)
if result_path.startswith("/"):
return result_path
if result_path.startswith("./"):
result_path = result_path[2:]
return result_path
def clean_directory(directory: pathlib.Path) -> None:
"""
Remove all files and directories from directory
Args:
directory: Path to directory which should be cleaned
Returns:
None
"""
LOGGER.debug(f"Cleaning {directory.as_posix()}")
if not directory.is_dir():
LOGGER.warning(f"{directory.name} is not a directory.")
return
for item in os.listdir(directory):
item_path = directory / item
if item_path.is_dir():
LOGGER.debug(f"Remove dir {item_path.as_posix()}")
shutil.rmtree(item_path.as_posix())
elif item_path.is_file():
LOGGER.debug(f"Remove file: {item_path.as_posix()}")
item_path.unlink()
else:
LOGGER.warning(f"Cannot remove item {item_path.name}. Not a file or directory.")
def exec_command(command: Command) -> None:
"""
Execute command
Args:
command: Command to run
"""
try:
process = subprocess.Popen(
[str(command)],
shell=True,
start_new_session=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding="utf-8",
)
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
print(output.rstrip())
LOGGER.write(output)
result = process.poll()
if result != 0:
raise RunnerException(f"Command {command} failed with exit status: {result}")
except subprocess.CalledProcessError as e:
raise RunnerException(f"Running command {e.cmd} failed with exit status {e.returncode} : {e.output}")
def measurement_env_params(measurement):
"""Convert a Measurement dataclass into MEASUREMENT_* environment variable values."""
params = {}
for key, value in measurement.__dict__.items():
param = f"{measurement.__class__.__name__.upper()}_{key.upper()}"
params[param] = " ".join(list(map(lambda val: str(val), value))) if isinstance(value, list) else int(value)
return params
def offline_performance_configuration(steps, max_batch_size):
"""Evenly spaced batch sizes up to max_batch_size, with concurrency fixed at 1."""
step = int(max_batch_size) // steps
batch_sizes = [step * idx for idx in range(1, steps + 1)]
concurrency = [1]
return batch_sizes, concurrency
def online_performance_configuration(steps, max_batch_size, number_of_model_instances):
"""Concurrency sweep capped at 128, derived from 2 * max_batch_size * model instances."""
max_total_requests = 2 * int(max_batch_size) * int(number_of_model_instances)
max_concurrency = min(128, max_total_requests)
step = max(1, max_concurrency // steps)
min_concurrency = step
batch_sizes = [max(1, max_total_requests // max_concurrency)]
concurrency = list(range(min_concurrency, max_concurrency + 1, step))
return batch_sizes, concurrency
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/utils.py |
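A worked example of the two performance-configuration helpers above; for max_batch_size=16 the offline batch sizes are evenly spaced, and the online helper derives a concurrency sweep from 2 * max_batch_size * instances:
batch_sizes, concurrency = offline_performance_configuration(steps=4, max_batch_size=16)
assert batch_sizes == [4, 8, 12, 16] and concurrency == [1]

batch_sizes, concurrency = online_performance_configuration(
    steps=4, max_batch_size=16, number_of_model_instances=2
)
# max_total_requests = 2 * 16 * 2 = 64, so:
assert batch_sizes == [1] and concurrency == [16, 32, 48, 64]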
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Dict, Tuple
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .stages import (
ConversionStage,
DeployStage,
ExportStage,
ResultsType,
TritonPerformanceOfflineStage,
TritonPerformanceOnlineStage,
TritonPreparePerformanceProfilingDataStage,
)
class Pipeline:
"""
Definition of stages that have to be executed before and during experiments
"""
# Stages to execute as part of single experiment
_experiment_stages = [
ExportStage.label,
ConversionStage.label,
DeployStage.label,
TritonPreparePerformanceProfilingDataStage.label,
TritonPerformanceOfflineStage.label,
TritonPerformanceOnlineStage.label,
]
def __init__(self):
"""
Initialize pipeline
"""
self._stages: Dict = dict()
def model_export(self, commands: Tuple[str, ...]) -> None:
"""
Model export stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = ExportStage(commands=commands)
self._stages[stage.label] = stage
def model_conversion(self, commands: Tuple[str, ...]) -> None:
"""
Model conversion stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = ConversionStage(commands=commands)
self._stages[stage.label] = stage
def model_deploy(self, commands: Tuple[str, ...]) -> None:
"""
Model deployment stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = DeployStage(commands=commands)
self._stages[stage.label] = stage
def triton_prepare_performance_profiling_data(self, commands: Tuple[str, ...]) -> None:
"""
Model profiling data creation stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = TritonPreparePerformanceProfilingDataStage(commands=commands)
self._stages[stage.label] = stage
def triton_performance_offline_tests(self, commands: Tuple[str, ...], result_path: str) -> None:
"""
Model performance offline test stage
Args:
commands: Commands to be executed as part of stage
result_path: Path where results file is stored
Returns:
None
"""
stage = TritonPerformanceOfflineStage(
commands=commands,
result_path=result_path,
result_type=ResultsType.TRITON_PERFORMANCE_OFFLINE,
)
self._stages[stage.label] = stage
def triton_performance_online_tests(self, commands: Tuple[str, ...], result_path: str) -> None:
"""
Model performance online test stage
Args:
commands: Commands to be executed as part of stage
result_path: Path where results file is stored
Returns:
None
"""
stage = TritonPerformanceOnlineStage(
commands=commands,
result_path=result_path,
result_type=ResultsType.TRITON_PERFORMANCE_ONLINE,
)
self._stages[stage.label] = stage
def stages(self):
"""
Generate stages which should be run per experiment
Returns:
Generator with stages object
"""
for stage_name in self._experiment_stages:
stage = self._stages.get(stage_name)
if not stage:
continue
yield stage
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/pipeline.py |
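A sketch of pipeline assembly; `stages()` yields only the stages that were registered, in the fixed experiment order:
pipeline = Pipeline()
pipeline.model_export(commands=("echo export",))
pipeline.model_deploy(commands=("echo deploy",))

labels = [stage.label for stage in pipeline.stages()]
assert labels == ["Export Model", "Deploy Model"]   # conversion stage was skipped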
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RunnerException(Exception):
"""
Runner Exception
"""
def __init__(self, message: str):
self._message = message
def __str__(self):
return self._message
@property
def message(self):
"""Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/exceptions.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pathlib
from typing import List
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .executor import Executor
from .finalizer import ExperimentFinalizer
from .maintainer import DockerMaintainer
from .preparer import ExperimentPreparer
from .runner_proxy import RunnerProxy
from .pipeline_impl import pipeline
class ExperimentRunner(RunnerProxy):
"""
Experiment runner proxy wired with concrete maintainer, executor, preparer, and finalizer classes
"""
maintainer_cls = DockerMaintainer
executor_cls = Executor
preparer_cls = ExperimentPreparer
finalizer_cls = ExperimentFinalizer
def execute(config_path: str, devices: List[str]):
if len(devices) == 0:
devices = ["0"]
config = Config.from_file(config_path)
runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices)
runner.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.")
parser.add_argument(
"--devices", type=str, nargs="*", required=False, help="Path to configuration file with details."
)
args = parser.parse_args()
config_path = args.config_path
devices = args.devices
execute(config_path, devices)
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/__main__.py |
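Based on the argument parser above, a typical invocation looks like the sketch below (the module path and config file name are illustrative); passing `--devices` with no values makes `execute` fall back to device "0":
# python -m triton.runner --config-path runner_config.yaml --devices 0 1
# python -m triton.runner --config-path runner_config.yaml --devices      # -> ["0"]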
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pathlib
import shutil
import traceback
from typing import Dict, List, Optional
from colorama import Fore
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..deployment_toolkit.core import BackendAccelerator, Precision
from .core import Batching, Measurement, Paths
from .exceptions import RunnerException
from .experiment import ExperimentResult, ExperimentStatus, Status
from .exporter import CommandsExporter
from .logger import LOGGER
from .maintainer import Container, Maintainer
from .pipeline import Pipeline
from .stages import Stage
from .task import Experiment, Task
from .triton import Triton
from .utils import (
clean_directory,
exec_command,
format_env_key,
format_env_value,
get_result_path,
measurement_env_params,
offline_performance_configuration,
online_performance_configuration,
)
class Executor:
"""
Experiments executor
"""
def __init__(
self,
workspace: pathlib.Path,
maintainer: Maintainer,
pipeline: Pipeline,
devices: List[str] = None,
):
"""
Initialize experiments executor
Args:
workspace: Path to workspace to store artifacts
maintainer: maintainer for running commands
pipeline: pipeline definition
devices: List of devices on which Triton Inference Server will be executed
"""
self._maintainer = maintainer
self._pipeline = pipeline
self._devices = devices or ["0"]
self._workspace = workspace
self._executor_workspace = workspace / "executor"
self._shared_dir = self._executor_workspace / "shared"
self._triton_models_repository_dir = self._executor_workspace / "triton_models"
self._scripts_dir = self._executor_workspace / "scripts"
self._libraries_dir = self._executor_workspace / "libs"
self._exporter = CommandsExporter(self._scripts_dir)
self._triton_container: Optional[Container] = None
def start(self, task: Task):
"""
Process the task and execute experiments.
"""
self._create_dirs()
total_experiment = len(task.experiments)
LOGGER.info(f"Total experiments to verify: {total_experiment}")
for idx, experiment in enumerate(task.experiments, start=1):
LOGGER.info(
f"{Fore.CYAN}================ Experiment: {idx}/{total_experiment} Started ================{Fore.RESET}" # noqa: B950
)
results = {}
environment = self._prepare_environment(task, experiment)
LOGGER.info("Experiment details")
LOGGER.info(json.dumps(environment, indent=4))
self._clean_experiment_artifacts(idx, total_experiment)
self._create_experiment_results_dir(task, experiment)
experiment.start()
LOGGER.info("Running Triton Servers:")
log_file = self._workspace / task.logs_dir / f"triton-server-experiment-{idx}.log"
self._triton_container = self._triton_server_container(
triton_container_image=task.triton_container_image,
framework=task.framework,
accelerator=experiment.parameters.get("backend_accelerator")
or experiment.parameters.get("accelerator"),
precision=experiment.parameters["precision"],
custom_library=bool(task.triton_custom_operations is not None),
load_model_method=task.triton_load_model_method,
log_file=log_file,
)
try:
self._triton_container.start()
for stage in self._pipeline.stages():
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total_experiment}] ================ Stage {stage.label} Started ================{Fore.RESET}" # noqa: B950
)
experiment_stage = experiment.stages[stage.label]
experiment_stage.start()
is_ok = self._run_stage(stage=stage)
if not is_ok:
LOGGER.error(f"Stage {stage.label} failed.")
break
self._save_results(task, experiment, stage.label, results)
experiment_stage.end()
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total_experiment}] ================ Stage {stage.label} Finished ================{Fore.RESET}" # noqa: B950
)
except Exception:
message = traceback.format_exc()
LOGGER.error(f"Error running experiment: {message}")
yield ExperimentResult(
status=Status(state=ExperimentStatus.FAILED, message=message),
experiment=experiment,
results=results,
)
continue
finally:
self._triton_container.stop()
experiment.end()
LOGGER.info(
f"{Fore.CYAN}================ Experiment: {idx}/{total_experiment} Finished ================{Fore.RESET}" # noqa: B950
)
yield ExperimentResult(
status=Status(state=ExperimentStatus.SUCCEED, message="Experiment Succeed"),
experiment=experiment,
results=results,
)
def stop(self) -> None:
"""
Stop executor
Returns:
None
"""
if self._triton_container:
self._triton_container.stop()
def _prepare_environment(self, task: Task, experiment: Experiment) -> Dict:
"""
Prepare environment data and export it
Args:
task: Task object with runner configuration
experiment: Experiment data
Returns:
Dictionary with environment data
"""
environment = {
"MODEL_NAME": task.model_name,
"ENSEMBLE_MODEL_NAME": task.ensemble_model_name,
"FRAMEWORK": task.framework,
"SHARED_DIR": self._shared_dir.as_posix(),
"MODEL_REPOSITORY_PATH": self._triton_models_repository_dir.as_posix(),
"TRITON_SERVER_URL": "localhost",
"TRITON_LOAD_MODEL_METHOD": task.triton_load_model_method,
"PERFORMANCE_TOOL": task.performance_tool.value,
"MODEL_BATCHING": task.batching,
}
measurement_params = self._measurement_params(
max_batch_size=experiment.parameters["max_batch_size"],
number_of_model_instances=experiment.parameters["number_of_model_instances"],
batching=task.batching,
steps_online=task.measurement_steps_online,
steps_offline=task.measurement_steps_offline,
)
environment = {
**environment,
**measurement_params,
}
if experiment.checkpoint:
environment["CHECKPOINT_DIR"] = task.checkpoints[experiment.checkpoint].path.as_posix()
if task.datasets_dir:
environment["DATASETS_DIR"] = task.datasets_dir.as_posix()
for key, value in experiment.parameters.items():
key = format_env_key(key)
value = format_env_value(value)
environment[key] = value
for key, value in environment.items():
os.environ[key] = str(value)
return environment
def _triton_server_container(
self,
triton_container_image: str,
framework: str,
load_model_method: str,
accelerator: str,
precision: str,
log_file: pathlib.Path,
custom_library: bool,
) -> Container:
"""
Create Triton Inference Server container for experiment
Args:
triton_container_image: Triton Inference Server container image
framework: Framework used to run model
accelerator: Accelerator used for experiment
precision: Precision used for experiment
load_model_method: Configure how Triton will load model
log_file: File where Triton logs are stored
custom_library: Whether a custom operations library has to be loaded
Returns:
Container object
"""
volumes = {
self._triton_models_repository_dir: {"bind": Paths.MODEL_REPOSITORY_PATH, "mode": "rw"},
self._libraries_dir: {"bind": Paths.LIBRARIES_PATH, "mode": "rw"},
}
environment = {
"MODEL_REPOSITORY_PATH": Paths.MODEL_REPOSITORY_PATH,
"LIBRARIES_PATH": Paths.LIBRARIES_PATH,
"TRITON_LOAD_MODEL_METHOD": load_model_method,
}
if custom_library:
library_path = Triton.library_path(framework=framework)
environment["LD_LIBRARY_PATH"] = f"{library_path}:${{LD_LIBRARY_PATH}}"
environment["LD_PRELOAD"] = Triton.custom_library_path_remote()
if accelerator == BackendAccelerator.TRT.value and precision == Precision.FP16.value:
environment["ORT_TENSORRT_FP16_ENABLE"] = 1
strict_mode = False
command = Triton.command(
framework=framework,
repository_path=Paths.MODEL_REPOSITORY_PATH,
strict_mode=strict_mode,
)
command = f' bash -c "{command}"'
container = self._maintainer.triton_container(
command=command,
image=triton_container_image,
devices=self._devices,
volumes=volumes,
environment=environment,
log_file=log_file,
)
return container
def _save_results(self, task: Task, experiment: Experiment, stage_name: str, results: Dict) -> None:
"""
Update results for stage
Args:
task: Task object
experiment: Experiment for which stage has to be updated
stage_name: Name of stage
results: Results path mapping
Returns:
None
"""
stage = experiment.stages[stage_name]
if not stage.result_path:
LOGGER.debug(f"No results file to copy for {stage.name}")
return
if not stage.result_type:
LOGGER.debug(f"No results type provided for {stage.name}")
return
os.environ["SHARED_DIR"] = self._shared_dir.as_posix()
result_path = get_result_path(result_path=stage.result_path)
result_path = pathlib.Path(result_path)
if not result_path.is_file() and not result_path.is_dir():
raise RunnerException(f"Results file {result_path} not found.")
experiment_dir = self._workspace / task.results_dir / experiment.results_dir
LOGGER.info(f"Saving {stage.result_type} to {experiment_dir}")
if result_path.is_dir():
dst_path = experiment_dir / stage.result_type
shutil.copytree(result_path, dst_path)
elif result_path.is_file():
suffix = result_path.suffix
dst_path = experiment_dir / f"{stage.result_type}{suffix}"
shutil.copy(result_path, dst_path)
else:
raise RunnerException(f"Result not found {result_path}")
LOGGER.info("Done")
results[stage.result_type] = dst_path
def _create_dirs(self) -> None:
"""
Create directories used to store artifacts and final results
Returns:
None
"""
LOGGER.info(
f"{Fore.GREEN}================ Creating Artifacts Directories Started ================{Fore.RESET}"
) # noqa: B950
if self._executor_workspace.is_dir():
LOGGER.info(f"Removing previous executor workspace: {self._executor_workspace}")
shutil.rmtree(self._executor_workspace)
for directory in [
self._libraries_dir,
self._shared_dir,
self._scripts_dir,
self._triton_models_repository_dir,
]:
directory.mkdir(parents=True, exist_ok=True)
LOGGER.info(f"Directory {directory.name} created.")
LOGGER.info(
f"{Fore.GREEN}================ Creating Artifacts Directories Finished ================{Fore.RESET}"
)
def _clean_experiment_artifacts(self, idx: int, total: int) -> None:
"""
Clean artifacts stored between experiments
Returns:
None
"""
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total}] ================ Cleanup Experiment Data Started ================{Fore.RESET}" # noqa: B950
)
for directory in [
self._shared_dir,
self._scripts_dir,
self._triton_models_repository_dir,
]:
clean_directory(directory)
LOGGER.info(f"Location {directory} cleaned.")
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total}] ================ Cleanup Experiment Data Finished ================{Fore.RESET}" # noqa: B950
)
def _create_experiment_results_dir(self, task: Task, experiment: Experiment):
"""
Create result directory for experiment
Returns:
"""
experiment_dir = self._workspace / task.results_dir / experiment.results_dir
experiment_dir.mkdir(parents=True, exist_ok=True)
def _prepare_triton_custom_operations(self, task: Task) -> None:
"""
Prepare Triton Server custom operations library
Returns:
None
"""
if task.triton_custom_operations:
target_library_path = Triton.custom_library_path_local(self._libraries_dir)
target_library_path_dir = target_library_path.parent
target_library_path_dir.mkdir(parents=True, exist_ok=True)
shutil.copy(task.triton_custom_operations, target_library_path)
def _run_stage(self, stage: Stage) -> bool:
"""
Run single stage commands
Args:
stage: Stage object with defined commands
Returns:
True on success, False otherwise
"""
try:
command = self._exporter.export(stage=stage)
exec_command(command)
except RunnerException:
return False
return True
def _measurement_params(
self,
max_batch_size: int,
number_of_model_instances: int,
steps_offline: int,
steps_online: int,
batching: str,
):
max_batch_size = int(max_batch_size)
if batching == Batching.DISABLED.value:
LOGGER.debug("Model does not support batching.")
measurement = Measurement(
offline_batch_sizes=[1],
offline_concurrency=[1],
online_batch_sizes=[1],
online_concurrency=[1],
min_shapes_batch=max_batch_size,
opt_shapes_batch=max_batch_size,
max_shapes_batch=max_batch_size,
)
return measurement
offline_batch_sizes, offline_concurrency = offline_performance_configuration(
steps=steps_offline,
max_batch_size=max_batch_size,
)
if batching == Batching.DYNAMIC.value:
online_batch_sizes, online_concurrency = online_performance_configuration(
steps=steps_online,
max_batch_size=max_batch_size,
number_of_model_instances=number_of_model_instances,
)
else:
online_batch_sizes, online_concurrency = offline_batch_sizes, offline_concurrency
min_batch_size = min(min(offline_batch_sizes), min(online_batch_sizes))
measurement = Measurement(
offline_batch_sizes=offline_batch_sizes,
offline_concurrency=offline_concurrency,
online_batch_sizes=online_batch_sizes,
online_concurrency=online_concurrency,
min_shapes_batch=min_batch_size,
opt_shapes_batch=max_batch_size,
max_shapes_batch=max_batch_size,
)
return measurement_env_params(measurement)
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/executor.py |
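A worked sketch of `_measurement_params` for dynamic batching: with max_batch_size=16, 4 offline and 4 online steps, and 2 model instances, `measurement_env_params` would produce values along these lines (illustrative, derived from the helpers in utils.py):
#   MEASUREMENT_OFFLINE_BATCH_SIZES = "4 8 12 16"
#   MEASUREMENT_OFFLINE_CONCURRENCY = "1"
#   MEASUREMENT_ONLINE_BATCH_SIZES  = "1"
#   MEASUREMENT_ONLINE_CONCURRENCY  = "16 32 48 64"
#   MEASUREMENT_MIN_SHAPES_BATCH    = 1
#   MEASUREMENT_OPT_SHAPES_BATCH    = 16
#   MEASUREMENT_MAX_SHAPES_BATCH    = 16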
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .docker.maintainer import DockerMaintainer
class MaintainerFactory:
@staticmethod
def create_docker_maintainer():
return DockerMaintainer()
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer/maintainer_factory.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .container import Container # noqa: F401
from .docker.maintainer import DockerMaintainer # noqa: F401
from .maintainer import Maintainer # noqa: F401
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import Any
class Container(abc.ABC):
def __init__(self, name: str):
self.name = name
self._container = None
@abc.abstractmethod
def start(self):
"""
Start container
"""
pass
@abc.abstractmethod
def stop(self):
"""
Stop container
"""
@abc.abstractmethod
def run(self, command: str) -> Any:
"""
Run command inside container
Args:
command: command to execute
Returns:
Any
"""
pass
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer/container.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from typing import Any, Dict, List, Optional, Union
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .container import Container
class Maintainer(abc.ABC):
@abc.abstractmethod
def triton_container(
self, command: str, image: str, devices: List, volumes: Dict, environment: Dict, log_file: Union[pathlib.Path, str]
) -> Container:
"""
Return triton container
Args:
command: Triton Server command that has to be executed
image: Container image
devices: List of device ids which have to be available in the container
volumes: Volumes mapping
environment: Environment variables set in container
log_file: File path where server logs have to be saved
Returns:
Container object
"""
pass
@abc.abstractmethod
def build_image(
self,
*,
image_file_path: pathlib.Path,
image_name: str,
workdir_path: Optional[pathlib.Path] = None,
build_args: Optional[Dict[str, Any]] = None,
) -> None:
pass
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer/maintainer.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ContainerNotStarted(Exception):
pass
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer/exceptions.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer/docker/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
import docker
from docker.models.containers import ExecResult
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..container import Container
class DockerContainer(Container):
def __init__(self, name: str):
super().__init__(name)
self._container = None
self._docker_client = docker.from_env()
self._docker_api_client = docker.APIClient()
@abc.abstractmethod
def start(self):
"""
Start container
"""
pass
@abc.abstractmethod
def stop(self):
"""
Stop container
"""
@abc.abstractmethod
def run(self, command: str) -> ExecResult:
"""
Run command inside container
Args:
command: command to execute
Returns:
ExecResult
"""
pass
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer/docker/container.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Any, Dict, List, Optional, Union
import docker
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...logger import LOGGER
from ..maintainer import Maintainer
from .container import DockerContainer
from .containers import TritonServerContainer
class DockerMaintainer(Maintainer):
def triton_container(
self, command: str, image: str, devices: List, volumes: Dict, environment: Dict, log_file: Union[pathlib.Path, str]
) -> DockerContainer:
"""
Return triton container
Args:
command: Triton Server command that has to be executed
image: Container image
devices: List of device ids which have to be available in the container
volumes: Volumes mapping
environment: Environment variables set in container
log_file: File path where server logs have to be saved
Returns:
DockerContainer object
"""
return TritonServerContainer(
name="triton-server",
command=command,
image=image,
devices=devices,
volumes=volumes,
environment=environment,
log_file=log_file,
)
def build_image(
self,
*,
image_file_path: pathlib.Path,
image_name: str,
workdir_path: Optional[pathlib.Path] = None,
build_args: Optional[Dict[str, Any]] = None,
) -> None:
workdir_path = workdir_path or image_file_path.parent
build_args = build_args or {}
LOGGER.info(f"Building {image_name} docker image.")
LOGGER.debug(f" Using workdir: {workdir_path}")
LOGGER.debug(f" Dockerfile: {image_file_path}")
LOGGER.debug(f" Build args: {build_args}")
build_logs = list()
try:
docker_client = docker.from_env()
_, build_logs = docker_client.images.build(
path=workdir_path.resolve().as_posix(),
dockerfile=image_file_path.resolve().as_posix(),
tag=image_name,
buildargs=build_args,
network_mode="host",
rm=True,
)
except docker.errors.BuildError as e:
build_logs = e.build_log
raise e
finally:
for chunk in build_logs:
log = chunk.get("stream")
if log:
LOGGER.debug(log.rstrip())
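# Illustrative usage sketch, not part of the original file; the Dockerfile
# location, image tag and build arg below are hypothetical placeholders.
# build_image streams the docker build log through LOGGER.debug and re-raises
# BuildError after flushing whatever log chunks were produced.
def _example_build_image():
    maintainer = DockerMaintainer()
    maintainer.build_image(
        image_file_path=pathlib.Path("triton/Dockerfile"),
        image_name="wide-and-deep-runner:latest",
        build_args={"FROM_IMAGE_NAME": "nvcr.io/nvidia/tensorflow:22.02-tf2-py3"},
    )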
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer/docker/maintainer.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .triton_server_container import TritonServerContainer # noqa: F401
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer/docker/containers/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pathlib
from threading import Thread
from typing import Dict, Generator, Union
from docker.models.containers import ExecResult
from docker.types import DeviceRequest, Ulimit
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ....logger import LOGGER
from ...exceptions import ContainerNotStarted
from ..container import DockerContainer
class TritonServerContainer(DockerContainer):
def __init__(
self,
name: str,
command: str,
image: str,
volumes: Dict,
devices: Union[list, int],
environment: Dict,
log_file: Union[pathlib.Path, str],
network: str = "host",
shm_size: str = "1G",
):
"""
Initialize Triton Server Container
Args:
name: Container name
command: Triton Server command to exec on container start
image: Docker Image
volumes: Volumes to mount inside container
            devices: Devices that have to be visible in the container
environment: Environment variables
log_file: Path where logs should be saved
network: Network mode
shm_size: Shared memory size
"""
super().__init__(name)
self._image = image
self._command = command
self._volumes = volumes
self._devices = devices
self._environment = environment
self._network = network
self._shm_size = shm_size
self._triton_exec = None
self._logging_thread = None
self._log_file_path = pathlib.Path(log_file)
def start(self) -> None:
"""
Start Triton Server Container
"""
devices = [
DeviceRequest(capabilities=[["gpu"]], device_ids=self._devices),
]
LOGGER.info(f"Triton environment: {json.dumps(self._environment, indent=4)}")
LOGGER.info(f"Starting Triton container {self.name}.")
self._container = self._docker_client.containers.run(
image=self._image,
name=self.name,
device_requests=devices,
detach=True,
tty=True,
shm_size=self._shm_size,
ulimits=[
Ulimit(name="memlock", soft=-1, hard=-1),
Ulimit(name="stack", soft=67108864, hard=67108864),
],
volumes=self._volumes,
environment=self._environment,
network_mode=self._network,
auto_remove=True,
ipc_mode="host",
)
LOGGER.info("Triton command:")
LOGGER.info(f" {self._command}")
LOGGER.info(f"Starting Triton Server {self.name}.")
self._triton_exec = self._docker_api_client.exec_create(
container=self._container.id,
cmd=self._command,
)
stream_generator = self._docker_api_client.exec_start(exec_id=self._triton_exec["Id"], stream=True)
self._logging_thread = Thread(target=TritonServerContainer._logging, args=(self, stream_generator), daemon=True)
self._logging_thread.start()
def stop(self) -> None:
"""
Stop Triton Server Container and save logs to file
"""
if self._container is not None:
triton_result = self._docker_api_client.exec_inspect(self._triton_exec["Id"])
if triton_result.get("ExitCode") not in (0, None):
LOGGER.info(
f"Triton Inference Server instance {self.name} failed. Exit code: {triton_result.get('ExitCode')}"
)
LOGGER.info(f"Stopping triton server {self.name}.")
self._container.stop()
self._container = None
self._docker_client.close()
self._docker_api_client.close()
def run(self, command: str) -> ExecResult:
"""
Run command in container
Args:
command: Command to execute
Returns:
ExecResult
"""
if not self._container:
raise ContainerNotStarted("Triton Server Container is not running. Use .start() first.")
return self._container.exec_run(command)
def _logging(self, generator: Generator) -> None:
"""Triton logging thread for Triton Inference Server
Args:
generator (string generator): Triton log stream.
"""
with open(self._log_file_path, mode="w") as file:
try:
while True:
log = next(generator)
txt = log.decode("utf-8")
file.write(txt)
except StopIteration:
LOGGER.info(f"Saving Triton Inference Server {self.name} logs in {self._log_file_path}.")
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer/docker/containers/triton_server_container.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import os
from typing import Dict, List
from data.outbrain.defaults import (TRAIN_MAPPING, TEST_MAPPING, ONEHOT_CHANNEL, MULTIHOT_CHANNEL,
                                    NUMERICAL_CHANNEL, LABEL_CHANNEL, MAP_FEATURE_CHANNEL, PARQUET_TYPE)
TYPE_SELECTOR = "type"
FEATURES_SELECTOR = "features"
FILES_SELECTOR = "files"
DTYPE_SELECTOR = "dtype"
CARDINALITY_SELECTOR = "cardinality"
MAX_HOTNESS_SELECTOR = "max_hotness"
class FeatureSpec:
def __init__(self, feature_spec=None, source_spec=None, channel_spec=None, metadata=None, base_directory=None):
self.feature_spec: Dict = feature_spec if feature_spec is not None else {}
self.source_spec: Dict = source_spec if source_spec is not None else {}
self.channel_spec: Dict = channel_spec if channel_spec is not None else {}
self.metadata: Dict = metadata if metadata is not None else {}
self.base_directory: str = base_directory
@classmethod
def from_yaml(cls, path):
with open(path, 'r') as feature_spec_file:
base_directory = os.path.dirname(path)
feature_spec = yaml.safe_load(feature_spec_file)
return cls.from_dict(feature_spec, base_directory=base_directory)
@classmethod
def from_dict(cls, source_dict, base_directory):
return cls(base_directory=base_directory, **source_dict)
def to_dict(self) -> Dict:
attributes_to_dump = ['feature_spec', 'source_spec', 'channel_spec', 'metadata']
return {attr: self.__dict__[attr] for attr in attributes_to_dump}
def to_string(self):
return yaml.dump(self.to_dict())
def to_yaml(self, output_path=None):
if not output_path:
            output_path = os.path.join(self.base_directory, 'feature_spec.yaml')
with open(output_path, 'w') as output_file:
print(yaml.dump(self.to_dict()), file=output_file)
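    # Illustrative round-trip, comments only (not part of the original file);
    # the yaml path below is a placeholder:
    #
    #     spec = FeatureSpec.from_yaml("/outbrain/data/feature_spec.yaml")
    #     spec.check_feature_spec(require_map_channel=True)
    #     spec.to_yaml("/tmp/feature_spec_copy.yaml")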
def _check_one_label_feature(self):
assert len(self.get_names_by_channel(LABEL_CHANNEL)) == 1
def _check_all_required_channels_present(self):
# check that channels are the ones expected
present_channels = list(self.channel_spec.keys())
required_channels = [ONEHOT_CHANNEL, MULTIHOT_CHANNEL, NUMERICAL_CHANNEL, LABEL_CHANNEL, MAP_FEATURE_CHANNEL]
assert sorted(present_channels) == sorted(required_channels)
def _check_all_used_features_are_defined(self):
# check that all features used in channel spec are defined in feature_spec
for channel_features in self.channel_spec.values():
for feature in channel_features:
assert feature in self.feature_spec
def _check_categoricals_have_cardinality(self):
all_categoricals = self.get_names_by_channel(ONEHOT_CHANNEL) + self.get_names_by_channel(MULTIHOT_CHANNEL)
for feature_name in all_categoricals:
feature_dict = self.feature_spec[feature_name]
assert CARDINALITY_SELECTOR in feature_dict
assert isinstance(feature_dict[CARDINALITY_SELECTOR], int)
def _check_required_mappings_present(self):
# check that mappings are the ones expected
mapping_name_list = list(self.source_spec.keys())
assert sorted(mapping_name_list) == sorted([TEST_MAPPING, TRAIN_MAPPING])
def _check_all_chunks_are_parquet(self):
for mapping_name in [TRAIN_MAPPING, TEST_MAPPING]:
mapping = self.source_spec[mapping_name]
for chunk in mapping:
assert chunk[TYPE_SELECTOR] == PARQUET_TYPE
def _check_only_one_chunk_per_mapping(self):
for mapping_name in [TRAIN_MAPPING, TEST_MAPPING]:
mapping = self.source_spec[mapping_name]
assert len(mapping) == 1
def _check_all_features_have_source_where_necessary(self, is_map_channel_active):
for channel_name, channel_features in self.channel_spec.items():
if channel_name != MAP_FEATURE_CHANNEL:
for mapping_name in [TRAIN_MAPPING, TEST_MAPPING]:
# This uses the fact that we require that mappings only have one chunk here
features_in_mapping = set(self.source_spec[mapping_name][0][FEATURES_SELECTOR])
for feature in channel_features:
assert feature in features_in_mapping
else:
map_channel_features = self.get_names_by_channel(MAP_FEATURE_CHANNEL)
if len(map_channel_features) == 1:
# This uses the fact that we require that mappings only have one chunk here
map_feature_name = map_channel_features[0]
test_mapping_features = set(self.source_spec[TEST_MAPPING][0][FEATURES_SELECTOR])
assert map_feature_name in test_mapping_features
def _check_map_feature_selected_if_enabled(self, is_map_feature_required):
map_channel_features = self.get_names_by_channel(MAP_FEATURE_CHANNEL)
assert len(map_channel_features) <= 1
if is_map_feature_required:
assert len(map_channel_features) == 1
def _check_dtype_correct_if_specified(self):
# make sure that if dtype is specified, it is convertible to float32 for numerical and convertible to int64 for categorical
# these are the requirements specified by tf.feature_column.categorical_column_with_identity and tf.feature_column.numeric_column
categorical_features = self.get_names_by_channel(ONEHOT_CHANNEL) + self.get_names_by_channel(MULTIHOT_CHANNEL)
categorical_allowed_types = {"int64", "int32"}
for feature in categorical_features:
feature_dict = self.feature_spec[feature]
if DTYPE_SELECTOR in feature_dict:
assert feature_dict[DTYPE_SELECTOR] in categorical_allowed_types
numerical_features = self.get_names_by_channel(NUMERICAL_CHANNEL)
numerical_allowed_types = {"float32", "float64"}
for feature in numerical_features:
feature_dict = self.feature_spec[feature]
if DTYPE_SELECTOR in feature_dict:
assert feature_dict[DTYPE_SELECTOR] in numerical_allowed_types
def _check_multihots_have_hotness_specified(self):
multihot_features = self.get_names_by_channel(MULTIHOT_CHANNEL)
for feature_name in multihot_features:
feature_dict = self.feature_spec[feature_name]
assert MAX_HOTNESS_SELECTOR in feature_dict
assert isinstance(feature_dict[MAX_HOTNESS_SELECTOR], int)
def _check_enough_files_for_ranks(self, world_size):
if world_size is not None:
for mapping in self.source_spec.values():
only_chunk = mapping[0]
files_number = len(only_chunk[FILES_SELECTOR])
assert files_number >= world_size, "NVTabular dataloader requires parquet to have at least as many partitions as there are workers"
def check_feature_spec(self, require_map_channel, world_size=None):
self._check_required_mappings_present()
self._check_all_required_channels_present()
self._check_one_label_feature()
self._check_map_feature_selected_if_enabled(require_map_channel)
self._check_all_used_features_are_defined()
self._check_categoricals_have_cardinality()
self._check_all_chunks_are_parquet()
self._check_only_one_chunk_per_mapping()
self._check_all_features_have_source_where_necessary(require_map_channel)
self._check_dtype_correct_if_specified()
self._check_multihots_have_hotness_specified()
self._check_enough_files_for_ranks(world_size)
def get_paths_by_mapping(self, mapping: str):
paths_from_fspec = []
chunk_list = self.source_spec[mapping]
for chunk in chunk_list:
paths_from_fspec.extend(chunk[FILES_SELECTOR])
paths = [os.path.join(self.base_directory, p) for p in paths_from_fspec]
return paths
def get_names_by_channel(self, channel_name) -> List[str]:
return self.channel_spec[channel_name]
def get_multihot_hotnesses(self, multihot_features: List[str]) -> Dict[str, int]:
        return {feature_name: self.feature_spec[feature_name][MAX_HOTNESS_SELECTOR] for feature_name in multihot_features}
def get_cardinalities(self, features: List[str]) -> Dict[str, int]:
cardinalities = {feature_name: self.feature_spec[feature_name][CARDINALITY_SELECTOR]
for feature_name in features}
        return cardinalities
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/data/feature_spec.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
from data.feature_spec import CARDINALITY_SELECTOR, MAX_HOTNESS_SELECTOR, TYPE_SELECTOR, FEATURES_SELECTOR, \
FILES_SELECTOR, FeatureSpec
from data.outbrain.defaults import TEST_MAPPING, TRAIN_MAPPING, PARQUET_TYPE, MULTIHOT_CHANNEL, ONEHOT_CHANNEL, \
LABEL_CHANNEL, NUMERICAL_CHANNEL, MAP_FEATURE_CHANNEL
import os
DISPLAY_ID_COLUMN = "display_id"
NUMERIC_COLUMNS = [
"document_id_document_id_promo_sim_categories",
"document_id_document_id_promo_sim_topics",
"document_id_document_id_promo_sim_entities",
"document_id_promo_ctr",
"publisher_id_promo_ctr",
"source_id_promo_ctr",
"document_id_promo_count",
"publish_time_days_since_published",
"ad_id_ctr",
"advertiser_id_ctr",
"campaign_id_ctr",
"ad_id_count",
"publish_time_promo_days_since_published",
]
ONEHOT_COLUMNS = [
"ad_id",
"document_id",
"platform",
"document_id_promo",
"campaign_id",
"advertiser_id",
"source_id",
"geo_location",
"geo_location_country",
"geo_location_state",
"publisher_id",
"source_id_promo",
"publisher_id_promo",
]
# Multihot columns with their hotness
MULTIHOT_COLUMNS = {
"topic_id_list": 3,
"entity_id_list": 3,
"category_id_list": 3
}
CATEGORICAL_COLUMNS = ONEHOT_COLUMNS + list(MULTIHOT_COLUMNS.keys())
HASH_BUCKET_SIZES = {
"document_id": 300000,
"ad_id": 250000,
"document_id_promo": 100000,
"source_id_promo": 4000,
"source_id": 4000,
"geo_location": 2500,
"advertiser_id": 2500,
"geo_location_state": 2000,
"publisher_id_promo": 1000,
"publisher_id": 1000,
"geo_location_country": 300,
"platform": 4,
"campaign_id": 5000,
"topic_id_list": 350,
"entity_id_list": 10000,
"category_id_list": 100,
}
EMBEDDING_DIMENSIONS = {
"document_id": 128,
"ad_id": 128,
"document_id_promo": 128,
"source_id_promo": 64,
"source_id": 64,
"geo_location": 64,
"advertiser_id": 64,
"geo_location_state": 64,
"publisher_id_promo": 64,
"publisher_id": 64,
"geo_location_country": 64,
"platform": 19,
"campaign_id": 128,
"topic_id_list": 64,
"entity_id_list": 64,
"category_id_list": 64,
}
LABEL_NAME = "clicked"
def get_features_keys():
return CATEGORICAL_COLUMNS + NUMERIC_COLUMNS + [DISPLAY_ID_COLUMN]
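# Illustrative sketch, not part of the original file: HASH_BUCKET_SIZES gives
# each categorical feature's hashed cardinality and EMBEDDING_DIMENSIONS its
# embedding width, so the deep tower's embedding table for a feature would be
# shaped (bucket_size, embedding_dim), e.g. (250000, 128) for "ad_id".
def _example_embedding_shapes():
    return {
        name: (HASH_BUCKET_SIZES[name], EMBEDDING_DIMENSIONS[name])
        for name in CATEGORICAL_COLUMNS
    }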
def get_outbrain_feature_spec(base_directory):
multihot_dict = {feature_name: {CARDINALITY_SELECTOR:HASH_BUCKET_SIZES[feature_name],
MAX_HOTNESS_SELECTOR: hotness}
for feature_name, hotness in MULTIHOT_COLUMNS.items()}
onehot_dict = {feature_name: {CARDINALITY_SELECTOR:HASH_BUCKET_SIZES[feature_name]}
for feature_name in ONEHOT_COLUMNS}
numeric_dict = {feature_name: {} for feature_name in NUMERIC_COLUMNS}
feature_dict = {**multihot_dict, **onehot_dict, **numeric_dict, DISPLAY_ID_COLUMN:{}, LABEL_NAME:{}}
    # These patterns come partly from our code (output_train_folder and output_valid_folder in utils/setup.py)
    # and partly from how NVTabular works (saving sorted *.parquet files into a chosen folder)
train_data_pattern=f"{base_directory}/train/*.parquet"
valid_data_pattern=f"{base_directory}/valid/*.parquet"
absolute_train_paths = sorted(glob.glob(train_data_pattern))
absolute_valid_paths = sorted(glob.glob(valid_data_pattern))
train_paths = [os.path.relpath(p, base_directory) for p in absolute_train_paths]
valid_paths = [os.path.relpath(p, base_directory) for p in absolute_valid_paths]
source_spec = {}
for mapping_name, paths in zip((TRAIN_MAPPING, TEST_MAPPING),(train_paths, valid_paths)):
all_features = [LABEL_NAME] + ONEHOT_COLUMNS + list(MULTIHOT_COLUMNS.keys()) + NUMERIC_COLUMNS
if mapping_name == TEST_MAPPING:
all_features = all_features + [DISPLAY_ID_COLUMN]
source_spec[mapping_name] = []
source_spec[mapping_name].append({TYPE_SELECTOR: PARQUET_TYPE,
FEATURES_SELECTOR: all_features,
FILES_SELECTOR: paths})
channel_spec = {MULTIHOT_CHANNEL: list(MULTIHOT_COLUMNS.keys()),
ONEHOT_CHANNEL: ONEHOT_COLUMNS,
LABEL_CHANNEL: [LABEL_NAME],
NUMERICAL_CHANNEL: NUMERIC_COLUMNS,
MAP_FEATURE_CHANNEL: [DISPLAY_ID_COLUMN]}
    return FeatureSpec(feature_spec=feature_dict, source_spec=source_spec, channel_spec=channel_spec, metadata={})
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/data/outbrain/features.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ONEHOT_CHANNEL = "onehot_categorical"
MULTIHOT_CHANNEL = "multihot_categorical"
NUMERICAL_CHANNEL = "numerical"
LABEL_CHANNEL = "label"
MAP_FEATURE_CHANNEL = "map"
TRAIN_MAPPING = "train"
TEST_MAPPING = "test"
PARQUET_TYPE = "parquet"
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/data/outbrain/defaults.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cupy
import horovod.tensorflow as hvd
import tensorflow as tf
from nvtabular.loader.tensorflow import KerasSequenceLoader
from data.outbrain.defaults import LABEL_CHANNEL, MAP_FEATURE_CHANNEL, NUMERICAL_CHANNEL, ONEHOT_CHANNEL, \
MULTIHOT_CHANNEL
cupy.random.seed(None)
def seed_fn():
    _, max_int = tf.int32.limits
max_rand = max_int // hvd.size()
# Generate a seed fragment on each worker
seed_fragment = cupy.random.randint(0, max_rand).get()
# Aggregate seed fragments from all Horovod workers
seed_tensor = tf.constant(seed_fragment)
reduced_seed = hvd.allreduce(seed_tensor, name="shuffle_seed", op=hvd.mpi_ops.Sum)
return reduced_seed % max_rand
def get_dataset(feature_spec, mapping, batch_size, buffer_size=0.1, parts_per_chunk=1,
map_channel_enabled=False, shuffle=True):
data_paths = feature_spec.get_paths_by_mapping(mapping)
label_names = feature_spec.get_names_by_channel(LABEL_CHANNEL)
cat_names = feature_spec.get_names_by_channel(ONEHOT_CHANNEL) + feature_spec.get_names_by_channel(MULTIHOT_CHANNEL)
cont_names = feature_spec.get_names_by_channel(NUMERICAL_CHANNEL)
if map_channel_enabled:
cat_names += feature_spec.get_names_by_channel(MAP_FEATURE_CHANNEL)
tf_dataset = KerasSequenceLoader(
data_paths,
batch_size=batch_size,
label_names=label_names,
cat_names=cat_names,
cont_names=cont_names,
engine="parquet",
shuffle=shuffle,
buffer_size=buffer_size,
parts_per_chunk=parts_per_chunk,
global_size=hvd.size(),
global_rank=hvd.rank(),
seed_fn=seed_fn,
)
return tf_dataset
def make_padding_function(multihot_hotness_dict):
@tf.function(experimental_relax_shapes=True)
def pad_batch(batch):
batch = batch.copy()
for feature, hotness in multihot_hotness_dict.items():
multihot_tuple = batch[feature]
values = multihot_tuple[0][:, 0]
row_lengths = multihot_tuple[1][:, 0]
padded = tf.RaggedTensor.from_row_lengths(
values, row_lengths, validate=False
).to_tensor(default_value=-1, shape=[None, hotness])
batch[feature] = padded
return batch
return pad_batch
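# Illustrative sketch, not part of the original file, of what the padding
# function produces: NVTabular yields each multi-hot column as a
# (values, row_lengths) tuple, and pad_batch densifies it into a
# [batch_size, hotness] tensor padded with -1.
def _example_padding():
    pad_batch = make_padding_function({"topic_id_list": 3})
    values = tf.constant([[1], [2], [3], [4]], dtype=tf.int64)  # flattened ids
    row_lengths = tf.constant([[3], [1]], dtype=tf.int64)  # per-row hotness
    padded = pad_batch({"topic_id_list": (values, row_lengths)})
    # padded["topic_id_list"] == [[1, 2, 3], [4, -1, -1]]
    return padded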
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/data/outbrain/dataloader.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
os.environ["TF_MEMORY_ALLOCATION"] = "0.0"
from data.outbrain.nvtabular.utils.arguments import parse_args
from data.outbrain.nvtabular.utils.setup import create_config
from data.outbrain.nvtabular.utils.workflow import execute_pipeline
from data.outbrain.features import get_outbrain_feature_spec
def is_empty(path):
    """Return True if path does not exist or is an empty directory."""
    return not (os.path.exists(path) and (os.path.isfile(path) or os.listdir(path)))
def main():
args = parse_args()
config = create_config(args)
if is_empty(args.metadata_path):
logging.warning(
"Creating parquets into {}".format(config["output_bucket_folder"])
)
execute_pipeline(config)
save_feature_spec(config["output_bucket_folder"])
else:
logging.warning(f"Directory exists {args.metadata_path}")
logging.warning("Skipping NVTabular preprocessing")
def save_feature_spec(base_directory):
feature_spec = get_outbrain_feature_spec(base_directory)
fspec_path = os.path.join(base_directory, 'feature_spec.yaml')
feature_spec.to_yaml(output_path=fspec_path)
if __name__ == "__main__":
main()
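# Hypothetical invocation, not part of the original file (flags defined in
# data/outbrain/nvtabular/utils/arguments.py):
#   python data/outbrain/nvtabular/preproc.py --data_path /outbrain/orig \
#       --metadata_path /outbrain/data --use_dask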
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/data/outbrain/nvtabular/preproc.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
DEFAULT_DIR = "/outbrain"
def parse_args():
parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_path",
        help="Path with the data required for NVTabular preprocessing. "
        "If stats already exist under metadata_path, the preprocessing phase will be skipped.",
        type=str,
        default=f"{DEFAULT_DIR}/orig",
    )
    parser.add_argument(
        "--metadata_path",
        help="Path with preprocessed NVTabular stats",
        type=str,
        default=f"{DEFAULT_DIR}/data",
    )
    parser.add_argument(
        "--use_dask",
        default=False,
        action="store_true",
        help="Use multi-GPU preprocessing for the NVTabular workflow",
    )
return parser.parse_args()
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/data/outbrain/nvtabular/utils/arguments.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DISPLAY_ID_COLUMN = "display_id"
BASE_CONT_COLUMNS = [
"publish_time",
"publish_time_promo",
"timestamp",
"document_id_promo_clicked_sum_ctr",
"publisher_id_promo_clicked_sum_ctr",
"source_id_promo_clicked_sum_ctr",
"document_id_promo_count",
"publish_time_days_since_published",
"ad_id_clicked_sum_ctr",
"advertiser_id_clicked_sum_ctr",
"campaign_id_clicked_sum_ctr",
"ad_id_count",
"publish_time_promo_days_since_published",
]
SIM_COLUMNS = [
"doc_event_doc_ad_sim_categories",
"doc_event_doc_ad_sim_topics",
"doc_event_doc_ad_sim_entities",
]
CONTINUOUS_COLUMNS = BASE_CONT_COLUMNS + SIM_COLUMNS + [DISPLAY_ID_COLUMN]
exclude_conts = ["publish_time", "publish_time_promo", "timestamp"]
NUMERIC_COLUMNS = [col for col in CONTINUOUS_COLUMNS if col not in exclude_conts]
CATEGORICAL_COLUMNS = [
"ad_id",
"document_id",
"platform",
"document_id_promo",
"campaign_id",
"advertiser_id",
"source_id",
"publisher_id",
"source_id_promo",
"publisher_id_promo",
]
CTR_INPUTS = [
"ad_id",
"source_id_promo",
"document_id_promo",
"publisher_id_promo",
"advertiser_id",
"campaign_id",
]
EXCLUDE_COLUMNS = [
"publish_time",
"publish_time_promo",
"timestamp",
"ad_id_clicked_sum",
"source_id_promo_count",
"source_id_promo_clicked_sum",
"document_id_promo_clicked_sum",
"publisher_id_promo_count",
"publisher_id_promo_clicked_sum",
"advertiser_id_count",
"advertiser_id_clicked_sum",
"campaign_id_count",
"campaign_id_clicked_sum",
"uuid",
"day_event",
]
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/data/outbrain/nvtabular/utils/feature_description.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from data.outbrain.features import HASH_BUCKET_SIZES
def create_config(args):
data_bucket_folder = args.data_path
output_bucket_folder = args.metadata_path
temporary_folder = os.path.join("/tmp", "preprocessed")
train_path = os.path.join(temporary_folder, "train_gdf.parquet")
valid_path = os.path.join(temporary_folder, "valid_gdf.parquet")
stats_file = os.path.join(temporary_folder, "stats_wnd_workflow")
output_train_folder = os.path.join(output_bucket_folder, "train/")
output_valid_folder = os.path.join(output_bucket_folder, "valid/")
hash_spec = HASH_BUCKET_SIZES
config = {
"stats_file": stats_file,
"data_bucket_folder": data_bucket_folder,
"output_bucket_folder": output_bucket_folder,
"output_train_folder": output_train_folder,
"temporary_folder": temporary_folder,
"train_path": train_path,
"valid_path": valid_path,
"output_valid_folder": output_valid_folder,
"hash_spec": hash_spec,
"dask": args.use_dask,
}
return config
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/data/outbrain/nvtabular/utils/setup.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import cudf
import cupy
import numpy as np
import nvtabular as nvt
import rmm
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from data.outbrain.features import get_features_keys
from data.outbrain.nvtabular.utils.feature_description import (
CATEGORICAL_COLUMNS, CTR_INPUTS, DISPLAY_ID_COLUMN)
from nvtabular import ColumnGroup
from nvtabular.io import Shuffle
from nvtabular.ops import (Categorify, ColumnSelector, FillMedian, FillMissing,
HashBucket, JoinExternal, JoinGroupby, LambdaOp,
ListSlice, LogOp, Normalize, Operator, Rename)
from nvtabular.ops.column_similarity import ColumnSimilarity
from nvtabular.utils import device_mem_size, get_rmm_size
# Offset (in ms) that shifts the dataset's relative click timestamps back to
# absolute Unix epoch milliseconds
TIMESTAMP_DELTA = 1465876799998
def get_devices():
try:
devices = [
int(device) for device in os.environ["CUDA_VISIBLE_DEVICES"].split(",")
]
except KeyError:
from pynvml import nvmlDeviceGetCount, nvmlInit
nvmlInit()
devices = list(range(nvmlDeviceGetCount()))
return devices
class DaysSincePublished(Operator):
def transform(self, columns, gdf):
for column in columns.names:
col = gdf[column]
col.loc[col == ""] = None
col = col.astype("datetime64[ns]")
timestamp = (gdf["timestamp"] + TIMESTAMP_DELTA).astype("datetime64[ms]")
            delta = (timestamp - col).dt.days
            # zero out negative deltas and anything published more than ten years ago
            gdf[column + "_days_since_published"] = (
                delta * (delta >= 0) * (delta <= 10 * 365)
            )
return gdf
def output_column_names(self, columns):
return ColumnSelector(
[column + "_days_since_published" for column in columns.names]
)
def dependencies(self):
return ["timestamp"]
def _df_to_coo(df, row="document_id", col=None, data="confidence_level"):
return cupy.sparse.coo_matrix((df[data].values, (df[row].values, df[col].values)))
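# Illustrative note, not part of the original file: _df_to_coo turns the
# long-format (document_id, <attribute>_id, confidence_level) tables into
# sparse document-by-attribute matrices; ColumnSimilarity consumes them later
# in this file to compute tfidf similarity between the event document and the
# promoted document.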
def setup_rmm_pool(client, pool_size):
pool_size = get_rmm_size(pool_size)
client.run(rmm.reinitialize, pool_allocator=True, initial_pool_size=pool_size)
return None
def create_client(devices, local_directory):
client = None
if len(devices) > 1:
device_size = device_mem_size(kind="total")
device_limit = int(0.8 * device_size)
device_pool_size = int(0.8 * device_size)
cluster = LocalCUDACluster(
n_workers=len(devices),
CUDA_VISIBLE_DEVICES=",".join(str(x) for x in devices),
device_memory_limit=device_limit,
local_directory=local_directory,
)
client = Client(cluster)
setup_rmm_pool(client, device_pool_size)
return client
def create_workflow(data_bucket_folder, hash_spec, devices, local_directory, dask):
rmm.reinitialize(managed_memory=False)
documents_categories_path = os.path.join(
data_bucket_folder, "documents_categories.csv"
)
documents_topics_path = os.path.join(data_bucket_folder, "documents_topics.csv")
documents_entities_path = os.path.join(data_bucket_folder, "documents_entities.csv")
documents_categories_cudf = cudf.read_csv(documents_categories_path)
documents_topics_cudf = cudf.read_csv(documents_topics_path)
documents_entities_cudf = cudf.read_csv(documents_entities_path)
documents_entities_cudf["entity_id"] = (
documents_entities_cudf["entity_id"].astype("category").cat.codes
)
documents_categories_grouped = (
documents_categories_cudf.groupby("document_id")
.agg({"category_id": "collect", "confidence_level": "collect"})
.reset_index()
)
documents_categories_grouped = documents_categories_grouped.rename(
columns={
"category_id": "category_id_list",
"confidence_level": "confidence_level_cat_list",
}
)
documents_entities_grouped = (
documents_entities_cudf.groupby("document_id")
.agg({"entity_id": "collect", "confidence_level": "collect"})
.reset_index()
)
documents_entities_grouped = documents_entities_grouped.rename(
columns={
"entity_id": "entity_id_list",
"confidence_level": "confidence_level_ent_list",
}
)
documents_topics_grouped = (
documents_topics_cudf.groupby("document_id")
.agg({"topic_id": "collect", "confidence_level": "collect"})
.reset_index()
)
documents_topics_grouped = documents_topics_grouped.rename(
columns={
"topic_id": "topic_id_list",
"confidence_level": "confidence_level_top_list",
}
)
categories = _df_to_coo(documents_categories_cudf, col="category_id")
topics = _df_to_coo(documents_topics_cudf, col="topic_id")
entities = _df_to_coo(documents_entities_cudf, col="entity_id")
del documents_categories_cudf, documents_topics_cudf, documents_entities_cudf
ctr_thresh = {
"ad_id": 5,
"source_id_promo": 10,
"publisher_id_promo": 10,
"advertiser_id": 10,
"campaign_id": 10,
"document_id_promo": 5,
}
cat_cols = ColumnGroup(CATEGORICAL_COLUMNS)
def get_slice(num_char):
def lambda_slice(col, gdf):
return col.str.slice(0, num_char)
return lambda_slice
geo_location = ColumnGroup(["geo_location"])
country = geo_location >> LambdaOp(get_slice(2)) >> Rename(postfix="_country")
state = geo_location >> LambdaOp(get_slice(5)) >> Rename(postfix="_state")
geo_features = geo_location + country + state
dates = ["publish_time", "publish_time_promo"]
date_features = dates >> DaysSincePublished() >> FillMedian() >> LogOp
ctr_inputs = ColumnGroup(CTR_INPUTS)
stat_cols = ctr_inputs >> JoinGroupby(cont_cols=["clicked"], stats=["sum", "count"])
def calculate_ctr_with_filter(col, gdf):
col = col.astype(np.float32)
ctr_col_name = col.name.replace("_clicked_sum", "")
ctr_count_name = col.name.replace("_clicked_sum", "_count")
col = col / gdf[ctr_count_name] # CTR
col = col.where(gdf[ctr_count_name] >= ctr_thresh[ctr_col_name], 0) # Filter
return col
ctr_selected_features = [column + "_clicked_sum" for column in ctr_inputs.names]
dependency_features = [column + "_count" for column in ctr_inputs.names]
ctr_cols = (
stat_cols[ctr_selected_features]
>> LambdaOp(
calculate_ctr_with_filter, dependency=stat_cols[dependency_features]
)
>> Rename(f=lambda x: x.replace("_clicked_sum", "_ctr"))
)
stat_cols = stat_cols >> FillMissing() >> LogOp() >> Normalize()
ctr_cols = ctr_cols >> FillMissing()
    # the last three entries of hash_spec are the multi-hot list columns (see HASH_BUCKET_SIZES)
    cat_cols = cat_cols + geo_features >> HashBucket(dict(list(hash_spec.items())[:-3]))
sim_features_categories = (
[["document_id", "document_id_promo"]]
>> ColumnSimilarity(categories, metric="tfidf", on_device=False)
>> Rename(postfix="_categories")
)
sim_features_topics = (
[["document_id", "document_id_promo"]]
>> ColumnSimilarity(topics, metric="tfidf", on_device=False)
>> Rename(postfix="_topics")
)
sim_features_entities = (
[["document_id", "document_id_promo"]]
>> ColumnSimilarity(entities, metric="tfidf", on_device=False)
>> Rename(postfix="_entities")
)
sim_features = sim_features_categories + sim_features_topics + sim_features_entities
joined = ["document_id"] >> JoinExternal(
documents_categories_grouped,
on=["document_id"],
on_ext=["document_id"],
how="left",
columns_ext=["category_id_list", "confidence_level_cat_list", "document_id"],
cache="device",
)
joined = joined >> JoinExternal(
documents_entities_grouped,
on=["document_id"],
on_ext=["document_id"],
how="left",
columns_ext=["entity_id_list", "confidence_level_ent_list", "document_id"],
cache="device",
)
joined = joined >> JoinExternal(
documents_topics_grouped,
on=["document_id"],
on_ext=["document_id"],
how="left",
columns_ext=["topic_id_list", "confidence_level_top_list", "document_id"],
cache="device",
)
categorified_multihots = (
joined[["topic_id_list", "entity_id_list", "category_id_list"]]
>> Categorify()
>> FillMissing()
>> ListSlice(3)
>> HashBucket(dict(list(hash_spec.items())[-3:]))
)
features = (
date_features
+ ctr_cols
+ stat_cols
+ cat_cols
+ sim_features
+ categorified_multihots
+ ["clicked", "display_id"]
)
client = (
create_client(devices=devices, local_directory=local_directory)
if dask
else None
)
required_features = get_features_keys() + ["clicked"]
workflow = nvt.Workflow(features[required_features], client=client)
return workflow
def create_parquets(data_bucket_folder, train_path, valid_path):
cupy.random.seed(seed=0)
rmm.reinitialize(managed_memory=True)
documents_meta_path = os.path.join(data_bucket_folder, "documents_meta.csv")
clicks_train_path = os.path.join(data_bucket_folder, "clicks_train.csv")
events_path = os.path.join(data_bucket_folder, "events.csv")
promoted_content_path = os.path.join(data_bucket_folder, "promoted_content.csv")
documents_meta = cudf.read_csv(documents_meta_path, na_values=["\\N", ""])
documents_meta["publisher_id"].fillna(
documents_meta["publisher_id"].isnull().cumsum()
+ documents_meta["publisher_id"].max()
+ 1,
inplace=True,
)
merged = (
cudf.read_csv(clicks_train_path, na_values=["\\N", ""])
.merge(
cudf.read_csv(events_path, na_values=["\\N", ""]),
on=DISPLAY_ID_COLUMN,
how="left",
suffixes=("", "_event"),
)
.merge(
cudf.read_csv(promoted_content_path, na_values=["\\N", ""]),
on="ad_id",
how="left",
suffixes=("", "_promo"),
)
.merge(documents_meta, on="document_id", how="left")
.merge(
documents_meta,
left_on="document_id_promo",
right_on="document_id",
how="left",
suffixes=("", "_promo"),
)
)
merged["day_event"] = (merged["timestamp"] / 1000 / 60 / 60 / 24).astype(int)
merged["platform"] = merged["platform"].fillna(1)
merged["platform"] = merged["platform"] - 1
display_event = (
merged[[DISPLAY_ID_COLUMN, "day_event"]].drop_duplicates().reset_index()
)
random_state = cudf.Series(cupy.random.uniform(size=len(display_event)))
valid_ids, train_ids = display_event.scatter_by_map(
((display_event.day_event <= 10) & (random_state > 0.2)).astype(int)
)
valid_ids = valid_ids[DISPLAY_ID_COLUMN].drop_duplicates()
train_ids = train_ids[DISPLAY_ID_COLUMN].drop_duplicates()
valid_set = merged[merged[DISPLAY_ID_COLUMN].isin(valid_ids)]
train_set = merged[merged[DISPLAY_ID_COLUMN].isin(train_ids)]
valid_set = valid_set.sort_values(DISPLAY_ID_COLUMN)
train_set.to_parquet(train_path, compression=None)
valid_set.to_parquet(valid_path, compression=None)
del merged, train_set, valid_set
def save_stats(
data_bucket_folder,
output_train_folder,
train_path,
output_valid_folder,
valid_path,
stats_file,
hash_spec,
local_directory,
dask,
):
devices = get_devices()
shuffle = Shuffle.PER_PARTITION if len(devices) > 1 else True
workflow = create_workflow(
data_bucket_folder=data_bucket_folder,
hash_spec=hash_spec,
devices=devices,
local_directory=local_directory,
dask=dask,
)
train_dataset = nvt.Dataset(train_path, part_size="150MB")
valid_dataset = nvt.Dataset(valid_path, part_size="150MB")
workflow.fit(train_dataset)
workflow.transform(train_dataset).to_parquet(
output_path=output_train_folder, shuffle=shuffle, out_files_per_proc=8
)
workflow.transform(valid_dataset).to_parquet(
output_path=output_valid_folder, shuffle=None, output_files=8
)
workflow.save(stats_file)
return workflow
def clean(path):
shutil.rmtree(path)
def execute_pipeline(config):
required_folders = [
config["temporary_folder"],
config["output_train_folder"],
config["output_valid_folder"],
]
for folder in required_folders:
os.makedirs(folder, exist_ok=True)
create_parquets(
data_bucket_folder=config["data_bucket_folder"],
train_path=config["train_path"],
valid_path=config["valid_path"],
)
save_stats(
data_bucket_folder=config["data_bucket_folder"],
output_train_folder=config["output_train_folder"],
train_path=config["train_path"],
output_valid_folder=config["output_valid_folder"],
valid_path=config["valid_path"],
stats_file=config["stats_file"],
hash_spec=config["hash_spec"],
local_directory=config["temporary_folder"],
dask=config["dask"],
)
clean(config["temporary_folder"])
clean("./categories")
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/data/outbrain/nvtabular/utils/workflow.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import horovod.tensorflow as hvd
import tensorflow as tf
from trainer.utils.benchmark import ThroughputCalculator
from trainer.utils.evaluator import Evaluator
from trainer.utils.schedulers import LearningRateScheduler
from trainer.utils.trainer import Trainer
from data.outbrain.defaults import MAP_FEATURE_CHANNEL, MULTIHOT_CHANNEL
def run(args, model, config):
train_dataset = config["train_dataset"]
eval_dataset = config["eval_dataset"]
feature_spec = config["feature_spec"]
multihot_features = feature_spec.get_names_by_channel(MULTIHOT_CHANNEL)
multihot_hotness_dict = feature_spec.get_multihot_hotnesses(multihot_features)
    steps_per_epoch = len(train_dataset)
    # Workers may see different numbers of batches; take the global minimum so
    # that every rank runs the same number of steps per epoch.
    steps_per_epoch = min(hvd.allgather(tf.constant([steps_per_epoch], dtype=tf.int32)))
    steps_per_epoch = steps_per_epoch.numpy()
steps = int(steps_per_epoch * args.num_epochs)
deep_optimizer = tf.keras.optimizers.RMSprop(
learning_rate=args.deep_learning_rate, rho=0.5
)
wide_optimizer = tf.keras.optimizers.Ftrl(learning_rate=args.linear_learning_rate)
if not args.cpu:
deep_optimizer = hvd.DistributedOptimizer(
deep_optimizer, compression=hvd.Compression.fp16
)
wide_optimizer = hvd.DistributedOptimizer(
wide_optimizer, compression=hvd.Compression.fp16
)
if args.amp:
deep_optimizer = tf.keras.mixed_precision.LossScaleOptimizer(
deep_optimizer, dynamic=True
)
wide_optimizer = tf.keras.mixed_precision.LossScaleOptimizer(
wide_optimizer, dynamic=True
)
scheduler = LearningRateScheduler(
args=args, steps_per_epoch=steps_per_epoch, optimizer=deep_optimizer
)
throughput_calculator = ThroughputCalculator(args)
compiled_loss = tf.keras.losses.BinaryCrossentropy()
maybe_map_column = None
if args.map_calculation_enabled:
maybe_map_column = feature_spec.get_names_by_channel(MAP_FEATURE_CHANNEL)[0]
evaluator = Evaluator(
model=model,
throughput_calculator=throughput_calculator,
eval_dataset=eval_dataset,
compiled_loss=compiled_loss,
args=args,
maybe_map_column=maybe_map_column,
multihot_hotnesses_dict=multihot_hotness_dict,
num_auc_thresholds=args.num_auc_thresholds
)
trainer = Trainer(
model=model,
scheduler=scheduler,
deep_optimizer=deep_optimizer,
wide_optimizer=wide_optimizer,
throughput_calculator=throughput_calculator,
compiled_loss=compiled_loss,
steps=steps,
args=args,
train_dataset=train_dataset,
evaluator=evaluator,
multihot_hotnesses_dict=multihot_hotness_dict
)
trainer.maybe_restore_checkpoint()
# Wrap datasets with .epochs(n) method to speed up data loading
current_epoch = trainer.current_epoch
trainer.prepare_dataset(current_epoch)
evaluator.prepare_dataset(current_epoch)
# Update max_steps to make sure that all workers finish training at the same time
max_training_steps = len(trainer.train_dataset)
max_training_steps = min(
hvd.allgather(tf.constant([max_training_steps], dtype=tf.int32))
)
max_training_steps = int(max_training_steps.numpy())
trainer.max_steps = max_training_steps
if args.evaluate:
evaluator.eval(trainer.current_step_var)
else:
trainer.run_loop()
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/trainer/run.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
import itertools
import operator
import os
import pathlib
import re
import pynvml
class Device:
# assume nvml returns list of 64 bit ints
_nvml_bit_affinity = 64
_nvml_affinity_elements = (
os.cpu_count() + _nvml_bit_affinity - 1
) // _nvml_bit_affinity
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def get_name(self):
return pynvml.nvmlDeviceGetName(self.handle)
def get_uuid(self):
return pynvml.nvmlDeviceGetUUID(self.handle)
def get_cpu_affinity(self, scope):
if scope == "socket":
nvml_scope = pynvml.NVML_AFFINITY_SCOPE_SOCKET
elif scope == "node":
nvml_scope = pynvml.NVML_AFFINITY_SCOPE_NODE
else:
raise RuntimeError("Unknown scope")
affinity_string = ""
for j in pynvml.nvmlDeviceGetCpuAffinityWithinScope(
self.handle, Device._nvml_affinity_elements, nvml_scope
):
# assume nvml returns list of 64 bit ints
affinity_string = "{:064b}".format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
ret = [i for i, e in enumerate(affinity_list) if e != 0]
return ret
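    # Illustrative note, not part of the original file: for a single mask
    # element j = 0b101, "{:064b}".format(j) yields 61 zeros followed by
    # "101"; reversing the digit list puts core 0 at index 0, so the decoded
    # affinity is [0, 2].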
def get_thread_siblings_list():
"""
Returns a list of 2-element integer tuples representing pairs of
hyperthreading cores.
"""
path = "/sys/devices/system/cpu/cpu*/topology/thread_siblings_list"
thread_siblings_list = []
pattern = re.compile(r"(\d+)\D(\d+)")
    # path[0] is the filesystem root "/"; path[1:] is the glob pattern relative to it
    for fname in pathlib.Path(path[0]).glob(path[1:]):
with open(fname) as f:
content = f.read().strip()
res = pattern.findall(content)
if res:
pair = tuple(sorted(map(int, res[0])))
thread_siblings_list.append(pair)
thread_siblings_list = list(set(thread_siblings_list))
return thread_siblings_list
def build_thread_siblings_dict(siblings_list):
siblings_dict = {}
for siblings_tuple in siblings_list:
for core in siblings_tuple:
siblings_dict[core] = siblings_tuple
return siblings_dict
def group_list_by_key(the_list, key):
sorted_list = sorted(the_list, key=key)
grouped = [tuple(group) for key, group in itertools.groupby(sorted_list, key=key)]
return grouped
def group_by_siblings(affinities):
siblings_list = get_thread_siblings_list()
siblings_dict = build_thread_siblings_dict(siblings_list)
siblings_key = lambda x: siblings_dict.get(x, (x,))
affinities = [
tuple(group_list_by_key(affinity, key=siblings_key)) for affinity in affinities
]
return affinities
def group_by_node(socket_affinities, node_affinities):
socket_node_assigned_cores = collections.defaultdict(list)
for socket, node_cores in zip(socket_affinities, node_affinities):
socket_node_assigned_cores[socket].extend(node_cores)
socket_node_assigned_cores = {
key: tuple(sorted(set(value)))
for key, value in socket_node_assigned_cores.items()
}
node_grouping = collections.defaultdict(list)
for socket_cores, assigned_cores in socket_node_assigned_cores.items():
unassigned_cores = sorted(list(set(socket_cores) - set(assigned_cores)))
for assigned_core in assigned_cores:
node_grouping[assigned_core].append(assigned_core)
for assigned, unassigned in zip(
itertools.cycle(assigned_cores), unassigned_cores
):
node_grouping[assigned].append(unassigned)
node_grouping = {key: tuple(value) for key, value in node_grouping.items()}
grouped_affinities = [
tuple(node_grouping[item] for item in node_affinity)
for node_affinity in node_affinities
]
return grouped_affinities
def ungroup_by_nodes(affinities, scope):
if scope == "socket":
affinities = [list(itertools.chain(*zip(*affinity))) for affinity in affinities]
elif scope == "node":
affinities = [[group[0] for group in affinity] for affinity in affinities]
return affinities
def ungroup_by_siblings(affinities, cores):
if cores == "all_logical":
affinities = [list(itertools.chain(*affinity)) for affinity in affinities]
elif cores == "single_logical":
affinities = [[group[0] for group in affinity] for affinity in affinities]
else:
raise RuntimeError("Unknown cores mode")
return affinities
def check_core_count(affinities, min_cores=1, max_cores=None):
for gpu_id, affinity in enumerate(affinities):
if len(affinity) < min_cores:
            raise RuntimeError(
                f"Number of available physical cores for GPU {gpu_id} is less "
                f"than the predefined minimum, min_cores={min_cores}, available "
                f"physical cores: {affinity} (count={len(affinity)})"
            )
if max_cores is not None:
affinities = [affinity[:max_cores] for affinity in affinities]
return affinities
def ungroup_all_and_check_count(affinities, scope, cores, min_cores=1, max_cores=None):
affinities = ungroup_by_nodes(affinities, scope)
affinities = check_core_count(affinities, min_cores, max_cores)
affinities = ungroup_by_siblings(affinities, cores)
return affinities
def check_affinities(affinities):
# sets of cores should be either identical or disjoint
for i, j in itertools.product(affinities, affinities):
if not set(i) == set(j) and not set(i).isdisjoint(set(j)):
raise RuntimeError(
f"Sets of cores should be either identical or disjoint, "
f"but got {i} and {j}."
)
def get_affinities(nproc_per_node, scope, exclude_unavailable_cores=True):
devices = [Device(i) for i in range(nproc_per_node)]
affinities = [dev.get_cpu_affinity(scope) for dev in devices]
if exclude_unavailable_cores:
available_cores = os.sched_getaffinity(0)
affinities = [
sorted(list(set(affinity) & available_cores)) for affinity in affinities
]
check_affinities(affinities)
return affinities
def get_grouped_affinities(nproc_per_node, exclude_unavailable_cores=True):
socket_affinities = get_affinities(
nproc_per_node, "socket", exclude_unavailable_cores
)
node_affinities = get_affinities(nproc_per_node, "node", exclude_unavailable_cores)
sibling_socket_affinities = group_by_siblings(socket_affinities)
sibling_node_affinities = group_by_siblings(node_affinities)
grouped_affinities = group_by_node(
sibling_socket_affinities, sibling_node_affinities
)
return grouped_affinities
def get_all(nproc_per_node, scope, cores, min_cores, max_cores):
"""
The process is assigned with all available physical CPU cores recommended by
pynvml for the GPU with a given id.
Assignment automatically includes available hyperthreading siblings if
cores='all_logical'.
Args:
nproc_per_node: number of processes per node
scope: scope for retrieving affinity from pynvml, 'node' or 'socket'
cores: 'all_logical' or 'single_logical'
"""
affinities = get_affinities(nproc_per_node, scope)
affinities = group_by_siblings(affinities)
node_affinities = group_by_siblings(get_affinities(nproc_per_node, "node"))
all_node_affinities = functools.reduce(operator.add, node_affinities)
affinities = [
tuple(
sorted(
affinity,
key=lambda x: (
0 if x in all_node_affinities else 1,
x,
),
)
)
for affinity in affinities
]
affinities = check_core_count(affinities, min_cores, max_cores)
affinities = ungroup_by_siblings(affinities, cores)
return affinities
def get_single(nproc_per_node, scope, cores, min_cores=1, max_cores=1):
"""
The process is assigned with the first available physical CPU core from the
list of all physical CPU cores recommended by pynvml for the GPU with a
given id.
Assignment automatically includes available hyperthreading siblings if
cores='all_logical'.
Args:
nproc_per_node: number of processes per node
scope: scope for retrieving affinity from pynvml, 'node' or 'socket'
cores: 'all_logical' or 'single_logical'
"""
grouped_affinities = get_grouped_affinities(nproc_per_node)
ungrouped_affinities = ungroup_all_and_check_count(
grouped_affinities, scope, cores, min_cores, max_cores
)
return ungrouped_affinities
def get_single_unique(nproc_per_node, scope, cores, min_cores=1, max_cores=1):
"""
The process is assigned with a single unique available physical CPU core
from the list of all physical CPU cores recommended by pynvml for the GPU
with a given id.
Assignment automatically includes available hyperthreading siblings if
cores='all_logical'.
Args:
nproc_per_node: number of processes per node
scope: scope for retrieving affinity from pynvml, 'node' or 'socket'
cores: 'all_logical' or 'single_logical'
"""
grouped_affinities = get_grouped_affinities(nproc_per_node)
affinities = []
assigned_groups = set()
for grouped_affinity in grouped_affinities:
for group in grouped_affinity:
if group not in assigned_groups:
affinities.append([group])
assigned_groups.add(group)
break
ungrouped_affinities = ungroup_all_and_check_count(
affinities, scope, cores, min_cores, max_cores
)
return ungrouped_affinities
def get_unique(
nproc_per_node,
scope,
cores,
mode,
min_cores,
max_cores,
balanced=True,
):
"""
The process is assigned with a unique subset of available physical CPU
cores from the list of all CPU cores recommended by pynvml for the GPU with
a given id.
Assignment automatically includes available hyperthreading siblings if
cores='all_logical'.
Args:
nproc_per_node: number of processes per node
scope: scope for retrieving affinity from pynvml, 'node' or 'socket'
cores: 'all_logical' or 'single_logical'
mode: 'unique_contiguous' or 'unique_interleaved'
        balanced: assign an equal number of physical cores to each process
"""
grouped_affinities = get_grouped_affinities(nproc_per_node)
grouped_affinities_to_device_ids = collections.defaultdict(list)
for idx, grouped_affinity in enumerate(grouped_affinities):
grouped_affinities_to_device_ids[tuple(grouped_affinity)].append(idx)
# compute minimal number of physical cores per GPU across all GPUs and
# sockets, code assigns this number of cores per GPU if balanced == True
min_physical_cores_per_gpu = min(
[
len(cores) // len(gpus)
for cores, gpus in grouped_affinities_to_device_ids.items()
]
)
grouped_unique_affinities = [None] * nproc_per_node
for (
grouped_affinity,
device_ids,
) in grouped_affinities_to_device_ids.items():
devices_per_group = len(device_ids)
if balanced:
cores_per_device = min_physical_cores_per_gpu
grouped_affinity = grouped_affinity[
: devices_per_group * min_physical_cores_per_gpu
]
else:
cores_per_device = len(grouped_affinity) // devices_per_group
for subgroup_id, device_id in enumerate(device_ids):
# In theory there should be no difference in performance between
# 'interleaved' and 'contiguous' pattern on Intel-based DGX-1,
# but 'contiguous' should be better for DGX A100 because on AMD
# Rome 4 consecutive cores are sharing L3 cache.
# TODO: code doesn't attempt to automatically detect layout of
# L3 cache, also external environment may already exclude some
# cores, this code makes no attempt to detect it and to align
# mapping to multiples of 4.
if mode == "unique_interleaved":
unique_grouped_affinity = list(
grouped_affinity[subgroup_id::devices_per_group]
)
elif mode == "unique_contiguous":
unique_grouped_affinity = list(
grouped_affinity[
subgroup_id
* cores_per_device: (subgroup_id + 1)
* cores_per_device
]
)
else:
raise RuntimeError("Unknown set_unique mode")
grouped_unique_affinities[device_id] = unique_grouped_affinity
ungrouped_affinities = ungroup_all_and_check_count(
grouped_unique_affinities, scope, cores, min_cores, max_cores
)
return ungrouped_affinities
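# Illustrative usage sketch, not part of the original file: pin each worker
# process before the framework spawns its thread pools. local_rank would
# typically come from hvd.local_rank() or the launcher environment; the
# explicit nvmlInit() call is an assumption for standalone use.
def _example_pin_worker(local_rank: int, nproc_per_node: int):
    pynvml.nvmlInit()
    eligible_cores = set_affinity(
        local_rank,
        nproc_per_node,
        mode="unique_contiguous",
        scope="socket",  # per the docstring below, uses all DGX A100 cores
    )
    return eligible_cores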
def set_affinity(
gpu_id,
nproc_per_node,
*,
mode="unique_contiguous",
scope="node",
cores="all_logical",
balanced=True,
min_cores=1,
max_cores=None,
):
"""
The process is assigned with a proper CPU affinity that matches CPU-GPU
hardware architecture on a given platform. Usually, setting proper affinity
improves and stabilizes the performance of deep learning training workloads.
This function assumes that the workload runs in multi-process single-device
mode (there are multiple training processes, and each process is running on
a single GPU). This is typical for multi-GPU data-parallel training
workloads (e.g., using `torch.nn.parallel.DistributedDataParallel`).
Available affinity modes:
* 'all' - the process is assigned with all available physical CPU cores
recommended by pynvml for the GPU with a given id.
* 'single' - the process is assigned with the first available
physical CPU core from the list of all physical CPU cores recommended by
pynvml for the GPU with a given id (multiple GPUs could be assigned with
the same CPU core).
* 'single_unique' - the process is assigned with a single unique
available physical CPU core from the list of all CPU cores recommended by
pynvml for the GPU with a given id.
* 'unique_interleaved' - the process is assigned with a unique subset of
available physical CPU cores from the list of all physical CPU cores
recommended by pynvml for the GPU with a given id, cores are assigned with
interleaved indexing pattern
* 'unique_contiguous' - (the default mode) the process is assigned with a
unique subset of available physical CPU cores from the list of all physical
CPU cores recommended by pynvml for the GPU with a given id, cores are
assigned with contiguous indexing pattern
Available "scope" modes:
* 'node' - sets the scope for pynvml affinity queries to NUMA node
* 'socket' - sets the scope for pynvml affinity queries to processor socket
Available "cores" modes:
* 'all_logical' - assigns the process with all logical cores associated with
a given corresponding physical core (i.e., automatically includes all
available hyperthreading siblings)
* 'single_logical' - assigns the process with only one logical core
associated with a given corresponding physical core (i.e., excludes
hyperthreading siblings)
'unique_contiguous' is the recommended mode for deep learning
training workloads on NVIDIA DGX machines.
Args:
gpu_id: integer index of a GPU, value from 0 to 'nproc_per_node' - 1
nproc_per_node: number of processes per node
mode: affinity mode
scope: scope for retrieving affinity from pynvml, 'node' or 'socket'
cores: 'all_logical' or 'single_logical'
balanced: assign an equal number of physical cores to each process,
affects only 'unique_interleaved' and
'unique_contiguous' affinity modes
min_cores: (default=1) the intended minimum number of physical cores per
process, code raises RuntimeError if the number of available cores
is less than 'min_cores'
        max_cores: (default=None) the intended maximum number of physical cores
            per process, the list of assigned cores is trimmed to the first
            'max_cores' cores if max_cores is not None
Returns a set of logical CPU cores on which the process is eligible to run.
WARNING: On DGX A100, only half of the CPU cores have direct access to GPUs.
set_affinity with scope='node' restricts execution only to the CPU cores
directly connected to GPUs. On DGX A100, it will limit the code to half of
the CPU cores and half of CPU memory bandwidth (which may be fine for many
DL models). Use scope='socket' to use all available DGX A100 CPU cores.
WARNING: Intel's OpenMP implementation resets affinity on the first call to
an OpenMP function after a fork. It's recommended to run with env variable:
`KMP_AFFINITY=disabled` if the affinity set by gpu_affinity should be
preserved after a fork (e.g. in PyTorch DataLoader workers).
Example:
import argparse
import os
import gpu_affinity
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--local_rank',
type=int,
default=os.getenv('LOCAL_RANK', 0),
)
args = parser.parse_args()
nproc_per_node = torch.cuda.device_count()
affinity = gpu_affinity.set_affinity(args.local_rank, nproc_per_node)
print(f'{args.local_rank}: core affinity: {affinity}')
if __name__ == "__main__":
main()
Launch the example with:
python -m torch.distributed.launch --nproc_per_node <#GPUs> example.py
"""
pynvml.nvmlInit()
if mode == "all":
affinity = get_all(nproc_per_node, scope, cores, min_cores, max_cores)
elif mode == "single":
affinity = get_single(nproc_per_node, scope, cores)
elif mode == "single_unique":
affinity = get_single_unique(nproc_per_node, scope, cores)
elif mode == "unique_interleaved" or mode == "unique_contiguous":
affinity = get_unique(
nproc_per_node,
scope,
cores,
mode,
min_cores,
max_cores,
balanced,
)
else:
raise RuntimeError("Unknown affinity mode")
os.sched_setaffinity(0, affinity[gpu_id])
    current_affinity = os.sched_getaffinity(0)
    return current_affinity
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/trainer/utils/gpu_affinity.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import dllogger
import horovod.tensorflow as hvd
import tensorflow as tf
from horovod.tensorflow.mpi_ops import Sum
class ThroughputCalculator:
def __init__(self, args):
self.args = args
self.boundary = max(self.args.benchmark_warmup_steps, 1)
self.step = 0
self.t0 = None
self.start_batch_time = None
with tf.device("/CPU:0"):
self.samples = tf.Variable(0, trainable=False, dtype=tf.int64)
def _init_benchmark(self):
self.t0 = time.perf_counter()
def on_epoch_end_log(self, step, shape):
batch_time = time.perf_counter() - self.start_batch_time
self.samples.assign_add(shape)
workers = hvd.size() if not self.args.cpu else 1
samplesps = shape * workers / batch_time
if self.args.cpu or hvd.rank() == 0:
dllogger.log(data={"batch_samplesps": samplesps}, step=(1, step))
def on_benchmark_end_log(self, eval_benchmark=False):
train_time = time.perf_counter() - self.t0
hvd.join()
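        # Sum the per-worker sample counters so the reported throughput
        # reflects all Horovod ranks, not just the local one.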
if not self.args.cpu:
all_samples = hvd.allreduce(self.samples, op=Sum)
else:
all_samples = self.samples
all_samples = all_samples.numpy()
if self.args.cpu or hvd.rank() == 0:
key = "train_throughput" if not eval_benchmark else "validation_throughput"
throughput = all_samples / train_time
dllogger.log(data={key: throughput}, step=tuple())
def __call__(self, shape, eval_benchmark=False):
if self.args.benchmark:
if self.step == self.boundary:
self._init_benchmark()
if self.step > self.boundary:
self.on_epoch_end_log(self.step, shape)
if self.args.benchmark_steps <= self.step:
self.on_benchmark_end_log(eval_benchmark=eval_benchmark)
exit(0)
self.step += 1
self.start_batch_time = time.perf_counter()
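# Minimal usage sketch (hypothetical values, not part of the module): drive the
# calculator on CPU with a fake argparse namespace. dllogger gets an empty
# backend list so per-step logs are swallowed, and benchmark_steps stays above
# the loop length so the final Horovod reduction is never reached.
def _example_throughput_usage():
    from types import SimpleNamespace

    dllogger.init(backends=[])
    args = SimpleNamespace(
        benchmark=True, benchmark_warmup_steps=2, benchmark_steps=100, cpu=True
    )
    calc = ThroughputCalculator(args)
    for _ in range(10):
        calc(shape=1024)  # local batch size processed in this step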
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/trainer/utils/benchmark.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
DEFAULT_DIR = "/outbrain"
def parse_args():
parser = argparse.ArgumentParser(
description="Tensorflow2 WideAndDeep Model",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=True,
)
locations = parser.add_argument_group("location of datasets")
locations.add_argument(
"--dataset_path",
type=str,
default=f"{DEFAULT_DIR}/data",
help="Dataset base directory, relative to which path to feature_spec and paths in feature_spec are resolved"
)
locations.add_argument(
"--fspec_file",
type=str,
default="feature_spec.yaml",
help="Path to the feature spec file, relative to dataset_path"
)
locations.add_argument(
"--embedding_sizes_file",
type=str,
default="data/outbrain/embedding_sizes.json",
help="Path to the file containing a dictionary of embedding sizes for categorical features"
)
locations.add_argument(
"--use_checkpoint",
default=False,
action="store_true",
help="Use checkpoint stored in model_dir path",
)
locations.add_argument(
"--model_dir",
type=str,
default=f"{DEFAULT_DIR}/checkpoints",
help="Destination where the model checkpoint will be saved",
)
locations.add_argument(
"--results_dir",
type=str,
default="/results",
help="Directory to store training results",
)
locations.add_argument(
"--log_filename",
type=str,
default="log.json",
help="Name of the file to store dlloger output",
)
training_params = parser.add_argument_group("training parameters")
training_params.add_argument(
"--global_batch_size",
type=int,
default=131072,
help="Total (global) size of training batch",
)
training_params.add_argument(
"--eval_batch_size",
type=int,
default=131072,
help="Total (global) size of evaluation batch",
)
training_params.add_argument(
"--num_epochs", type=int, default=20, help="Number of training epochs"
)
training_params.add_argument(
"--cpu", default=False, action="store_true", help="Run computations on the CPU"
)
training_params.add_argument(
"--amp",
default=False,
action="store_true",
help="Enable automatic mixed precision conversion",
)
training_params.add_argument(
"--xla", default=False, action="store_true", help="Enable XLA conversion"
)
training_params.add_argument(
"--linear_learning_rate",
type=float,
default=0.02,
help="Learning rate for linear model",
)
training_params.add_argument(
"--deep_learning_rate",
type=float,
default=0.00012,
help="Learning rate for deep model",
)
training_params.add_argument(
"--deep_warmup_epochs",
type=float,
default=6,
help="Number of learning rate warmup epochs for deep model",
)
model_construction = parser.add_argument_group("model construction")
model_construction.add_argument(
"--deep_hidden_units",
type=int,
default=[1024, 1024, 1024, 1024, 1024],
nargs="+",
help="Hidden units per layer for deep model, separated by spaces",
)
model_construction.add_argument(
"--deep_dropout",
type=float,
default=0.1,
help="Dropout regularization for deep model",
)
model_construction.add_argument(
"--combiner",
type=str,
default="sum",
choices=[
"mean",
"sum",
],
help="Type of aggregation used for multi hot categorical features",
)
run_params = parser.add_argument_group("run mode parameters")
run_params.add_argument(
"--num_auc_thresholds",
type=int,
default=8000,
help="Number of thresholds for the AUC computation",
)
run_params.add_argument(
"--disable_map_calculation",
dest="map_calculation_enabled",
action="store_false",
default=True,
help="Disable calculation of MAP metric. See ReadMe for additional dataset requirements keeping it enabled introduces."
)
run_params.add_argument(
"--evaluate",
default=False,
action="store_true",
help="Only perform an evaluation on the validation dataset, don't train",
)
run_params.add_argument(
"--benchmark",
action="store_true",
default=False,
help="Run training or evaluation benchmark to collect performance metrics",
)
run_params.add_argument(
"--benchmark_warmup_steps",
type=int,
default=500,
help="Number of warmup steps before the start of the benchmark",
)
run_params.add_argument(
"--benchmark_steps",
type=int,
default=1000,
help="Number of steps for performance benchmark",
)
run_params.add_argument(
"--affinity",
type=str,
default="unique_interleaved",
choices=[
"all",
"single",
"single_unique",
"unique_interleaved",
"unique_contiguous",
"disabled",
],
help="Type of CPU affinity",
)
return parser.parse_args()
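# Minimal sketch (not part of the module): parse an empty command line to
# inspect the defaults, the way a quick unit test might.
def _example_default_args():
    import sys

    argv_backup, sys.argv = sys.argv, sys.argv[:1]
    try:
        args = parse_args()
        assert args.global_batch_size == 131072
        assert args.affinity == "unique_interleaved"
    finally:
        sys.argv = argv_backup
    return args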
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/trainer/utils/arguments.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import multiprocessing
import os
import dllogger
import horovod.tensorflow.keras as hvd
import tensorflow as tf
from data.feature_spec import FeatureSpec
from data.outbrain.dataloader import get_dataset
from data.outbrain.defaults import TEST_MAPPING, TRAIN_MAPPING, MULTIHOT_CHANNEL, ONEHOT_CHANNEL
from trainer.utils.gpu_affinity import set_affinity
def init_cpu(args, logger):
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
init_logger(full=True, args=args, logger=logger)
logger.warning("--gpu flag not set, running computation on CPU")
raise RuntimeError("CPU not supported with nvTabular dataloader")
def init_gpu(args, logger):
hvd.init()
init_logger(full=hvd.rank() == 0, args=args, logger=logger)
if args.affinity != "disabled":
gpu_id = hvd.local_rank()
affinity = set_affinity(
gpu_id=gpu_id, nproc_per_node=hvd.size(), mode=args.affinity
)
logger.warning(f"{gpu_id}: thread affinity: {affinity}")
tf.config.threading.set_intra_op_parallelism_threads(1)
tf.config.threading.set_inter_op_parallelism_threads(
max(2, (multiprocessing.cpu_count() // hvd.size()) - 2)
)
if args.amp:
tf.keras.mixed_precision.set_global_policy("mixed_float16")
if args.xla:
tf.config.optimizer.set_jit(True)
    # Max out L2 fetch granularity (cudaLimit 0x05 is
    # cudaLimitMaxL2FetchGranularity; the value is in bytes)
import ctypes
_libcudart = ctypes.CDLL("libcudart.so")
pValue = ctypes.cast((ctypes.c_int * 1)(), ctypes.POINTER(ctypes.c_int))
_libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
_libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
assert pValue.contents.value == 128
def init_logger(args, full, logger):
if full:
logger.setLevel(logging.INFO)
log_path = os.path.join(args.results_dir, args.log_filename)
os.makedirs(args.results_dir, exist_ok=True)
dllogger.init(
backends=[
dllogger.JSONStreamBackend(
verbosity=dllogger.Verbosity.VERBOSE, filename=log_path
),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE),
]
)
logger.warning("command line arguments: {}".format(json.dumps(vars(args))))
if not os.path.exists(args.results_dir):
os.mkdir(args.results_dir)
with open("{}/args.json".format(args.results_dir), "w") as f:
json.dump(vars(args), f, indent=4)
else:
logger.setLevel(logging.ERROR)
dllogger.init(backends=[])
dllogger.log(data=vars(args), step="PARAMETER")
dllogger.metadata("streaming_map_val", {"unit": None})
dllogger.metadata("train_throughput", {"unit": "samples/s"})
dllogger.metadata("validation_throughput", {"unit": "samples/s"})
def check_embedding_sizes(embedding_sizes: dict, feature_spec: FeatureSpec) -> None:
onehot_features = feature_spec.get_names_by_channel(ONEHOT_CHANNEL)
multihot_features = feature_spec.get_names_by_channel(MULTIHOT_CHANNEL)
embedded_features = onehot_features + multihot_features
for feature in embedded_features:
assert feature in embedding_sizes
assert isinstance(embedding_sizes[feature], int)
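# Minimal sketch (hypothetical feature names, not shipped with the repo): the
# embedding-sizes file is a flat JSON object mapping every one-hot and
# multi-hot feature name to an integer embedding width.
def _example_embedding_sizes():
    import io

    example = '{"ad_id": 128, "document_id_promo": 128, "topic_id_list": 64}'
    sizes = json.load(io.StringIO(example))
    assert all(isinstance(width, int) for width in sizes.values())
    return sizes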
def create_config(args):
assert not (
args.cpu and args.amp
), "Automatic mixed precision conversion works only with GPU"
assert (
not args.benchmark or args.benchmark_warmup_steps < args.benchmark_steps
), "Number of benchmark steps must be higher than warmup steps"
logger = logging.getLogger("tensorflow")
if args.cpu:
init_cpu(args, logger)
else:
init_gpu(args, logger)
num_gpus = 1 if args.cpu else hvd.size()
train_batch_size = args.global_batch_size // num_gpus
eval_batch_size = args.eval_batch_size // num_gpus
fspec_path = os.path.join(args.dataset_path, args.fspec_file)
feature_spec = FeatureSpec.from_yaml(fspec_path)
feature_spec.check_feature_spec(require_map_channel=args.map_calculation_enabled, world_size=hvd.size())
train_dataset = get_dataset(
feature_spec=feature_spec,
mapping=TRAIN_MAPPING,
batch_size=train_batch_size,
shuffle=True,
map_channel_enabled=False
)
eval_dataset = get_dataset(
feature_spec=feature_spec,
mapping=TEST_MAPPING,
batch_size=eval_batch_size,
shuffle=False,
map_channel_enabled=args.map_calculation_enabled
)
with open(args.embedding_sizes_file) as opened:
embedding_sizes = json.load(opened)
check_embedding_sizes(embedding_sizes, feature_spec)
config = {
"train_dataset": train_dataset,
"eval_dataset": eval_dataset,
"feature_spec": feature_spec,
"embedding_dimensions": embedding_sizes
}
return config
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/trainer/utils/setup.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import dllogger
import horovod.tensorflow as hvd
import numpy as np
import tensorflow as tf
from data.outbrain.dataloader import make_padding_function
class Trainer:
def __init__(
self,
model,
scheduler,
deep_optimizer,
wide_optimizer,
throughput_calculator,
compiled_loss,
steps,
args,
train_dataset,
evaluator,
multihot_hotnesses_dict
):
self.model = model
self.scheduler = scheduler
self.deep_optimizer = deep_optimizer
self.wide_optimizer = wide_optimizer
self.throughput_calculator = throughput_calculator
self.steps = steps
self.steps_per_epoch = steps // args.num_epochs
self.args = args
self.train_dataset = train_dataset
self.evaluator = evaluator
self.compiled_loss = compiled_loss
self.logger = logging.getLogger("tensorflow")
self.multihot_hotnesses_dict = multihot_hotnesses_dict
self.padding_function = make_padding_function(self.multihot_hotnesses_dict)
with tf.device("/CPU:0"):
self.current_step_var = tf.Variable(0, trainable=False, dtype=tf.int64)
self._init_checkpoint_manager()
self.max_steps = steps
def _init_checkpoint_manager(self):
self.checkpoint = tf.train.Checkpoint(
deep_optimizer=self.deep_optimizer,
wide_optimizer=self.wide_optimizer,
model=self.model,
current_step=self.current_step_var,
)
self.manager = tf.train.CheckpointManager(
checkpoint=self.checkpoint,
directory=os.path.join(self.args.model_dir, "checkpoint"),
max_to_keep=1,
)
@property
def current_epoch(self):
        return int(self.current_step_var.numpy()) // self.steps_per_epoch
@property
def max_steps(self):
return self.__max_steps
@max_steps.setter
def max_steps(self, steps):
self.__max_steps = min(self.steps, steps)
def prepare_dataset(self, current_epoch):
benchmark_needed_steps = self.args.benchmark_steps // self.steps_per_epoch + 1
n = self.args.num_epochs - current_epoch if not self.args.benchmark \
else max(benchmark_needed_steps, self.args.num_epochs)
self.train_dataset = self.train_dataset.epochs(n)
def maybe_restore_checkpoint(self):
if self.args.use_checkpoint:
self.checkpoint.restore(self.manager.latest_checkpoint).expect_partial()
if self.manager.latest_checkpoint:
self.logger.warning(
f"Model restored from checkpoint {self.args.model_dir}"
)
if self.args.benchmark:
self.current_step_var.assign(0)
else:
self.logger.warning(
f"Failed to restore model from checkpoint {self.args.model_dir}"
)
@tf.function
def __call__(self, x, y):
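        # The tape is persistent because gradient() is taken twice below:
        # once for the wide (linear) variables and once for the deep ones.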
with tf.GradientTape(persistent=True) as tape:
y_pred = self.model(x, training=True)
loss = self.compiled_loss(y, y_pred)
linear_loss = (
self.wide_optimizer.get_scaled_loss(loss) if self.args.amp else loss
)
deep_loss = (
self.deep_optimizer.get_scaled_loss(loss) if self.args.amp else loss
)
if not self.args.cpu:
tape = hvd.DistributedGradientTape(
tape,
sparse_as_dense=True,
num_groups=1,
compression=hvd.Compression.fp16,
)
linear_vars = self.model.linear_model.trainable_variables
dnn_vars = self.model.dnn_model.trainable_variables
linear_grads = tape.gradient(linear_loss, linear_vars)
dnn_grads = tape.gradient(deep_loss, dnn_vars)
if self.args.amp:
linear_grads = self.wide_optimizer.get_unscaled_gradients(linear_grads)
dnn_grads = self.deep_optimizer.get_unscaled_gradients(dnn_grads)
self.wide_optimizer.apply_gradients(zip(linear_grads, linear_vars))
self.deep_optimizer.apply_gradients(zip(dnn_grads, dnn_vars))
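        # On the very first step, broadcast the freshly initialized weights and
        # optimizer states from rank 0 so all workers start from identical state.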
if self.current_step_var == 0:
hvd.broadcast_variables(self.model.linear_model.variables, root_rank=0)
hvd.broadcast_variables(self.model.dnn_model.variables, root_rank=0)
hvd.broadcast_variables(self.wide_optimizer.variables(), root_rank=0)
hvd.broadcast_variables(self.deep_optimizer.variables(), root_rank=0)
return loss
@tf.function(experimental_relax_shapes=True)
def _execute_step_calculations(self, x, y):
loss = self(x, y)
with tf.device("/CPU:0"):
self.scheduler(tf.cast(self.current_step_var + 1, tf.float32))
self.current_step_var.assign_add(1)
return loss
def log(self, current_step, loss):
train_data = {"loss": np.around(loss.astype(np.float64), 4)}
dllogger.log(data=train_data, step=(current_step, self.max_steps))
def train_step(self, x, y):
# Graph mode part
loss = self._execute_step_calculations(x, y)
# Eager mode part
current_step = int(self.current_step_var.numpy()) - 1
if self.args.benchmark:
self.throughput_calculator(y.shape[0])
elif (self.args.cpu or hvd.rank() == 0) and current_step % 100 == 0:
self.log(current_step, loss.numpy())
def run_loop(self):
eval_data = {}
current_step = int(self.current_step_var.numpy()) + 1
# Graph mode part
for i, (x, y) in enumerate(self.train_dataset, current_step):
x = self.padding_function(x)
self.train_step(x, y)
if not self.args.benchmark and (
i % self.steps_per_epoch == 0 or i == self.max_steps
):
eval_data = self.evaluator.eval(self.current_step_var)
if self.args.cpu or hvd.rank() == 0:
self.manager.save()
if i == self.max_steps:
break
if self.args.cpu or hvd.rank() == 0:
dllogger.log(data=eval_data, step=tuple())
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/trainer/utils/trainer.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dllogger
import horovod.tensorflow as hvd
import numpy as np
import tensorflow as tf
from data.outbrain.dataloader import make_padding_function
from horovod.tensorflow.mpi_ops import Average, Sum
class MapMetric:
def __init__(self, map_column, cpu):
self.map_column = map_column
self.cpu = cpu
with tf.device("/CPU:0"):
self.current_step_var = tf.Variable(0, trainable=False, dtype=tf.int64)
self.map_id_counter = tf.Variable(
0.0, trainable=False, dtype=tf.float64
)
self.streaming_map = tf.Variable(
0.0, name="STREAMING_MAP", trainable=False, dtype=tf.float64
)
def reset_states(self):
self.map_id_counter.assign(1)
self.current_step_var.assign(1)
self.streaming_map.assign(1)
@tf.function
def calculate_map(self, y, predictions, map_ids):
        # flatten all arrays
predictions = tf.reshape(predictions, [-1])
predictions = tf.cast(predictions, tf.float64)
map_ids = tf.reshape(map_ids, [-1])
labels = tf.reshape(y, [-1])
# sort map_ids and reorder other arrays to match
sorted_ids = tf.argsort(map_ids)
map_ids = tf.gather(map_ids, indices=sorted_ids)
predictions = tf.gather(predictions, indices=sorted_ids)
labels = tf.gather(labels, indices=sorted_ids)
        # renumber map ids to 0...n and get counts for each occurrence
_, map_ids_idx, map_ids_count = tf.unique_with_counts(
map_ids, out_idx=tf.int64
)
        # get how many times the most common map id occurs and compute the
        # padding needed to bring every row up to a fixed width of 30
pad_length = 30 - tf.reduce_max(map_ids_count)
# group predictions into rows based on map id idx and turn into tensor
preds = tf.RaggedTensor.from_value_rowids(
predictions, map_ids_idx
).to_tensor()
# ditto for labels
labels = tf.RaggedTensor.from_value_rowids(labels, map_ids_idx).to_tensor()
        # get only rows for which there is a positive label
labels_mask = tf.math.reduce_max(labels, 1)
preds_masked = tf.boolean_mask(preds, labels_mask)
labels_masked = tf.boolean_mask(labels, labels_mask)
# get the position of the positive label
labels_masked = tf.argmax(labels_masked, axis=1, output_type=tf.int32)
labels_masked = tf.reshape(labels_masked, [-1, 1])
# add pad_length zeros to each row of the predictions tensor
preds_masked = tf.pad(preds_masked, [(0, 0), (0, pad_length)])
# get indices of the top 12 predictions for each map id
_, predictions_idx = tf.math.top_k(preds_masked, 12)
# get rows in which the true positive is among our top 12
        # indicators of our hits
indices = tf.math.equal(predictions_idx, labels_masked)
        # indicators of hits per row
indices_mask = tf.math.reduce_any(indices, 1)
masked_indices = tf.boolean_mask(indices, indices_mask)
res = tf.argmax(masked_indices, axis=1)
ap_matrix = tf.divide(1, tf.add(res, 1))
ap_sum = tf.reduce_sum(ap_matrix)
shape = tf.cast(tf.shape(indices)[0], tf.float64)
self.map_id_counter.assign_add(shape)
self.streaming_map.assign_add(ap_sum)
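    # Worked example (hypothetical values): with two map ids, each holding one
    # positive label, a clicked ad ranked 1st for the first id and 3rd for the
    # second yields average precisions 1/1 and 1/3, so reduce_results() returns
    # MAP@12 = (1 + 1/3) / 2 ~= 0.667. A positive outside the top 12 adds
    # nothing to streaming_map but still counts in map_id_counter.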
@tf.function
def reduce_results(self):
if not self.cpu:
all_streaming_map = hvd.allreduce(self.streaming_map, op=Sum)
all_map_id_counter = hvd.allreduce(self.map_id_counter, op=Sum)
else:
all_streaming_map = self.streaming_map
all_map_id_counter = self.map_id_counter
map_metric = tf.divide(all_streaming_map, all_map_id_counter)
return map_metric
class Evaluator:
def __init__(
self,
model,
throughput_calculator,
eval_dataset,
compiled_loss,
args,
maybe_map_column,
multihot_hotnesses_dict,
num_auc_thresholds
):
self.model = model
self.steps_per_epoch = len(eval_dataset)
self.args = args
self.throughput_calculator = throughput_calculator
self.compiled_loss = compiled_loss
self.eval_loss = tf.keras.metrics.Mean()
self.metrics = [tf.keras.metrics.AUC(num_thresholds=num_auc_thresholds,
curve='ROC', summation_method='interpolation',
from_logits=True)]
        self.map_enabled = False
        self.map_column = None
        if maybe_map_column is not None:
            self.map_metric = MapMetric(maybe_map_column, cpu=args.cpu)
            self.map_enabled = True
            self.map_column = maybe_map_column
self.metric_names = ["auc_roc"]
self.eval_dataset = eval_dataset
self.multihot_hotnesses_dict = multihot_hotnesses_dict
self.padding_function = make_padding_function(multihot_hotnesses_dict)
def _reset_states(self):
for metric in self.metrics:
metric.reset_states()
if self.map_enabled:
self.map_metric.reset_states()
self.eval_loss.reset_states()
def prepare_dataset(self, current_epoch):
benchmark_needed_steps = self.args.benchmark_steps // self.steps_per_epoch + 1
        if self.args.benchmark:
            n = max(benchmark_needed_steps, self.args.num_epochs)
        elif self.args.evaluate:
            n = 1
        else:
            n = self.args.num_epochs - current_epoch
self.eval_dataset = self.eval_dataset.epochs(n)
    # TODO: find a nicer way to do this
@tf.function(experimental_relax_shapes=True)
def _execute_step_calculations_with_map(self, x, y, map_ids):
predictions = self.model(x, training=False)
with tf.device("/CPU:0"):
loss = self.compiled_loss(y, predictions)
for metric in self.metrics:
metric.update_state(y, predictions)
self.eval_loss.update_state(loss)
self.map_metric.calculate_map(y, predictions, map_ids)
return loss
@tf.function(experimental_relax_shapes=True)
def _execute_step_calculations_no_map(self, x, y):
predictions = self.model(x, training=False)
with tf.device("/CPU:0"):
loss = self.compiled_loss(y, predictions)
for metric in self.metrics:
metric.update_state(y, predictions)
self.eval_loss.update_state(loss)
return loss
@tf.function
def _reduce_results(self):
if not self.args.cpu:
eval_loss = hvd.allreduce(self.eval_loss.result(), op=Average)
else:
eval_loss = self.eval_loss.result()
return eval_loss
def _reduce_metrics(self):
if self.args.cpu:
return self.metrics
hand_reduced_metrics = []
for metric in self.metrics:
# as of 6.2022, hvd.allgather_object() cannot gather tf.Variable when amp is enabled
# this is a workaround that instead gathers the tensors that merge_state uses
# verified to be equivalent to just allgather and merge_state for keras.AUC
to_gather = list(x.value() for x in metric.weights)
gathered_weights = hvd.allgather_object(to_gather)
if hvd.rank() == 0:
hand_gather_root = metric
hand_gather_root.reset_state()
for list_of_weights in gathered_weights:
for (base_weight, new_weight) in zip(hand_gather_root.weights, list_of_weights):
base_weight.assign_add(new_weight)
hand_reduced_metrics.append(hand_gather_root)
return hand_reduced_metrics
@staticmethod
def log(eval_data, step):
dllogger.log(data=eval_data, step=(step,))
def eval_step(self, x, y):
if self.map_enabled:
map_ids = x.pop(self.map_column)
self._execute_step_calculations_with_map(x, y, map_ids)
else:
            self._execute_step_calculations_no_map(x, y)
if self.args.benchmark:
self.throughput_calculator(y.shape[0], eval_benchmark=True)
def eval(self, step):
eval_data = {}
self._reset_states()
# Graph mode part
for i, (x, y) in enumerate(self.eval_dataset, 1):
x = self.padding_function(x)
self.eval_step(x, y)
if i == self.steps_per_epoch and not self.args.benchmark:
break
eval_loss = self._reduce_results().numpy()
hand_reduced_metrics = self._reduce_metrics()
map_value = None
if self.map_enabled:
map_value = self.map_metric.reduce_results().numpy()
if self.args.cpu or hvd.rank() == 0:
with tf.device("/CPU:0"):
# Eager mode part
current_step = int(step.numpy())
eval_data = {
"loss_val": np.around(eval_loss.astype(np.float64), 4)
}
if map_value is not None:
eval_data["streaming_map_val"] = np.around(map_value, 4)
for metric_name, metric in zip(self.metric_names, hand_reduced_metrics):
eval_data[metric_name] = np.around(metric.result().numpy().astype(np.float64), 4)
self.log(eval_data, current_step)
return eval_data
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/trainer/utils/evaluator.py |