# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
class LearningRateScheduler:
def __init__(self, args, steps_per_epoch, optimizer):
assert args.deep_warmup_epochs <= args.num_epochs, \
"Number of warmup epochs cannot be higher than training epochs"
self.base_lr = args.deep_learning_rate
self.warmup_steps = args.deep_warmup_epochs * steps_per_epoch
bound_epoch = args.deep_warmup_epochs + (args.num_epochs - args.deep_warmup_epochs) / 2
self.boundaries = [bound_epoch * steps_per_epoch]
self.values = [self.base_lr / 4, self.base_lr / 8]
self.optimizer = optimizer
@tf.function
def __call__(self, step):
if step < self.warmup_steps:
warmup_lr = self.base_lr * step / self.warmup_steps
self.optimizer.lr.assign(warmup_lr)
else:
index = tf.reduce_sum(tf.cast(step > self.boundaries, tf.int64))
value = tf.gather(self.values, index)
self.optimizer.lr.assign(value)
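# Minimal usage sketch (illustrative only, not part of the original trainer).
# The scheduler mutates the wrapped optimizer's learning rate in place, so it
# is invoked once per training step with a float step counter. The `args`
# fields below are hypothetical stand-ins for the real CLI arguments read in
# __init__: linear warmup for the first epoch, then piecewise-constant decay.
def _example_scheduler_usage():
    from types import SimpleNamespace
    args = SimpleNamespace(deep_learning_rate=1e-3, deep_warmup_epochs=1, num_epochs=4)
    optimizer = tf.keras.optimizers.Adam()
    scheduler = LearningRateScheduler(args, steps_per_epoch=100, optimizer=optimizer)
    for step in tf.range(400.0):  # float steps, matching the float boundaries above
        scheduler(step)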
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/trainer/utils/schedulers.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import tensorflow as tf
from data.feature_spec import FeatureSpec
from trainer.model import layers as nvtlayers
from data.outbrain.defaults import NUMERICAL_CHANNEL, ONEHOT_CHANNEL, MULTIHOT_CHANNEL
def get_feature_columns(fspec: FeatureSpec, embedding_dimensions: dict, combiner):
logger = logging.getLogger("tensorflow")
wide_columns, deep_columns = [], []
categorical_columns = fspec.get_names_by_channel(ONEHOT_CHANNEL) + fspec.get_names_by_channel(MULTIHOT_CHANNEL)
cardinalities = fspec.get_cardinalities(features=categorical_columns)
for column_name in categorical_columns:
categorical_column = tf.feature_column.categorical_column_with_identity(
column_name, num_buckets=cardinalities[column_name]
)
wrapped_column = tf.feature_column.embedding_column(
categorical_column,
dimension=embedding_dimensions[column_name],
combiner=combiner,
)
wide_columns.append(categorical_column)
deep_columns.append(wrapped_column)
numerics = [
tf.feature_column.numeric_column(column_name, shape=(1,), dtype=tf.float32)
for column_name in fspec.get_names_by_channel(NUMERICAL_CHANNEL)
]
wide_columns.extend(numerics)
deep_columns.extend(numerics)
logger.warning("deep columns: {}".format(len(deep_columns)))
logger.warning("wide columns: {}".format(len(wide_columns)))
logger.warning(
"wide&deep intersection: {}".format(
len(set(wide_columns).intersection(set(deep_columns)))
)
)
return wide_columns, deep_columns
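# Illustrative call (added for exposition): `embedding_dimensions` maps each
# categorical feature name to its embedding width; a uniform width and the
# "sum" combiner are arbitrary choices here, not the repo's configuration.
def _example_get_feature_columns(fspec: FeatureSpec):
    categorical = fspec.get_names_by_channel(ONEHOT_CHANNEL) + fspec.get_names_by_channel(MULTIHOT_CHANNEL)
    embedding_dimensions = {name: 64 for name in categorical}
    return get_feature_columns(fspec, embedding_dimensions, combiner="sum")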
def get_input_features(feature_spec):
features = {}
numeric_columns = feature_spec.get_names_by_channel(NUMERICAL_CHANNEL)
onehot_columns = feature_spec.get_names_by_channel(ONEHOT_CHANNEL)
multihot_columns = feature_spec.get_names_by_channel(MULTIHOT_CHANNEL)
# Numerical
for feature in numeric_columns:
features[feature] = tf.keras.Input(
shape=(1,), batch_size=None, name=feature, dtype=tf.float32, sparse=False
)
# Categorical (One-hot)
for feature in onehot_columns:
features[feature] = tf.keras.Input(
shape=(1,), batch_size=None, name=feature, dtype=tf.int32, sparse=False
)
# Categorical (Multi-hot)
multihot_hotness_dict = feature_spec.get_multihot_hotnesses(multihot_columns)
for feature, hotness in multihot_hotness_dict.items():
features[feature] = tf.keras.Input(
shape=(hotness,),
batch_size=None,
name=f"{feature}",
dtype=tf.int32,
sparse=False,
)
return features
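# Compilation sketch for the model produced by wide_deep_model below (added
# for exposition). tf.keras.experimental.WideDeepModel accepts one optimizer
# per branch; the optimizers, loss, and learning rates here are illustrative
# choices, not the repo's exact training configuration.
def _example_compile_wide_deep(model):
    model.compile(
        optimizer=[
            tf.keras.optimizers.Ftrl(learning_rate=0.1),    # wide (linear) branch
            tf.keras.optimizers.Adam(learning_rate=0.001),  # deep (dnn) branch
        ],
        loss=tf.keras.losses.BinaryCrossentropy(),
        metrics=[tf.keras.metrics.AUC()],
    )
    return model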
def wide_deep_model(args, feature_spec, embedding_dimensions):
wide_columns, deep_columns = get_feature_columns(fspec=feature_spec,
embedding_dimensions=embedding_dimensions,
combiner=args.combiner)
features = get_input_features(feature_spec)
wide = nvtlayers.LinearFeatures(wide_columns, name="wide_linear")(features)
dnn = nvtlayers.DenseFeatures(deep_columns, name="deep_embedded")(features)
for unit_size in args.deep_hidden_units:
dnn = tf.keras.layers.Dense(units=unit_size, activation="relu")(dnn)
dnn = tf.keras.layers.Dropout(rate=args.deep_dropout)(dnn)
dnn = tf.keras.layers.Dense(units=1)(dnn)
dnn_model = tf.keras.Model(inputs=features, outputs=dnn)
linear_model = tf.keras.Model(inputs=features, outputs=wide)
model = tf.keras.experimental.WideDeepModel(
linear_model, dnn_model, activation="sigmoid"
)
    return model, features
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/trainer/model/widedeep.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorflow as tf
from tensorflow.python.feature_column import feature_column_v2 as fc
# pylint has issues with TF array ops, so disable checks until fixed:
# https://github.com/PyCQA/pylint/issues/3613
# pylint: disable=no-value-for-parameter, unexpected-keyword-arg
def _sort_columns(feature_columns):
return sorted(feature_columns, key=lambda col: col.name)
def _validate_numeric_column(feature_column):
if len(feature_column.shape) > 1:
return (
"Matrix numeric features are not allowed, "
"found feature {} with shape {}".format(
feature_column.key, feature_column.shape
)
)
def _validate_categorical_column(feature_column):
if not isinstance(feature_column, fc.IdentityCategoricalColumn):
return (
"Only acceptable categorical columns for feeding "
"embeddings are identity, found column {} of type {}. "
"Consider using NVTabular online preprocessing to perform "
"categorical transformations".format(
feature_column.name, type(feature_column).__name__
)
)
def _validate_dense_feature_columns(feature_columns):
_errors = []
for feature_column in feature_columns:
if isinstance(feature_column, fc.CategoricalColumn):
if not isinstance(feature_column, fc.BucketizedColumn):
_errors.append(
"All feature columns must be dense, found categorical "
"column {} of type {}. Please wrap categorical columns "
"in embedding or indicator columns before passing".format(
feature_column.name, type(feature_column).__name__
)
)
else:
_errors.append(
"Found bucketized column {}. DenseFeatures layer "
"cannot apply bucketization preprocessing. Consider using "
"NVTabular to do preprocessing offline".format(feature_column.name)
)
elif isinstance(feature_column, (fc.EmbeddingColumn, fc.IndicatorColumn)):
_errors.append(
_validate_categorical_column(feature_column.categorical_column)
)
elif isinstance(feature_column, fc.NumericColumn):
_errors.append(_validate_numeric_column(feature_column))
_errors = list(filter(lambda e: e is not None, _errors))
if len(_errors) > 0:
msg = "Found issues with columns passed to DenseFeatures:"
msg += "\n\t".join(_errors)
raise ValueError(_errors)
def _validate_stack_dimensions(feature_columns):
dims = []
for feature_column in feature_columns:
if isinstance(feature_column, fc.EmbeddingColumn):
dimension = feature_column.dimension
elif isinstance(feature_column, fc.IndicatorColumn):
dimension = feature_column.categorical_column.num_buckets
else:
dimension = feature_column.shape[0]
dims.append(dimension)
dim0 = dims[0]
if not all(dim == dim0 for dim in dims[1:]):
dims = ", ".join(map(str, dims))
raise ValueError(
"'stack' aggregation requires all categorical "
"embeddings and continuous features to have same "
"size. Found dimensions {}".format(dims)
)
def _categorical_embedding_lookup(table, inputs, feature_name, combiner):
# Multi-hots
if inputs[feature_name].shape[1] > 1:
# Multi-hot embedding lookup
x = inputs[feature_name]
embeddings = tf.gather(table, x)
# Remove padded values
# This is an inverse of dataloader pad_batch
mask_array = tf.cast(x >= 0, embeddings.dtype)
mask = tf.expand_dims(mask_array, -1)
embeddings = tf.math.multiply(embeddings, mask)
# Sum aggregation
embeddings = tf.reduce_sum(embeddings, axis=1)
        # Divide by the number of non-padded values for mean aggregation
if combiner == "mean":
row_lengths = tf.reduce_sum(mask_array, axis=1)
row_lengths = tf.cast(row_lengths, embeddings.dtype)
row_lengths = tf.expand_dims(row_lengths, -1)
embeddings = tf.math.divide_no_nan(embeddings, row_lengths)
else:
embeddings = tf.gather(table, inputs[feature_name][:, 0])
return embeddings
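# Miniature example (added for exposition): with a 3-row embedding table and a
# single multi-hot sample referencing rows 0 and 2, the "mean" combiner above
# averages the two gathered embeddings. Padded slots (negative ids) would be
# zeroed out by the `x >= 0` mask; this toy input has none.
def _example_multihot_mean_lookup():
    table = tf.constant([[1.0, 0.0], [0.0, 0.0], [3.0, 2.0]])
    inputs = {"feature": tf.constant([[0, 2]])}
    return _categorical_embedding_lookup(table, inputs, "feature", "mean")  # -> [[2.0, 1.0]]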
def _handle_continuous_feature(inputs, feature_column):
if feature_column.shape[0] > 1:
x = inputs[feature_column.name]
if isinstance(x, tuple):
x = x[0]
return tf.reshape(x, (-1, feature_column.shape[0]))
return inputs[feature_column.name]
class DenseFeatures(tf.keras.layers.Layer):
"""
Layer which maps a dictionary of input tensors to a dense, continuous
vector digestible by a neural network. Meant to reproduce the API exposed
by `tf.keras.layers.DenseFeatures` while reducing overhead for the
case of one-hot categorical and scalar numeric features.
Uses TensorFlow `feature_column` to represent inputs to the layer, but
does not perform any preprocessing associated with those columns. As such,
it should only be passed `numeric_column` objects and their subclasses,
`embedding_column` and `indicator_column`. Preprocessing functionality should
be moved to NVTabular.
For multi-hot categorical or vector continuous data, represent the data for
a feature with a dictionary entry `"<feature_name>__values"` corresponding
to the flattened array of all values in the batch. For multi-hot categorical
data, there should be a corresponding `"<feature_name>__nnzs"` entry that
describes how many categories are present in each sample (and so has length
`batch_size`).
Note that categorical columns should be wrapped in embedding or
indicator columns first, consistent with the API used by
`tf.keras.layers.DenseFeatures`.
Example usage::
column_a = tf.feature_column.numeric_column("a", (1,))
column_b = tf.feature_column.categorical_column_with_identity("b", 100)
column_b_embedding = tf.feature_column.embedding_column(column_b, 4)
inputs = {
"a": tf.keras.Input(name="a", shape=(1,), dtype=tf.float32),
"b": tf.keras.Input(name="b", shape=(1,), dtype=tf.int64)
}
x = DenseFeatures([column_a, column_b_embedding])(inputs)
Parameters
----------
feature_columns : list of `tf.feature_column`
feature columns describing the inputs to the layer
aggregation : str in ("concat", "stack")
how to combine the embeddings from multiple features
"""
def __init__(self, feature_columns, aggregation="concat", name=None, **kwargs):
# sort feature columns to make layer independent of column order
feature_columns = _sort_columns(feature_columns)
_validate_dense_feature_columns(feature_columns)
if aggregation == "stack":
_validate_stack_dimensions(feature_columns)
elif aggregation != "concat":
raise ValueError(
"Unrecognized aggregation {}, must be stack or concat".format(
aggregation
)
)
self.feature_columns = feature_columns
self.aggregation = aggregation
super(DenseFeatures, self).__init__(name=name, **kwargs)
def build(self, input_shapes):
self.embedding_tables = {}
for feature_column in self.feature_columns:
if isinstance(feature_column, fc.NumericColumn):
continue
feature_name = feature_column.categorical_column.key
num_buckets = feature_column.categorical_column.num_buckets
if isinstance(feature_column, fc.EmbeddingColumn):
self.embedding_tables[feature_name] = self.add_weight(
name="{}/embedding_weights".format(feature_name),
trainable=True,
initializer="glorot_normal",
shape=(num_buckets, feature_column.dimension),
)
else:
self.embedding_tables[feature_name] = self.add_weight(
name="{}/embedding_weights".format(feature_name),
trainable=False,
initializer=tf.constant_initializer(np.eye(num_buckets)),
shape=(num_buckets, num_buckets),
)
self.built = True
def call(self, inputs):
features = []
for feature_column in self.feature_columns:
if isinstance(feature_column, fc.NumericColumn):
x = _handle_continuous_feature(inputs, feature_column)
features.append(x)
else:
feature_name = feature_column.categorical_column.name
table = self.embedding_tables[feature_name]
combiner = getattr(feature_column, "combiner", "sum")
embeddings = _categorical_embedding_lookup(
table, inputs, feature_name, combiner
)
features.append(embeddings)
if self.aggregation == "stack":
return tf.stack(features, axis=1)
return tf.concat(features, axis=1)
    def compute_output_shape(self, input_shapes):
        input_shape = list(input_shapes.values())[0]
        # collect per-feature output widths, mirroring _validate_stack_dimensions
        dims = []
        for feature_column in self.feature_columns:
            if isinstance(feature_column, fc.EmbeddingColumn):
                dims.append(feature_column.dimension)
            elif isinstance(feature_column, fc.IndicatorColumn):
                dims.append(feature_column.categorical_column.num_buckets)
            else:
                dims.append(feature_column.shape[0])
        if self.aggregation == "concat":
            return (input_shape[0], sum(dims))
        # "stack": all widths were validated to be equal at construction time
        return (input_shape[0], len(dims), dims[0])
def get_config(self):
return {
"feature_columns": self.feature_columns,
"aggregation": self.aggregation,
}
def _validate_linear_feature_columns(feature_columns):
_errors = []
for feature_column in feature_columns:
if isinstance(feature_column, (fc.EmbeddingColumn, fc.IndicatorColumn)):
_errors.append(
"Only pass categorical or numeric columns to ScalarLinearFeatures "
"layer, found column {} of type".format(feature_column)
)
elif isinstance(feature_column, fc.NumericColumn):
_errors.append(_validate_numeric_column(feature_column))
else:
_errors.append(_validate_categorical_column(feature_column))
_errors = list(filter(lambda e: e is not None, _errors))
if len(_errors) > 0:
msg = "Found issues with columns passed to ScalarDenseFeatures:"
msg += "\n\t".join(_errors)
raise ValueError(_errors)
# TODO: is there a clean way to combine these two layers
# into one, maybe with a "sum" aggregation? Major differences
# seem to be whether categorical columns are wrapped in
# embeddings and the numeric matmul, both of which seem
# reasonably easy to check. At the very least, we should
# be able to subclass I think?
class LinearFeatures(tf.keras.layers.Layer):
"""
Layer which implements a linear combination of one-hot categorical
and scalar numeric features. Based on the "wide" branch of the Wide & Deep
network architecture.
Uses TensorFlow ``feature_column``s to represent inputs to the layer, but
does not perform any preprocessing associated with those columns. As such,
it should only be passed ``numeric_column`` and
``categorical_column_with_identity``. Preprocessing functionality should
be moved to NVTabular.
    Also note that, unlike DenseFeatures, categorical columns should
    NOT be wrapped in embedding or indicator columns first.
Example usage::
column_a = tf.feature_column.numeric_column("a", (1,))
column_b = tf.feature_column.categorical_column_with_identity("b", 100)
inputs = {
"a": tf.keras.Input(name="a", shape=(1,), dtype=tf.float32),
"b": tf.keras.Input(name="b", shape=(1,), dtype=tf.int64)
}
        x = LinearFeatures([column_a, column_b])(inputs)
Parameters
----------
feature_columns : list of tf.feature_column
feature columns describing the inputs to the layer
"""
def __init__(self, feature_columns, name=None, **kwargs):
feature_columns = _sort_columns(feature_columns)
_validate_linear_feature_columns(feature_columns)
self.feature_columns = feature_columns
super(LinearFeatures, self).__init__(name=name, **kwargs)
def build(self, input_shapes):
        # TODO: I've tried combining all the categorical tables
        # into a single giant lookup op, but it turns out that
        # adding the offsets to lookup indices at call time is
        # much slower due to kernel overhead. Still, a better
        # (and probably custom) solution would be desirable.
numeric_kernel_dim = 0
self.embedding_tables = {}
for feature_column in self.feature_columns:
if isinstance(feature_column, fc.NumericColumn):
numeric_kernel_dim += feature_column.shape[0]
continue
self.embedding_tables[feature_column.key] = self.add_weight(
name="{}/embedding_weights".format(feature_column.key),
initializer="zeros",
trainable=True,
shape=(feature_column.num_buckets, 1),
)
if numeric_kernel_dim > 0:
self.embedding_tables["numeric"] = self.add_weight(
name="numeric/embedding_weights",
initializer="zeros",
trainable=True,
shape=(numeric_kernel_dim, 1),
)
self.bias = self.add_weight(
name="bias", initializer="zeros", trainable=True, shape=(1,)
)
self.built = True
def call(self, inputs):
x = self.bias
numeric_inputs = []
for feature_column in self.feature_columns:
if isinstance(feature_column, fc.NumericColumn):
numeric_inputs.append(
_handle_continuous_feature(inputs, feature_column)
)
else:
table = self.embedding_tables[feature_column.key]
embeddings = _categorical_embedding_lookup(
table, inputs, feature_column.key, "sum"
)
x = x + embeddings
if len(numeric_inputs) > 0:
numerics = tf.concat(numeric_inputs, axis=1)
x = x + tf.matmul(numerics, self.embedding_tables["numeric"])
return x
def compute_output_shape(self, input_shapes):
batch_size = list(input_shapes.values())[0].shape[0]
return (batch_size, 1)
def get_config(self):
return {
"feature_columns": self.feature_columns,
}
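# Combined sketch (added for exposition): the two layers above are designed to
# share one input dictionary, with LinearFeatures taking raw identity
# categorical columns and DenseFeatures taking their embedding-wrapped
# counterparts, mirroring how the Wide & Deep trainer wires its two branches.
def _example_wide_and_deep_branches():
    num = tf.feature_column.numeric_column("a", (1,))
    cat = tf.feature_column.categorical_column_with_identity("b", 100)
    inputs = {
        "a": tf.keras.Input(name="a", shape=(1,), dtype=tf.float32),
        "b": tf.keras.Input(name="b", shape=(1,), dtype=tf.int64),
    }
    wide = LinearFeatures([num, cat])(inputs)
    deep = DenseFeatures([num, tf.feature_column.embedding_column(cat, 8)])(inputs)
    return wide, deep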
| DeepLearningExamples-master | TensorFlow2/Recommendation/WideAndDeep/trainer/model/layers.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
setup(name="sim",
package_dir={'sim': 'sim'},
version="1.0.0",
description="Reimplementation of Search-based User Interest Modeling",
packages=find_packages()
)
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/setup.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
from pathlib import Path
import click
import dllogger
import horovod.tensorflow as hvd
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from sim.data.dataloader import get_dataloader_tfrecord
from sim.data.defaults import FILES_SELECTOR, TEST_MAPPING, TRAIN_MAPPING
from sim.data.feature_spec import FeatureSpec
from sim.models.dien_model import DIENModel
from sim.models.din_model import DINModel
from sim.models.sim_model import SIMModel
from sim.utils.benchmark import PerformanceCalculator
from sim.utils.gpu_affinity import set_affinity
from sim.utils.losses import build_sim_loss_fn, dien_auxiliary_loss_fn
from sim.utils.misc import csv_str_to_int_list, dist_print
def init_checkpoint_manager(model, optimizer, save_checkpoint_path, load_checkpoint_path):
checkpoint = tf.train.Checkpoint(
model=model,
optimizer=optimizer,
epoch=tf.Variable(-1, name='epoch')
)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint=checkpoint,
directory=save_checkpoint_path,
max_to_keep=1,
)
if load_checkpoint_path != "":
_maybe_restore_checkpoint(
checkpoint=checkpoint,
checkpoint_path=load_checkpoint_path
)
return checkpoint_manager
def _maybe_restore_checkpoint(checkpoint, checkpoint_path):
# Needed here to support different save and load checkpoint paths
checkpoint_manager = tf.train.CheckpointManager(
checkpoint=checkpoint,
directory=checkpoint_path,
max_to_keep=1,
)
checkpoint.restore(checkpoint_manager.latest_checkpoint).expect_partial()
if checkpoint_manager.latest_checkpoint:
dist_print(f"Model restored from checkpoint {checkpoint_path}")
else:
dist_print(f"Failed to restore model from checkpoint {checkpoint_path}")
def init_logger(results_dir, filename):
if hvd.rank() == 0:
os.makedirs(results_dir, exist_ok=True)
log_path = os.path.join(results_dir, filename)
dllogger.init(
backends=[
dllogger.JSONStreamBackend(
verbosity=dllogger.Verbosity.VERBOSE, filename=log_path
),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE),
]
)
dllogger.metadata("test_auc", {"unit": None})
dllogger.metadata("latency_p90", {"unit": "ms"})
dllogger.metadata("train_loss", {"unit": None})
dllogger.metadata("time_to_train", {"unit": "s"})
dllogger.metadata("throughput", {"unit": "samples/s"})
else:
dllogger.init(backends=[])
# In the future, select one of the available dataloaders here (tfrecord, csv, etc.)
def get_data_iterator(paths, feature_spec, batch_size, num_gpus, long_seq_length, prefetch_size, num_parallel_calls=None, repeat_count=0,
drop_remainder=False, amp=False, disable_cache=False, prebatch_size=0):
return get_dataloader_tfrecord(
paths,
feature_spec=feature_spec,
batch_size=batch_size,
long_seq_length=long_seq_length,
num_gpus=num_gpus,
id=hvd.rank(),
drop_remainder=drop_remainder,
repeat_count=repeat_count,
disable_cache=disable_cache,
prefetch_buffer_size=prefetch_size,
num_parallel_calls=num_parallel_calls,
prebatch_size=prebatch_size
)
def build_model_and_loss(model_params):
model_type = model_params["model_type"]
if model_type == "sim":
model = SIMModel(
model_params['feature_spec'],
mlp_hidden_dims=model_params["mlp_hidden_dims"],
embedding_dim=model_params["embedding_dim"],
dropout_rate=model_params["dropout_rate"]
)
classification_loss_fn = build_sim_loss_fn()
@tf.function
def model_fn(batch, training=True):
input_data, targets = batch
# take the mask for N-1 timesteps from prepared input data
mask_for_aux_loss = input_data["short_sequence_mask"][:, 1:]
# model forward pass
output_dict = model(input_data, training=training)
# compute loss
classification_loss = classification_loss_fn(
targets, output_dict["stage_one_logits"], output_dict["stage_two_logits"]
)
dien_aux_loss = dien_auxiliary_loss_fn(
output_dict["aux_click_probs"],
output_dict["aux_noclick_probs"],
mask=mask_for_aux_loss,
)
total_loss = classification_loss + dien_aux_loss
logits = output_dict["stage_two_logits"]
loss_dict = {
"total_loss": total_loss,
"classification_loss": classification_loss,
"dien_aux_loss": dien_aux_loss
}
return (targets, logits), loss_dict
elif model_type == "dien":
model = DIENModel(
model_params['feature_spec'],
mlp_hidden_dims={
"classifier": model_params["mlp_hidden_dims"]["stage_2"],
"aux": model_params["mlp_hidden_dims"]["aux"],
},
embedding_dim=model_params["embedding_dim"],
)
classification_loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)
@tf.function
def model_fn(batch, training=True):
input_data, targets = batch
# take the mask for N-1 timesteps from prepared input data
mask_for_aux_loss = input_data["short_sequence_mask"][:, 1:]
# model forward pass
output_dict = model(input_data, training=training)
# compute loss
classification_loss = classification_loss_fn(targets, output_dict["logits"])
dien_aux_loss = dien_auxiliary_loss_fn(
output_dict["aux_click_probs"],
output_dict["aux_noclick_probs"],
mask=mask_for_aux_loss,
)
total_loss = classification_loss + dien_aux_loss
logits = output_dict["logits"]
loss_dict = {
"total_loss": total_loss,
"classification_loss": classification_loss,
"dien_aux_loss": dien_aux_loss
}
return (targets, logits), loss_dict
elif model_type == "din":
model = DINModel(
model_params['feature_spec'],
mlp_hidden_dims=model_params["mlp_hidden_dims"]["stage_2"],
embedding_dim=model_params["embedding_dim"]
)
classification_loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)
@tf.function
def model_fn(batch, training=True):
input_data, targets = batch
# model forward pass
output_dict = model(input_data, training=training)
# compute loss
total_loss = classification_loss_fn(
targets, output_dict["logits"]
)
logits = output_dict["logits"]
loss_dict = {"total_loss": total_loss}
return (targets, logits), loss_dict
return model, model_fn
@tf.function
def _update_auc(auc_accumulator, targets, logits):
auc_accumulator.update_state(targets, logits)
def eval(model_fn, data_iterator, num_thresholds=8000, prefix=""):
auc_accumulator = tf.keras.metrics.AUC(
num_thresholds=num_thresholds, name="auc_accumulator", from_logits=True
)
distributed = hvd.size() != 1
local_logits = []
local_targets = []
local_total_losses = []
for batch in data_iterator:
(targets, logits), loss_dict = model_fn(batch, training=False)
local_logits.append(logits)
local_targets.append(targets)
local_total_losses.append(loss_dict["total_loss"])
locals = [local_logits, local_targets, local_total_losses]
for i, local in enumerate(locals):
# wrap empty lists in tensor to allow tf.concat
if len(local) == 0:
local = tf.constant(local)
# concat all local variables into a single tensor
if local is local_total_losses:
local = tf.stack(local, 0)
else:
local = tf.concat(local, 0)
# for single element lists, tf.concat will produce shape=() instead of shape=(1,).
# reshape it for hvd.allgather to work
if len(local.shape) == 0:
local = tf.reshape(local, -1)
locals[i] = local
logits, targets, total_losses = locals
if distributed:
# gather from all nodes
logits = hvd.allgather(logits)
targets = hvd.allgather(targets)
total_losses = hvd.allgather(total_losses)
if hvd.rank() == 0:
# need to convert it to a dataset first
split_batch_size = local_logits[0].shape[0]
metrics_ds = tf.data.Dataset.from_tensor_slices((targets, logits)).batch(split_batch_size)
# run batched version of metrics update
for targets, logits in metrics_ds:
_update_auc(auc_accumulator, targets, logits)
loss = tf.reduce_mean(total_losses).numpy().item()
auc = auc_accumulator.result().numpy().item()
else:
loss = 0.
auc = 0.
return {f"{prefix}auc": auc, f"{prefix}loss": loss}
@tf.function
def model_step(batch, model, model_fn, optimizer, amp, first_batch):
with tf.GradientTape() as tape:
_, loss_dict = model_fn(batch, training=True)
loss = loss_dict["total_loss"]
scaled_loss = optimizer.get_scaled_loss(loss) if amp else loss
tape = hvd.DistributedGradientTape(tape, sparse_as_dense=True, compression=hvd.Compression.fp16)
grads = tape.gradient(scaled_loss, model.trainable_variables)
grads = optimizer.get_unscaled_gradients(grads) if amp else grads
optimizer.apply_gradients(zip(grads, model.trainable_variables))
if first_batch:
hvd.broadcast_variables(model.variables, root_rank=0)
hvd.broadcast_variables(optimizer.variables(), root_rank=0)
return loss_dict
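# Note (added for exposition): wrapping the tape with hvd.DistributedGradientTape
# inside the tf.function above follows Horovod's recommended TF2 pattern, and
# broadcasting variables after the very first step ensures every worker starts
# from rank 0's initial model and optimizer state.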
def run_single_epoch(model, model_fn, data_iterator, optimizer, amp, start_epoch, epoch, benchmark, performance_calculator):
for current_step, batch in enumerate(data_iterator):
if benchmark and performance_calculator.completed:
break
is_first_batch = (current_step == 0 and epoch == 0)
step_dict = model_step(batch, model, model_fn, optimizer, amp, is_first_batch)
step_dict = {key: val.numpy().item() for key, val in step_dict.items()}
n_samples = len(batch[1])
step_throughput = performance_calculator(n_samples)
step_dict["samples/s"] = step_throughput
dllogger.log(data=step_dict, step=(start_epoch + epoch, current_step))
def train(model, model_fn, data_iterator_train, data_iterator_test, optimizer, amp, epochs,
benchmark, performance_calculator, save_checkpoint, checkpoint_manager):
"""Train and evaluate the model for a given number of epochs."""
performance_calculator.init()
all_epochs_results = []
start_epoch = checkpoint_manager.checkpoint.epoch.numpy().item() + 1
for epoch in range(epochs - start_epoch):
run_single_epoch(model, model_fn, data_iterator_train, optimizer, amp, start_epoch, epoch, benchmark, performance_calculator)
if not benchmark:
# we dump throughput results for consecutive epochs for a regular training job (w/o --benchmark flag)
results_data = performance_calculator.get_current_benchmark_results()
all_epochs_results.append(results_data)
results_eval_train = eval(model_fn, data_iterator_train, prefix="train_")
results_eval_test = eval(model_fn, data_iterator_test, prefix="test_")
results_data.update(results_eval_train)
results_data.update(results_eval_test)
if save_checkpoint:
                checkpoint_manager.checkpoint.epoch.assign(start_epoch + epoch)
checkpoint_manager.save()
if hvd.rank() == 0:
dllogger.log(data=results_data, step=(start_epoch + epoch,))
performance_calculator.init() # restart for another epoch
elif performance_calculator.completed:
break
if benchmark:
results_perf = performance_calculator.results
if not performance_calculator.completed:
# model steps have been exhausted or all steps should be included to calculate throughput
results_perf = performance_calculator.get_current_benchmark_results()
if hvd.rank() == 0:
dllogger.log(data=results_perf, step=tuple())
else:
# calculate convergence metrics
time_to_train = sum([epoch_result['time'] for epoch_result in all_epochs_results])
results = {'time_to_train': time_to_train}
results.update(results_eval_train)
results.update(results_eval_test)
if hvd.rank() == 0:
dllogger.log(data=results, step=tuple())
def inference(model, data_iterator, benchmark, performance_calculator):
"""Forward pass for the model and data loader given."""
performance_calculator.init()
for current_step, (input_data, targets) in enumerate(data_iterator):
if benchmark and performance_calculator.completed:
break
model(input_data, training=False, compute_aux_loss=False)
step_throughput = performance_calculator(len(targets))
dllogger.log(data={"samples/s": step_throughput}, step=(0, current_step))
results_perf = performance_calculator.results
if not performance_calculator.completed:
results_perf = performance_calculator.get_current_benchmark_results()
if hvd.rank() == 0:
dllogger.log(data=results_perf, step=tuple())
@click.command()
@click.option(
"--mode",
default="train",
help="Script mode: available options are 'train' to train and evaluate the model "
"and 'inference' to perform forward pass over a given dataset",
type=click.Choice(["train", "inference"]),
)
@click.option(
"--dataset_dir",
required=True,
help="Path to the dataset directory.",
type=str,
)
@click.option(
"--feature_spec",
default='feature_spec.yaml',
help="Name of the feature spec file in the dataset directory.",
type=str
)
@click.option(
"--results_dir",
default="/tmp/sim",
help="Path to the model files storage.",
type=str,
)
@click.option(
"--log_filename",
default="log.json",
help="Name of the file to store dllogger output.",
type=str,
)
@click.option(
"--long_seq_length",
default=90,
help="length of long history sequence",
type=int
)
@click.option(
"--optimizer",
default="adam",
help="Optimizer to use [adam/lazy_adam/sgd].",
type=click.Choice(["adam", "lazy_adam", "sgd"]),
)
@click.option(
"--affinity",
default="socket_unique_interleaved",
help="Type of CPU affinity",
type=click.Choice([
"socket",
"single",
"single_unique",
"socket_unique_interleaved",
"socket_unique_continuous",
"disabled",
],
),
)
@click.option(
"--seed", default=-1, help="Random seed.", type=int
)
@click.option(
"--lr", default=0.01, help="Learning rate of the selected optimizer.", type=float
)
@click.option(
"--dropout_rate", default=-1, help="Dropout rate for all the classification MLPs (default: -1, disabled).",
type=float
)
@click.option(
"--weight_decay", default=0, help="Parameters decay of the selected optimizer.", type=float
)
@click.option(
"--embedding_dim", default=16, help="Embedding dimension.", type=int
)
@click.option(
"--global_batch_size", default=131072, help="Batch size used to train/eval the model.", type=int
)
@click.option(
"--num_parallel_calls", default=None, help="Parallelism level for tf.data API. If None, heuristic based on number of CPUs and number of GPUs will be used."
)
@click.option(
"--epochs", default=3, help="Train for the following number of epochs.", type=int
)
@click.option("--disable_cache", help="disable dataset caching.", is_flag=True)
@click.option("--drop_remainder", help="Drop remainder batch for training set.", is_flag=True)
@click.option(
"--repeat_count", default=0, help="Repeat training dataset this number of times.", type=int
)
@click.option(
"--benchmark",
is_flag=True
)
@click.option(
"--benchmark_steps",
default=0,
help="Number of steps to use for performance benchmarking. Use benchmark_steps <= 0 to include all iterations. "
"This parameter has no effect when the script is launched without --benchmark flag.",
type=int
)
@click.option(
"--benchmark_warmup_steps",
default=20,
help="Number of warmup steps to use for performance benchmarking (benchmark_warmup_steps <= 0 means no warmup).",
type=int
)
@click.option(
"--stage_one_mlp_dims",
default="200",
help="MLP hidden dimensions for stage one (excluding classification output).",
type=str,
)
@click.option(
"--stage_two_mlp_dims",
default="200,80",
help="MLP hidden dimensions for stage two (excluding classification output).",
type=str,
)
@click.option(
"--aux_mlp_dims",
default="100,50",
help="MLP hidden dimensions for aux loss (excluding classification output).",
type=str,
)
@click.option(
"--model_type",
default="sim",
type=click.Choice(["sim", "din", "dien"])
)
@click.option("--save_checkpoint_path", default="", type=str)
@click.option("--load_checkpoint_path", default="", type=str)
@click.option("--amp", is_flag=True)
@click.option("--xla", is_flag=True)
@click.option(
"--inter_op_parallelism",
default=0,
help="Number of inter op threads.",
type=int
)
@click.option(
"--intra_op_parallelism",
default=0,
help="Number of intra op threads.",
type=int
)
@click.option(
"--prefetch_train_size",
default=10,
help="Number of batches to prefetch in training. "
)
@click.option(
"--prefetch_test_size",
default=2,
help="Number of batches to prefetch in testing"
)
@click.option(
"--prebatch_train_size",
default=0,
help="Information about batch size applied during preprocessing to train dataset"
)
@click.option(
"--prebatch_test_size",
default=0,
help="Information about batch size applied during preprocessing to test dataset"
)
def main(
mode: str,
dataset_dir: str,
feature_spec: str,
results_dir: str,
log_filename: str,
long_seq_length: int,
save_checkpoint_path: str,
load_checkpoint_path: str,
model_type: str,
optimizer: str,
affinity: str,
seed: int,
lr: float,
dropout_rate: float,
weight_decay: float,
embedding_dim: int,
global_batch_size: int,
num_parallel_calls: int,
epochs: int,
disable_cache: bool,
drop_remainder: bool,
repeat_count: int,
benchmark: bool,
benchmark_steps: int,
benchmark_warmup_steps: int,
stage_one_mlp_dims: str,
stage_two_mlp_dims: str,
aux_mlp_dims: str,
xla: bool,
amp: bool,
inter_op_parallelism: int,
intra_op_parallelism: int,
prefetch_train_size: int,
prefetch_test_size: int,
prebatch_train_size: int,
prebatch_test_size: int
):
hvd.init()
if seed >= 0:
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
if affinity != "disabled":
gpu_id = hvd.local_rank()
affinity = set_affinity(
gpu_id=gpu_id, nproc_per_node=hvd.size(), mode=affinity
)
dist_print(f"{gpu_id}: thread affinity: {affinity}")
init_logger(results_dir, log_filename)
gpus = tf.config.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
if amp:
tf.keras.mixed_precision.set_global_policy("mixed_float16")
if inter_op_parallelism > 0:
tf.config.threading.set_inter_op_parallelism_threads(inter_op_parallelism)
if intra_op_parallelism > 0:
tf.config.threading.set_intra_op_parallelism_threads(intra_op_parallelism)
if xla:
tf.config.optimizer.set_jit(True)
if optimizer == "adam":
optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=weight_decay)
elif optimizer == "lazy_adam":
optimizer = tfa.optimizers.LazyAdam(lr)
elif optimizer == "sgd":
optimizer = tfa.optimizers.SGDW(learning_rate=lr, weight_decay=weight_decay)
optimizer = hvd.DistributedOptimizer(optimizer, compression=hvd.Compression.fp16)
if amp:
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer, dynamic=True)
num_gpus = hvd.size()
if global_batch_size % num_gpus != 0:
raise ValueError('Global batch size must be divisible by number of gpus. Otherwise it may result in deadlock.')
batch_size = global_batch_size // num_gpus
"""
In case of:
- benchmark: we can load only 1 batch and operate on it for benchmark_steps times (in preload fashion).
- training: we can repeat via a flag
"""
dataset_dir = Path(dataset_dir)
feature_spec = FeatureSpec.from_yaml(dataset_dir / feature_spec)
    # since each tfrecord file must include all of the features, it is enough to read the first chunk of each split.
train_files = [dataset_dir / file for file in feature_spec.source_spec[TRAIN_MAPPING][0][FILES_SELECTOR]]
data_iterator_train = get_data_iterator(
train_files, feature_spec, batch_size, num_gpus, long_seq_length,
repeat_count=repeat_count, drop_remainder=drop_remainder,
amp=amp, disable_cache=disable_cache, prefetch_size=prefetch_train_size,
num_parallel_calls=num_parallel_calls, prebatch_size=prebatch_train_size
)
if mode == "train":
test_files = [dataset_dir / file for file in feature_spec.source_spec[TEST_MAPPING][0][FILES_SELECTOR]]
data_iterator_test = get_data_iterator(
test_files, feature_spec, batch_size, num_gpus, long_seq_length,
amp=amp, disable_cache=disable_cache, prefetch_size=prefetch_test_size, num_parallel_calls=num_parallel_calls,
prebatch_size=prebatch_test_size
)
else:
data_iterator_test = [] # otherwise not used
stage_one_mlp_dims = csv_str_to_int_list(stage_one_mlp_dims)
stage_two_mlp_dims = csv_str_to_int_list(stage_two_mlp_dims)
aux_mlp_dims = csv_str_to_int_list(aux_mlp_dims)
model_params = {
"feature_spec": feature_spec,
"embedding_dim": embedding_dim,
"mlp_hidden_dims": {
"stage_1": stage_one_mlp_dims,
"stage_2": stage_two_mlp_dims,
"aux": aux_mlp_dims
},
"dropout_rate": dropout_rate,
"model_type": model_type
}
model, model_fn = build_model_and_loss(model_params)
checkpoint_manager = init_checkpoint_manager(
model, optimizer,
save_checkpoint_path, load_checkpoint_path
)
save_checkpoint = save_checkpoint_path != "" and hvd.rank() == 0
performance_calculator = PerformanceCalculator(
benchmark_warmup_steps, benchmark_steps
)
if mode == "train":
train(model, model_fn, data_iterator_train, data_iterator_test, optimizer, amp, epochs,
benchmark, performance_calculator, save_checkpoint, checkpoint_manager)
elif mode == "inference":
inference(model, data_iterator_train, benchmark, performance_calculator)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/main.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing script for SIM models."""
import logging
import multiprocessing
import os
import click
import cudf
import cupy
import dask.dataframe
import dask_cudf
import rmm
from preprocessing.io import load_metadata, load_review_data, save_metadata
from preprocessing.ops import ExplodeSequence, add_negative_sequence, list_slice, slice_and_pad_left
DASK_TRAIN_DATASET_CHUNKSIZE = 15_000
TRAIN_DATA_DIR = "train"
TEST_DATA_DIR = "test"
TEST_DATA_FILE = "part.0.parquet"
CATEGORIZED_METADATA_FILE = "metadata.json"
OUTPUT_META = {
"label": "int8",
"uid": "int64",
"item": "int32",
"cat": "int32",
"item_sequence": "list",
"cat_sequence": "list",
"neg_item_sequence": "list",
"neg_cat_sequence": "list",
}
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s] %(levelname)s: %(message)s",
)
def add_categorified_column(df, col_name, id_col_name):
unique_values = df[col_name].unique().to_frame()
unique_values[id_col_name] = cupy.arange(len(unique_values), dtype="int32") + 1
df = df.merge(unique_values, how="left", on=col_name)
return df
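# Toy illustration (added for exposition): categorification assigns each unique
# raw value a dense, 1-based int32 id via a merge, leaving 0 free for padding.
# Ids depend on the order returned by unique(), so treat them as opaque labels.
def _example_categorify():
    df = cudf.DataFrame({"item": ["b", "a", "b", "c"]})
    return add_categorified_column(df, "item", "item_id")  # rows with equal "item" share one id in 1..3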
def categorify_items(all_items_unique: cudf.DataFrame, metadata: cudf.DataFrame) -> cudf.DataFrame:
unique_item_with_category = all_items_unique.merge(metadata, how="left", on="item")
unique_item_with_category = unique_item_with_category.fillna("no_category")
df = add_categorified_column(unique_item_with_category, "item", "item_id")
df = add_categorified_column(df, "cat", "cat_id")
return df
def filter_too_short_sequences(reviews: cudf.DataFrame, min_seq_length: int) -> cudf.DataFrame:
user_counts = reviews["user"].value_counts()
user_counts_filtered = user_counts[user_counts >= min_seq_length]
valid_users = user_counts_filtered.index
reviews = reviews[reviews["user"].isin(valid_users)]
reviews.reset_index(drop=True, inplace=True)
return reviews
def add_items_and_categories_indices(
reviews: cudf.DataFrame,
item_and_cat_with_ids: cudf.DataFrame,
) -> cudf.DataFrame:
return reviews.merge(item_and_cat_with_ids, how="left", on="item")
def categorify_users(reviews: cudf.DataFrame) -> cudf.DataFrame:
return add_categorified_column(reviews, "user", "uid")
def create_sampling_df(
all_items: cudf.DataFrame,
item_and_cat_with_ids: cudf.DataFrame
) -> cudf.DataFrame:
sampling_df = all_items.merge(item_and_cat_with_ids, how="left", on="item")
sampling_df = sampling_df[["item_id", "cat_id"]]
sampling_df = sampling_df.sort_values(by="item_id")
sampling_df.reset_index(drop=True, inplace=True)
return sampling_df
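# Note (added for exposition): `all_items` is built from the reviews frame
# without deduplication, so drawing uniform random row indices from this
# sampling frame yields popularity-weighted negatives (frequent items are
# sampled more often) rather than uniform-over-catalog ones.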
def aggregate_per_user(df):
df = df.sort_values(by=["unixReviewTime", "item"])
df = df.groupby("uid").agg({
"item_id": list,
"cat_id": list,
})
df.reset_index(inplace=True)
df = df.rename(columns={
"item_id": "item_sequence",
"cat_id": "cat_sequence",
})
df["item"] = df["item_sequence"].list.get(-1)
df["cat"] = df["cat_sequence"].list.get(-1)
df["item_sequence"] = list_slice(df["item_sequence"], 0, -1)
df["cat_sequence"] = list_slice(df["cat_sequence"], 0, -1)
return df
def explode_sequence(df: cudf.DataFrame, min_elements: int, max_elements: int) -> cudf.DataFrame:
df = ExplodeSequence(
col_names=["item_sequence", "cat_sequence"],
keep_cols=["uid"],
max_elements=max_elements + 1,
).transform(df)
df["item"] = df["item_sequence"].list.get(-1)
df["cat"] = df["cat_sequence"].list.get(-1)
df["item_sequence"] = list_slice(df["item_sequence"], 0, -1)
df["cat_sequence"] = list_slice(df["cat_sequence"], 0, -1)
df = df[df.item_sequence.list.len() >= min_elements]
return df
def add_negative_label(pos_df: cudf.DataFrame, sampling_df: cudf.DataFrame) -> cudf.DataFrame:
neg_df = pos_df.copy()
pos_df["label"] = cupy.int8(1)
neg_df["label"] = cupy.int8(0)
neg = cupy.random.randint(
low=0,
high=len(sampling_df),
size=len(neg_df),
dtype=int,
)
neg_item_ids = sampling_df["item_id"].iloc[neg].values
neg_df["item"] = neg_item_ids
neg_cat_ids = sampling_df["cat_id"].iloc[neg].values
neg_df["cat"] = neg_cat_ids
df = cudf.concat([pos_df, neg_df])
return df
def add_negative_sampling(df: cudf.DataFrame, sampling_df: cudf.DataFrame) -> cudf.DataFrame:
df = add_negative_label(df, sampling_df)
neg = cupy.random.randint(
low=0,
high=len(sampling_df),
size=int(df.item_sequence.list.len().sum()),
dtype=int,
)
item_samples = sampling_df["item_id"].iloc[neg]
cat_samples = sampling_df["cat_id"].iloc[neg]
df["neg_item_sequence"] = add_negative_sequence(df["item_sequence"], item_samples)
df["neg_cat_sequence"] = add_negative_sequence(df["cat_sequence"], cat_samples)
return df
def pad_with_zeros(df: cudf.DataFrame, max_elements: int) -> cudf.DataFrame:
df["item_sequence"] = slice_and_pad_left(df["item_sequence"], max_elements)
df["cat_sequence"] = slice_and_pad_left(df["cat_sequence"], max_elements)
df["neg_item_sequence"] = slice_and_pad_left(df["neg_item_sequence"], max_elements)
df["neg_cat_sequence"] = slice_and_pad_left(df["neg_cat_sequence"], max_elements)
return df
def create_train_dataset(
df: cudf.DataFrame,
sampling_df: cudf.DataFrame,
min_elements: int,
max_elements: int,
output_path: str,
seed: int,
dask_scheduler: str = "processes",
) -> None:
def transform(df, sampling_df, partition_info):
part_seed = seed + partition_info["number"] + 1
cupy.random.seed(part_seed)
df = explode_sequence(df, min_elements, max_elements)
df = add_negative_sampling(df, sampling_df)
df = pad_with_zeros(df, max_elements)
df = df.sort_values(by=["uid"])
df.reset_index(drop=True, inplace=True)
df = df[list(OUTPUT_META)]
return df
ddf = dask_cudf.from_cudf(df, chunksize=DASK_TRAIN_DATASET_CHUNKSIZE)
ddf = ddf.map_partitions(transform, meta=OUTPUT_META, sampling_df=sampling_df)
ddf = ddf.clear_divisions()
with dask.config.set(scheduler=dask_scheduler):
ddf.to_parquet(output_path, write_index=False, overwrite=True)
def create_test_dataset(
df: cudf.DataFrame,
sampling_df: cudf.DataFrame,
max_elements: int,
output_path: str,
) -> None:
df = add_negative_sampling(df, sampling_df)
df = pad_with_zeros(df, max_elements)
df = df.sort_values(by=["uid"])
df.reset_index(drop=True, inplace=True)
df = df[list(OUTPUT_META)]
os.makedirs(output_path, exist_ok=True)
output_file = os.path.join(output_path, TEST_DATA_FILE)
df.to_parquet(output_file, index=False)
@click.command()
@click.option(
"--amazon_dataset_path",
required=True,
help="Path to the dataset. Must contain both reviews and metadata json files.",
type=str,
)
@click.option(
"--output_path",
required=True,
help="Path where preprocessed dataset is saved.",
type=str,
)
@click.option(
"--metadata_file_name",
default="meta_Books.json",
help="Path to the dataset. Must contain both reviews and metadata json files.",
type=str,
)
@click.option(
"--reviews_file_name",
default="reviews_Books.json",
help="Path where preprocessed dataset is saved.",
type=str,
)
@click.option(
"--max_sequence_length",
default=100,
help="Take only `max_sequence_length` last elements of a sequence.",
)
@click.option(
"--shortest_sequence_for_user",
default=20,
help="Specifies what is a minimal length of a sequence. "
"Every user with a sequence shorter than this value will be discarded."
)
@click.option(
"--shortest_sequence_for_training",
default=1,
help="Specifies what is a minimal length of a sequence in a training set.",
)
@click.option(
"--metadata_loader_n_proc",
default=multiprocessing.cpu_count(),
help="Specifies the number of processes used to parse metadata.",
)
@click.option(
"--review_loader_num_workers",
default=20,
help="Specifies the number of dask workers used to read reviews data. "
"Note that, as each worker is a new process, too high value might cause GPU OOM errors."
)
@click.option(
"--seed",
default=12345,
help="Seed for reproducibility."
"Note that the results can still differ between machines because of dask/cudf non-determinism.",
type=int,
)
def main(
amazon_dataset_path: str,
output_path: str,
metadata_file_name: str,
reviews_file_name: str,
max_sequence_length: int,
shortest_sequence_for_user: int,
shortest_sequence_for_training: int,
metadata_loader_n_proc: int,
review_loader_num_workers: int,
seed: int,
):
cupy.random.seed(seed)
rmm.reinitialize(managed_memory=True)
metadata_path = os.path.join(amazon_dataset_path, metadata_file_name)
reviews_path = os.path.join(amazon_dataset_path, reviews_file_name)
logging.info("Loading metadata")
metadata = load_metadata(metadata_path, metadata_loader_n_proc)
assert len(metadata) == metadata["item"].nunique(), "metadata should contain unique items"
logging.info("Loading review data")
reviews = load_review_data(reviews_path, review_loader_num_workers)
logging.info("Removing short user sequences")
reviews = filter_too_short_sequences(reviews, shortest_sequence_for_user)
logging.info("Categorifying users, items, categories")
all_items_unique = reviews["item"].unique().to_frame()
item_and_cat_with_ids = categorify_items(all_items_unique, metadata)
reviews = add_items_and_categories_indices(reviews, item_and_cat_with_ids)
reviews = categorify_users(reviews)
logging.info("Aggregating data per user")
df = aggregate_per_user(reviews)
logging.info("Preparing dataframe for negative sampling")
all_items = reviews["item"].to_frame()
sampling_df = create_sampling_df(all_items, item_and_cat_with_ids)
os.makedirs(output_path, exist_ok=True)
logging.info("Creating train dataset")
create_train_dataset(
df,
sampling_df,
min_elements=shortest_sequence_for_training,
max_elements=max_sequence_length,
output_path=os.path.join(output_path, TRAIN_DATA_DIR),
seed=seed,
)
logging.info("Creating test dataset")
create_test_dataset(
df,
sampling_df,
max_elements=max_sequence_length,
output_path=os.path.join(output_path, TEST_DATA_DIR),
)
logging.info("Saving metadata")
save_metadata(
number_of_items=len(item_and_cat_with_ids),
number_of_categories=item_and_cat_with_ids["cat_id"].nunique(),
number_of_users=len(df),
output_path=os.path.join(output_path, CATEGORIZED_METADATA_FILE),
)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/preprocessing/sim_preprocessing.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import multiprocessing
import os
import pathlib
from functools import partial
import click
import pandas as pd
import numpy as np
import tensorflow as tf
from sim.data.feature_spec import FeatureSpec
from sim.data.defaults import TRAIN_MAPPING, TEST_MAPPING, REMAINDER_FILENAME, FILES_SELECTOR
# Docker image sets it to "python" for NVTabular purposes (bugfix), which slows down the script 20x
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s] %(levelname)s: %(message)s",
)
def prepare_record(sample, all_feature_names, sequential_data_start, prebatch):
feature = {}
for idx, (f_name, data) in enumerate(zip(all_feature_names, sample.values())):
if idx >= sequential_data_start:
if prebatch:
data = np.array(data).flatten()
else:
if not prebatch:
data = [data]
feature[f_name] = tf.train.Feature(int64_list=tf.train.Int64List(value=data))
return tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()
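# Round-trip sketch (added for exposition): records produced by prepare_record
# are plain tf.train.Example protos with int64 features, so they can be parsed
# back with tf.io.parse_single_example. The feature name and length below are
# hypothetical; the real schema comes from the generated feature spec.
def _example_parse_record(serialized_record):
    schema = {"uid": tf.io.FixedLenFeature([1], tf.int64)}
    return tf.io.parse_single_example(serialized_record, schema)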
def save_records(output_path, records, base_output_path, feature_spec, mapping):
with tf.io.TFRecordWriter(str(output_path)) as file_writer:
for record_bytes in records:
file_writer.write(record_bytes)
feature_spec.source_spec[mapping][0][FILES_SELECTOR].append(
str(output_path.relative_to(base_output_path))
)
logging.info(f'Created: {output_path}')
@click.command()
@click.option(
"--amazon_dataset_path",
required=True,
help="Path to the dataset directory.",
type=str,
)
@click.option(
"--tfrecord_output_dir",
required=True,
help="Path of directory to output tfrecord files.",
type=str,
)
@click.option(
"--number_of_user_features",
default=1,
help="number of user specific features. Default is 1 for amazon books dataset (user_id).",
type=int
)
@click.option(
"--max_seq_len",
default=100,
help="maximum possible length of history. (Entries will be padded to that length later).",
type=int
)
@click.option(
"--n_proc",
default=multiprocessing.cpu_count(),
help="Number of processes started to speed up conversion to tfrecord.",
type=int,
)
@click.option(
"--train_split_dir",
default='train',
help="Name of directory within amazon dataset directory containing train data.",
type=str
)
@click.option(
"--test_split_dir",
default='test',
help="Name of directory within amazon dataset directory containing test data.",
type=str,
)
@click.option(
"--metadata_file",
default='metadata.json',
help="Name of metadata file within amazon dataset directory (containing feature cardinalities).",
type=str
)
@click.option(
"--train_output_dir",
default='train',
help="Name of train directory within output directory.",
type=str
)
@click.option(
"--test_output_dir",
default='test',
help='Name of test directory within output directory.',
type=str
)
@click.option(
"--train_parts",
default=8,
help="Number of output train files.",
type=int
)
@click.option(
"--test_parts",
default=4,
help="Number of output test files.",
type=int
)
@click.option(
"--prebatch_train_size",
default=0,
help='Apply batching to data in preprocessing. If prebatch_size == 0, no prebatching is done.',
type=int
)
@click.option(
"--prebatch_test_size",
default=0,
help='Apply batching to data in preprocessing. If prebatch_size == 0, no prebatching is done.',
type=int
)
def main(
amazon_dataset_path: str,
tfrecord_output_dir: str,
number_of_user_features: int,
max_seq_len: int,
n_proc: int,
train_split_dir: str,
test_split_dir: str,
metadata_file: str,
train_output_dir: str,
test_output_dir: str,
train_parts: int,
test_parts: int,
prebatch_train_size: int,
prebatch_test_size: int
):
"""
read_parquet()
create tf.train.Features
create default FeatureSpec
dump to Tfrecords
"""
amazon_dataset_path = pathlib.Path(amazon_dataset_path)
tfrecord_output_dir = pathlib.Path(tfrecord_output_dir)
input_splits = [
amazon_dataset_path / train_split_dir,
amazon_dataset_path / test_split_dir
]
output_splits = [
tfrecord_output_dir / train_output_dir,
tfrecord_output_dir / test_output_dir
]
for split_dir in output_splits:
os.makedirs(split_dir, exist_ok=True)
with open(amazon_dataset_path / metadata_file, 'r') as file:
metadata = json.load(file)
feature_cardinalities = []
for cardinality in metadata['cardinalities']:
feature_cardinalities.append(cardinality['value'])
user_features_cardinalities = feature_cardinalities[:number_of_user_features]
item_features_cardinalities = feature_cardinalities[number_of_user_features:]
feature_spec = FeatureSpec.get_default_feature_spec(user_features_cardinalities, item_features_cardinalities, max_seq_len)
number_of_item_features = len(item_features_cardinalities)
sequential_data_start = 1 + number_of_user_features + number_of_item_features
all_feature_names = FeatureSpec.get_default_features_names(number_of_user_features, number_of_item_features)
prebatch_per_split = [prebatch_train_size, prebatch_test_size]
parts_per_split = [train_parts, test_parts]
mappings = [TRAIN_MAPPING, TEST_MAPPING]
for mapping, input_dir, output_dir, output_parts, prebatch_size in zip(mappings, input_splits, output_splits, parts_per_split, prebatch_per_split):
prebatch = prebatch_size > 0
prepare_record_function = partial(prepare_record, all_feature_names=all_feature_names,
sequential_data_start=sequential_data_start, prebatch=prebatch)
save_records_function = partial(save_records, base_output_path=tfrecord_output_dir, feature_spec=feature_spec, mapping=mapping)
logging.info(f"Started conversion, will output to {output_dir}")
df = pd.read_parquet(input_dir, engine='pyarrow')
logging.info("Parquet loaded")
        remainder = None
        if prebatch:
            df['batch_index'] = df.index // prebatch_size
            df = df.groupby('batch_index').agg(list)
            # set aside a possibly incomplete final batch as a separate remainder record
            if len(df.iloc[-1, 0]) < prebatch_size:
                remainder = df[-1:].to_dict('records')[0]
                remainder = prepare_record_function(remainder)
                df = df[:-1]
            logging.info("Prebatching applied")
df = df.to_dict('records')
with multiprocessing.Pool(n_proc) as pool:
records = pool.map(prepare_record_function, df)
logging.info("Records created")
records = np.array_split(records, output_parts)
for i, records_part in enumerate(records):
if len(records_part) > 0:
save_records_function(output_dir / f'part_{i}.tfrecord', records_part)
        if prebatch and remainder is not None:
            save_records_function(output_dir / REMAINDER_FILENAME, [remainder])
feature_spec.to_yaml(tfrecord_output_dir / 'feature_spec.yaml')
if __name__ == "__main__":
main()
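# Example invocation sketch (hypothetical paths; remaining options keep their defaults):
#   python parquet_to_tfrecord.py \
#       --amazon_dataset_path /data/amazon \
#       --tfrecord_output_dir /data/tfrecords \
#       --prebatch_train_size 4096
# Output: train/test TFRecord parts plus feature_spec.yaml in the output directory.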
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/preprocessing/parquet_to_tfrecord.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import json
import multiprocessing
from typing import Dict
import cudf
import dask.dataframe
JSON_READ_BLOCKSIZE = 100_000_000
def _read_metadata_line(line: str) -> Dict[str, str]:
dict_line = ast.literal_eval(line)
return {"item": dict_line["asin"], "cat": dict_line["categories"][0][-1]}
def load_metadata(
path: str,
n_proc: int,
) -> cudf.DataFrame:
    with open(path) as fp:
        metadata = fp.readlines()
with multiprocessing.Pool(n_proc) as pool:
metadata = pool.map(_read_metadata_line, metadata)
df = cudf.DataFrame(metadata)
return df
def _read_json(*args, **kwargs):
df = cudf.read_json(*args, **kwargs)
df = df.rename(columns={"reviewerID": "user", "asin": "item"})
df = df[["user", "item", "unixReviewTime"]]
return df
def load_review_data(
path: str,
num_workers: int,
dask_scheduler="processes",
) -> cudf.DataFrame:
ddf = dask.dataframe.read_json(
path,
lines=True,
blocksize=JSON_READ_BLOCKSIZE,
engine=_read_json,
)
df = ddf.compute(scheduler=dask_scheduler, num_workers=num_workers)
return df
def save_metadata(
number_of_items: int,
number_of_categories: int,
number_of_users: int,
output_path: str,
):
data = {
"cardinalities": [
{"name": "uid", "value": number_of_users},
{"name": "item", "value": number_of_items},
{"name": "cat", "value": number_of_categories},
],
}
with open(output_path, "w") as fp:
json.dump(data, fp)
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/preprocessing/io.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List, Optional
import cudf
import cupy
import numba.cuda
from nvtabular import ops
from nvtabular.dispatch import _build_cudf_list_column as nvt_build_list_column
THREADS = 32
logging.getLogger("numba").setLevel(logging.WARNING)
def list_slice(seq_col, start: int, end: Optional[int] = None):
"""Slices a list column
This is an nvtabular.ops.ListSlice wrapper that can be used with cuDF or dask-cuDF.
"""
df = cudf.DataFrame(seq_col)
col_selector = ops.ColumnSelector(seq_col.name)
slicer = ops.ListSlice(start, end)
transformed = slicer.transform(col_selector, df)
return transformed[seq_col.name]
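# Example sketch (assuming nvtabular's ListSlice semantics):
#   s = cudf.Series([[1, 2, 3], [4, 5]], name="hist")
#   list_slice(s, 0, 2)  ->  [[1, 2], [4, 5]]
# each list is sliced to elements [start:end); shorter lists pass through unchanged.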
@numba.cuda.jit
def _calculate_row_sizes(offsets, row_sizes, max_elements):
rowid = numba.cuda.grid(1)
if rowid < offsets.size - 1:
original_row_size = offsets[rowid + 1] - offsets[rowid]
for i in range(original_row_size):
row_sizes[1 + offsets[rowid] + i] = min(i + 1, max_elements)
@numba.cuda.jit
def _generate_rows(offsets, chunk_offsets, elements, new_elements, max_elements):
rowid = numba.cuda.grid(1)
if rowid < offsets.size - 1:
original_row_size = offsets[rowid + 1] - offsets[rowid]
chunk_offset = chunk_offsets[rowid]
row_offset = 0
for current_row_size in range(1, original_row_size + 1):
original_row_offset = offsets[rowid] + max(0, current_row_size - max_elements)
current_row_size = min(current_row_size, max_elements)
for i in range(current_row_size):
new_elements[chunk_offset + row_offset + i] = elements[original_row_offset + i]
row_offset += current_row_size
@numba.cuda.jit
def _preserve_data(offsets, values, new_values):
rowid = numba.cuda.grid(1)
if rowid < offsets.size - 1:
for i in range(offsets[rowid], offsets[rowid + 1]):
new_values[i] = values[rowid]
@numba.cuda.jit
def _slice_rjust(max_elements, offsets, elements, new_offsets, new_elements):
rowid = numba.cuda.grid(1)
if rowid < new_offsets.size - 1:
row_size = min(offsets[rowid + 1] - offsets[rowid], max_elements)
offset = offsets[rowid + 1] - row_size
new_start = new_offsets[rowid + 1] - row_size
for i in range(row_size):
new_elements[new_start + i] = elements[offset + i]
def slice_and_pad_left(seq_col, max_elements, pad_value=0):
c = seq_col._column
offsets = c.offsets.values
elements = c.elements.values
threads = THREADS
blocks = (offsets.size + threads - 1) // threads
new_offsets = cupy.arange(offsets.size, dtype=offsets.dtype) * max_elements
new_elements = cupy.full(
new_offsets[-1].item(), fill_value=pad_value, dtype=elements.dtype
)
_slice_rjust[blocks, threads](
max_elements, offsets, elements, new_offsets, new_elements
)
new_col = nvt_build_list_column(new_elements, new_offsets)
return new_col
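# Example sketch: with max_elements=4 and pad_value=0,
#   [7, 8]           -> [0, 0, 7, 8]   (left-padded)
#   [1, 2, 3, 4, 5]  -> [2, 3, 4, 5]   (keeps the most recent max_elements items)
# every output row is exactly max_elements wide, right-justified.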
class ExplodeSequence:
"""
    For each row, create new rows containing prefixes of the original list columns,
    keeping at most the last `max_elements` elements of each prefix.
    WARNING: all lists in the same row must have equal lengths!
"""
def __init__(
self,
col_names: List[str],
keep_cols: List[str],
max_elements: int,
):
self.col_names = col_names
self.keep_cols = keep_cols
self.max_elements = max_elements
if not self.col_names:
raise ValueError("`col_names` cannot be empty")
def transform(self, df: cudf.DataFrame) -> cudf.DataFrame:
ret = cudf.DataFrame()
for col_name in self.col_names:
c = df[col_name]._column
offsets = c.offsets.values
elements = c.elements.values
threads = THREADS
blocks = (offsets.size + threads - 1) // threads
lengths = df[col_name].list.len().values
sizes = cupy.minimum(lengths, self.max_elements)
sizes = sizes * (sizes + 1) / 2
truncated = cupy.maximum(lengths - self.max_elements, 0) * self.max_elements
chunk_sizes = (sizes + truncated).astype(offsets.dtype)
chunk_offsets = cupy.zeros(len(offsets), dtype=offsets.dtype)
cupy.cumsum(chunk_sizes, dtype=offsets.dtype, out=chunk_offsets[1:])
new_offsets_size = int(lengths.sum() + 1)
new_elements_size = int(chunk_sizes.sum())
new_offsets = cupy.zeros(new_offsets_size, dtype=offsets.dtype)
_calculate_row_sizes[blocks, threads](
offsets, new_offsets, self.max_elements
)
new_offsets = cupy.cumsum(new_offsets).astype(offsets.dtype)
new_elements = cupy.zeros(new_elements_size, dtype=elements.dtype)
_generate_rows[blocks, threads](
offsets, chunk_offsets, elements, new_elements, self.max_elements
)
col = nvt_build_list_column(new_elements, new_offsets)
ret[col_name] = col
for col in self.keep_cols:
new_values = cupy.zeros(new_offsets_size - 1, dtype=int)
_preserve_data[blocks, threads](
offsets, df[col].values, new_values
)
ret[col] = new_values
ret = ret[self.keep_cols + self.col_names]
return ret
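# Example sketch: for a row holding the list [1, 2, 3] with max_elements=2,
# transform() emits one row per history prefix, truncated to the last
# max_elements items: [1], [1, 2], [2, 3]; `keep_cols` values (e.g. a user id)
# are copied into every emitted row.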
def add_negative_sequence(seq_col, samples):
c = seq_col._column
    offsets = c.offsets.values
    new_offsets = offsets.copy()
    # replace the stored elements with the sampled negatives, keeping the row layout
    new_elements = cupy.array(samples.to_gpu_array())
col = nvt_build_list_column(new_elements, new_offsets)
return col
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/preprocessing/ops.py |
DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/__init__.py |
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import tensorflow as tf
class CTRClassificationMLP(tf.keras.layers.Layer):
def __init__(
self,
layer_sizes=(200,),
num_outputs=1,
activation_function=partial(
tf.keras.layers.PReLU, alpha_initializer=tf.keras.initializers.Constant(0.1)
),
use_bn=False,
dropout_rate=-1
):
super().__init__()
self.layer_sizes = layer_sizes
self.activation_function = activation_function
self.use_bn = use_bn
self.dropout_rate = dropout_rate
if self.use_bn:
self.batch_norm = tf.keras.layers.BatchNormalization()
self.layers = []
for layer_size in self.layer_sizes:
# add dense layer and activation
self.layers.append(tf.keras.layers.Dense(layer_size))
self.layers.append(self.activation_function())
if self.dropout_rate > 0.0:
# add dropout between final representation and classification layer
self.layers.append(tf.keras.layers.Dropout(rate=self.dropout_rate))
# add the scoring layer
scoring_layer = tf.keras.layers.Dense(num_outputs, dtype='float32')
self.layers.append(scoring_layer)
def call(self, input, training=False):
if self.use_bn:
input = self.batch_norm(input, training=training)
for layer in self.layers:
input = layer(input, training=training)
return input
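# Minimal usage sketch (hypothetical shapes):
#   mlp = CTRClassificationMLP(layer_sizes=(200, 80), dropout_rate=0.1)
#   logits = mlp(tf.zeros([32, 48]), training=True)  # -> shape [32, 1]
# the scoring Dense layer is pinned to float32 so the logits stay numerically
# stable under mixed-precision training.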
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/layers/ctr_classification_mlp.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from sim.layers.item_item_interaction import DIENAttentionUnit
from sim.layers.rnn import AUGRU
@tf.function
def compute_item_sequence_attention(item, sequence, mask, attention_op):
"""
Computes normalized attention scores between a given sequence and item
"""
scores_unnormalized = attention_op((sequence, tf.expand_dims(item, axis=1)))
if mask is not None:
min_value_for_dtype = scores_unnormalized.dtype.min
mask = tf.equal(mask, tf.ones_like(mask))
paddings = tf.ones_like(scores_unnormalized) * min_value_for_dtype
scores_unnormalized = tf.where(mask, scores_unnormalized, paddings) # [B, 1, T]
scores = tf.nn.softmax(scores_unnormalized)
return scores
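# Shape sketch (dot interaction): sequence [B, T, E] and item [B, E] yield
# scores [B, T]; masked positions are filled with the dtype minimum before the
# softmax, so padded timesteps receive (near-)zero attention weight.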
class DINItemSequenceInteractionBlock(tf.keras.layers.Layer):
def __init__(self, item_item_interaction):
super(DINItemSequenceInteractionBlock, self).__init__()
self.item_item_interaction = item_item_interaction
@tf.function
def call(self, inputs):
item, item_sequence, mask = inputs
# compute attention scores between item_sequence and item
scores = compute_item_sequence_attention(
item, item_sequence, mask, self.item_item_interaction
)
# equivalent to tf.matmul(scores[:,None,:], item_sequence)
return (
tf.reduce_sum(tf.expand_dims(scores, axis=-1) * item_sequence, [1]),
scores,
)
class DIENItemSequenceInteractionBlock(tf.keras.layers.Layer):
def __init__(self, hidden_size: int):
super(DIENItemSequenceInteractionBlock, self).__init__()
self.hidden_size = hidden_size # hidden=emb_dim*6
self.item_item_interaction = DIENAttentionUnit(self.hidden_size)
self.layer_1 = tf.keras.layers.GRU(self.hidden_size, return_sequences=True)
self.layer_2 = AUGRU(self.hidden_size)
@tf.function
def call(self, inputs):
"""
Returns:
- final_seq_repr: final vector representation of the sequence
- features_layer_1: for auxiliary loss
"""
item, item_sequence, mask = inputs
# compute h(1),...,h(T) from e(1),...,e(T)
features_layer_1 = self.layer_1(item_sequence)
# compute attention scores between features_layer_1 and item
attention_scores = compute_item_sequence_attention(
item, features_layer_1, mask, self.item_item_interaction
)
attention_scores = tf.expand_dims(attention_scores, -1)
# compute h'(T)
final_seq_repr = self.layer_2([features_layer_1, attention_scores])
# [B, 1, E] -> [B, E]
final_seq_repr = tf.squeeze(final_seq_repr)
return final_seq_repr, features_layer_1
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/layers/item_sequence_interaction.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
class EmbeddingInitializer(tf.keras.initializers.Initializer):
def __call__(self, shape, dtype=tf.float32):
maxval = tf.sqrt(tf.constant(1.) / tf.cast(shape[0], tf.float32))
maxval = tf.cast(maxval, dtype=dtype)
minval = -maxval
weights = tf.random.uniform(shape, minval=minval, maxval=maxval, dtype=dtype)
weights = tf.cast(weights, dtype=tf.float32)
return weights
def get_config(self):
return {}
# https://github.com/NVIDIA/DeepLearningExamples/blob/81ee705868a11d6fe18c12d237abe4a08aab5fd6/TensorFlow2/Recommendation/DLRM/embedding.py#L94
class Embedding(tf.keras.layers.Layer):
def __init__(
self,
input_dim,
output_dim,
*,
trainable=True,
embedding_name=None,
initializer=EmbeddingInitializer()
):
super(Embedding, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.embedding_name = (
embedding_name if embedding_name is not None else "embedding_table"
)
self.embedding_table = None
self.trainable = trainable
self.initializer = initializer
def build(self, input_shape):
self.embedding_table = self.add_weight(
self.embedding_name,
shape=[self.input_dim, self.output_dim],
dtype=tf.float32,
initializer=self.initializer,
trainable=self.trainable,
)
@tf.function
def call(self, indices):
return tf.gather(params=self.embedding_table, indices=indices)
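# Minimal usage sketch (hypothetical sizes):
#   emb = Embedding(input_dim=1000, output_dim=16)
#   vectors = emb(tf.constant([[1, 2], [3, 4]]))  # -> shape [2, 2, 16]
# EmbeddingInitializer samples U(-1/sqrt(input_dim), 1/sqrt(input_dim)),
# so larger tables start with proportionally smaller weights.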
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/layers/embedding.py |
DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/layers/__init__.py |
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
class DotItemItemInteraction(tf.keras.layers.Layer):
@tf.function
def call(self, inputs):
item1, item2 = inputs
return tf.reduce_sum(item1 * item2, axis=-1)
class DINActivationUnit(tf.keras.layers.Layer):
def __init__(self):
super(DINActivationUnit, self).__init__()
self.dense1 = tf.keras.layers.Dense(80, activation="sigmoid")
self.dense2 = tf.keras.layers.Dense(40, activation="sigmoid")
self.linear = tf.keras.layers.Dense(1)
@tf.function
def call(self, inputs):
targets, item = inputs
items = tf.tile(item, [1, targets.shape[1], 1])
combined = tf.concat(
[items, targets, items - targets, items * targets], axis=-1
)
output = self.dense1(combined)
output = self.dense2(output)
output = self.linear(output)
# (B, T, 1) -> (B, T)
output = tf.squeeze(output)
return output
class DIENAttentionUnit(tf.keras.layers.Layer):
def __init__(self, embedding_dim):
"""
        NOTE(alexo): this is very similar to DINActivationUnit;
        apart from the input item adaptation, the remaining part is the same.
"""
super(DIENAttentionUnit, self).__init__()
# Adaptation of input item
self.item_dense = tf.keras.layers.Dense(embedding_dim)
self.item_prelu = tf.keras.layers.PReLU(
alpha_initializer=tf.keras.initializers.Constant(0.1)
)
#
self.dense1 = tf.keras.layers.Dense(80, activation="sigmoid")
self.dense2 = tf.keras.layers.Dense(40, activation="sigmoid")
self.linear = tf.keras.layers.Dense(1)
@tf.function
def call(self, inputs):
targets, item = inputs
item = self.item_dense(item)
item = self.item_prelu(item)
items = tf.tile(item, [1, targets.shape[1], 1])
combined = tf.concat(
[items, targets, items - targets, items * targets], axis=-1
)
output = self.dense1(combined)
output = self.dense2(output)
output = self.linear(output) # unnormalized scores
# (B, T, 1) -> (B, T)
output = tf.squeeze(output)
return output
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/layers/item_item_interaction.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
class VecAttGRUCell(tf.keras.layers.Layer):
"""
Modification of Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).
Args:
units: int, The number of units in the GRU cell.
"""
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
self._activation = tf.math.tanh
self._gate_linear = tf.keras.layers.Dense(
2 * self.units,
bias_initializer=tf.constant_initializer(1.0),
kernel_initializer=None,
)
self._candidate_linear = tf.keras.layers.Dense(
self.units,
bias_initializer=tf.constant_initializer(0.0),
kernel_initializer=None,
)
super(VecAttGRUCell, self).__init__(**kwargs)
def call(self, inputs_attscore, states):
"""Gated recurrent unit (GRU) with nunits cells."""
inputs, att_score = inputs_attscore
state = states[0]
value = tf.math.sigmoid(self._gate_linear(tf.concat([inputs, state], axis=-1)))
r, u = tf.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
c = self._activation(
self._candidate_linear(tf.concat([inputs, r_state], axis=-1))
)
u = (1.0 - att_score) * u
new_h = u * state + (1 - u) * c
return new_h, [new_h]
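# Update sketch: with update gate u and candidate c,
#   u' = (1 - att_score) * u
#   h_new = u' * h + (1 - u') * c
# so a higher attention score lets the candidate state overwrite more of the
# previous hidden state (the AUGRU attention-gated update).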
class AUGRU(tf.keras.layers.Layer):
def __init__(self, num_units=None, return_sequence=True, **kwargs):
self.num_units = num_units
self.return_sequence = return_sequence
super(AUGRU, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
self.internal_rnn = tf.keras.layers.RNN(VecAttGRUCell(self.num_units))
# Be sure to call this somewhere!
super(AUGRU, self).build(input_shape)
def call(self, input_list):
"""
:param concated_embeds_value: None * field_size * embedding_size
:return: None*1
"""
return self.internal_rnn(tuple(input_list))
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/layers/rnn.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import horovod.tensorflow as hvd
def csv_str_to_int_list(s):
"""
Example: '200,80' -> [200, 80]
"""
return list(map(int, s.split(",")))
def dist_print(*args, force=False, **kwargs):
if hvd.rank() == 0 or force:
        print(*args, **kwargs)
 | DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/utils/misc.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import math
import os
import pathlib
import re
import pynvml
pynvml.nvmlInit()
class Device:
# assume nvml returns list of 64 bit ints
_nvml_affinity_elements = math.ceil(os.cpu_count() / 64)
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def get_cpu_affinity(self):
affinity_string = ''
for j in pynvml.nvmlDeviceGetCpuAffinity(
self.handle, Device._nvml_affinity_elements
):
# assume nvml returns list of 64 bit ints
affinity_string = '{:064b}'.format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
ret = [i for i, e in enumerate(affinity_list) if e != 0]
return ret
def set_socket_affinity(gpu_id):
dev = Device(gpu_id)
affinity = dev.get_cpu_affinity()
os.sched_setaffinity(0, affinity)
def set_single_affinity(gpu_id):
dev = Device(gpu_id)
affinity = dev.get_cpu_affinity()
os.sched_setaffinity(0, affinity[:1])
def set_single_unique_affinity(gpu_id, nproc_per_node):
devices = [Device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.get_cpu_affinity() for dev in devices]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
affinities = []
assigned = []
for socket_affinity in socket_affinities:
for core in socket_affinity:
if core not in assigned:
affinities.append([core])
assigned.append(core)
break
os.sched_setaffinity(0, affinities[gpu_id])
def set_socket_unique_affinity(gpu_id, nproc_per_node, mode):
device_ids = [Device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.get_cpu_affinity() for dev in device_ids]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
socket_affinities_to_device_ids = collections.defaultdict(list)
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities_to_device_ids[tuple(socket_affinity)].append(idx)
for socket_affinity, device_ids in socket_affinities_to_device_ids.items():
devices_per_group = len(device_ids)
cores_per_device = len(socket_affinity) // devices_per_group
for group_id, device_id in enumerate(device_ids):
if device_id == gpu_id:
if mode == 'interleaved':
affinity = list(socket_affinity[group_id::devices_per_group])
elif mode == 'continuous':
affinity = list(socket_affinity[group_id*cores_per_device:(group_id+1)*cores_per_device])
else:
raise RuntimeError('Unknown set_socket_unique_affinity mode')
# reintroduce siblings
affinity += [siblings_dict[aff] for aff in affinity if aff in siblings_dict]
os.sched_setaffinity(0, affinity)
def get_thread_siblings_list():
path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list'
thread_siblings_list = []
pattern = re.compile(r'(\d+)\D(\d+)')
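    # `path` starts with '/', so glob the wildcard remainder relative to the
    # filesystem root (pathlib.Path.glob does not accept absolute patterns)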
for fname in pathlib.Path(path[0]).glob(path[1:]):
with open(fname) as f:
content = f.read().strip()
res = pattern.findall(content)
if res:
pair = tuple(map(int, res[0]))
thread_siblings_list.append(pair)
return thread_siblings_list
def set_affinity(gpu_id, nproc_per_node, mode='socket'):
if mode == 'socket':
set_socket_affinity(gpu_id)
elif mode == 'single':
set_single_affinity(gpu_id)
elif mode == 'single_unique':
set_single_unique_affinity(gpu_id, nproc_per_node)
elif mode == 'socket_unique_interleaved':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'interleaved')
elif mode == 'socket_unique_continuous':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'continuous')
else:
raise RuntimeError('Unknown affinity mode')
affinity = os.sched_getaffinity(0)
return affinity
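# Typical usage sketch (assuming one process per GPU, e.g. under Horovod):
#   affinity = set_affinity(gpu_id=local_rank, nproc_per_node=gpus_per_node,
#                           mode='socket_unique_continuous')
# pins the calling process to CPU cores local to its GPU's socket and returns
# the resulting affinity mask.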
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/utils/gpu_affinity.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import perf_counter
import horovod.tensorflow as hvd
import numpy as np
import tensorflow as tf
from horovod.tensorflow.mpi_ops import Sum
class PerformanceCalculator:
"""
PerformanceCalculator for throughput and latency statistics.
Computes the statistics over a given number of steps. Timers should be initialized by the user by
calling init() at the right moment -- just before running consecutive iterations of training.
Attributes:
warmup_steps (int): Number of initial steps to ignore for computing results.
total_steps (int): Number of steps to collect data for (excluding warmup_steps); use <= 0 for unbounded horizon.
"""
def __init__(self, warmup_steps=0, total_steps=0):
self.warmup_steps = max(warmup_steps, 0)
self.total_steps = self.warmup_steps + max(total_steps, 0)
self.step = 0
self.step_start_time = None
self.benchmark_start_time = None
self.benchmark_after_warmup_start_time = None
self.step_latencies = []
self.latency_percentiles = (90, 95, 99)
self._results = {}
with tf.device("/CPU:0"):
self.samples = tf.Variable(0, trainable=False, dtype=tf.int64)
def init(self):
self.samples.assign(0)
self.step_latencies = []
self._results = {}
# used to represent duration of entire training
self.benchmark_start_time = perf_counter()
# used to represent a time interval from post-warmup until the end
self.benchmark_after_warmup_start_time = perf_counter()
self.step_start_time = perf_counter()
@property
def results(self):
return self._results.copy()
@property
def completed(self):
return bool(self._results)
def get_current_benchmark_results(self):
if self.benchmark_start_time is None:
raise RuntimeError(f"{self.__class__.__name__} has not been initialized")
if self.step <= self.warmup_steps:
raise RuntimeError(f"{self.__class__.__name__} is in warmup phase")
results = self._calculate_throughput()
results.update(self._calculate_latency())
return results
def _calculate_latency(self):
latency_stats = {"latency_mean": 1000 * np.mean(self.step_latencies)} # in milliseconds
for p in self.latency_percentiles:
latency_stats[f"latency_p{p}"] = 1000 * np.percentile(self.step_latencies, p)
return latency_stats
def _calculate_throughput(self):
time_elapsed = perf_counter() - self.benchmark_start_time
time_elapsed_after_warmup = perf_counter() - self.benchmark_after_warmup_start_time
all_samples = hvd.allreduce(self.samples, op=Sum)
benchmark_throughput = all_samples.numpy() / time_elapsed_after_warmup
return {"throughput": benchmark_throughput, "time": time_elapsed}
def __call__(self, n_samples):
self.samples.assign_add(n_samples)
step_latency = perf_counter() - self.step_start_time
step_throughput = n_samples * hvd.size() / step_latency
self.step_latencies.append(step_latency)
self.step += 1
if self.step == self.warmup_steps:
self.samples.assign(0)
self.step_latencies = []
self.benchmark_after_warmup_start_time = perf_counter()
elif self.step == self.total_steps:
self._results = self.get_current_benchmark_results()
self.step_start_time = perf_counter()
return step_throughput
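# Minimal usage sketch:
#   calc = PerformanceCalculator(warmup_steps=10, total_steps=100)
#   calc.init()
#   for batch in dataset:
#       ...  # run one training step
#       step_throughput = calc(n_samples=batch_size)
#   if calc.completed:
#       print(calc.results)  # throughput, time, latency_mean, latency_p90/95/99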
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/utils/benchmark.py |
DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/utils/__init__.py |
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
def build_sim_loss_fn(alpha=1.0, beta=1.0):
cross_entropy_loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
@tf.function
def sim_loss_fn(targets, gsu_logits, esu_logits):
gsu_loss = cross_entropy_loss(targets, gsu_logits)
esu_loss = cross_entropy_loss(targets, esu_logits)
return 0.5 * (alpha * gsu_loss + beta * esu_loss)
return sim_loss_fn
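# The returned sim_loss_fn computes 0.5 * (alpha * BCE(targets, gsu_logits)
# + beta * BCE(targets, esu_logits)): the stage-1 (GSU) and stage-2 (ESU)
# heads are trained jointly against the same labels.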
@tf.function
def dien_auxiliary_loss_fn(click_probs, noclick_probs, mask=None):
if mask is None:
mask = tf.ones_like(click_probs)
click_loss_term = -tf.math.log(click_probs) * mask
noclick_loss_term = -tf.math.log(1.0 - noclick_probs) * mask
return tf.reduce_mean(click_loss_term + noclick_loss_term)
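# In formula form, with p_t the click probability on the positive history and
# q_t the click probability on the negative history at timestep t:
#   L_aux = mean_t[ -log(p_t) - log(1 - q_t) ]   (masked over padded timesteps)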
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/utils/losses.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
import tensorflow as tf
from sim.data.defaults import (CARDINALITY_SELECTOR, NEGATIVE_HISTORY_CHANNEL, POSITIVE_HISTORY_CHANNEL,
TARGET_ITEM_FEATURES_CHANNEL, USER_FEATURES_CHANNEL)
from sim.layers.ctr_classification_mlp import CTRClassificationMLP
from sim.layers.embedding import Embedding
class SequentialRecommenderModel(tf.keras.Model, ABC):
def __init__(self, feature_spec, embedding_dim, classifier_dense_sizes=(200,)):
super(SequentialRecommenderModel, self).__init__()
self.embedding_dim = embedding_dim
features = feature_spec.feature_spec
channel_spec = feature_spec.channel_spec
embedding_names = []
user_feature_fstring = "user_feat{}"
item_feature_fstring = "item_feat{}"
# Features in the same embedding group will share embedding table
embedding_group_counter = 0
feature_groups_cardinalities = []
self.feature_name_to_embedding_group = {}
for i, user_feature in enumerate(channel_spec[USER_FEATURES_CHANNEL]):
self.feature_name_to_embedding_group[user_feature] = embedding_group_counter
cardinality = features[user_feature][CARDINALITY_SELECTOR]
feature_groups_cardinalities.append(cardinality)
embedding_names.append(user_feature_fstring.format(i))
embedding_group_counter += 1
# Group corresponding item features from different item channels together
zipped_item_features = zip(channel_spec[TARGET_ITEM_FEATURES_CHANNEL],
channel_spec[POSITIVE_HISTORY_CHANNEL], channel_spec[NEGATIVE_HISTORY_CHANNEL])
for i, (feature_target, feature_pos, feature_neg) in enumerate(zipped_item_features):
self.feature_name_to_embedding_group[feature_target] = embedding_group_counter
self.feature_name_to_embedding_group[feature_pos] = embedding_group_counter
self.feature_name_to_embedding_group[feature_neg] = embedding_group_counter
cardinality = features[feature_target][CARDINALITY_SELECTOR]
feature_groups_cardinalities.append(cardinality)
embedding_names.append(item_feature_fstring.format(i))
embedding_group_counter += 1
self.variable_embeddings_groups = []
for embedding_name, cardinality in zip(embedding_names, feature_groups_cardinalities):
self.variable_embeddings_groups.append(
Embedding(
embedding_name=embedding_name,
                    input_dim=cardinality + 1,  # ids in range <1, cardinality> (boundaries included)
output_dim=embedding_dim
)
)
self.classificationMLP = CTRClassificationMLP(
layer_sizes=classifier_dense_sizes
)
    def embed(self, features):
        embeddings = []
        for variable, ids in features.items():
            embedding_group = self.feature_name_to_embedding_group[variable]
            embeddings.append(self.variable_embeddings_groups[embedding_group](ids))
        return tf.concat(embeddings, -1)
@abstractmethod
def call(self, inputs):
pass
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/models/sequential_recommender_model.py |
DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/models/__init__.py |
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import tensorflow as tf
from sim.layers.ctr_classification_mlp import CTRClassificationMLP
from sim.layers.item_sequence_interaction import DIENItemSequenceInteractionBlock
from sim.models.sequential_recommender_model import SequentialRecommenderModel
EPS = 1e-06
DIEN_ITEM_SEQ_INTERACTION_SIZE = 6 # Value taken from TF1 original code
def compute_auxiliary_probs(auxiliary_net, rnn_states, items_hist, training=False):
"""
Given h(1),..,h(T) GRU sequence outputs and e(1),..,e(T) encoded user
sequence or negative user sequence behaviours, compute probabilities
for auxiliary loss term.
    Args:
        auxiliary_net: model that computes a probability of interaction
        rnn_states: sequence of GRU outputs
        items_hist: sequence of user behaviours or negative user behaviours
        training: whether to run auxiliary_net in training mode
    Returns:
        click_probs: clicking probability for each timestep
    """
# for rnn_states, select h(1),..,h(T-1)
rnn_states = rnn_states[:, :-1, :]
# for items_hist, select e(2),..,e(T)
items_hist = items_hist[:, 1:, :]
# concatenate over feature dimension
click_input = tf.concat([rnn_states, items_hist], -1)
# forward pass
click_logits = auxiliary_net(click_input, training=training)
click_probs = tf.nn.sigmoid(click_logits) + EPS
return tf.squeeze(click_probs, axis=2)
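# Shape sketch: rnn_states [B, T, H] and items_hist [B, T, E] produce
# click_probs of shape [B, T-1] -- one probability per (state, next-item) pair,
# since h(t) is scored against the following behaviour e(t+1).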
class DIENModel(SequentialRecommenderModel):
def __init__(
self,
feature_spec,
mlp_hidden_dims,
embedding_dim=4
):
super(DIENModel, self).__init__(
feature_spec, embedding_dim, mlp_hidden_dims["classifier"]
)
# DIEN block
self.dien_block = DIENItemSequenceInteractionBlock(
hidden_size=embedding_dim * DIEN_ITEM_SEQ_INTERACTION_SIZE
)
# aux_loss uses an MLP in TF1 code
self.auxiliary_net = CTRClassificationMLP(
mlp_hidden_dims["aux"],
activation_function=partial(
tf.keras.layers.Activation, activation="sigmoid"
),
)
@tf.function
def call(
self,
inputs,
compute_aux_loss=True,
training=False,
):
user_features = inputs["user_features"]
target_item_features = inputs["target_item_features"]
short_sequence_features = inputs["short_sequence_features"]
short_neg_sequence_features = inputs["short_neg_sequence_features"]
short_sequence_mask = inputs["short_sequence_mask"]
output_dict = {}
user_embedding = self.embed(user_features)
target_item_embedding = self.embed(target_item_features)
short_sequence_embeddings = self.embed(short_sequence_features)
short_sequence_embeddings = short_sequence_embeddings * tf.expand_dims(
short_sequence_mask, axis=-1
)
# Pass sequence_embeddings and target_item_embedding to a DIEN block
# it needs to output h'(T) for concatenation and h(1),...,h(T) for aux_loss
final_seq_repr, features_layer_1 = self.dien_block(
(target_item_embedding, short_sequence_embeddings, short_sequence_mask)
)
# short_features_layer_1 = features_layer_1[:, -short_seq_len:, :]
if compute_aux_loss:
# Embed negative sequence features
short_neg_sequence_embeddings = self.embed(short_neg_sequence_features)
short_neg_sequence_embeddings = short_neg_sequence_embeddings * tf.expand_dims(
short_sequence_mask, axis=-1
)
# compute auxiliary logits
aux_click_probs = compute_auxiliary_probs(
self.auxiliary_net,
features_layer_1,
short_sequence_embeddings,
training=training,
)
output_dict["aux_click_probs"] = aux_click_probs
aux_noclick_probs = compute_auxiliary_probs(
self.auxiliary_net,
features_layer_1,
short_neg_sequence_embeddings,
training=training,
)
output_dict["aux_noclick_probs"] = aux_noclick_probs
combined_embeddings = tf.concat([
target_item_embedding,
final_seq_repr,
user_embedding
], -1)
classification_logits = self.classificationMLP(combined_embeddings)
output_dict["logits"] = classification_logits
return output_dict
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/models/dien_model.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import tensorflow as tf
from sim.layers.ctr_classification_mlp import CTRClassificationMLP
from sim.layers.item_item_interaction import DotItemItemInteraction
from sim.layers.item_sequence_interaction import DIENItemSequenceInteractionBlock, DINItemSequenceInteractionBlock
from sim.models.dien_model import compute_auxiliary_probs
from sim.models.sequential_recommender_model import SequentialRecommenderModel
@tf.function
def masked_temporal_mean(sequence_batch, mask):
masked_sum = tf.reduce_sum(sequence_batch * mask[:, :, None], 1)
masked_counts = tf.reduce_sum(mask, 1, keepdims=True)
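    # +1.0 guards against division by zero for fully-masked (empty) histories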
return masked_sum / (masked_counts + 1.0)
class SIMModel(SequentialRecommenderModel):
def __init__(self, feature_spec, mlp_hidden_dims, embedding_dim=4, k=50, dropout_rate=-1):
super(SIMModel, self).__init__(
feature_spec, embedding_dim
)
self.k = k
self.stage_one_classifier = CTRClassificationMLP(
layer_sizes=mlp_hidden_dims["stage_1"],
dropout_rate=dropout_rate
)
self.stage_two_classifier = CTRClassificationMLP(
layer_sizes=mlp_hidden_dims["stage_2"],
dropout_rate=dropout_rate
)
self.stage_two_auxiliary_net = CTRClassificationMLP(
layer_sizes=mlp_hidden_dims["aux"],
activation_function=partial(
tf.keras.layers.Activation, activation="sigmoid"
),
dropout_rate=dropout_rate
)
self.stage_one_item_seq_interaction = DINItemSequenceInteractionBlock(
item_item_interaction=DotItemItemInteraction()
)
self.stage_two_item_seq_interaction = DIENItemSequenceInteractionBlock(
hidden_size=embedding_dim * 6
)
def select_top_k_items(self, embeddings, scores):
top_k = tf.math.top_k(scores, k=self.k)
top_k_values, top_k_indices = top_k.values, top_k.indices
top_k_mask = tf.cast(tf.greater(top_k_values, tf.zeros_like(top_k_values)), embeddings.dtype)
best_k_embeddings = tf.gather(embeddings, top_k_indices, batch_dims=1)
return best_k_embeddings, top_k_mask
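    # Note: after the masked softmax in the GSU, padded history slots score
    # (effectively) zero, so top_k_mask excludes them even when top_k has to
    # select them to fill all k slots.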
@tf.function
def call(
self,
inputs,
compute_aux_loss=True,
training=False,
):
user_features = inputs["user_features"]
target_item_features = inputs["target_item_features"]
long_sequence_features = inputs["long_sequence_features"]
short_sequence_features = inputs["short_sequence_features"]
short_neg_sequence_features = inputs["short_neg_sequence_features"]
long_sequence_mask = inputs["long_sequence_mask"]
short_sequence_mask = inputs["short_sequence_mask"]
output_dict = {}
# GSU Stage
user_embedding = self.embed(user_features)
target_item_embedding = self.embed(target_item_features)
long_sequence_embeddings = self.embed(long_sequence_features)
long_sequence_embeddings = long_sequence_embeddings * tf.expand_dims(
long_sequence_mask, axis=-1
)
stage_one_interaction_embedding, gsu_scores = self.stage_one_item_seq_interaction(
(target_item_embedding, long_sequence_embeddings, long_sequence_mask)
)
# combine all the stage 1 embeddings
stage_one_embeddings = tf.concat(
[target_item_embedding, stage_one_interaction_embedding, user_embedding], -1
)
stage_one_logits = self.stage_one_classifier(
stage_one_embeddings, training=training
)
# ESU Stage
user_embedding = self.embed(user_features)
target_item_embedding = self.embed(target_item_features)
short_sequence_embeddings = self.embed(short_sequence_features)
short_sequence_embeddings = short_sequence_embeddings * tf.expand_dims(
short_sequence_mask, axis=-1
)
# ---- Attention part
# Take embeddings of k best items produced by GSU at Stage 1
best_k_long_seq_embeddings, top_k_mask = self.select_top_k_items(
long_sequence_embeddings, gsu_scores
)
# Run attention mechanism to produce a single representation
att_fea, _ = self.stage_one_item_seq_interaction(
(target_item_embedding, best_k_long_seq_embeddings, top_k_mask),
)
# Take a mean representation of best_k_long_seq_embeddings
item_his_sum_emb = masked_temporal_mean(best_k_long_seq_embeddings, top_k_mask)
# ---- DIEN part
(
stage_two_interaction_embedding,
short_features_layer_1,
) = self.stage_two_item_seq_interaction(
(target_item_embedding, short_sequence_embeddings, short_sequence_mask),
)
# Compute auxiliary logits for DIEN
if compute_aux_loss:
# Embed negative sequence features
short_neg_sequence_embeddings = self.embed(short_neg_sequence_features)
short_neg_sequence_embeddings = (
short_neg_sequence_embeddings
* tf.expand_dims(short_sequence_mask, axis=-1)
)
aux_click_probs = compute_auxiliary_probs(
self.stage_two_auxiliary_net,
short_features_layer_1,
short_sequence_embeddings,
training=training,
)
output_dict["aux_click_probs"] = aux_click_probs
aux_noclick_probs = compute_auxiliary_probs(
self.stage_two_auxiliary_net,
short_features_layer_1,
short_neg_sequence_embeddings,
training=training,
)
output_dict["aux_noclick_probs"] = aux_noclick_probs
# combine all the stage 2 embeddings
stage_two_embeddings = tf.concat(
[
att_fea,
item_his_sum_emb,
target_item_embedding,
stage_two_interaction_embedding,
user_embedding
],
-1,
)
stage_two_logits = self.stage_two_classifier(
stage_two_embeddings, training=training
)
output_dict["stage_one_logits"] = stage_one_logits
output_dict["stage_two_logits"] = stage_two_logits
return output_dict
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/models/sim_model.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from sim.layers.item_item_interaction import DINActivationUnit, DotItemItemInteraction
from sim.layers.item_sequence_interaction import DINItemSequenceInteractionBlock
from sim.models.sequential_recommender_model import SequentialRecommenderModel
class DINModel(SequentialRecommenderModel):
def __init__(
self,
feature_spec,
mlp_hidden_dims=(200, 80),
embedding_dim=4,
item_item_interaction="dot",
):
super(DINModel, self).__init__(
feature_spec, embedding_dim, mlp_hidden_dims
)
if item_item_interaction == "dot":
item_item_interaction_block = DotItemItemInteraction()
elif item_item_interaction == "activation_unit":
item_item_interaction_block = DINActivationUnit()
self.item_seq_interaction = DINItemSequenceInteractionBlock(
item_item_interaction=item_item_interaction_block
)
@tf.function
def call(
self,
inputs,
training=False
):
user_features = inputs["user_features"]
target_item_features = inputs["target_item_features"]
long_sequence_features = inputs["long_sequence_features"]
short_sequence_features = inputs["short_sequence_features"]
long_sequence_mask = inputs["long_sequence_mask"]
short_sequence_mask = inputs["short_sequence_mask"]
user_embedding = self.embed(user_features)
target_item_embedding = self.embed(target_item_features)
long_sequence_embeddings = self.embed(long_sequence_features)
short_sequence_embeddings = self.embed(short_sequence_features)
# Concat over time axis
sequence_embeddings = tf.concat([long_sequence_embeddings, short_sequence_embeddings], axis=1)
mask = tf.concat([long_sequence_mask, short_sequence_mask], axis=1)
sequence_embeddings = sequence_embeddings * tf.expand_dims(
mask, axis=-1
)
item_sequence_interaction_embedding, _ = self.item_seq_interaction(
(target_item_embedding, sequence_embeddings, mask)
)
combined_embeddings = tf.concat([
target_item_embedding, item_sequence_interaction_embedding, user_embedding
], -1)
logits = self.classificationMLP(combined_embeddings, training=training)
return {"logits": logits}
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/models/din_model.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict
import numpy as np
import yaml
from sim.data.defaults import (CARDINALITY_SELECTOR, DIMENSIONS_SELECTOR, DTYPE_SELECTOR, LABEL_CHANNEL,
NEGATIVE_HISTORY_CHANNEL, POSITIVE_HISTORY_CHANNEL, TARGET_ITEM_FEATURES_CHANNEL,
TEST_MAPPING, TRAIN_MAPPING, USER_FEATURES_CHANNEL)
class FeatureSpec:
def __init__(self, feature_spec=None, source_spec=None, channel_spec=None, metadata=None, base_directory=None):
self.feature_spec: Dict = feature_spec if feature_spec is not None else {}
self.source_spec: Dict = source_spec if source_spec is not None else {}
self.channel_spec: Dict = channel_spec if channel_spec is not None else {}
self.metadata: Dict = metadata if metadata is not None else {}
self.base_directory: str = base_directory
@classmethod
def from_yaml(cls, path):
with open(path, 'r') as feature_spec_file:
base_directory = os.path.dirname(path)
feature_spec = yaml.safe_load(feature_spec_file)
return cls.from_dict(feature_spec, base_directory=base_directory)
@classmethod
def from_dict(cls, source_dict, base_directory):
return cls(base_directory=base_directory, **source_dict)
def to_dict(self):
attributes_to_dump = ['feature_spec', 'source_spec', 'channel_spec', 'metadata']
return {attr: self.__dict__[attr] for attr in attributes_to_dump}
def to_string(self):
return yaml.dump(self.to_dict())
def to_yaml(self, output_path=None):
if not output_path:
output_path = self.base_directory + '/feature_spec.yaml'
with open(output_path, 'w') as output_file:
print(yaml.dump(self.to_dict()), file=output_file)
@staticmethod
def get_default_features_names(number_of_user_features, number_of_item_features):
user_feature_fstring = 'user_feat_{}'
item_feature_fstring = 'item_feat_{}_{}'
label_feature_name = "label"
item_channels_feature_name_suffixes = ['trgt', 'pos', 'neg']
user_features_names = [user_feature_fstring.format(i) for i in range(number_of_user_features)]
item_features_names = [item_feature_fstring.format(i, channel_suffix)
for channel_suffix in item_channels_feature_name_suffixes
for i in range(number_of_item_features)]
return [label_feature_name] + user_features_names + item_features_names
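    # Example sketch: get_default_features_names(2, 1) returns
    #   ['label', 'user_feat_0', 'user_feat_1',
    #    'item_feat_0_trgt', 'item_feat_0_pos', 'item_feat_0_neg']
    # i.e. the label first, then user features, then item features grouped by channel.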
@staticmethod
def get_default_feature_spec(user_features_cardinalities, item_features_cardinalities, max_seq_len):
number_of_user_features = len(user_features_cardinalities)
number_of_item_features = len(item_features_cardinalities)
all_features_names = FeatureSpec.get_default_features_names(number_of_user_features, number_of_item_features)
user_features = {
f_name: {
DTYPE_SELECTOR: str(np.dtype(np.int64)),
CARDINALITY_SELECTOR: int(cardinality)
} for i, (f_name, cardinality)
in enumerate(zip(all_features_names[1:1+number_of_user_features], user_features_cardinalities))
}
item_channels = [TARGET_ITEM_FEATURES_CHANNEL, POSITIVE_HISTORY_CHANNEL, NEGATIVE_HISTORY_CHANNEL]
item_channels_feature_dicts = [{} for _ in range(len(item_channels))]
item_channels_info = list(zip(item_channels, item_channels_feature_dicts))
for i, cardinality in enumerate(item_features_cardinalities):
for j, (channel, dictionary) in enumerate(item_channels_info):
feature_name = all_features_names[1 + number_of_user_features + i + j * number_of_item_features]
dictionary[feature_name] = {
DTYPE_SELECTOR: str(np.dtype(np.int64)),
CARDINALITY_SELECTOR: int(cardinality)
}
if channel != TARGET_ITEM_FEATURES_CHANNEL:
dictionary[feature_name][DIMENSIONS_SELECTOR] = [max_seq_len]
feature_spec = {
feat_name: feat_spec
for dictionary in [user_features] + item_channels_feature_dicts
for feat_name, feat_spec in dictionary.items()
}
        feature_spec[all_features_names[0]] = {DTYPE_SELECTOR: str(np.dtype(bool))}
channel_spec = {
USER_FEATURES_CHANNEL: list(user_features),
TARGET_ITEM_FEATURES_CHANNEL: list(item_channels_feature_dicts[0]),
POSITIVE_HISTORY_CHANNEL: list(item_channels_feature_dicts[1]),
NEGATIVE_HISTORY_CHANNEL: list(item_channels_feature_dicts[2]),
LABEL_CHANNEL: all_features_names[:1]
}
source_spec = {
split: [
{
'type': 'tfrecord',
'features': all_features_names,
'files': []
}
] for split in [TRAIN_MAPPING, TEST_MAPPING]
}
return FeatureSpec(feature_spec=feature_spec, channel_spec=channel_spec, source_spec=source_spec)
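# Example sketch: FeatureSpec.get_default_feature_spec([100], [1000, 30], max_seq_len=90)
# describes one user feature plus two item features replicated across the
# target/positive/negative channels, with dimensions [90] on the history features.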
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/data/feature_spec.py |
DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/data/__init__.py |
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
REMAINDER_FILENAME = 'remainder.tfrecord'
USER_FEATURES_CHANNEL = 'user_features'
TARGET_ITEM_FEATURES_CHANNEL = 'target_item_features'
POSITIVE_HISTORY_CHANNEL = 'positive_history'
NEGATIVE_HISTORY_CHANNEL = 'negative_history'
LABEL_CHANNEL = 'label'
TRAIN_MAPPING = "train"
TEST_MAPPING = "test"
FILES_SELECTOR = "files"
DTYPE_SELECTOR = "dtype"
CARDINALITY_SELECTOR = "cardinality"
DIMENSIONS_SELECTOR = 'dimensions'
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/data/defaults.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
from functools import partial
import tensorflow as tf
from sim.data.defaults import (DIMENSIONS_SELECTOR, LABEL_CHANNEL, NEGATIVE_HISTORY_CHANNEL, POSITIVE_HISTORY_CHANNEL,
TARGET_ITEM_FEATURES_CHANNEL, USER_FEATURES_CHANNEL, REMAINDER_FILENAME)
def _remap_column_values_tfrecord(sample, feature_spec, long_seq_length):
channel_spec = feature_spec.channel_spec
features = feature_spec.feature_spec
user_features = {
f_name: tf.reshape(sample[f_name], [-1]) for f_name in channel_spec[USER_FEATURES_CHANNEL]
}
target_item_features = {
f_name: tf.reshape(sample[f_name], [-1]) for f_name in channel_spec[TARGET_ITEM_FEATURES_CHANNEL]
}
padded_positive = {
f_name: tf.reshape(sample[f_name], [-1, features[f_name][DIMENSIONS_SELECTOR][0]])
for f_name in channel_spec[POSITIVE_HISTORY_CHANNEL]
}
padded_negative = {
f_name: tf.reshape(sample[f_name], [-1, features[f_name][DIMENSIONS_SELECTOR][0]])
for f_name in channel_spec[NEGATIVE_HISTORY_CHANNEL]
}
long_sequence_features = {
f_name: val[:, :long_seq_length] for f_name, val in padded_positive.items()
}
short_sequence_features = {
f_name: val[:, long_seq_length:] for f_name, val in padded_positive.items()
}
short_neg_sequence_features = {
f_name: val[:, long_seq_length:] for f_name, val in padded_negative.items()
}
first_positive_feature_name = channel_spec[POSITIVE_HISTORY_CHANNEL][0]
first_positive_feature = padded_positive[first_positive_feature_name]
history_mask = tf.cast(tf.greater(first_positive_feature, 0), tf.float32)
long_sequence_mask = history_mask[:, :long_seq_length]
short_sequence_mask = history_mask[:, long_seq_length:]
label_name = channel_spec[LABEL_CHANNEL][0]
target = tf.reshape(sample[label_name], [-1])
return {
"user_features": user_features,
"target_item_features": target_item_features,
"long_sequence_features": long_sequence_features,
"short_sequence_features": short_sequence_features,
"short_neg_sequence_features": short_neg_sequence_features,
"long_sequence_mask": long_sequence_mask,
"short_sequence_mask": short_sequence_mask,
"other_features": None
}, target
def split_prebatch(sample, split_into):
res = {}
for f_name, val in sample.items():
res[f_name] = tf.reshape(val, [split_into, -1])
return tf.data.Dataset.from_tensor_slices(res)
def get_dataloader_tfrecord(
file_paths,
feature_spec,
batch_size,
long_seq_length,
num_gpus=1,
id=0,
drop_remainder=False,
repeat_count=0,
prefetch_buffer_size=90,
num_parallel_calls=None,
disable_cache=False,
prebatch_size=0
):
features = feature_spec.feature_spec
prebatched = prebatch_size > 0
remainder_file = None
if file_paths[-1].name == REMAINDER_FILENAME:
remainder_file = file_paths[-1:]
file_paths = file_paths[:-1]
tf_feature_spec = {}
for name, feature in features.items():
dimensions = feature.get(DIMENSIONS_SELECTOR)
if dimensions is None:
dimensions = [1] if prebatched else []
if prebatched:
dimensions = dimensions.copy()
dimensions[0] *= prebatch_size
tf_feature_spec[name] = tf.io.FixedLenFeature(dimensions, tf.int64)
if num_parallel_calls is None:
num_cpus = multiprocessing.cpu_count()
num_parallel_calls = 4 * num_cpus // num_gpus
dataset = tf.data.TFRecordDataset(file_paths, num_parallel_reads=num_parallel_calls)
dataset = dataset.shard(num_gpus, id)
splitting_function = None
if prebatched:
if batch_size >= prebatch_size:
batch_size = batch_size // prebatch_size
else:
split_into = prebatch_size // batch_size
splitting_function = partial(split_prebatch, split_into=split_into)
batch_size = 1
dataset = dataset.batch(
batch_size, drop_remainder=drop_remainder, num_parallel_calls=num_parallel_calls
)
dataset = dataset.map(
map_func=partial(tf.io.parse_example, features=tf_feature_spec),
num_parallel_calls=num_parallel_calls
)
if splitting_function is not None:
dataset = dataset.flat_map(splitting_function)
if not drop_remainder and id == 0 and remainder_file is not None:
tf_feature_spec_remainder = {
name: tf.io.RaggedFeature(tf.int64) for name in tf_feature_spec
}
remainder = tf.data.TFRecordDataset(remainder_file)
remainder = remainder.map(
map_func=partial(tf.io.parse_example, features=tf_feature_spec_remainder)
)
dataset = dataset.concatenate(remainder)
dataset = dataset.map(
map_func=partial(_remap_column_values_tfrecord, feature_spec=feature_spec, long_seq_length=long_seq_length),
num_parallel_calls=num_parallel_calls
)
if repeat_count > 0:
dataset = dataset.repeat(
count=repeat_count
)
if prefetch_buffer_size > 0:
dataset = dataset.prefetch(
buffer_size=prefetch_buffer_size
)
if not disable_cache:
dataset = dataset.cache()
return dataset
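# Hedged usage sketch (added for illustration, not part of the original file):
# a minimal, self-contained demonstration of how `split_prebatch` slices one
# prebatched record back into smaller batches. The feature name and values
# below are illustrative assumptions, not taken from a real dataset.
if __name__ == '__main__':
    _sample = {'item_cat': tf.constant([[1, 2, 3, 4, 5, 6, 7, 8]], dtype=tf.int64)}
    # one prebatched row of 8 values is reshaped to [4, 2] and sliced into 4 mini-batches
    for _slice in split_prebatch(_sample, split_into=4):
        print(_slice['item_cat'].numpy())  # [1 2], then [3 4], [5 6], [7 8]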
| DeepLearningExamples-master | TensorFlow2/Recommendation/SIM/sim/data/dataloader.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
from absl import app, flags
def define_dcnv2_specific_flags():
flags.DEFINE_integer("batch_size", default=64 * 1024, help="Batch size used for training")
flags.DEFINE_integer("valid_batch_size", default=64 * 1024, help="Batch size used for validation")
flags.DEFINE_list("top_mlp_dims", [1024, 1024, 512, 256, 1], "Linear layer sizes for the top MLP")
flags.DEFINE_list("bottom_mlp_dims", [512, 256, 128], "Linear layer sizes for the bottom MLP")
flags.DEFINE_string("embedding_dim", default='128', help='Number of columns in the embedding tables')
flags.DEFINE_enum("optimizer", default="adam", enum_values=['sgd', 'adam'],
help='The optimization algorithm to be used.')
flags.DEFINE_enum("interaction", default="cross", enum_values=["dot_custom_cuda", "dot_tensorflow", "cross"],
help="Feature interaction implementation to use")
flags.DEFINE_float("learning_rate", default=0.0001, help="Learning rate")
flags.DEFINE_float("beta1", default=0.9, help="Beta1 for the Adam optimizer")
flags.DEFINE_float("beta2", default=0.999, help="Bea2 for the Adam optimizer")
flags.DEFINE_integer("warmup_steps", default=100,
help='Number of steps over which to linearly increase the LR at the beginning')
flags.DEFINE_integer("decay_start_step", default=48000, help='Optimization step at which to start the poly LR decay')
flags.DEFINE_integer("decay_steps", default=24000, help='Number of steps over which to decay from base LR to 0')
flags.DEFINE_integer("num_cross_layers", default=3, help='Number of cross layers for DCNv2')
flags.DEFINE_integer("cross_layer_projection_dim", default=512, help='Projection dimension used in the cross layers')
define_dcnv2_specific_flags()
import main
def _main(argv):
main.main()
if __name__ == '__main__':
app.run(_main)
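# Hedged usage note (added, not part of the original file): the flags above are
# defined before `import main` so that XLA can be configured from the command
# line before TensorFlow is imported. An assumed invocation is shown below;
# the dataset path is illustrative.
#
#   python dcnv2.py --dataset_path /data/criteo --amp --xla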
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/dcnv2.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
from absl import app, flags
def define_dlrm_specific_flags():
flags.DEFINE_integer("batch_size", default=64 * 1024, help="Batch size used for training")
flags.DEFINE_integer("valid_batch_size", default=64 * 1024, help="Batch size used for validation")
flags.DEFINE_list("top_mlp_dims", [1024, 1024, 512, 256, 1], "Linear layer sizes for the top MLP")
flags.DEFINE_list("bottom_mlp_dims", [512, 256, 128], "Linear layer sizes for the bottom MLP")
flags.DEFINE_string("embedding_dim", default='128', help='Number of columns in the embedding tables')
flags.DEFINE_enum("optimizer", default="sgd", enum_values=['sgd', 'adam'],
help='The optimization algorithm to be used.')
flags.DEFINE_enum("interaction", default="dot_custom_cuda", enum_values=["dot_custom_cuda", "dot_tensorflow", "cross"],
help="Feature interaction implementation to use")
flags.DEFINE_float("learning_rate", default=24, help="Learning rate")
flags.DEFINE_float("beta1", default=0.9, help="Beta1 for the Adam optimizer")
flags.DEFINE_float("beta2", default=0.999, help="Bea2 for the Adam optimizer")
flags.DEFINE_integer("warmup_steps", default=8000,
help='Number of steps over which to linearly increase the LR at the beginning')
flags.DEFINE_integer("decay_start_step", default=48000, help='Optimization step at which to start the poly LR decay')
flags.DEFINE_integer("decay_steps", default=24000, help='Number of steps over which to decay from base LR to 0')
flags.DEFINE_integer("num_cross_layers", default=3, help='Number of cross layers for DCNv2')
flags.DEFINE_integer("cross_layer_projection_dim", default=512, help='Projection dimension used in the cross layers')
define_dlrm_specific_flags()
import main
def _main(argv):
main.main()
if __name__ == '__main__':
app.run(_main)
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/dlrm.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
from absl import app, flags
import os
import sys
import json
from distributed_embeddings.python.layers import dist_model_parallel as dmp
# Define the flags first before importing TensorFlow.
# Otherwise, enabling XLA-Lite would be impossible with a command-line flag
def define_common_flags():
flags.DEFINE_enum("mode", default="train", enum_values=['inference', 'eval', 'train'],
help='Choose "train" to train the model, "inference" to benchmark inference'
' and "eval" to run validation')
# Debug parameters
flags.DEFINE_bool("run_eagerly", default=False, help="Disable all tf.function decorators for debugging")
flags.DEFINE_bool("tfdata_debug", default=False, help="Run tf.data operations eagerly (experimental)")
flags.DEFINE_integer("seed", default=None, help="Random seed")
flags.DEFINE_bool("embedding_zeros_initializer", default=False,
help="Initialize the embeddings to zeros. This takes much less time so it's useful"
" for benchmarking and debugging.")
flags.DEFINE_bool("embedding_trainable", default=True, help="If True the embeddings will be trainable, otherwise frozen")
# Hardware and performance features
flags.DEFINE_bool("amp", default=False, help="Enable automatic mixed precision")
flags.DEFINE_bool("use_mde_embeddings", default=True,
help="Use the embedding implementation from the TensorFlow Distributed Embeddings package")
flags.DEFINE_bool("concat_embedding", default=False,
help="Concatenate embeddings with the same dimension. Only supported for singleGPU.")
flags.DEFINE_string("dist_strategy", default='memory_balanced',
help="Strategy for the Distributed Embeddings to use. Supported options are"
"'memory_balanced', 'basic' and 'memory_optimized'")
flags.DEFINE_integer("column_slice_threshold", default=10*1000*1000*1000,
help='Number of elements above which a distributed embedding will be sliced across'
'multiple devices')
flags.DEFINE_integer("cpu_offloading_threshold_gb", default=75,
help='Size of the embedding tables in GB above which '
'offloading to CPU memory should be employed.'
'Applies only to singleGPU at the moment.')
flags.DEFINE_bool('cpu', default=False, help='Place the entire model on CPU')
flags.DEFINE_bool("xla", default=False, help="Enable XLA")
flags.DEFINE_integer("loss_scale", default=65536, help="Static loss scale to use with mixed precision training")
flags.DEFINE_integer("inter_op_parallelism", default=None, help='Number of inter op threads')
flags.DEFINE_integer("intra_op_parallelism", default=None, help='Number of intra op threads')
# Checkpointing
flags.DEFINE_string("save_checkpoint_path", default=None,
help="Path to which to save a checkpoint file at the end of the training")
flags.DEFINE_string("restore_checkpoint_path", default=None,
help="Path from which to restore a checkpoint before training")
# Evaluation, logging, profiling
flags.DEFINE_integer("auc_thresholds", default=8000,
help="Number of thresholds for the AUC computation")
flags.DEFINE_integer("epochs", default=1, help="Number of epochs to train for")
flags.DEFINE_integer("max_steps", default=-1, help="Stop the training/inference after this many optimiation steps")
flags.DEFINE_integer("evals_per_epoch", default=1, help='Number of evaluations per epoch')
flags.DEFINE_float("print_freq", default=100, help='Number of steps between debug prints')
flags.DEFINE_integer("profiler_start_step", default=None, help='Step at which to start profiling')
flags.DEFINE_integer("profiled_rank", default=1, help='Rank to profile')
flags.DEFINE_string("log_path", default='dlrm_tf_log.json', help="Path to JSON file for storing benchmark results")
# dataset and dataloading settings
flags.DEFINE_string("dataset_path", default=None,
help="Path to dataset directory")
flags.DEFINE_string("feature_spec", default="feature_spec.yaml",
help="Name of the feature spec file in the dataset directory")
flags.DEFINE_enum("dataset_type", default="tf_raw",
enum_values=['tf_raw', 'synthetic', 'split_tfrecords'],
help='The type of the dataset to use')
# Synthetic dataset settings
flags.DEFINE_boolean("synthetic_dataset_use_feature_spec", default=False,
help="Create a temporary synthetic dataset based on a real one. "
"Uses --dataset_path and --feature_spec"
"Overrides synthetic dataset dimension flags, except the number of batches")
flags.DEFINE_integer('synthetic_dataset_train_batches', default=64008,
help='Number of training batches in the synthetic dataset')
flags.DEFINE_integer('synthetic_dataset_valid_batches', default=1350,
help='Number of validation batches in the synthetic dataset')
flags.DEFINE_list('synthetic_dataset_cardinalities', default=26*[1000],
help='Number of categories for each embedding table of the synthetic dataset')
    flags.DEFINE_list('synthetic_dataset_hotness', default=26*[20],
                      help='Hotness (number of values per sample) of each categorical feature of the synthetic dataset')
flags.DEFINE_integer('synthetic_dataset_num_numerical_features', default=13,
help='Number of numerical features of the synthetic dataset')
define_common_flags()
FLAGS = flags.FLAGS
app.define_help_flags()
app.parse_flags_with_usage(sys.argv)
if FLAGS.xla:
if FLAGS.cpu:
os.environ['TF_XLA_FLAGS'] = '--tf_xla_auto_jit=fusible --tf_xla_cpu_global_jit'
else:
os.environ['TF_XLA_FLAGS'] = '--tf_xla_auto_jit=fusible'
import time
import tensorflow as tf
import tensorflow_addons as tfa
import numpy as np
import horovod.tensorflow as hvd
from tensorflow.keras.mixed_precision import LossScaleOptimizer
import dllogger
from utils.logging import IterTimer, init_logging
from utils.distributed import dist_print
from dataloading.dataloader import create_input_pipelines, get_dataset_metadata
from nn.lr_scheduler import LearningRateScheduler
from nn.model import Model
from nn.evaluator import Evaluator
from nn.trainer import Trainer
def init_tf(FLAGS):
"""
Set global options for TensorFlow
"""
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
visible_gpus = []
if gpus and not FLAGS.cpu:
visible_gpus = gpus[hvd.local_rank()]
tf.config.experimental.set_visible_devices(visible_gpus, 'GPU')
if FLAGS.amp:
policy = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(policy)
tf.config.run_functions_eagerly(FLAGS.run_eagerly)
if FLAGS.tfdata_debug:
tf.data.experimental.enable_debug_mode()
if FLAGS.inter_op_parallelism:
tf.config.threading.set_inter_op_parallelism_threads(FLAGS.inter_op_parallelism)
if FLAGS.intra_op_parallelism:
tf.config.threading.set_intra_op_parallelism_threads(FLAGS.intra_op_parallelism)
tf.random.set_seed(hash((FLAGS.seed, hvd.rank())))
def parse_embedding_dimension(embedding_dim, num_embeddings):
try:
embedding_dim = int(embedding_dim)
embedding_dim = [embedding_dim] * num_embeddings
return embedding_dim
    except (TypeError, ValueError):
        pass
    if not isinstance(embedding_dim, str):
        raise ValueError(f'Unsupported embedding_dimension type: {type(embedding_dim)}')
if os.path.exists(embedding_dim):
# json file with a list of dimensions for each feature
with open(embedding_dim) as f:
edim = json.load(f)
else:
edim = embedding_dim.split(',')
edim = [int(d) for d in edim]
if len(edim) != num_embeddings:
raise ValueError(f'Length of specified embedding dimensions ({len(edim)}) does not match'
f' the number of embedding layers in the neural network ({num_embeddings})')
return edim
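# Hedged examples (added for illustration): the accepted --embedding_dim formats.
# The JSON-file variant (a path to a file holding a list of per-table dimensions)
# is omitted here to keep the note short.
#   parse_embedding_dimension('128', num_embeddings=3)      -> [128, 128, 128]
#   parse_embedding_dimension('64,32,16', num_embeddings=3) -> [64, 32, 16]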
def compute_eval_points(train_batches, evals_per_epoch):
eval_points = np.linspace(0, train_batches - 1, evals_per_epoch + 1)[1:]
eval_points = np.round(eval_points).tolist()
return eval_points
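# Hedged example (added): with 100 training batches and 2 evaluations per epoch,
# evaluation is triggered roughly mid-epoch and at the last step:
#   compute_eval_points(train_batches=100, evals_per_epoch=2) -> [50.0, 99.0]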
def inference_benchmark(validation_pipeline, dlrm, timer, FLAGS):
if FLAGS.max_steps == -1:
FLAGS.max_steps = 1000
evaluator = Evaluator(model=dlrm, timer=timer, auc_thresholds=FLAGS.auc_thresholds,
max_steps=FLAGS.max_steps, cast_dtype=None)
auc, test_loss, latencies = evaluator(validation_pipeline)
# don't benchmark the first few warmup steps
latencies = latencies[10:]
result_data = {
'mean_inference_throughput': FLAGS.valid_batch_size / np.mean(latencies),
'mean_inference_latency': np.mean(latencies)
}
for percentile in [90, 95, 99]:
result_data[f'p{percentile}_inference_latency'] = np.percentile(latencies, percentile)
result_data['auc'] = auc
if hvd.rank() == 0:
dllogger.log(data=result_data, step=tuple())
def validate_cmd_line_flags():
if FLAGS.cpu and hvd.size() > 1:
raise ValueError('MultiGPU mode is not supported when training on CPU')
    if FLAGS.cpu and FLAGS.interaction == 'dot_custom_cuda':
        raise ValueError('"dot_custom_cuda" interaction is not supported on CPU. '
                         'Please specify "--interaction dot_tensorflow" to run on CPU')
if FLAGS.concat_embedding and hvd.size() != 1:
raise ValueError('Concat embedding is currently unsupported in multiGPU mode.')
if FLAGS.concat_embedding and FLAGS.dataset_type != 'tf_raw':
raise ValueError('Concat embedding is only supported for dataset_type="tf_raw",'
f'got dataset_type={FLAGS.dataset_type}')
all_embedding_dims_equal = all(dim == FLAGS.embedding_dim[0] for dim in FLAGS.embedding_dim)
if FLAGS.concat_embedding and not all_embedding_dims_equal:
raise ValueError('Concat embedding is only supported when all embeddings have the same output dimension,'
f'got embedding_dim={FLAGS.embedding_dim}')
def create_optimizers(flags):
if flags.optimizer == 'sgd':
embedding_optimizer = tf.keras.optimizers.legacy.SGD(learning_rate=flags.learning_rate, momentum=0)
if flags.amp:
embedding_optimizer = LossScaleOptimizer(embedding_optimizer,
initial_scale=flags.loss_scale,
dynamic=False)
mlp_optimizer = embedding_optimizer
elif flags.optimizer == 'adam':
embedding_optimizer = tfa.optimizers.LazyAdam(learning_rate=flags.learning_rate,
beta_1=flags.beta1, beta_2=flags.beta2)
mlp_optimizer = tf.keras.optimizers.legacy.Adam(learning_rate=flags.learning_rate,
beta_1=flags.beta1, beta_2=flags.beta2)
if flags.amp:
# only wrap the mlp optimizer and not the embedding optimizer because the embeddings are not run in FP16
mlp_optimizer = LossScaleOptimizer(mlp_optimizer, initial_scale=flags.loss_scale, dynamic=False)
return mlp_optimizer, embedding_optimizer
def main():
hvd.init()
init_logging(log_path=FLAGS.log_path, params_dict=FLAGS.flag_values_dict(), enabled=hvd.rank()==0)
init_tf(FLAGS)
dataset_metadata = get_dataset_metadata(FLAGS.dataset_path, FLAGS.feature_spec)
FLAGS.embedding_dim = parse_embedding_dimension(FLAGS.embedding_dim,
num_embeddings=len(dataset_metadata.categorical_cardinalities))
validate_cmd_line_flags()
if FLAGS.restore_checkpoint_path is not None:
model = Model.create_from_checkpoint(FLAGS.restore_checkpoint_path)
else:
model = Model(**FLAGS.flag_values_dict(), num_numerical_features=dataset_metadata.num_numerical_features,
categorical_cardinalities=dataset_metadata.categorical_cardinalities,
transpose=False)
train_pipeline, validation_pipeline = create_input_pipelines(dataset_type=FLAGS.dataset_type,
dataset_path=FLAGS.dataset_path,
train_batch_size=FLAGS.batch_size,
test_batch_size=FLAGS.valid_batch_size,
table_ids=model.sparse_model.get_local_table_ids(hvd.rank()),
feature_spec=FLAGS.feature_spec,
rank=hvd.rank(), world_size=hvd.size(),
concat_features=FLAGS.concat_embedding)
mlp_optimizer, embedding_optimizer = create_optimizers(FLAGS)
scheduler = LearningRateScheduler([mlp_optimizer, embedding_optimizer],
warmup_steps=FLAGS.warmup_steps,
base_lr=FLAGS.learning_rate,
decay_start_step=FLAGS.decay_start_step,
decay_steps=FLAGS.decay_steps)
timer = IterTimer(train_batch_size=FLAGS.batch_size, test_batch_size=FLAGS.batch_size,
optimizer=embedding_optimizer, print_freq=FLAGS.print_freq, enabled=hvd.rank() == 0)
if FLAGS.mode == 'inference':
inference_benchmark(validation_pipeline, model, timer, FLAGS)
return
elif FLAGS.mode == 'eval':
evaluator = Evaluator(model=model, timer=timer, auc_thresholds=FLAGS.auc_thresholds, max_steps=FLAGS.max_steps)
test_auc, test_loss, _ = evaluator(validation_pipeline)
if hvd.rank() == 0:
dllogger.log(data=dict(auc=test_auc, test_loss=test_loss), step=tuple())
return
eval_points = compute_eval_points(train_batches=len(train_pipeline),
evals_per_epoch=FLAGS.evals_per_epoch)
trainer = Trainer(model, embedding_optimizer=embedding_optimizer, mlp_optimizer=mlp_optimizer, amp=FLAGS.amp,
lr_scheduler=scheduler, tf_dataset_op=train_pipeline.op, cpu=FLAGS.cpu)
evaluator = Evaluator(model=model, timer=timer, auc_thresholds=FLAGS.auc_thresholds, distributed=hvd.size() > 1)
best_auc = 0
best_loss = 1e6
train_begin = time.time()
for epoch in range(FLAGS.epochs):
print('Starting epoch: ', epoch)
for step in range(len(train_pipeline)):
if step == FLAGS.profiler_start_step and hvd.rank() == FLAGS.profiled_rank:
tf.profiler.experimental.start('logdir')
if FLAGS.profiler_start_step and step == FLAGS.profiler_start_step + 100 and hvd.rank() == FLAGS.profiled_rank:
tf.profiler.experimental.stop()
loss = trainer.train_step()
if step == 0 and hvd.size() > 1:
dmp.broadcast_variables(model.variables, root_rank=0)
if step % FLAGS.print_freq == 0:
if tf.math.is_nan(loss):
print('NaN loss encountered in training. Aborting.')
break
timer.step_train(loss=loss)
if FLAGS.max_steps != -1 and step > FLAGS.max_steps:
dist_print(f'Max steps of {FLAGS.max_steps} reached, exiting')
break
if step in eval_points:
test_auc, test_loss, _ = evaluator(validation_pipeline)
dist_print(f'Evaluation completed, AUC: {test_auc:.6f}, test_loss: {test_loss:.6f}')
timer.test_idx = 0
best_auc = max(best_auc, test_auc)
best_loss = min(best_loss, test_loss)
elapsed = time.time() - train_begin
if FLAGS.save_checkpoint_path is not None:
model.save_checkpoint(FLAGS.save_checkpoint_path)
if hvd.rank() == 0:
dist_print(f'Training run completed, elapsed: {elapsed:.0f} [s]')
results = {
'throughput': FLAGS.batch_size / timer.mean_train_time(),
'mean_step_time_ms': timer.mean_train_time() * 1000,
'auc': best_auc,
'validation_loss': best_loss
}
dllogger.log(data=results, step=tuple())
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/main.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import tensorflow as tf
import horovod.tensorflow as hvd
import numpy as np
import json
from distributed_embeddings.python.layers import dist_model_parallel as dmp
from distributed_embeddings.python.layers import embedding
from utils.checkpointing import get_variable_path
from .embedding import EmbeddingInitializer, DualEmbeddingGroup
sparse_model_parameters = ['use_mde_embeddings', 'embedding_dim', 'column_slice_threshold',
'embedding_zeros_initializer', 'embedding_trainable', 'categorical_cardinalities',
'concat_embedding', 'cpu_offloading_threshold_gb']
class SparseModel(tf.keras.Model):
def __init__(self, **kwargs):
super(SparseModel, self).__init__()
sparse_model_kwargs = {k:kwargs[k] for k in sparse_model_parameters}
for field in sparse_model_kwargs.keys():
self.__dict__[field] = kwargs[field]
self.num_all_categorical_features = len(self.categorical_cardinalities)
self.use_concat_embedding = self.concat_embedding and (hvd.size() == 1) and \
all(dim == self.embedding_dim[0] for dim in self.embedding_dim)
self._create_embeddings()
def _create_embeddings(self):
self.embedding_layers = []
initializer_cls = tf.keras.initializers.Zeros if self.embedding_zeros_initializer else EmbeddingInitializer
# use a concatenated embedding for singleGPU when all embedding dimensions are equal
if self.use_concat_embedding:
self.embedding = DualEmbeddingGroup(cardinalities=self.categorical_cardinalities,
output_dim=self.embedding_dim[0],
memory_threshold=self.cpu_offloading_threshold_gb,
                                            trainable=self.embedding_trainable,
use_mde_embeddings=self.use_mde_embeddings)
return
for table_size, dim in zip(self.categorical_cardinalities, self.embedding_dim):
if hvd.rank() == 0:
print(f'Creating embedding with size: {table_size} {dim}')
if self.use_mde_embeddings:
e = embedding.Embedding(input_dim=table_size, output_dim=dim,
combiner='sum', embeddings_initializer=initializer_cls())
else:
e = tf.keras.layers.Embedding(input_dim=table_size, output_dim=dim,
embeddings_initializer=initializer_cls())
self.embedding_layers.append(e)
self.embedding = dmp.DistributedEmbedding(self.embedding_layers,
strategy='memory_balanced',
dp_input=False,
column_slice_threshold=self.column_slice_threshold)
def get_local_table_ids(self, rank):
if self.use_concat_embedding:
return list(range(self.num_all_categorical_features))
else:
return self.embedding.strategy.input_ids_list[rank]
@tf.function
def call(self, cat_features):
embedding_outputs = self._call_embeddings(cat_features)
return embedding_outputs
def _call_embeddings(self, cat_features):
        x = self.embedding(cat_features)
        if not self.use_concat_embedding:
            # the distributed embedding returns a list of per-table tensors that still needs concatenation
            x = tf.concat(x, axis=1)
x = tf.cast(x, dtype=self.compute_dtype)
return x
def force_initialization(self, global_batch_size=64):
categorical_features = [tf.zeros(shape=[global_batch_size, 1], dtype=tf.int32)
for _ in range(len(self.get_local_table_ids(hvd.rank())))]
_ = self(categorical_features)
def save_checkpoint(self, checkpoint_path):
print('Gathering the embedding weights...')
full_embedding_weights = self.embedding.get_weights()
print('Saving the embedding weights...')
for i, weight in enumerate(full_embedding_weights):
filename = get_variable_path(checkpoint_path, f'feature_{i}')
np.save(file=filename, arr=weight)
print('Embedding checkpoint saved.')
def load_checkpoint(self, checkpoint_path):
self.force_initialization()
paths = []
for i in range(self.num_all_categorical_features):
path = get_variable_path(checkpoint_path, f'feature_{i}')
paths.append(path)
self.embedding.set_weights(weights=paths)
def save_config(self, path):
config = {k : self.__dict__[k] for k in sparse_model_parameters}
with open(path, 'w') as f:
json.dump(obj=config, fp=f, indent=4)
@staticmethod
def from_config(path):
with open(path) as f:
config = json.load(fp=f)
return SparseModel(**config)
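# Hedged usage note (added, not part of the original file): save_config and
# from_config round-trip only the constructor kwargs through JSON; the embedding
# weights are written and restored separately, e.g.
#   model.save_config('sparse/config.json')
#   model.save_checkpoint('sparse')
#   restored = SparseModel.from_config('sparse/config.json')
#   restored.load_checkpoint('sparse')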
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/nn/sparse_model.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import tensorflow as tf
class LearningRateScheduler:
"""
LR Scheduler combining Polynomial Decay with Warmup at the beginning.
TF-based cond operations necessary for performance in graph mode.
"""
def __init__(self, optimizers, base_lr, warmup_steps, decay_start_step, decay_steps):
self.optimizers = optimizers
self.warmup_steps = tf.constant(warmup_steps, dtype=tf.int32)
self.decay_start_step = tf.constant(decay_start_step, dtype=tf.int32)
self.decay_steps = tf.constant(decay_steps)
self.decay_end_step = decay_start_step + decay_steps
self.poly_power = 2
self.base_lr = base_lr
with tf.device('/CPU:0'):
self.step = tf.Variable(0)
@tf.function
def __call__(self):
with tf.device('/CPU:0'):
# used for the warmup stage
warmup_step = tf.cast(1 / self.warmup_steps, tf.float32)
lr_factor_warmup = 1 - tf.cast(self.warmup_steps - self.step, tf.float32) * warmup_step
lr_factor_warmup = tf.cast(lr_factor_warmup, tf.float32)
# used for the constant stage
lr_factor_constant = tf.cast(1., tf.float32)
# used for the decay stage
lr_factor_decay = (self.decay_end_step - self.step) / self.decay_steps
lr_factor_decay = tf.math.pow(lr_factor_decay, self.poly_power)
lr_factor_decay = tf.cast(lr_factor_decay, tf.float32)
poly_schedule = tf.cond(self.step < self.decay_start_step, lambda: lr_factor_constant,
lambda: lr_factor_decay)
lr_factor = tf.cond(self.step < self.warmup_steps, lambda: lr_factor_warmup,
lambda: poly_schedule)
lr = self.base_lr * lr_factor
for optimizer in self.optimizers:
optimizer.lr.assign(lr)
self.step.assign(self.step + 1)
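# Hedged usage sketch (added, not part of the original file): drive the scheduler
# with a single optimizer and print the learning rate over a few steps. The
# hyperparameters below are illustrative assumptions, not the training defaults.
if __name__ == '__main__':
    _opt = tf.keras.optimizers.legacy.SGD(learning_rate=0.0)
    _scheduler = LearningRateScheduler([_opt], base_lr=24.0, warmup_steps=4,
                                       decay_start_step=8, decay_steps=4)
    for _step in range(12):
        _scheduler()  # linear warmup for 4 steps, constant until step 8, then polynomial decay
        print(_step, float(_opt.lr.numpy()))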
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/nn/lr_scheduler.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
class DotInteractionGather(tf.keras.layers.Layer):
def __init__(self, num_features):
super(DotInteractionGather, self).__init__()
self.num_features = num_features
self.indices = []
for i in range(self.num_features):
for j in range(i):
self.indices.append(i * num_features + j)
def call(self, features, bottom_mlp_out=None):
interactions = tf.matmul(features, features, transpose_b=True)
interactions = tf.reshape(interactions, shape=[-1, self.num_features * self.num_features])
x = tf.gather(params=interactions, indices=self.indices, axis=1)
if bottom_mlp_out is not None:
x = tf.concat([bottom_mlp_out, x], axis=1)
        return x
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/nn/interaction.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import math
import tensorflow as tf
import numpy as np
from distributed_embeddings.python.layers import embedding
from utils.checkpointing import get_variable_path
# write embedding checkpoints of 1M rows at a time
_embedding_checkpoint_batch = 1024 * 1024
@tf.keras.utils.register_keras_serializable()
class EmbeddingInitializer(tf.keras.initializers.Initializer):
def __call__(self, shape, dtype=None):
if dtype is None:
dtype = tf.float32
with tf.device('/CPU:0'):
maxval = tf.sqrt(tf.constant(1.) / tf.cast(shape[0], tf.float32))
maxval = tf.cast(maxval, dtype=dtype)
minval = -maxval
weights = tf.random.uniform(shape, minval=minval, maxval=maxval, dtype=dtype)
weights = tf.cast(weights, dtype=dtype)
return weights
def get_config(self):
return {}
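# Hedged example (added): the initializer above samples uniformly from
# [-sqrt(1/input_dim), +sqrt(1/input_dim)], so for a table with 4 rows:
#   EmbeddingInitializer()(shape=[4, 2])  # values drawn from [-0.5, 0.5]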
class Embedding(tf.keras.layers.Layer):
def __init__(self, input_dim, output_dim, trainable=True, dtype=tf.float32, feature_name=None,
embeddings_initializer=None):
super(Embedding, self).__init__(dtype=dtype)
self.input_dim = int(input_dim)
self.output_dim = int(output_dim)
self.embedding_table = None
self.trainable = trainable
self.feature_name = feature_name
if not self.feature_name:
self.feature_name = ''
self.initializer = embeddings_initializer if embeddings_initializer else EmbeddingInitializer()
def build(self, input_shape):
self.embedding_table = self.add_weight("embedding_table",
shape=[self.input_dim, self.output_dim],
dtype=self.dtype,
initializer=self.initializer,
trainable=self.trainable)
def call(self, indices):
return tf.gather(params=self.embedding_table, indices=indices)
def save_checkpoint(self, checkpoint_path):
filename = get_variable_path(checkpoint_path, self.feature_name)
indices = tf.range(start=0, limit=self.embedding_table.shape[0], dtype=tf.int32)
arr = tf.gather(params=self.embedding_table, indices=indices, axis=0)
arr = arr.numpy()
np.save(arr=arr, file=filename)
def restore_checkpoint(self, checkpoint_path):
filename = get_variable_path(checkpoint_path, self.feature_name)
print('restoring embedding table from: ', filename)
numpy_arr = np.load(file=filename, mmap_mode='r')
num_chunks = math.ceil(numpy_arr.shape[0] / _embedding_checkpoint_batch)
for i in range(num_chunks):
begin = i * _embedding_checkpoint_batch
end = (i+1) * _embedding_checkpoint_batch
end = min(end, numpy_arr.shape[0])
indices = tf.range(start=begin, limit=end, dtype=tf.int32)
update = tf.IndexedSlices(values=numpy_arr[begin:end, :],
indices=indices,
dense_shape=self.embedding_table.shape)
self.embedding_table.scatter_update(sparse_delta=update)
class EmbeddingGroup(tf.keras.layers.Layer):
def __init__(self, table_sizes, output_dim, dtype=tf.float32, feature_names=None, trainable=True):
super(EmbeddingGroup, self).__init__(dtype=dtype)
self.table_sizes = table_sizes
self.output_dim = output_dim
self.feature_names = feature_names
if not self.feature_names:
self.feature_names = [f'feature_{i}' for i in range(len(table_sizes))]
self.embedding_layers = []
for fname, ts in zip(self.feature_names, self.table_sizes):
self.embedding_layers.append(Embedding(ts, output_dim, dtype=self.dtype,
feature_name=fname, trainable=trainable))
def call(self, indices):
outputs = []
for i, l in enumerate(self.embedding_layers):
out = l(indices[:, i])
out = tf.expand_dims(out, axis=1)
outputs.append(out)
result = tf.concat(outputs, axis=1)
return result
def save_checkpoint(self, checkpoint_path):
for e in self.embedding_layers:
e.save_checkpoint(checkpoint_path)
def restore_checkpoint(self, checkpoint_path):
for e in self.embedding_layers:
e.restore_checkpoint(checkpoint_path)
class FusedEmbeddingInitializer(tf.keras.initializers.Initializer):
def __init__(self, table_sizes, embedding_dim, wrapped):
self.table_sizes = table_sizes
self.wrapped = wrapped
self.embedding_dim = embedding_dim
def __call__(self, shape, dtype=tf.float32):
with tf.device('/CPU:0'):
subtables = []
for table_size in self.table_sizes:
subtable = self.wrapped()(shape=[table_size, self.embedding_dim], dtype=dtype)
subtables.append(subtable)
weights = tf.concat(subtables, axis=0)
return weights
def get_config(self):
return {}
class FusedEmbedding(tf.keras.layers.Layer):
def __init__(self, table_sizes, output_dim, dtype=tf.float32, feature_names=None, trainable=True,
use_mde_embeddings=True):
super(FusedEmbedding, self).__init__(dtype=dtype)
self.table_sizes = table_sizes
self.output_dim = output_dim
self.offsets = np.array([0] + table_sizes, dtype=np.int32).cumsum()
        # self.offsets stays 1-D so that offsets[:-1] broadcasts against [batch, num_tables] indices in call()
self.offsets = tf.constant(self.offsets, dtype=tf.int32)
self.use_mde_embeddings = use_mde_embeddings
self.feature_names = feature_names
if not self.feature_names:
self.feature_names = [f'feature_{i}' for i in range(len(table_sizes))]
self.trainable = trainable
initializer = FusedEmbeddingInitializer(table_sizes=self.table_sizes,
embedding_dim=self.output_dim,
wrapped=EmbeddingInitializer)
embedding_cls = embedding.Embedding if use_mde_embeddings else Embedding
self.wrapped = embedding_cls(input_dim=self.offsets[-1], output_dim=self.output_dim,
embeddings_initializer=initializer)
    def _get_embedding_table(self):
        # both the MDE embedding and the plain Keras fallback expose the table as their first variable
        return self.wrapped.variables[0]
def call(self, indices):
indices = indices + self.offsets[:-1]
return self.wrapped(indices)
def save_checkpoint(self, checkpoint_path):
for j in range(len(self.offsets) - 1):
nrows = self.offsets[j+1] - self.offsets[j]
name = self.feature_names[j]
filename = get_variable_path(checkpoint_path, name)
indices = tf.range(start=self.offsets[j], limit=self.offsets[j] + nrows, dtype=tf.int32)
arr = tf.gather(params=self._get_embedding_table(), indices=indices, axis=0)
arr = arr.numpy()
np.save(arr=arr, file=filename)
def restore_checkpoint(self, checkpoint_path):
for j in range(len(self.offsets) - 1):
name = self.feature_names[j]
filename = get_variable_path(checkpoint_path, name)
print('restoring embedding table from: ', filename)
numpy_arr = np.load(file=filename, mmap_mode='r')
num_chunks = math.ceil(numpy_arr.shape[0] / _embedding_checkpoint_batch)
for i in range(num_chunks):
begin = i * _embedding_checkpoint_batch
end = (i+1) * _embedding_checkpoint_batch
end = min(end, numpy_arr.shape[0])
indices = tf.range(start=begin, limit=end, dtype=tf.int32) + self.offsets[j]
update = tf.IndexedSlices(values=numpy_arr[begin:end, :],
indices=indices,
dense_shape=self._get_embedding_table().shape)
self._get_embedding_table().scatter_update(sparse_delta=update)
class DualEmbeddingGroup(tf.keras.layers.Layer):
"""
A group of embeddings with the same output dimension.
If it runs out of GPU memory it will use CPU memory for the largest tables.
"""
def __init__(self, cardinalities, output_dim, memory_threshold,
cpu_embedding='multitable', gpu_embedding='fused', dtype=tf.float32,
feature_names=None, trainable=True, use_mde_embeddings=True):
# TODO: throw an exception if the features are not sorted by cardinality in reversed order
super(DualEmbeddingGroup, self).__init__(dtype=dtype)
if dtype not in [tf.float32, tf.float16]:
raise ValueError(f'Only float32 and float16 embedding dtypes are currently supported. Got {dtype}.')
cpu_embedding_class = EmbeddingGroup if cpu_embedding == 'multitable' else FusedEmbedding
gpu_embedding_class = EmbeddingGroup if gpu_embedding == 'multitable' else FusedEmbedding
print('Dual embedding cardinalities: ', cardinalities)
self.cardinalities = np.array(cardinalities)
self.memory_threshold = memory_threshold
self.bytes_per_element = 2 if self.dtype == tf.float16 else 4
self.table_sizes = self.cardinalities * output_dim * self.bytes_per_element
self._find_first_gpu_index()
if not feature_names:
feature_names = [f'feature_{i}' for i in range(len(self.table_sizes))]
self.feature_names = feature_names
self.gpu_embedding = gpu_embedding_class(table_sizes=self.gpu_cardinalities.tolist(),
output_dim=output_dim, dtype=self.dtype,
feature_names=[feature_names[i] for i in self.gpu_inputs],
trainable=trainable, use_mde_embeddings=use_mde_embeddings)
# Force using FP32 for CPU embeddings, FP16 performance is much worse
self.cpu_embedding = cpu_embedding_class(table_sizes=self.cpu_cardinalities,
output_dim=output_dim, dtype=tf.float32,
feature_names=[feature_names[i] for i in self.cpu_inputs],
trainable=trainable)
def _find_first_gpu_index(self):
# order from smallest to largest
idx_mapping = np.argsort(self.table_sizes)
reversed_sizes = self.table_sizes[idx_mapping]
cumulative_size = np.cumsum(reversed_sizes)
cumulative_indicators = (cumulative_size > self.memory_threshold * 2 ** 30).tolist()
if True in cumulative_indicators:
index = cumulative_indicators.index(True)
else:
index = len(cumulative_size)
self.first_cpu_index = index
self.gpu_inputs = sorted(idx_mapping[:self.first_cpu_index])
self.cpu_inputs = sorted(idx_mapping[self.first_cpu_index:])
self.cpu_cardinalities = self.cardinalities[self.cpu_inputs]
self.gpu_cardinalities = self.cardinalities[self.gpu_inputs]
self.cpu_sizes = self.table_sizes[self.cpu_inputs]
self.gpu_sizes = self.table_sizes[self.gpu_inputs]
print(f'self.cpu_inputs: {self.cpu_inputs}')
print(f'self.gpu_inputs: {self.gpu_inputs}')
print(f'Total size of GPU tables: {sum(self.gpu_sizes) / 10 ** 9:.3f}[GB]')
print(f'Total size of CPU tables: {sum(self.cpu_sizes) / 10 ** 9:.3f}[GB]')
def call(self, indices):
cpu_indices, gpu_indices = [], []
if not self.cpu_inputs:
return self.gpu_embedding(indices)
if not self.gpu_inputs:
with tf.device('/CPU:0'):
return self.cpu_embedding(indices)
for i in self.cpu_inputs:
cpu_indices.append(indices[:, i])
for i in self.gpu_inputs:
gpu_indices.append(indices[:, i])
to_concat = []
# at least one cpu-based embedding
with tf.device('/CPU:0'):
cpu_indices = tf.stack(cpu_indices, axis=1)
cpu_results = self.cpu_embedding(cpu_indices)
cpu_results = tf.cast(cpu_results, dtype=self.dtype)
to_concat.append(cpu_results)
# at least one gpu-based embedding
with tf.device('/GPU:0'):
gpu_indices = tf.stack(gpu_indices, axis=1)
gpu_results = self.gpu_embedding(gpu_indices)
to_concat.append(gpu_results)
result = tf.concat(to_concat, axis=1)
reorder_indices = np.concatenate([self.cpu_inputs, self.gpu_inputs], axis=0).argsort().tolist()
split_result = tf.split(result, num_or_size_splits=indices.shape[1], axis=1)
result = [split_result[i] for i in reorder_indices]
result = tf.concat(result, axis=1)
return result
def save_checkpoint(self, checkpoint_path):
self.gpu_embedding.save_checkpoint(checkpoint_path)
self.cpu_embedding.save_checkpoint(checkpoint_path)
def restore_checkpoint(self, checkpoint_path):
self.gpu_embedding.restore_checkpoint(checkpoint_path)
self.cpu_embedding.restore_checkpoint(checkpoint_path)
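# Hedged usage sketch (added, not part of the original file): the index-offset
# trick used by FusedEmbedding, shown on two toy tables of sizes 3 and 5 fused
# into one 8-row table. Row i of table j lives at row offsets[j] + i.
if __name__ == '__main__':
    _fused = FusedEmbedding(table_sizes=[3, 5], output_dim=4, use_mde_embeddings=False)
    _indices = tf.constant([[2, 4], [0, 0]], dtype=tf.int32)  # per-table row ids
    _out = _fused(_indices)  # internally looks up rows [[2, 7], [0, 3]] of the fused table
    print(_out.shape)  # (2, 2, 4): [batch, num_tables, output_dim]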
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/nn/embedding.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/nn/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import tensorflow as tf
import horovod.tensorflow as hvd
import time
import os
from utils.distributed import dist_print
from .dense_model import DenseModel, dense_model_parameters
from .sparse_model import SparseModel, sparse_model_parameters
from .nn_utils import create_inputs_dict
class Model(tf.keras.Model):
def __init__(self, **kwargs):
super(Model, self).__init__()
if kwargs:
dense_model_kwargs = {k:kwargs[k] for k in dense_model_parameters}
self.dense_model = DenseModel(**dense_model_kwargs)
sparse_model_kwargs = {k:kwargs[k] for k in sparse_model_parameters}
self.sparse_model = SparseModel(**sparse_model_kwargs)
@staticmethod
def create_from_checkpoint(checkpoint_path):
if checkpoint_path is None:
return None
model = Model()
model.dense_model = DenseModel.from_config(os.path.join(checkpoint_path, 'dense', 'config.json'))
model.sparse_model = SparseModel.from_config(os.path.join(checkpoint_path, 'sparse', 'config.json'))
model.restore_checkpoint(checkpoint_path)
return model
def force_initialization(self, global_batch_size):
numerical_features = tf.zeros(shape=[global_batch_size // hvd.size(),
self.dense_model.num_numerical_features])
categorical_features = [tf.zeros(shape=[global_batch_size, 1], dtype=tf.int32)
for _ in range(len(self.sparse_model.get_local_table_ids(hvd.rank())))]
inputs = create_inputs_dict(numerical_features, categorical_features)
self(inputs=inputs)
@tf.function
def call(self, inputs, sigmoid=False, training=False):
numerical_features, cat_features = list(inputs.values())
embedding_outputs = self.sparse_model(cat_features)
embedding_outputs = tf.reshape(embedding_outputs, shape=[-1])
x = self.dense_model(numerical_features, embedding_outputs, sigmoid=sigmoid, training=training)
return x
def save_checkpoint(self, checkpoint_path):
dist_print('Saving a checkpoint...')
begin_save = time.time()
os.makedirs(checkpoint_path, exist_ok=True)
if hvd.rank() == 0:
dense_checkpoint_dir = os.path.join(checkpoint_path, 'dense')
os.makedirs(dense_checkpoint_dir, exist_ok=True)
self.dense_model.save_config(os.path.join(dense_checkpoint_dir, 'config.json'))
self.dense_model.save_weights(os.path.join(dense_checkpoint_dir, 'dense'))
sparse_checkpoint_dir = os.path.join(checkpoint_path, 'sparse')
os.makedirs(sparse_checkpoint_dir, exist_ok=True)
self.sparse_model.save_config(os.path.join(sparse_checkpoint_dir, 'config.json'))
self.sparse_model.save_checkpoint(sparse_checkpoint_dir)
end_save = time.time()
dist_print('Saved a checkpoint to ', checkpoint_path)
dist_print(f'Saving a checkpoint took {end_save - begin_save:.3f}')
def restore_checkpoint(self, checkpoint_path):
begin = time.time()
dist_print('Restoring a checkpoint...')
local_batch = 64
self.force_initialization(global_batch_size=hvd.size()*local_batch)
dense_checkpoint_path = os.path.join(checkpoint_path, 'dense', 'dense')
self.dense_model.load_weights(dense_checkpoint_path)
sparse_checkpoint_dir = os.path.join(checkpoint_path, 'sparse')
self.sparse_model.load_checkpoint(sparse_checkpoint_dir)
end = time.time()
dist_print(f'Restoring a checkpoint took: {end-begin:.3f} seconds')
return self
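# Hedged note (added, not part of the original file): the on-disk layout produced
# by save_checkpoint and consumed by create_from_checkpoint looks roughly like:
#   <checkpoint_path>/dense/config.json     DenseModel constructor kwargs
#   <checkpoint_path>/dense/dense.*         Keras weight files
#   <checkpoint_path>/sparse/config.json    SparseModel constructor kwargs
#   <checkpoint_path>/sparse/feature_<i>.*  one numpy array per embedding table
# The exact sparse filenames come from get_variable_path, so the pattern above
# is an assumption for illustration.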
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/nn/model.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import tensorflow as tf
import horovod.tensorflow as hvd
from distributed_embeddings.python.layers import dist_model_parallel as dmp
from .nn_utils import create_inputs_dict
class Trainer:
def __init__(self, model, embedding_optimizer, mlp_optimizer, amp, lr_scheduler, tf_dataset_op, cpu):
self.model = model
self.embedding_optimizer = embedding_optimizer
self.mlp_optimizer = mlp_optimizer
self.amp = amp
self.lr_scheduler = lr_scheduler
self.bce = tf.keras.losses.BinaryCrossentropy(reduction=tf.keras.losses.Reduction.NONE, from_logits=True)
self.cpu = cpu
self.tf_dataset_op = tf_dataset_op
self.dataset_iter = iter(self.tf_dataset_op())
def _weight_update(self, gradients):
if self.amp:
gradients = self.mlp_optimizer.get_unscaled_gradients(gradients)
dense_gradients, dense_variables = [], []
embedding_gradients, embedding_variables = [], []
embedding_refs = set(v.ref() for v in self.model.sparse_model.trainable_variables)
for var, grad in zip(self.model.trainable_variables, gradients):
if var.ref() in embedding_refs:
embedding_variables.append(var)
embedding_gradients.append(grad)
else:
dense_variables.append(var)
dense_gradients.append(grad)
self.mlp_optimizer.apply_gradients(zip(dense_gradients, dense_variables))
self.embedding_optimizer.apply_gradients(zip(embedding_gradients, embedding_variables))
@tf.function
def train_step(self):
device = '/CPU:0' if self.cpu else '/GPU:0'
with tf.device(device):
self.lr_scheduler()
with tf.name_scope("dataloading"):
(numerical_features, categorical_features), labels = self.dataset_iter.get_next()
inputs = create_inputs_dict(numerical_features, categorical_features)
with tf.GradientTape() as tape:
predictions = self.model(inputs=inputs, training=True)
unscaled_loss = self.bce(labels, predictions)
# tf keras doesn't reduce the loss when using a Custom Training Loop
unscaled_loss = tf.math.reduce_mean(unscaled_loss)
scaled_loss = self.mlp_optimizer.get_scaled_loss(unscaled_loss) if self.amp else unscaled_loss
if hvd.size() > 1:
tape = dmp.DistributedGradientTape(tape)
gradients = tape.gradient(scaled_loss, self.model.trainable_variables)
self._weight_update(gradients)
if hvd.size() > 1:
# compute mean loss for all workers for reporting
mean_loss = hvd.allreduce(unscaled_loss, name="mean_loss", op=hvd.Average)
else:
mean_loss = unscaled_loss
return mean_loss
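# Hedged, self-contained sketch (added, not part of the original file): the
# variable-partitioning idiom used by _weight_update above, demonstrated on
# plain tf.Variables instead of real model weights. Membership is tested via
# Variable.ref() because tf.Variable objects themselves are not hashable.
if __name__ == '__main__':
    _emb_var, _mlp_var = tf.Variable(1.0, name='emb'), tf.Variable(2.0, name='mlp')
    _embedding_refs = {_emb_var.ref()}
    _grads = [tf.constant(0.1), tf.constant(0.2)]
    _emb_pairs, _dense_pairs = [], []
    for _v, _g in zip([_emb_var, _mlp_var], _grads):
        (_emb_pairs if _v.ref() in _embedding_refs else _dense_pairs).append((_g, _v))
    print(len(_emb_pairs), len(_dense_pairs))  # 1 1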
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/nn/trainer.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import json
import tensorflow.keras.initializers as initializers
import math
from tensorflow.python.keras.saving.saving_utils import model_input_signature
from .dcn import CrossNetwork
from . import interaction
import tensorflow as tf
import horovod.tensorflow as hvd
try:
from tensorflow_dot_based_interact.python.ops import dot_based_interact_ops
except ImportError:
print('WARNING: Could not import the custom dot-interaction kernels')
dense_model_parameters = ['embedding_dim', 'interaction', 'bottom_mlp_dims',
'top_mlp_dims', 'num_numerical_features', 'categorical_cardinalities',
'transpose', 'num_cross_layers', 'cross_layer_projection_dim',
'batch_size']
class DenseModel(tf.keras.Model):
def __init__(self, **kwargs):
super(DenseModel, self).__init__()
for field in dense_model_parameters:
self.__dict__[field] = kwargs[field]
self.num_all_categorical_features = len(self.categorical_cardinalities)
self.bottom_mlp_dims = [int(d) for d in self.bottom_mlp_dims]
self.top_mlp_dims = [int(d) for d in self.top_mlp_dims]
if self.interaction != 'cross' and any(dim != self.embedding_dim[0] for dim in self.embedding_dim):
            raise ValueError(f'For DLRM all embedding dimensions should be equal, '
                             f'got interaction={self.interaction}, embedding_dim={self.embedding_dim}')
if self.interaction != 'cross' and self.bottom_mlp_dims[-1] != self.embedding_dim[0]:
raise ValueError(f'Final dimension of the Bottom MLP should match embedding dimension. '
f'Got: {self.bottom_mlp_dims[-1]} and {self.embedding_dim} respectively.')
self._create_interaction_op()
self._create_bottom_mlp()
self._create_top_mlp()
self.bottom_mlp_padding = self._compute_padding(num_features=self.num_numerical_features)
self.top_mlp_padding = self._compute_padding(num_features=self._get_top_mlp_input_features())
def _create_interaction_op(self):
if self.interaction == 'dot_custom_cuda':
self.interact_op = dot_based_interact_ops.dot_based_interact
elif self.interaction == 'dot_tensorflow':
# TODO: add support for datasets with no dense features
self.interact_op = interaction.DotInteractionGather(num_features=self.num_all_categorical_features + 1)
elif self.interaction == 'cross':
self.interact_op = CrossNetwork(num_layers=self.num_cross_layers,
projection_dim=self.cross_layer_projection_dim)
else:
raise ValueError(f'Unknown interaction {self.interaction}')
@staticmethod
def _compute_padding(num_features, multiple=8):
pad_to = math.ceil(num_features / multiple) * multiple
return pad_to - num_features
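    # Hedged example (added): _compute_padding(13) == 3, i.e. 13 numerical
    # features (the Criteo-style default) are padded up to 16, the next multiple
    # of 8, which keeps the MLP input width friendly to Tensor Cores.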
def _get_top_mlp_input_features(self):
if self.interaction == 'cross':
num_features = sum(self.embedding_dim)
if self.num_numerical_features != 0:
num_features += self.bottom_mlp_dims[-1]
return num_features
else:
num_features = self.num_all_categorical_features
if self.num_numerical_features != 0:
num_features += 1
num_features = num_features * (num_features - 1)
num_features = num_features // 2
num_features = num_features + self.bottom_mlp_dims[-1]
return num_features
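    # Hedged example (added): for the dot interactions with 26 categorical
    # features, one numerical block and a final bottom-MLP width of 128, this
    # yields 27 * 26 / 2 + 128 = 479 features before padding.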
def _create_bottom_mlp(self):
self.bottom_mlp_layers = []
for dim in self.bottom_mlp_dims:
kernel_initializer = initializers.GlorotNormal()
bias_initializer = initializers.RandomNormal(stddev=math.sqrt(1. / dim))
l = tf.keras.layers.Dense(dim, activation='relu',
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer)
self.bottom_mlp_layers.append(l)
def _create_top_mlp(self):
self.top_mlp = []
for i, dim in enumerate(self.top_mlp_dims):
if i == len(self.top_mlp_dims) - 1:
# final layer
activation = 'linear'
else:
activation = 'relu'
kernel_initializer = initializers.GlorotNormal()
bias_initializer = initializers.RandomNormal(stddev=math.sqrt(1. / dim))
l = tf.keras.layers.Dense(dim, activation=activation,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer)
self.top_mlp.append(l)
def transpose_nonequal_embedding_dim(self, embedding_outputs, numerical_features):
# We get a table-major format here for inference,
# but the sizes of the tables are not the same.
# Therefore a simple transposition will not work,
        # we need one split per table followed by a reshape and a concat instead.
        # TODO: test this.
        embedding_outputs = tf.reshape(embedding_outputs, shape=[-1])
        batch_size = numerical_features.shape[0]
        split_sizes = [batch_size * dim for dim in self.embedding_dim]
        embedding_outputs = tf.split(embedding_outputs, num_or_size_splits=split_sizes)
        # each table's flat output holds batch_size contiguous rows of length dim
        embedding_outputs = [tf.reshape(eout, shape=[batch_size, dim])
                             for eout, dim in zip(embedding_outputs, self.embedding_dim)]
        return tf.concat(embedding_outputs, axis=1)
def transpose_input(self, embedding_outputs, numerical_features):
if any(dim != self.embedding_dim[0] for dim in self.embedding_dim):
return self.transpose_nonequal_embedding_dim(embedding_outputs, numerical_features)
else:
embedding_outputs = tf.reshape(embedding_outputs, shape=[self.num_all_categorical_features, -1, self.embedding_dim[0]])
return tf.transpose(embedding_outputs, perm=[1, 0, 2])
def reshape_input(self, embedding_outputs):
if self.interaction == 'cross':
return tf.reshape(embedding_outputs, shape=[-1, sum(self.embedding_dim)])
else:
return tf.reshape(embedding_outputs, shape=[-1, self.num_all_categorical_features, self.embedding_dim[0]])
@tf.function
def call(self, numerical_features, embedding_outputs, sigmoid=False, training=False):
numerical_features = tf.reshape(numerical_features, shape=[-1, self.num_numerical_features])
bottom_mlp_out = self._call_bottom_mlp(numerical_features, training)
if self.transpose:
embedding_outputs = self.transpose_input(embedding_outputs, numerical_features)
embedding_outputs = self.reshape_input(embedding_outputs)
x = self._call_interaction(embedding_outputs, bottom_mlp_out)
x = self._call_top_mlp(x)
if sigmoid:
x = tf.math.sigmoid(x)
x = tf.cast(x, tf.float32)
return x
def _pad_bottom_mlp_input(self, numerical_features, training):
if training:
# When training, padding with a statically fixed batch size so that XLA has better shape information.
# This yields a significant (~15%) speedup for singleGPU DLRM.
padding = tf.zeros(shape=[self.batch_size // hvd.size(), self.bottom_mlp_padding],
dtype=self.compute_dtype)
x = tf.concat([numerical_features, padding], axis=1)
else:
# For inference, use tf.pad.
# This way inference can be performed with any batch size on the deployed SavedModel.
x = tf.pad(numerical_features, [[0, 0], [0, self.bottom_mlp_padding]])
return x
def _call_bottom_mlp(self, numerical_features, training):
numerical_features = tf.cast(numerical_features, dtype=self.compute_dtype)
x = self._pad_bottom_mlp_input(numerical_features, training)
with tf.name_scope('bottom_mlp'):
for l in self.bottom_mlp_layers:
x = l(x)
x = tf.expand_dims(x, axis=1)
bottom_mlp_out = x
return bottom_mlp_out
def _call_interaction(self, embedding_outputs, bottom_mlp_out):
if self.interaction == 'cross':
bottom_mlp_out = tf.reshape(bottom_mlp_out, [-1, self.bottom_mlp_dims[-1]])
x = tf.concat([bottom_mlp_out, embedding_outputs], axis=1)
x = self.interact_op(x)
else:
bottom_part_output = tf.concat([bottom_mlp_out, embedding_outputs], axis=1)
x = tf.reshape(bottom_part_output, shape=[-1, self.num_all_categorical_features + 1, self.embedding_dim[0]])
bottom_mlp_out = tf.reshape(bottom_mlp_out, shape=[-1, self.bottom_mlp_dims[-1]])
x = self.interact_op(x, bottom_mlp_out)
return x
def _call_top_mlp(self, x):
if self.interaction != 'dot_custom_cuda':
x = tf.reshape(x, [-1, self._get_top_mlp_input_features()])
x = tf.pad(x, [[0, 0], [0, self.top_mlp_padding]])
with tf.name_scope('top_mlp'):
for i, l in enumerate(self.top_mlp):
x = l(x)
return x
def save_model(self, path, save_input_signature=False):
if save_input_signature:
input_sig = model_input_signature(self, keep_original_batch_size=True)
call_graph = tf.function(self)
signatures = call_graph.get_concrete_function(input_sig[0])
else:
signatures = None
tf.keras.models.save_model(model=self, filepath=path, overwrite=True, signatures=signatures)
def force_initialization(self, batch_size=64, training=False, flattened_input=True):
if flattened_input:
embeddings_output = tf.zeros([batch_size * sum(self.embedding_dim)])
numerical_input = tf.zeros([batch_size * self.num_numerical_features])
else:
embeddings_output = tf.zeros([batch_size, sum(self.embedding_dim)])
numerical_input = tf.zeros([batch_size, self.num_numerical_features])
_ = self(numerical_input, embeddings_output, sigmoid=False, training=training)
@staticmethod
def load_model(path):
print('Loading a saved model from', path)
loaded = tf.keras.models.load_model(path)
return loaded
def save_config(self, path):
config = {k : self.__dict__[k] for k in dense_model_parameters}
with open(path, 'w') as f:
json.dump(obj=config, fp=f, indent=4)
@staticmethod
def from_config(path):
with open(path) as f:
config = json.load(fp=f)
return DenseModel(**config)
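# Usage sketch (illustrative; assumes a fully constructed DenseModel `model`
# and a hypothetical path):
#   model.force_initialization(batch_size=64)
#   model.save_model('/tmp/dense_model')
#   restored = DenseModel.load_model('/tmp/dense_model')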
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/nn/dense_model.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
from collections import OrderedDict
def create_inputs_dict(numerical_features, categorical_features):
# Passing inputs as (numerical_features, categorical_features) changes the model
# input signature to (<tensor, [list of tensors]>).
# This leads to errors while loading the saved model.
# TF flattens the inputs while loading the model,
# so the inputs are converted from (<tensor, [list of tensors]>) -> [list of tensors]
# see _set_inputs function in training_v1.py:
# https://github.com/tensorflow/tensorflow/blob/7628750678786f1b65e8905fb9406d8fbffef0db/tensorflow/python/keras/engine/training_v1.py#L2588)
inputs = OrderedDict()
inputs['numerical_features'] = numerical_features
inputs['categorical_features'] = categorical_features
return inputs
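# Usage sketch (illustrative; `model` is an assumption here): because the
# OrderedDict preserves key order, the saved model keeps a stable
# (numerical_features, categorical_features) input signature:
#   inputs = create_inputs_dict(numerical_features, categorical_features)
#   y_pred = model(inputs, sigmoid=False, training=False)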
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/nn/nn_utils.py |
# Copyright 2021 The TensorFlow Recommenders Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implements `Cross` Layer, the cross layer in Deep & Cross Network (DCN)."""
from typing import Union, Text, Optional
import tensorflow as tf
@tf.keras.utils.register_keras_serializable()
class Cross(tf.keras.layers.Layer):
"""Cross Layer in Deep & Cross Network to learn explicit feature interactions.
A layer that creates explicit and bounded-degree feature interactions
efficiently. The `call` method accepts `inputs` as a tuple of size 2
tensors. The first input `x0` is the base layer that contains the original
features (usually the embedding layer); the second input `xi` is the output
of the previous `Cross` layer in the stack, i.e., the i-th `Cross`
layer. For the first `Cross` layer in the stack, x0 = xi.
The output is x_{i+1} = x0 .* (W * xi + bias + diag_scale * xi) + xi,
where .* designates elementwise multiplication, W could be a full-rank
matrix, or a low-rank matrix U*V to reduce the computational cost, and
diag_scale increases the diagonal of W to improve training stability (
especially for the low-rank case).
References:
1. [R. Wang et al.](https://arxiv.org/pdf/2008.13535.pdf)
See Eq. (1) for full-rank and Eq. (2) for low-rank version.
2. [R. Wang et al.](https://arxiv.org/pdf/1708.05123.pdf)
Example:
```python
# after embedding layer in a functional model:
input = tf.keras.Input(shape=(None,), name='index', dtype=tf.int64)
x0 = tf.keras.layers.Embedding(input_dim=32, output_dim=6)(input)
x1 = Cross()(x0, x0)
x2 = Cross()(x0, x1)
logits = tf.keras.layers.Dense(units=10)(x2)
model = tf.keras.Model(input, logits)
```
Args:
projection_dim: project dimension to reduce the computational cost.
Default is `None` such that a full (`input_dim` by `input_dim`) matrix
W is used. If enabled, a low-rank matrix W = U*V will be used, where U
is of size `input_dim` by `projection_dim` and V is of size
`projection_dim` by `input_dim`. `projection_dim` needs to be smaller
than `input_dim`/2 to improve the model efficiency. In practice, we've
observed that `projection_dim` = d/4 consistently preserved the
accuracy of a full-rank version.
diag_scale: a non-negative float used to increase the diagonal of the
kernel W by `diag_scale`, that is, W + diag_scale * I, where I is an
identity matrix.
use_bias: whether to add a bias term for this layer. If set to False,
no bias term will be used.
kernel_initializer: Initializer to use on the kernel matrix.
bias_initializer: Initializer to use on the bias vector.
kernel_regularizer: Regularizer to use on the kernel matrix.
bias_regularizer: Regularizer to use on bias vector.
Input shape: A tuple of 2 (batch_size, `input_dim`) dimensional inputs.
Output shape: A single (batch_size, `input_dim`) dimensional output.
"""
def __init__(
self,
projection_dim: Optional[int] = None,
diag_scale: Optional[float] = 0.0,
use_bias: bool = True,
kernel_initializer: Union[
Text, tf.keras.initializers.Initializer] = "truncated_normal",
bias_initializer: Union[Text,
tf.keras.initializers.Initializer] = "zeros",
kernel_regularizer: Union[Text, None,
tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Union[Text, None,
tf.keras.regularizers.Regularizer] = None,
**kwargs):
super(Cross, self).__init__(**kwargs)
self._projection_dim = projection_dim
self._diag_scale = diag_scale
self._use_bias = use_bias
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._input_dim = None
self._supports_masking = True
if self._diag_scale < 0:
raise ValueError(
"`diag_scale` should be non-negative. Got `diag_scale` = {}".format(
self._diag_scale))
def build(self, input_shape):
last_dim = input_shape[-1]
if self._projection_dim is None:
self._dense = tf.keras.layers.Dense(
last_dim,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
use_bias=self._use_bias,
)
else:
self._dense_u = tf.keras.layers.Dense(
self._projection_dim,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_bias=False,
)
self._dense_v = tf.keras.layers.Dense(
last_dim,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
use_bias=self._use_bias,
)
self.built = True
def call(self, x0: tf.Tensor, x: Optional[tf.Tensor] = None) -> tf.Tensor:
"""Computes the feature cross.
Args:
x0: The input tensor
x: Optional second input tensor. If provided, the layer will compute
crosses between x0 and x; if not provided, the layer will compute
crosses between x0 and itself.
Returns:
Tensor of crosses.
"""
if not self.built:
self.build(x0.shape)
if x is None:
x = x0
if x0.shape[-1] != x.shape[-1]:
raise ValueError(
"`x0` and `x` dimension mismatch! Got `x0` dimension {}, and x "
"dimension {}. This case is not supported yet.".format(
x0.shape[-1], x.shape[-1]))
if self._projection_dim is None:
prod_output = self._dense(x)
else:
prod_output = self._dense_v(self._dense_u(x))
if self._diag_scale:
prod_output = prod_output + self._diag_scale * x
return x0 * prod_output + x
def get_config(self):
config = {
"projection_dim":
self._projection_dim,
"diag_scale":
self._diag_scale,
"use_bias":
self._use_bias,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
}
base_config = super(Cross, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class CrossNetwork(tf.Module):
def __init__(self, num_layers, projection_dim=None):
super().__init__()
self.cross_layers = []
for _ in range(num_layers):
self.cross_layers.append(Cross(projection_dim=projection_dim))
def __call__(self, x0):
x = x0
for cl in self.cross_layers:
x = cl(x0=x0, x=x)
return x
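# Usage sketch (illustrative): stacking three full-rank cross layers over a
# [batch_size, input_dim] tensor preserves its shape:
#   cross_network = CrossNetwork(num_layers=3)
#   x0 = tf.random.uniform(shape=[16, 32])
#   out = cross_network(x0)   # shape: [16, 32]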
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/nn/dcn.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import tensorflow as tf
import time
from .nn_utils import create_inputs_dict
class Evaluator:
def __init__(self, model, timer, auc_thresholds, max_steps=None, cast_dtype=None, distributed=False):
self.model = model
self.timer = timer
self.max_steps = max_steps
self.cast_dtype = cast_dtype
self.distributed = distributed
if self.distributed:
import horovod.tensorflow as hvd
self.hvd = hvd
else:
self.hvd = None
self.auc_metric = tf.keras.metrics.AUC(num_thresholds=auc_thresholds, curve='ROC',
summation_method='interpolation', from_logits=True)
self.bce_op = tf.keras.losses.BinaryCrossentropy(reduction=tf.keras.losses.Reduction.NONE, from_logits=True)
def _reset(self):
self.latencies, self.all_test_losses = [], []
self.auc_metric.reset_state()
@tf.function
def update_auc_metric(self, labels, y_pred):
self.auc_metric.update_state(labels, y_pred)
@tf.function
def compute_bce_loss(self, labels, y_pred):
return self.bce_op(labels, y_pred)
def _step(self, pipe):
begin = time.time()
batch = pipe.get_next()
(numerical_features, categorical_features), labels = batch
if self.cast_dtype is not None:
numerical_features = tf.cast(numerical_features, self.cast_dtype)
inputs = create_inputs_dict(numerical_features, categorical_features)
y_pred = self.model(inputs, sigmoid=False, training=False)
end = time.time()
self.latencies.append(end - begin)
if self.distributed:
y_pred = self.hvd.allgather(y_pred)
labels = self.hvd.allgather(labels)
self.timer.step_test()
if not self.distributed or self.hvd.rank() == 0:
self.update_auc_metric(labels, y_pred)
test_loss = self.compute_bce_loss(labels, y_pred)
self.all_test_losses.append(test_loss)
def __call__(self, validation_pipeline):
self._reset()
auc, test_loss = 0, 0
pipe = iter(validation_pipeline.op())
num_steps = len(validation_pipeline)
if self.max_steps is not None and self.max_steps >= 0:
num_steps = min(num_steps, self.max_steps)
for _ in range(num_steps):
self._step(pipe)
if not self.distributed or self.hvd.rank() == 0:
auc = self.auc_metric.result().numpy().item()
test_loss = tf.reduce_mean(self.all_test_losses).numpy().item()
return auc, test_loss, self.latencies
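# Usage sketch (illustrative; argument values are assumptions):
#   evaluator = Evaluator(model, timer, auc_thresholds=8192)
#   auc, test_loss, latencies = evaluator(validation_pipeline)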
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/nn/evaluator.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import time
import dllogger
import json
def init_logging(log_path, params_dict, enabled=True):
if not enabled:
return
json_backend = dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=log_path)
stdout_backend = dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)
stdout_backend._metadata['auc'].update({'format': '0:.6f'})
stdout_backend._metadata['validation_loss'].update({'format': '0:.6f'})
stdout_backend._metadata['throughput'].update({'format': ':.3e'})
stdout_backend._metadata['mean_step_time_ms'].update({'format': '0:.3f'})
stdout_backend._metadata['mean_inference_throughput'].update({'format': ':.3e'})
stdout_backend._metadata['mean_inference_latency'].update({'format': '0:.5f'})
for percentile in [90, 95, 99]:
stdout_backend._metadata[f'p{percentile}_inference_latency'].update({'format': '0:.5f'})
dllogger.init(backends=[json_backend, stdout_backend])
dllogger.log(data=params_dict, step='PARAMETER')
print("Command line flags:")
print(json.dumps(params_dict, indent=4))
class IterTimer:
def __init__(self, train_batch_size, test_batch_size, optimizer, print_freq=50,
enabled=True, benchmark_warmup_steps=None):
self.previous_tick = None
self.train_idx = 0
self.test_idx = 0
self.train_batch_size = train_batch_size
self.test_batch_size = test_batch_size
self.print_freq = print_freq
self.optimizer = optimizer
self.enabled = enabled
self.training_steps_time = 0
self.steps_measured = 0
if benchmark_warmup_steps is None:
self.benchmark_warmup_steps = print_freq * 2
else:
self.benchmark_warmup_steps = benchmark_warmup_steps
def step_train(self, loss=None):
if not self.enabled:
return
if self.train_idx < self.benchmark_warmup_steps:
self.train_idx += 1
return
if self.train_idx % self.print_freq == 0 and self.train_idx > 0:
if self.previous_tick is None:
self.previous_tick = time.time()
self.train_idx += 1
return
current_time = time.time()
elapsed = current_time - self.previous_tick
throughput = (self.train_batch_size * self.print_freq) / elapsed
throughput_in_millions = throughput / 1e6
step_time_ms = elapsed / self.print_freq * 1000
lr = f'{self.optimizer.lr.numpy().item():.4f}'
print(f'step={self.train_idx}, throughput={throughput_in_millions:.3f}M, step_time={step_time_ms:.3f} ms, learning_rate={lr}, loss={loss:.8f},')
self.previous_tick = current_time
self.training_steps_time += elapsed
self.steps_measured += self.print_freq
self.train_idx += 1
def mean_train_time(self):
if self.steps_measured == 0:
print("Run too short to measure mean training time")
return float('nan')
return self.training_steps_time / self.steps_measured
def step_test(self):
if not self.enabled:
return
if self.previous_tick is None:
self.previous_tick = time.time()
self.test_idx += 1
return
if self.test_idx % self.print_freq == self.print_freq - 1:
current_time = time.time()
elapsed = current_time - self.previous_tick
throughput = (self.test_batch_size * self.print_freq) / elapsed
throughput_in_millions = throughput / 1e6
step_time_ms = elapsed / self.print_freq * 1000
print(f'validation_step={self.test_idx}, validation_throughput={throughput_in_millions:.3f}M, step_time={step_time_ms:.3f} ms')
self.previous_tick = current_time
self.test_idx += 1
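# Usage sketch (illustrative; argument values are assumptions):
#   timer = IterTimer(train_batch_size=65536, test_batch_size=65536,
#                     optimizer=optimizer, print_freq=100)
#   timer.step_train(loss=loss_value)   # once per training step
#   timer.step_test()                   # once per validation step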
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/utils/logging.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/utils/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import horovod.tensorflow as hvd
def dist_print(*args, force=False, **kwargs):
if hvd.rank() == 0 or force:
print(*args, **kwargs)
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/utils/distributed.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import os
def get_variable_path(checkpoint_path, name):
tokens = name.split('/')
tokens = [t for t in tokens if 'model_parallel' not in t and 'data_parallel' not in t]
name = '_'.join(tokens)
name = name.replace(':', '_')
filename = name + '.npy'
return os.path.join(checkpoint_path, filename)
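# Example (illustrative): a variable named
# 'model_parallel/embedding_0/embedding_vectors:0' maps to
# '<checkpoint_path>/embedding_0_embedding_vectors_0.npy'.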
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/utils/checkpointing.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import Extension
from setuptools import find_packages
from setuptools import setup
from setuptools.dist import Distribution
__version__ = '0.0.1'
REQUIRED_PACKAGES = [
'tensorflow >= 2.3.1',
]
project_name = 'tensorflow-dot-based-interact'
from setuptools.command.install import install
class InstallPlatlib(install):
def finalize_options(self):
install.finalize_options(self)
self.install_lib = self.install_platlib
class BinaryDistribution(Distribution):
"""This class is needed in order to create OS specific wheels."""
def has_ext_modules(self):
return True
def is_pure(self):
return False
setup(
name=project_name,
version=__version__,
description=('tensorflow-dot-based-interact is a CUDA Dot Based Interact custom op for TensorFlow'),
author='NVIDIA Corporation',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
# Add in any packaged data.
include_package_data=True,
zip_safe=False,
distclass=BinaryDistribution,
cmdclass={'install': InstallPlatlib},
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
license='Apache 2.0',
keywords='tensorflow custom op machine learning',
)
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact/setup.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from tensorflow_dot_based_interact.python.ops.dot_based_interact_ops import dot_based_interact
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact/tensorflow_dot_based_interact/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact/tensorflow_dot_based_interact/python/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.framework import test_util
try:
from tensorflow_dot_based_interact.python.ops import dot_based_interact_ops
except ImportError:
import dot_based_interact_ops
def dot_based_interact_native(input, bottom_mlp_output):
# Dot Based Interact of the "input" tensor
concat_features = tf.cast(input, tf.float32)
interactions = tf.matmul(concat_features, concat_features, transpose_b=True)
ones = tf.ones_like(interactions, dtype=concat_features.dtype)
upper_tri_mask = tf.linalg.band_part(ones, 0, -1)
feature_dim = tf.shape(interactions)[-1]
lower_tri_mask = ones - upper_tri_mask
activations = tf.boolean_mask(interactions, lower_tri_mask)
out_dim = feature_dim * (feature_dim - 1) // 2
activations = tf.reshape(activations, shape=[-1, out_dim])
# Top Concatenation of the bottom_mlp_output with the interactions
bottom_mlp_output = tf.cast(tf.squeeze(bottom_mlp_output, axis=1), tf.float32)
top_concat = tf.concat([bottom_mlp_output, activations], axis=1)
# Zero Padding for performance in upstream ops
padding = tf.zeros([concat_features.shape[0], 1])
zero_padded = tf.concat([top_concat, padding], axis=1)
return zero_padded
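# Shape summary (illustrative): for an input of shape [batch, num_rows,
# num_cols] the result is [batch, num_cols + num_rows * (num_rows - 1) // 2 + 1]:
# the bottom MLP output, the strictly-lower-triangular pairwise dot products,
# and one column of zero padding.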
class DotBasedInteractTest(test.TestCase):
def input(self, batch_size, num_rows, num_cols, dtype):
# Creates two random tensors to use as sample inputs to test with:
# - input: With shape [batch_size, num_rows, num_cols]
# - bottom_mlp_output: With shape [batch_size, 1, num_cols]
# Where the first row of input is a copy of bottom_mlp_output
mlp_rows = 1
emb_rows = num_rows - mlp_rows
bottom_mlp_output = tf.random.uniform(shape=[batch_size, mlp_rows, num_cols], dtype=dtype)
embeddings = tf.random.uniform(shape=[batch_size, emb_rows, num_cols], dtype=dtype)
input = tf.concat([bottom_mlp_output, embeddings], axis=1)
return tf.Variable(input), tf.Variable(bottom_mlp_output)
def forward(self, batch_size, num_rows, num_cols, dtype):
with self.test_session() as sess:
with ops.device("/gpu:0"):
input, bottom_mlp_output = self.input(batch_size, num_rows, num_cols, dtype)
expected = dot_based_interact_native(input, bottom_mlp_output)
result = dot_based_interact_ops.dot_based_interact(input, bottom_mlp_output)
return result, expected
def backward(self, batch_size, num_rows, num_cols, dtype):
with self.test_session() as sess:
with ops.device("/gpu:0"):
input, bottom_mlp_output = self.input(batch_size, num_rows, num_cols, dtype)
with tf.GradientTape() as tape:
output = dot_based_interact_native(input, bottom_mlp_output)
expected = tape.gradient(output, [input, bottom_mlp_output])
with tf.GradientTape() as tape:
output = dot_based_interact_ops.dot_based_interact(input, bottom_mlp_output)
result = tape.gradient(output, [input, bottom_mlp_output])
return result[0], expected[0]
def test_fp32(self):
# Higher than normal tolerance on FP32 due to TF32 on Ampere
self.assertAllClose(*self.forward(16, 32, 32, tf.float32), rtol=1e-03)
def test_fp32_not_aligned(self):
self.assertAllClose(*self.forward(17, 31, 37, tf.float32), rtol=1e-03)
def test_grad_fp32(self):
self.assertAllClose(*self.backward(16, 32, 32, tf.float32), rtol=1e-03)
def test_grad_fp32_not_aligned(self):
self.assertAllClose(*self.backward(17, 31, 37, tf.float32), rtol=1e-03)
def test_fp16(self):
self.assertAllCloseAccordingToType(*self.forward(16, 32, 32, tf.float16))
def test_fp16_not_aligned(self):
self.assertAllCloseAccordingToType(*self.forward(15, 31, 37, tf.float16))
def test_grad_fp16(self):
self.assertAllCloseAccordingToType(*self.backward(16, 32, 32, tf.float16))
def test_grad_fp16_not_aligned(self):
self.assertAllCloseAccordingToType(*self.backward(17, 31, 37, tf.float16))
extended_argset_1 = [ #within both old bounds
#batch_size, num_rows, num_cols
(16,31,31),
(16,31,32),
(16,31,33),
(16,32,31),
(16,32,32),
(16,32,33),
(255,31,32),
(255,31,31),
(255,31,33),
(255,32,31),
(255,32,32),
(255,32,33)
]
extended_argset_2 = [ #exceeding num_rows bound
#batch_size, num_rows, num_cols
(16,33,31),
(16,33,32),
(16,33,33),
(255,33,31),
(255,33,32),
(255,33,33)
]
extended_argset_3 = [ #exceeding num_cols bound
#batch_size, num_rows, num_cols
(16,31,255),
(16,31,256),
(16,31,257),
(16,32,255),
(16,32,256),
(16,32,257),
(255,31,255),
(255,31,256),
(255,31,257),
(255,32,255),
(255,32,256),
(255,32,257)
]
extended_argset_4 = [ #exceeding both bounds
#batch_size, num_rows, num_cols
(16,39,255),
(16,39,256),
(16,39,257),
(16,40,255),
(16,40,256),
(16,40,257),
(16,41,255),
(16,41,256),
(16,41,257),
(255,39,255),
(255,39,256),
(255,39,257),
(255,40,255),
(255,40,256),
(255,40,257),
(255,41,255),
(255,41,256),
(255,41,257)
]
def test_fp32_extended_1(self):
# Higher than normal tolerance on FP32 due to TF32 on Ampere
for batch_size, num_rows, num_cols in self.extended_argset_1:
self.assertAllClose(*self.forward(batch_size, num_rows, num_cols, tf.float32), rtol=1e-03)
def test_grad_fp32_extended_1(self):
for batch_size, num_rows, num_cols in self.extended_argset_1:
self.assertAllClose(*self.backward(batch_size, num_rows, num_cols, tf.float32), rtol=1e-03)
def test_fp16_extended_1(self):
for batch_size, num_rows, num_cols in self.extended_argset_1:
self.assertAllCloseAccordingToType(*self.forward(batch_size, num_rows, num_cols, tf.float16))
def test_grad_fp16_extended_1(self):
for batch_size, num_rows, num_cols in self.extended_argset_1:
self.assertAllCloseAccordingToType(*self.backward(batch_size, num_rows, num_cols, tf.float16))
def test_fp32_extended_2(self):
# Higher than normal tolerance on FP32 due to TF32 on Ampere
for batch_size, num_rows, num_cols in self.extended_argset_2:
self.assertAllClose(*self.forward(batch_size, num_rows, num_cols, tf.float32), rtol=1e-03)
def test_grad_fp32_extended_2(self):
for batch_size, num_rows, num_cols in self.extended_argset_2:
self.assertAllClose(*self.backward(batch_size, num_rows, num_cols, tf.float32), rtol=1e-03)
def test_fp16_extended_2(self):
for batch_size, num_rows, num_cols in self.extended_argset_2:
self.assertAllCloseAccordingToType(*self.forward(batch_size, num_rows, num_cols, tf.float16))
def test_grad_fp16_extended_2(self):
for batch_size, num_rows, num_cols in self.extended_argset_2:
self.assertAllCloseAccordingToType(*self.backward(batch_size, num_rows, num_cols, tf.float16))
def test_fp32_extended_3(self):
# Higher than normal tolerance on FP32 due to TF32 on Ampere
for batch_size, num_rows, num_cols in self.extended_argset_3:
self.assertAllClose(*self.forward(batch_size, num_rows, num_cols, tf.float32), rtol=1e-03)
def test_grad_fp32_extended_3(self):
for batch_size, num_rows, num_cols in self.extended_argset_3:
self.assertAllClose(*self.backward(batch_size, num_rows, num_cols, tf.float32), rtol=1e-03)
def test_fp16_extended_3(self):
for batch_size, num_rows, num_cols in self.extended_argset_3:
self.assertAllCloseAccordingToType(*self.forward(batch_size, num_rows, num_cols, tf.float16))
def test_grad_fp16_extended_3(self):
for batch_size, num_rows, num_cols in self.extended_argset_3:
self.assertAllCloseAccordingToType(*self.backward(batch_size, num_rows, num_cols, tf.float16))
def test_fp32_extended_4(self):
# Higher than normal tolerance on FP32 due to TF32 on Ampere
for batch_size, num_rows, num_cols in self.extended_argset_4:
self.assertAllClose(*self.forward(batch_size, num_rows, num_cols, tf.float32), rtol=1e-03)
def test_grad_fp32_extended_4(self):
for batch_size, num_rows, num_cols in self.extended_argset_4:
self.assertAllClose(*self.backward(batch_size, num_rows, num_cols, tf.float32), rtol=1e-03)
def test_fp16_extended_4(self):
for batch_size, num_rows, num_cols in self.extended_argset_4:
self.assertAllCloseAccordingToType(*self.forward(batch_size, num_rows, num_cols, tf.float16))
def test_grad_fp16_extended_4(self):
for batch_size, num_rows, num_cols in self.extended_argset_4:
self.assertAllCloseAccordingToType(*self.backward(batch_size, num_rows, num_cols, tf.float16))
if __name__ == '__main__':
test.main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact/tensorflow_dot_based_interact/python/ops/dot_based_interact_ops_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact/tensorflow_dot_based_interact/python/ops/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
dot_based_interact_ops = load_library.load_op_library(
resource_loader.get_path_to_datafile('_dot_based_interact_ops.so'))
dot_based_interact = dot_based_interact_ops.dot_based_interact
@ops.RegisterGradient("DotBasedInteract")
def dot_based_interact_grad(op, grad):
input = op.inputs[0]
return dot_based_interact_ops.dot_based_interact_grad(input, grad)
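# Usage sketch (illustrative, based on the shapes exercised by the unit
# tests): `input` is [batch, num_rows, num_cols] with the bottom MLP output
# as its first row, and `bottom_mlp_output` is [batch, 1, num_cols]:
#   output = dot_based_interact(input, bottom_mlp_output)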
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact/tensorflow_dot_based_interact/python/ops/dot_based_interact_ops.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import os
from joblib import Parallel, delayed
import glob
import argparse
import tqdm
def process_file(f, dst):
label = '_c0'
dense_columns = [f'_c{i}' for i in range(1, 14)]
categorical_columns = [f'_c{i}' for i in range(14, 40)]
all_columns_sorted = [f'_c{i}' for i in range(0, 40)]
data = pd.read_parquet(f)
data = data[all_columns_sorted]
data[label] = data[label].astype(np.int32)
data[dense_columns] = data[dense_columns].astype(np.float32)
data[categorical_columns] = data[categorical_columns].astype(np.int32)
data = data.to_records(index=False)
data = data.tobytes()
dst_file = dst + '/' + f.split('/')[-1] + '.bin'
with open(dst_file, 'wb') as dst_fd:
dst_fd.write(data)
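# Each record written above is therefore a fixed 160 bytes:
# 1 int32 label + 13 float32 dense + 26 int32 categorical fields (40 * 4 bytes).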
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--src_dir', type=str)
parser.add_argument('--intermediate_dir', type=str)
parser.add_argument('--dst_dir', type=str)
parser.add_argument('--parallel_jobs', default=40, type=int)
args = parser.parse_args()
print('Processing train files...')
train_src_files = glob.glob(args.src_dir + '/train/*.parquet')
train_intermediate_dir = os.path.join(args.intermediate_dir, 'train')
os.makedirs(train_intermediate_dir, exist_ok=True)
Parallel(n_jobs=args.parallel_jobs)(delayed(process_file)(f, train_intermediate_dir) for f in tqdm.tqdm(train_src_files))
print('Train files conversion done')
print('Processing test files...')
test_src_files = glob.glob(args.src_dir + '/test/*.parquet')
test_intermediate_dir = os.path.join(args.intermediate_dir, 'test')
os.makedirs(test_intermediate_dir, exist_ok=True)
Parallel(n_jobs=args.parallel_jobs)(delayed(process_file)(f, test_intermediate_dir) for f in tqdm.tqdm(test_src_files))
print('Test files conversion done')
print('Processing validation files...')
valid_src_files = glob.glob(args.src_dir + '/validation/*.parquet')
valid_intermediate_dir = os.path.join(args.intermediate_dir, 'validation')
os.makedirs(valid_intermediate_dir, exist_ok=True)
Parallel(n_jobs=args.parallel_jobs)(delayed(process_file)(f, valid_intermediate_dir) for f in tqdm.tqdm(valid_src_files))
print('Validation files conversion done')
os.makedirs(args.dst_dir, exist_ok=True)
print('Concatenating train files')
os.system(f'cat {train_intermediate_dir}/*.bin > {args.dst_dir}/train_data.bin')
print('Concatenating test files')
os.system(f'cat {test_intermediate_dir}/*.bin > {args.dst_dir}/test_data.bin')
print('Concatenating validation files')
os.system(f'cat {valid_intermediate_dir}/*.bin > {args.dst_dir}/validation_data.bin')
print('Done')
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/preproc/parquet_to_binary.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import math
from tqdm import tqdm
import numpy as np
from typing import Sequence
# Workaround to avoid duplicating code from the main module, without building it outright.
import sys
sys.path.append('/workspace/dlrm')
from feature_spec import FeatureSpec, get_categorical_feature_type
def split_binary_file(
binary_file_path: str,
output_dir: str,
categorical_feature_sizes: Sequence[int],
num_numerical_features: int,
batch_size: int,
source_data_type: str = 'int32',
):
record_width = 1 + num_numerical_features + len(categorical_feature_sizes) # label + numerical + categorical
bytes_per_feature = np.__dict__[source_data_type]().nbytes
bytes_per_entry = record_width * bytes_per_feature
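# Example (illustrative): for the default Criteo layout of 1 label,
# 13 numerical and 26 categorical int32 fields, record_width is 40 and
# bytes_per_entry is 160.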
total_size = os.path.getsize(binary_file_path)
batches_num = int(math.ceil((total_size // bytes_per_entry) / batch_size))
cat_feature_types = [get_categorical_feature_type(cat_size) for cat_size in categorical_feature_sizes]
file_streams = []
try:
input_data_f = open(binary_file_path, "rb")
file_streams.append(input_data_f)
numerical_f = open(os.path.join(output_dir, "numerical.bin"), "wb+")
file_streams.append(numerical_f)
label_f = open(os.path.join(output_dir, 'label.bin'), 'wb+')
file_streams.append(label_f)
categorical_fs = []
for i in range(len(categorical_feature_sizes)):
fs = open(os.path.join(output_dir, f'cat_{i}.bin'), 'wb+')
categorical_fs.append(fs)
file_streams.append(fs)
for _ in tqdm(range(batches_num)):
raw_data = np.frombuffer(input_data_f.read(bytes_per_entry * batch_size), dtype=np.__dict__[source_data_type])
batch_data = raw_data.reshape(-1, record_width)
numerical_features = batch_data[:, 1:1 + num_numerical_features].view(dtype=np.float32)
numerical_f.write(numerical_features.astype(np.float16).tobytes())
label = batch_data[:, 0]
label_f.write(label.astype(np.bool_).tobytes())
cat_offset = num_numerical_features + 1
for cat_idx, cat_feature_type in enumerate(cat_feature_types):
cat_data = batch_data[:, (cat_idx + cat_offset):(cat_idx + cat_offset + 1)].astype(cat_feature_type)
categorical_fs[cat_idx].write(cat_data.tobytes())
finally:
for stream in file_streams:
stream.close()
def split_dataset(dataset_dir: str, output_dir: str, batch_size: int, numerical_features: int):
categorical_sizes_file = os.path.join(dataset_dir, "model_size.json")
with open(categorical_sizes_file) as f:
# model_size.json contains the max value of each feature instead of the cardinality.
# For feature spec this is changed for consistency and clarity.
categorical_cardinalities = [int(v)+1 for v in json.load(f).values()]
train_file = os.path.join(dataset_dir, "train_data.bin")
test_file = os.path.join(dataset_dir, "test_data.bin")
val_file = os.path.join(dataset_dir, "validation_data.bin")
target_train = os.path.join(output_dir, "train")
target_test = os.path.join(output_dir, "test")
target_val = os.path.join(output_dir, "validation")
os.makedirs(output_dir, exist_ok=True)
os.makedirs(target_train, exist_ok=True)
os.makedirs(target_test, exist_ok=True)
os.makedirs(target_val, exist_ok=True)
# VALIDATION chunk is ignored in feature spec on purpose
feature_spec = FeatureSpec.get_default_feature_spec(number_of_numerical_features=numerical_features,
categorical_feature_cardinalities=categorical_cardinalities)
feature_spec.to_yaml(os.path.join(output_dir, 'feature_spec.yaml'))
split_binary_file(test_file, target_test, categorical_cardinalities, numerical_features, batch_size)
split_binary_file(train_file, target_train, categorical_cardinalities, numerical_features, batch_size)
split_binary_file(val_file, target_val, categorical_cardinalities, numerical_features, batch_size)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, required=True)
parser.add_argument('--output', type=str, required=True)
parser.add_argument('--batch_size', type=int, default=32768)
parser.add_argument('--numerical_features', type=int, default=13)
args = parser.parse_args()
split_dataset(
dataset_dir=args.dataset,
output_dir=args.output,
batch_size=args.batch_size,
numerical_features=args.numerical_features
)
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/preproc/split_dataset.py |
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
from argparse import ArgumentParser
from collections import OrderedDict
from contextlib import contextmanager
from operator import itemgetter
from time import time
from pyspark import broadcast
from pyspark.sql import Row, SparkSession, Window
from pyspark.sql.functions import *
from pyspark.sql.types import *
LABEL_COL = 0
INT_COLS = list(range(1, 14))
CAT_COLS = list(range(14, 40))
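# frequency_limit format examples (illustrative): "15" keeps only categories
# seen at least 15 times in every categorical column; "32:1000,33:1000,15"
# applies a limit of 1000 to columns _c32 and _c33 and a default of 15 to the
# remaining categorical columns.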
def get_column_counts_with_frequency_limit(df, frequency_limit = None):
cols = ['_c%d' % i for i in CAT_COLS]
df = (df
.select(posexplode(array(*cols)))
.withColumnRenamed('pos', 'column_id')
.withColumnRenamed('col', 'data')
.filter('data is not null')
.groupBy('column_id', 'data')
.count())
if frequency_limit:
frequency_limit = frequency_limit.split(",")
exclude = []
default_limit = None
for fl in frequency_limit:
frequency_pair = fl.split(":")
if len(frequency_pair) == 1:
default_limit = int(frequency_pair[0])
elif len(frequency_pair) == 2:
df = df.filter((col('column_id') != int(frequency_pair[0]) - CAT_COLS[0]) | (col('count') >= int(frequency_pair[1])))
exclude.append(int(frequency_pair[0]))
if default_limit:
remain = [x - CAT_COLS[0] for x in CAT_COLS if x not in exclude]
df = df.filter((~col('column_id').isin(remain)) | (col('count') >= default_limit))
# for comparing isin and separate filter
# for i in remain:
# df = df.filter((col('column_id') != i - CAT_COLS[0]) | (col('count') >= default_limit))
return df
def assign_id_with_window(df):
windowed = Window.partitionBy('column_id').orderBy(desc('count'))
return (df
.withColumn('id', row_number().over(windowed))
.withColumnRenamed('count', 'model_count'))
def assign_low_mem_partial_ids(df):
# To avoid some scaling issues with a simple window operation, we use a more complex method
# to compute the same thing, but in a more distributed spark specific way
df = df.orderBy(asc('column_id'), desc('count'))
# The monotonically_increasing_id is the partition id in the top 31 bits and the rest
# is an increasing count of the rows within that partition. So we split it into two parts,
# the partition id part_id and the count mono_id
df = df.withColumn('part_id', spark_partition_id())
return df.withColumn('mono_id', monotonically_increasing_id() - shiftLeft(col('part_id'), 33))
def assign_low_mem_final_ids(df):
# Now we can find the minimum and maximum mono_ids within a given column/partition pair
sub_model = df.groupBy('column_id', 'part_id').agg(max('mono_id').alias('top'), min('mono_id').alias('bottom'))
sub_model = sub_model.withColumn('diff', col('top') - col('bottom') + 1)
sub_model = sub_model.drop('top')
# This window function is over aggregated column/partition pair table. It will do a running sum of the rows
# within that column
windowed = Window.partitionBy('column_id').orderBy('part_id').rowsBetween(Window.unboundedPreceding, -1)
sub_model = sub_model.withColumn('running_sum', sum('diff').over(windowed)).na.fill(0, ["running_sum"])
joined = df.withColumnRenamed('column_id', 'i_column_id')
joined = joined.withColumnRenamed('part_id', 'i_part_id')
joined = joined.withColumnRenamed('count', 'model_count')
# Then we can join the original input with the pair it is a part of
joined = joined.join(sub_model, (col('i_column_id') == col('column_id')) & (col('part_id') == col('i_part_id')))
# So with all that we can subtract bottom from mono_id, making it start at 0 for each partition
# and then add in the running_sum so the id is contiguous and unique for the entire column. + 1 to make it match the 1 based indexing
# for row_number
ret = joined.select(col('column_id'),
col('data'),
(col('mono_id') - col('bottom') + col('running_sum') + 1).cast(IntegerType()).alias('id'),
col('model_count'))
return ret
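# Worked example (illustrative): suppose one column's counts land in two
# partitions holding 3 and 2 rows. Partition 0 has mono_ids 0..2
# (bottom=0, running_sum=0 -> ids 1..3); partition 1 has mono_ids 0..1
# (bottom=0, running_sum=3 -> ids 4..5), so ids 1..5 are contiguous across
# the whole column.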
def get_column_models(combined_model):
for i in CAT_COLS:
model = (combined_model
.filter('column_id == %d' % (i - CAT_COLS[0]))
.drop('column_id'))
yield i, model
def col_of_rand_long():
return (rand() * (1 << 52)).cast(LongType())
def skewed_join(df, model, col_name, cutoff):
# Most versions of spark don't have a good way
# to deal with a skewed join out of the box.
# Some do and if you want to replace this with
# one of those that would be great.
# Because we have statistics about the skewedness
# that we can used we divide the model up into two parts
# one part is the highly skewed part and we do a
# broadcast join for that part, but keep the result in
# a separate column
b_model = broadcast(model.filter(col('model_count') >= cutoff)
.withColumnRenamed('data', col_name)
.drop('model_count'))
df = (df
.join(b_model, col_name, how='left')
.withColumnRenamed('id', 'id_tmp'))
# We also need to spread the skewed data that matched
# evenly. We will use a source of randomness for this
# but use a -1 for anything that still needs to be matched
if 'ordinal' in df.columns:
rand_column = col('ordinal')
else:
rand_column = col_of_rand_long()
df = df.withColumn('join_rand',
# null values are not in the model, they are filtered out
# but can be a source of skewedness so include them in
# the even distribution
when(col('id_tmp').isNotNull() | col(col_name).isNull(), rand_column)
.otherwise(lit(-1)))
# Null out the string data that already matched to save memory
df = df.withColumn(col_name,
when(col('id_tmp').isNotNull(), None)
.otherwise(col(col_name)))
# Now do the second join, which will be a non broadcast join.
# Sadly spark is too smart for its own good and will optimize out
# joining on a column it knows will always be a constant value.
# So we have to make a convoluted version of assigning a -1 to the
# randomness column for the model itself to work around that.
nb_model = (model
.withColumn('join_rand', when(col('model_count') < cutoff, lit(-1)).otherwise(lit(-2)))
.filter(col('model_count') < cutoff)
.withColumnRenamed('data', col_name)
.drop('model_count'))
df = (df
.join(nb_model, ['join_rand', col_name], how='left')
.drop(col_name, 'join_rand')
# Pick either join result as an answer
.withColumn(col_name, coalesce(col('id'), col('id_tmp')))
.drop('id', 'id_tmp'))
return df
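# Summary (illustrative): with cutoff=1000, categories seen >= 1000 times are
# resolved by the broadcast join into 'id_tmp', the long tail is matched by
# the shuffle join on ('join_rand', col_name), and coalesce() merges the two
# partial results into the final id column.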
def apply_models(df, models, broadcast_model = False, skew_broadcast_pct = 1.0):
# sort the models so broadcast joins come first. This is
# so we reduce the amount of shuffle data sooner than later
# If we parsed the string hex values to ints early on this would
# not make a difference.
models = sorted(models, key=itemgetter(3), reverse=True)
for i, model, original_rows, would_broadcast in models:
col_name = '_c%d' % i
if not (would_broadcast or broadcast_model):
# The data is highly skewed so we need to offset that
cutoff = int(original_rows * skew_broadcast_pct/100.0)
df = skewed_join(df, model, col_name, cutoff)
else:
# broadcast joins can handle skewed data so no need to
# do anything special
model = (model.drop('model_count')
.withColumnRenamed('data', col_name))
model = broadcast(model) if broadcast_model else model
df = (df
.join(model, col_name, how='left')
.drop(col_name)
.withColumnRenamed('id', col_name))
return df.fillna(0, ['_c%d' % i for i in CAT_COLS])
def transform_log(df, transform_log = False):
cols = ['_c%d' % i for i in INT_COLS]
if transform_log:
for col_name in cols:
df = df.withColumn(col_name, log(df[col_name] + 3))
return df.fillna(0, cols)
def would_broadcast(spark, str_path):
sc = spark.sparkContext
config = sc._jsc.hadoopConfiguration()
path = sc._jvm.org.apache.hadoop.fs.Path(str_path)
fs = sc._jvm.org.apache.hadoop.fs.FileSystem.get(config)
stat = fs.listFiles(path, True)
sum = 0
while stat.hasNext():
sum = sum + stat.next().getLen()
sql_conf = sc._jvm.org.apache.spark.sql.internal.SQLConf()
cutoff = sql_conf.autoBroadcastJoinThreshold() * sql_conf.fileCompressionFactor()
return sum <= cutoff
def delete_data_source(spark, path):
sc = spark.sparkContext
config = sc._jsc.hadoopConfiguration()
path = sc._jvm.org.apache.hadoop.fs.Path(path)
sc._jvm.org.apache.hadoop.fs.FileSystem.get(config).delete(path, True)
def load_raw(spark, folder, day_range):
label_fields = [StructField('_c%d' % LABEL_COL, IntegerType())]
int_fields = [StructField('_c%d' % i, IntegerType()) for i in INT_COLS]
str_fields = [StructField('_c%d' % i, StringType()) for i in CAT_COLS]
schema = StructType(label_fields + int_fields + str_fields)
paths = [os.path.join(folder, 'day_%d' % i) for i in day_range]
return (spark
.read
.schema(schema)
.option('sep', '\t')
.csv(paths))
def rand_ordinal(df):
# create a random long from the double precision float.
# The fraction part of a double is 52 bits, so we try to capture as much
# of that as possible
return df.withColumn('ordinal', col_of_rand_long())
def day_from_ordinal(df, num_days):
return df.withColumn('day', (col('ordinal') % num_days).cast(IntegerType()))
def day_from_input_file(df):
return df.withColumn('day', substring_index(input_file_name(), '_', -1).cast(IntegerType()))
def pseudo_sort_by_day_plus(spark, df, num_days):
# Sort is very expensive because it needs to calculate the partitions
# which in our case may involve rereading all of the data. In some cases
# we can avoid this by repartitioning the data and sorting within a single partition
shuffle_parts = int(spark.conf.get('spark.sql.shuffle.partitions'))
extra_parts = int(shuffle_parts/num_days)
if extra_parts <= 0:
df = df.repartition('day')
else:
#We want to spread out the computation to about the same amount as shuffle_parts
divided = (col('ordinal') / num_days).cast(LongType())
extra_ident = divided % extra_parts
df = df.repartition(col('day'), extra_ident)
return df.sortWithinPartitions('day', 'ordinal')
def load_combined_model(spark, model_folder):
path = os.path.join(model_folder, 'combined.parquet')
return spark.read.parquet(path)
def save_combined_model(df, model_folder, mode=None):
path = os.path.join(model_folder, 'combined.parquet')
df.write.parquet(path, mode=mode)
def delete_combined_model(spark, model_folder):
path = os.path.join(model_folder, 'combined.parquet')
delete_data_source(spark, path)
def load_low_mem_partial_ids(spark, model_folder):
path = os.path.join(model_folder, 'partial_ids.parquet')
return spark.read.parquet(path)
def save_low_mem_partial_ids(df, model_folder, mode=None):
path = os.path.join(model_folder, 'partial_ids.parquet')
df.write.parquet(path, mode=mode)
def delete_low_mem_partial_ids(spark, model_folder):
path = os.path.join(model_folder, 'partial_ids.parquet')
delete_data_source(spark, path)
def load_column_models(spark, model_folder, count_required):
for i in CAT_COLS:
path = os.path.join(model_folder, '%d.parquet' % i)
df = spark.read.parquet(path)
if count_required:
values = df.agg(sum('model_count').alias('sum'), count('*').alias('size')).collect()
else:
values = df.agg(sum('model_count').alias('sum')).collect()
yield i, df, values[0], would_broadcast(spark, path)
def save_column_models(column_models, model_folder, mode=None):
for i, model in column_models:
path = os.path.join(model_folder, '%d.parquet' % i)
model.write.parquet(path, mode=mode)
def save_model_size(model_size, path, write_mode):
if os.path.exists(path) and write_mode == 'errorifexists':
print('Error: model size file %s exists' % path)
sys.exit(1)
os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True)
with open(path, 'w') as fp:
json.dump(model_size, fp, indent=4)
_benchmark = {}
@contextmanager
def _timed(step):
start = time()
yield
end = time()
_benchmark[step] = end - start
def _parse_args():
parser = ArgumentParser()
parser.add_argument(
'--mode',
required=True,
choices=['generate_models', 'transform'])
parser.add_argument('--days', required=True)
parser.add_argument('--input_folder', required=True)
parser.add_argument('--output_folder')
parser.add_argument('--model_size_file')
parser.add_argument('--model_folder', required=True)
parser.add_argument(
'--write_mode',
choices=['overwrite', 'errorifexists'],
default='errorifexists')
parser.add_argument('--frequency_limit')
parser.add_argument('--no_numeric_log_col', action='store_true')
#Support for running in a lower memory environment
parser.add_argument('--low_mem', action='store_true')
parser.add_argument(
'--output_ordering',
choices=['total_random', 'day_random', 'any', 'input'],
default='total_random')
parser.add_argument(
'--output_partitioning',
choices=['day', 'none'],
default='none')
parser.add_argument('--dict_build_shuffle_parallel_per_day', type=int, default=2)
parser.add_argument('--apply_shuffle_parallel_per_day', type=int, default=25)
parser.add_argument('--skew_broadcast_pct', type=float, default=1.0)
parser.add_argument('--debug_mode', action='store_true')
args = parser.parse_args()
start, end = args.days.split('-')
args.day_range = list(range(int(start), int(end) + 1))
args.days = len(args.day_range)
return args
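# Example (illustrative): passing `--days 0-23` yields args.day_range == [0, 1, ..., 23]
# and args.days == 24.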
def _main():
args = _parse_args()
spark = SparkSession.builder.getOrCreate()
df = load_raw(spark, args.input_folder, args.day_range)
if args.mode == 'generate_models':
spark.conf.set('spark.sql.shuffle.partitions', args.days * args.dict_build_shuffle_parallel_per_day)
with _timed('generate models'):
col_counts = get_column_counts_with_frequency_limit(df, args.frequency_limit)
if args.low_mem:
# In low-memory mode we have to save an intermediate result,
# because if we try to do it in one query, Spark ends up assigning the
# partial ids in two different locations that are not guaranteed to line up.
# Assigning the partial ids and then writing them out prevents that
# from happening.
save_low_mem_partial_ids(
assign_low_mem_partial_ids(col_counts),
args.model_folder,
args.write_mode)
save_combined_model(
assign_low_mem_final_ids(load_low_mem_partial_ids(spark, args.model_folder)),
args.model_folder,
args.write_mode)
if not args.debug_mode:
delete_low_mem_partial_ids(spark, args.model_folder)
else:
save_combined_model(
assign_id_with_window(col_counts),
args.model_folder,
args.write_mode)
save_column_models(
get_column_models(load_combined_model(spark, args.model_folder)),
args.model_folder,
args.write_mode)
if not args.debug_mode:
delete_combined_model(spark, args.model_folder)
if args.mode == 'transform':
spark.conf.set('spark.sql.shuffle.partitions', args.days * args.apply_shuffle_parallel_per_day)
with _timed('transform'):
if args.output_ordering == 'total_random':
df = rand_ordinal(df)
if args.output_partitioning == 'day':
df = day_from_ordinal(df, args.days)
elif args.output_ordering == 'day_random':
df = rand_ordinal(df)
df = day_from_input_file(df)
elif args.output_ordering == 'input':
df = df.withColumn('ordinal', monotonically_increasing_id())
if args.output_partitioning == 'day':
df = day_from_input_file(df)
else: # any ordering
if args.output_partitioning == 'day':
df = day_from_input_file(df)
models = list(load_column_models(spark, args.model_folder, bool(args.model_size_file)))
if args.model_size_file:
save_model_size(
OrderedDict(('_c%d' % i, agg.size) for i, _, agg, _ in models),
args.model_size_file,
args.write_mode)
models = [(i, df, agg.sum, flag) for i, df, agg, flag in models]
df = apply_models(
df,
models,
not args.low_mem,
args.skew_broadcast_pct)
df = transform_log(df, not args.no_numeric_log_col)
if args.output_partitioning == 'day':
partitionBy = 'day'
else:
partitionBy = None
if args.output_ordering == 'total_random':
if args.output_partitioning == 'day':
df = psudo_sort_by_day_plus(spark, df, args.days)
else: # none
# Don't do a full sort; it is expensive. The order is random anyway,
# so just make it random
df = df.repartition('ordinal').sortWithinPartitions('ordinal')
df = df.drop('ordinal')
elif args.output_ordering == 'day_random':
df = psudo_sort_by_day_plus(spark, df, args.days)
df = df.drop('ordinal')
if args.output_partitioning != 'day':
df = df.drop('day')
elif args.output_ordering == 'input':
if args.low_mem:
# This is the slowest option. We completely disturbed the original order,
# so we have to do a full sort to restore it
df = df.orderBy('ordinal')
else:
# Applying the dictionary happened within a single task, so we are already
# close to the correct order; we just need to sort within each partition
df = df.sortWithinPartitions('ordinal')
df = df.drop('ordinal')
if args.output_partitioning != 'day':
df = df.drop('day')
# else: any ordering so do nothing the ordering does not matter
df.write.parquet(
args.output_folder,
mode=args.write_mode,
partitionBy=partitionBy)
print('=' * 100)
print(_benchmark)
if __name__ == '__main__':
_main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/preproc/spark_data_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected]), Tomasz Cheda ([email protected])
import tensorflow as tf
import os
import numpy as np
from itertools import chain
from collections import namedtuple
from typing import Optional, Tuple, List
import tqdm
from .defaults import CATEGORICAL_CHANNEL, NUMERICAL_CHANNEL, LABEL_CHANNEL, DTYPE_SELECTOR
from .feature_spec import FeatureSpec, FEATURES_SELECTOR, FILES_SELECTOR, get_categorical_feature_type
DatasetMetadata = namedtuple('DatasetMetadata', ['num_numerical_features',
'categorical_cardinalities'])
fspec_type_to_tf_type = {
'int8': tf.int8,
'int16': tf.int16,
'int32': tf.int32
}
def create_reader(filename, bytes_per_batch):
fd = os.open(filename, os.O_RDONLY)
file_len = os.fstat(fd).st_size
os.close(fd)
num_batches = int(file_len / bytes_per_batch)
file_len_patched = num_batches * bytes_per_batch
footer_bytes = file_len - file_len_patched
reader = tf.data.FixedLengthRecordDataset(filenames=[filename],
record_bytes=bytes_per_batch,
footer_bytes=footer_bytes)
return reader, num_batches
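# Worked example (illustrative): for a 1,000,000-byte file and bytes_per_batch=65536,
# num_batches == 15 and footer_bytes == 1,000,000 - 15 * 65536 == 16,960, so the
# trailing partial batch is dropped by FixedLengthRecordDataset.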
class TfRawBinaryDataset:
"""Dataset for reading labels, numerical and categorical features from
a set of binary files. Internally uses TensorFlow's FixedLengthRecordDataset
and decode_raw for best performance.
"""
def __init__(
self,
feature_spec: FeatureSpec,
instance: str,
local_categorical_feature_names: List[str],
batch_size: int = 1,
numerical_features_enabled: bool = False,
rank: int = 0,
world_size: int = 1,
concat_features: bool = False
):
self._concat_features = concat_features
self._feature_spec = feature_spec
self._batch_size = batch_size
local_batch_size = int(batch_size / world_size)
batch_sizes_per_gpu = [local_batch_size] * world_size
indices = tuple(np.cumsum([0] + list(batch_sizes_per_gpu)))
self.dp_begin_idx = indices[rank]
self.dp_end_idx = indices[rank + 1]
self._rank = rank
self._world_size = world_size
self._instance = instance
feature_spec.check_feature_spec()
self._create_readers(feature_spec, local_categorical_feature_names, numerical_features_enabled)
self._categorical_types_tf = [fspec_type_to_tf_type[feature_spec.feature_spec[feature][DTYPE_SELECTOR]] for
feature in
local_categorical_feature_names]
def _create_readers(self, feature_spec, local_categorical_feature_names, numerical_features_enabled):
categorical_features = feature_spec.channel_spec[CATEGORICAL_CHANNEL]
numerical_features = feature_spec.channel_spec[NUMERICAL_CHANNEL]
label_features = feature_spec.channel_spec[LABEL_CHANNEL]
self._number_of_numerical_features = len(numerical_features) if numerical_features_enabled else 0
set_of_categorical_features = set(categorical_features)
set_of_numerical_features = set(numerical_features)
set_of_label_features = set(label_features)
set_of_categoricals_to_read = set(local_categorical_feature_names)
bytes_per_feature = {feature_name: np.dtype(feature_spec.feature_spec[feature_name][DTYPE_SELECTOR]).itemsize
for feature_name in feature_spec.feature_spec.keys()}
chosen_instance = feature_spec.source_spec[self._instance]
categorical_feature_readers = {}
root_path = feature_spec.base_directory
number_of_batches = None
for chunk in chosen_instance:
contained_features = chunk[FEATURES_SELECTOR]
containing_file = chunk[FILES_SELECTOR][0]
path_to_open = os.path.join(root_path, containing_file)
first_feature = contained_features[0]
if first_feature in set_of_categorical_features:
# Load categorical
# We verified earlier that only one feature is present per chunk
if first_feature not in set_of_categoricals_to_read:
continue # skip chunk
bytes_per_batch = bytes_per_feature[first_feature] * self._batch_size
reader, batches = create_reader(path_to_open, bytes_per_batch)
categorical_feature_readers[first_feature] = reader
elif first_feature in set_of_numerical_features:
# Load numerical
# We verified earlier that all numerical features are in one chunk
if not numerical_features_enabled:
self._numerical = tuple()
continue # skip chunk
numerical_bytes_per_batch = bytes_per_feature[numerical_features[0]] * \
len(numerical_features) * self._batch_size
self._numerical, batches = create_reader(path_to_open, numerical_bytes_per_batch)
elif first_feature in set_of_label_features:
# Load label
# We verified earlier that there is only one label feature
label_bytes_per_batch = np.dtype(np.bool).itemsize * self._batch_size
self._label, batches = create_reader(path_to_open, label_bytes_per_batch)
else:
raise ValueError("Unknown chunk type")
if number_of_batches is not None:
if batches != number_of_batches:
raise ValueError(f'Size mismatch. Expected: {number_of_batches}, got: {batches}')
else:
number_of_batches = batches
self._categorical = tuple(categorical_feature_readers[feature] for feature in local_categorical_feature_names)
self.num_batches = number_of_batches
def __len__(self):
return self.num_batches
def op(self):
pipeline = tf.data.Dataset.zip((self._label, self._numerical, self._categorical))
pipeline = pipeline.map(self.decode_batch, num_parallel_calls=tf.data.AUTOTUNE)
pipeline = pipeline.batch(batch_size=1)
# Only one GPU is set as visible per worker, so /gpu:0 always refers to this worker's device
pipeline = pipeline.apply(tf.data.experimental.prefetch_to_device('/gpu:0'))
pipeline = pipeline.unbatch()
pipeline = pipeline.repeat()
return pipeline
@tf.function
def decode_batch(self, labels, numerical_features, categorical_features, concat_features=False):
labels = tf.io.decode_raw(labels, out_type=tf.int8)
labels = labels[self.dp_begin_idx:self.dp_end_idx]
if self._number_of_numerical_features > 0:
numerical_features = tf.io.decode_raw(numerical_features, out_type=tf.float16)
numerical_features = tf.reshape(numerical_features,
shape=[-1, self._number_of_numerical_features])
numerical_features = numerical_features[self.dp_begin_idx:self.dp_end_idx, :]
if self._categorical:
cat_data = []
for dtype, feature in zip(self._categorical_types_tf, categorical_features):
feature = tf.io.decode_raw(feature, out_type=dtype)
feature = tf.cast(feature, dtype=tf.int32)
feature = tf.expand_dims(feature, axis=1)
feature = tf.reshape(feature, [self._batch_size, 1])
cat_data.append(feature)
if self._concat_features:
cat_data = tf.concat(cat_data, axis=1)
else:
cat_data = tuple(cat_data)
return (numerical_features, cat_data), labels
@staticmethod
def generate(src_train, src_test, feature_spec, dst_dir, dst_feature_spec,
max_batches_train, max_batches_test):
categorical_sizes = feature_spec.get_categorical_sizes()
num_numerical = feature_spec.get_number_of_numerical_features()
feature_spec = FeatureSpec.get_default_feature_spec(number_of_numerical_features=num_numerical,
categorical_feature_cardinalities=categorical_sizes)
feature_spec.to_yaml(output_path=os.path.join(dst_dir, dst_feature_spec))
sources = [(src_train, 'train', max_batches_train), (src_test, 'test', max_batches_test)]
cat_feature_types = [get_categorical_feature_type(cat_size) for cat_size in categorical_sizes]
for src_dataset, split, max_batches in sources:
os.makedirs(os.path.join(dst_dir, split), exist_ok=True)
categorical_fs = []
for i in range(len(categorical_sizes)):
fs = open(os.path.join(dst_dir, split, f'cat_{i}.bin'), 'wb+')
categorical_fs.append(fs)
label_f = open(os.path.join(dst_dir, split, 'label.bin'), 'wb+')
numerical_f = open(os.path.join(dst_dir, split, "numerical.bin"), "wb+")
for batch_idx, src_batch in tqdm.tqdm(enumerate(src_dataset),
total=max_batches,
desc=f'Generating the {split} data'):
if batch_idx == max_batches:
break
(numerical_features, categorical_features), label = src_batch
for ftype, stream, feature in zip(cat_feature_types, categorical_fs, categorical_features):
if isinstance(feature, tf.RaggedTensor):
feature = feature.values
raw_data = feature.numpy().astype(ftype).tobytes()
stream.write(raw_data)
label_f.write(label.numpy().astype(np.bool).tobytes())
numerical_f.write(numerical_features.numpy().astype(np.float16).tobytes())
for stream in chain(*categorical_fs, [label_f, numerical_f]):
stream.close()
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading/raw_binary_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import os
from typing import Dict
from typing import List
import numpy as np
from .defaults import CATEGORICAL_CHANNEL, NUMERICAL_CHANNEL, LABEL_CHANNEL, \
TRAIN_MAPPING, TEST_MAPPING, \
CARDINALITY_SELECTOR, DTYPE_SELECTOR, \
SPLIT_BINARY
""" For performance reasons, numerical features are required to appear in the same order
in both source_spec and channel_spec.
For more detailed requirements, see the check_feature_spec method."""
TYPE_SELECTOR = "type"
FEATURES_SELECTOR = "features"
FILES_SELECTOR = "files"
class FeatureSpec:
"""
This class contains the metadata necessary to find, interpret and load a dataset and supply it to the model.
feature_spec section contains the definitions and per-feature metadata of features used in the model
source_spec contains the specifics of how the feature data is sourced. It is a dict of configurations, each
providing an instance of the dataset, for example a train or test part
channel_spec the configuration of which features are used by which channels of the model
metadata is an optional dictionary of additional, dataset-wide metadata
base_directory is the path relative to which all paths contained in FeatureSpec are interpreted
"""
def __init__(self, feature_spec=None, source_spec=None, channel_spec=None, metadata=None, base_directory=None):
self.feature_spec: Dict = feature_spec if feature_spec is not None else {}
self.source_spec: Dict = source_spec if source_spec is not None else {}
self.channel_spec: Dict = channel_spec if channel_spec is not None else {}
self.metadata: Dict = metadata if metadata is not None else {}
self.base_directory: str = base_directory
@classmethod
def from_yaml(cls, path):
with open(path, 'r') as feature_spec_file:
base_directory = os.path.dirname(path)
feature_spec = yaml.safe_load(feature_spec_file)
return cls.from_dict(feature_spec, base_directory=base_directory)
@classmethod
def from_dict(cls, source_dict, base_directory):
return cls(base_directory=base_directory, **source_dict)
def to_dict(self) -> Dict:
attributes_to_dump = ['feature_spec', 'source_spec', 'channel_spec', 'metadata']
return {attr: self.__dict__[attr] for attr in attributes_to_dump}
def to_string(self):
return yaml.dump(self.to_dict())
def to_yaml(self, output_path=None):
if not output_path:
output_path = self.base_directory + '/feature_spec.yaml'
with open(output_path, 'w') as output_file:
print(yaml.dump(self.to_dict()), file=output_file)
def get_number_of_numerical_features(self) -> int:
numerical_features = self.channel_spec[NUMERICAL_CHANNEL]
return len(numerical_features)
def cat_positions_to_names(self, positions: List[int]):
# Ordering needs to correspond to the one in get_categorical_sizes()
feature_names = self.get_categorical_feature_names()
return [feature_names[i] for i in positions]
def get_categorical_feature_names(self):
""" Provides the categorical feature names. The returned order should me maintained."""
return self.channel_spec[CATEGORICAL_CHANNEL]
def get_categorical_sizes(self) -> List[int]:
"""For a given feature spec, this function is expected to return the sizes in the order corresponding to the
order in the channel_spec section """
categorical_features = self.get_categorical_feature_names()
cardinalities = [self.feature_spec[feature_name][CARDINALITY_SELECTOR] for feature_name in
categorical_features]
return cardinalities
# *** Feature Spec checking *** #
def _check_feature_spec_general(self):
# check that correct dtypes are provided for all features
for feature_dict in self.feature_spec.values():
assert DTYPE_SELECTOR in feature_dict
try:
np.dtype(feature_dict[DTYPE_SELECTOR])
except TypeError:
assert False, "Type not understood by numpy"
def _check_source_spec_section_model_specific(self):
set_of_categorical_features = set(self.channel_spec[CATEGORICAL_CHANNEL])
set_of_numerical_features = set(self.channel_spec[NUMERICAL_CHANNEL])
set_of_label_features = set(self.channel_spec[LABEL_CHANNEL])
numerical_features_list = self.channel_spec[NUMERICAL_CHANNEL]
# check that mappings are the ones expected
mapping_name_list = list(self.source_spec.keys())
assert sorted(mapping_name_list) == sorted([TEST_MAPPING, TRAIN_MAPPING])
for mapping_name in [TRAIN_MAPPING, TEST_MAPPING]:
mapping = self.source_spec[mapping_name]
mapping_features = set()
for chunk in mapping:
# check that chunk has the correct type
assert chunk[TYPE_SELECTOR] == SPLIT_BINARY
contained_features = chunk[FEATURES_SELECTOR]
containing_files = chunk[FILES_SELECTOR]
# check that features are unique in mapping
for feature in contained_features:
assert feature not in mapping_features
mapping_features.add(feature)
# check that the chunk contains at least one feature
assert len(contained_features) >= 1
# check that the chunk has exactly one file
assert len(containing_files) == 1
first_feature = contained_features[0]
if first_feature in set_of_categorical_features:
# check that each categorical feature is in a different file
assert len(contained_features) == 1
# check that the type is one of the supported
assert self.feature_spec[first_feature][DTYPE_SELECTOR] in {'int8', 'int16', 'int32'}
elif first_feature in set_of_numerical_features:
# check that numerical features are all in one chunk
assert sorted(contained_features) == sorted(numerical_features_list)
# check that ordering is exactly same as in channel spec - required for performance
assert contained_features == numerical_features_list
# check numerical dtype
for feature in contained_features:
assert np.dtype(self.feature_spec[feature][DTYPE_SELECTOR]) == np.float16
elif first_feature in set_of_label_features:
# check that label feature is in a separate chunk
assert len(contained_features) == 1
# check label dtype
assert np.dtype(self.feature_spec[first_feature][DTYPE_SELECTOR]) == np.bool
else:
assert False, "Feature of unknown type"
# check that all features appeared in mapping
assert sorted(mapping_features) == sorted(list(self.feature_spec.keys()))
def _check_channel_spec_section_model_specific(self):
categorical_features_list = self.channel_spec[CATEGORICAL_CHANNEL]
numerical_features_list = self.channel_spec[NUMERICAL_CHANNEL]
label_features_list = self.channel_spec[LABEL_CHANNEL]
set_of_categorical_features = set(categorical_features_list)
set_of_numerical_features = set(numerical_features_list)
# check that exactly one label feature is selected
assert len(label_features_list) == 1
label_feature_name = label_features_list[0]
# check that channels are the ones expected
channel_name_list = list(self.channel_spec.keys())
assert sorted(channel_name_list) == sorted([CATEGORICAL_CHANNEL, NUMERICAL_CHANNEL, LABEL_CHANNEL])
# check that all features used in channel spec are exactly ones defined in feature_spec
feature_spec_features = list(self.feature_spec.keys())
channel_spec_features = list(set.union(set_of_categorical_features,
set_of_numerical_features,
{label_feature_name}))
assert sorted(feature_spec_features) == sorted(channel_spec_features)
# check that lists in channel spec contain unique names
assert sorted(list(set_of_categorical_features)) == sorted(categorical_features_list)
assert sorted(list(set_of_numerical_features)) == sorted(numerical_features_list)
def _check_feature_spec_section_model_specific(self):
# check that categorical features have cardinality provided
set_of_categorical_features = set(self.channel_spec[CATEGORICAL_CHANNEL])
for feature_name, feature_dict in self.feature_spec.items():
if feature_name in set_of_categorical_features:
assert CARDINALITY_SELECTOR in feature_dict
assert isinstance(feature_dict[CARDINALITY_SELECTOR], int)
def _check_feature_spec_model_specific(self):
self._check_channel_spec_section_model_specific()
self._check_feature_spec_section_model_specific()
self._check_source_spec_section_model_specific()
def check_feature_spec(self):
self._check_feature_spec_general()
self._check_feature_spec_model_specific()
# TODO check if cardinality fits in dtype, check if base directory is set
@staticmethod
def get_default_feature_spec(number_of_numerical_features, categorical_feature_cardinalities):
numerical_feature_fstring = "num_{}"
categorical_feature_fstring = "cat_{}.bin"
label_feature_name = "label"
numerical_file_name = "numerical.bin"
categorical_file_fstring = "{}" # TODO remove .bin from feature name, add to file name
label_file_name = "label.bin"
number_of_categorical_features = len(categorical_feature_cardinalities)
numerical_feature_names = [numerical_feature_fstring.format(i) for i in range(number_of_numerical_features)]
categorical_feature_names = [categorical_feature_fstring.format(i) for i in
range(number_of_categorical_features)]
cat_feature_types = [get_categorical_feature_type(int(cat_size)) for cat_size in
categorical_feature_cardinalities]
feature_dict = {f_name: {DTYPE_SELECTOR: str(np.dtype(f_type)), CARDINALITY_SELECTOR: f_size}
for f_name, f_type, f_size in
zip(categorical_feature_names, cat_feature_types, categorical_feature_cardinalities)}
for f_name in numerical_feature_names:
feature_dict[f_name] = {DTYPE_SELECTOR: str(np.dtype(np.float16))}
feature_dict[label_feature_name] = {DTYPE_SELECTOR: str(np.dtype(np.bool))}
channel_spec = {CATEGORICAL_CHANNEL: categorical_feature_names,
NUMERICAL_CHANNEL: numerical_feature_names,
LABEL_CHANNEL: [label_feature_name]}
source_spec = {}
for filename in (TRAIN_MAPPING, TEST_MAPPING):
source_spec[filename] = []
dst_folder = filename
numerical_file_path = os.path.join(dst_folder, numerical_file_name)
source_spec[filename].append({TYPE_SELECTOR: SPLIT_BINARY,
FEATURES_SELECTOR: numerical_feature_names,
FILES_SELECTOR: [numerical_file_path]})
label_file_path = os.path.join(dst_folder, label_file_name)
source_spec[filename].append({TYPE_SELECTOR: SPLIT_BINARY,
FEATURES_SELECTOR: [label_feature_name],
FILES_SELECTOR: [label_file_path]})
for feature_name in categorical_feature_names:
categorical_file_name = categorical_file_fstring.format(feature_name)
categorical_file_path = os.path.join(dst_folder, categorical_file_name)
source_spec[filename].append({TYPE_SELECTOR: SPLIT_BINARY,
FEATURES_SELECTOR: [feature_name],
FILES_SELECTOR: [categorical_file_path]})
return FeatureSpec(feature_spec=feature_dict, source_spec=source_spec, channel_spec=channel_spec, metadata={})
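# Example (illustrative): get_default_feature_spec(2, [100, 1000]) names the features
# num_0, num_1 (float16), cat_0.bin (int8, cardinality 100), cat_1.bin (int16,
# cardinality 1000) and label (bool), and maps each to split-binary files under
# the train/ and test/ folders.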
def get_mapping_paths(self, mapping_name: str):
label_feature_name = self.channel_spec[LABEL_CHANNEL][0]
set_of_categorical_features = set(self.channel_spec[CATEGORICAL_CHANNEL])
set_of_numerical_features = set(self.channel_spec[NUMERICAL_CHANNEL])
label_path = None
numerical_path = None
categorical_paths = dict()
for chunk in self.source_spec[mapping_name]:
local_path = os.path.join(self.base_directory, chunk[FILES_SELECTOR][0])
if chunk[FEATURES_SELECTOR][0] in set_of_numerical_features:
numerical_path = local_path
elif chunk[FEATURES_SELECTOR][0] in set_of_categorical_features:
local_feature = chunk[FEATURES_SELECTOR][0]
categorical_paths[local_feature] = local_path
elif chunk[FEATURES_SELECTOR][0] == label_feature_name:
label_path = local_path
return label_path, numerical_path, categorical_paths
def get_categorical_feature_type(size: int):
"""This function works both when max value and cardinality is passed.
Consistency by the user is required"""
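# Example (illustrative): size=100 -> np.int8, size=40000 -> np.int32,
# because 100 < iinfo(int8).max while 40000 exceeds iinfo(int16).max.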
types = (np.int8, np.int16, np.int32)
for numpy_type in types:
if size < np.iinfo(numpy_type).max:
return numpy_type
raise RuntimeError(f"Categorical feature of size {size} is too big for defined types") | DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading/feature_spec.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import yaml
import argparse
variants = dict(
# Generates 16 GiB embedding tables
criteo_t15_synthetic=dict(
num_numerical=13,
cardinalities=[7912889, 33823, 17139, 7339, 20046, 4, 7105, 1382, 63, 5554114, 582469, 245828, 11, 2209,
10667, 104, 4, 968, 15, 8165896, 2675940, 7156453, 302516, 12022, 97, 35],
hotness=26 * [1],
alpha=26 * [1.45]
),
# Generates 85 GiB embedding tables
criteo_t3_synthetic=dict(
num_numerical=13,
cardinalities=[45833188, 36747, 1572176, 345139, 11, 2209, 11268, 128, 4, 975, 15, 48937457, 17246,
11316796, 40094537, 452104, 12607, 105, 36, 7414, 20244, 4, 7115, 1442, 63, 29275261],
hotness=26 * [1],
alpha=26 * [1.45]
),
# Generates 421 GiB
criteo_t0_synthetic=dict(
num_numerical=13,
cardinalities=[227605432, 39061, 3067956, 405283, 11, 2209, 11939, 155, 4, 977, 15, 292775614, 17296,
40790948, 187188510, 590152, 12974, 109, 37, 7425, 20266, 4, 7123, 1544, 64, 130229467],
hotness=26 * [1],
alpha=26 * [1.45]
),
)
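# Example invocation (illustrative; the module path is an assumption):
#   python -m dataloading.generate_feature_spec --variant criteo_t15_synthetic --dst feature_spec.yaml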
def main():
parser = argparse.ArgumentParser(description="Generate a synthetic feature spec")
parser.add_argument('--dst', default='feature_spec.yaml', type=str, help='Output path')
parser.add_argument('--variant', choices=list(variants.keys()), required=True, type=str,
help='Variant of the synthetic dataset to be used')
args = parser.parse_args()
num_numerical, cardinalities, hotness, alphas = tuple(variants[args.variant].values())
feature_spec = {}
for i, (c, h, a) in enumerate(zip(cardinalities, hotness, alphas)):
name = f'cat_{i}'
f = dict(cardinality=c, hotness=h, alpha=a, dtype='int32')
feature_spec[name] = f
for i in range(num_numerical):
name = f'num_{i}'
feature_spec[name] = dict(dtype='float16')
feature_spec['label'] = dict(dtype='int8')
channel_spec = {}
channel_spec['categorical'] = [k for k in feature_spec.keys() if 'cat' in k]
channel_spec['numerical'] = [k for k in feature_spec.keys() if 'num' in k]
channel_spec['label'] = ['label']
source_spec = None
full_spec = dict(feature_spec=feature_spec, channel_spec=channel_spec, source_spec=source_spec)
with open(args.dst, 'w') as f:
yaml.dump(data=full_spec, stream=f)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading/generate_feature_spec.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import os
from collections import defaultdict
import numpy as np
import pandas as pd
from .feature_spec import FeatureSpec, get_categorical_feature_type
from .defaults import CATEGORICAL_CHANNEL, NUMERICAL_CHANNEL, LABEL_CHANNEL, CARDINALITY_SELECTOR
def parse_args():
parser = ArgumentParser()
parser.add_argument('--input', type=str, default='',
help='Path to input data directory')
parser.add_argument('--feature_spec_in', type=str, default='feature_spec.yaml',
help='Name of the input feature specification file')
parser.add_argument('--output', type=str, default='/data',
help='Path to output data directory')
parser.add_argument('--feature_spec_out', type=str, default='feature_spec.yaml',
help='Name of the output feature specification file')
parser.add_argument('--chunk_size', type=int, default=65536)
return parser.parse_args()
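# Example invocation (illustrative; paths are placeholders):
#   python -m dataloading.transcode --input /data/csv_dataset --output /data/binary_dataset \
#       --chunk_size 65536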
def main():
args = parse_args()
args_output = args.output
args_input = args.input
args_feature_spec_in = args.feature_spec_in
args_feature_spec_out = args.feature_spec_out
batch_size = args.chunk_size
fspec_in_path = os.path.join(args_input, args_feature_spec_in)
fspec_in = FeatureSpec.from_yaml(fspec_in_path)
input_label_feature_name = fspec_in.channel_spec[LABEL_CHANNEL][0]
input_numerical_features_list = fspec_in.channel_spec[NUMERICAL_CHANNEL]
input_categorical_features_list = fspec_in.channel_spec[CATEGORICAL_CHANNEL]
# Do a pass to establish the cardinalities: they influence the type we save the dataset as
found_cardinalities = defaultdict(lambda: 0)
for mapping_name, mapping in fspec_in.source_spec.items():
df_iterators = []
for chunk in mapping:
assert chunk['type'] == 'csv', "Only csv files are supported by this transcoder"
assert len(chunk['files']) == 1, "Only one file per chunk is supported by this transcoder"
path_to_load = os.path.join(fspec_in.base_directory, chunk['files'][0])
chunk_iterator = pd.read_csv(path_to_load, header=None, chunksize=batch_size, names=chunk['features'])
df_iterators.append(chunk_iterator)
zipped = zip(*df_iterators)
for chunks in zipped:
mapping_df = pd.concat(chunks, axis=1)
for feature in input_categorical_features_list:
mapping_cardinality = mapping_df[feature].max() + 1
previous_cardinality = found_cardinalities[feature]
found_cardinalities[feature] = max(previous_cardinality, mapping_cardinality)
for feature in input_categorical_features_list:
declared_cardinality = fspec_in.feature_spec[feature][CARDINALITY_SELECTOR]
if declared_cardinality == 'auto':
pass
else:
assert int(declared_cardinality) >= found_cardinalities[feature]
found_cardinalities[feature] = int(declared_cardinality)
categorical_cardinalities = [found_cardinalities[f] for f in input_categorical_features_list]
number_of_numerical_features = fspec_in.get_number_of_numerical_features()
fspec_out = FeatureSpec.get_default_feature_spec(number_of_numerical_features=number_of_numerical_features,
categorical_feature_cardinalities=categorical_cardinalities)
fspec_out.base_directory = args.output
for mapping_name, mapping in fspec_in.source_spec.items():
# open files for outputting
label_path, numerical_path, categorical_paths = fspec_out.get_mapping_paths(mapping_name)
for path in [label_path, numerical_path, *categorical_paths.values()]:
os.makedirs(os.path.dirname(path), exist_ok=True)
output_categorical_features_list = fspec_out.get_categorical_feature_names()
numerical_f = open(numerical_path, "ab+")
label_f = open(label_path, "ab+")
categorical_fs = [open(categorical_paths[name], "ab+") for name in output_categorical_features_list]
categorical_feature_types = [get_categorical_feature_type(card) for card in categorical_cardinalities]
df_iterators = []
for chunk in mapping:
# We checked earlier it's a single file chunk
path_to_load = os.path.join(fspec_in.base_directory, chunk['files'][0])
chunk_iterator = pd.read_csv(path_to_load, header=None, chunksize=batch_size, names=chunk['features'])
df_iterators.append(chunk_iterator)
zipped = zip(*df_iterators)
for chunks in zipped:
mapping_df = pd.concat(chunks, axis=1) # This takes care of making sure feature names are unique
# Choose the right columns
numerical_df = mapping_df[input_numerical_features_list]
categorical_df = mapping_df[input_categorical_features_list]
label_df = mapping_df[[input_label_feature_name]]
# Append them to the binary files
numerical_f.write(numerical_df.values.astype(np.float16).tobytes())
label_f.write(label_df.values.astype(np.bool).tobytes())
categorical_arr = categorical_df.values
for cat_idx, cat_feature_type in enumerate(categorical_feature_types):
categorical_fs[cat_idx].write(
categorical_arr[:, cat_idx].astype(cat_feature_type).tobytes())
feature_spec_save_path = os.path.join(args_output, args_feature_spec_out)
fspec_out.to_yaml(output_path=feature_spec_save_path)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading/transcode.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading/__init__.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import os
import tqdm
from absl import app, flags
from .defaults import DTYPE_SELECTOR, TRAIN_MAPPING, TEST_MAPPING
from .synthetic_dataset import SyntheticDataset
from .feature_spec import FeatureSpec
FLAGS = flags.FLAGS
flags.DEFINE_integer("synthetic_dataset_num_entries",
default=int(32768 * 1024), # 1024 batches for single-GPU training by default
help="Number of samples per epoch for the synthetic dataset."
"This is rounded down to a multiple of batch size")
flags.DEFINE_integer("synthetic_dataset_batch_size",
default=int(32768), help="Batch size - number of unique records")
flags.DEFINE_integer("num_numerical_features", default=13,
help="Number of numerical features in the dataset. Defaults to 13 for the Criteo Terabyte Dataset")
flags.DEFINE_list("synthetic_dataset_table_sizes", default=','.join(26 * [str(10 ** 5)]),
help="Cardinality of each categorical feature")
flags.DEFINE_string("feature_spec", default=None,
help="Feature specification file describing the desired dataset."
"Only feature_spec and channel_spec sections are required and used."
"Overrides num_numerical_features and synthetic_dataset_table_sizes")
flags.DEFINE_string("synthetic_dataset_dir", default="/tmp/dlrm_synthetic_data",
help="Destination of the saved synthetic dataset")
flags.DEFINE_integer("seed", default=12345, help="Set a seed for generating synthetic data")
def write_dataset_to_disk(dataset_train, dataset_test, feature_spec: FeatureSpec) -> None:
feature_spec.check_feature_spec() # We rely on the feature spec being properly formatted
categorical_features_list = feature_spec.get_categorical_feature_names()
categorical_features_types = [feature_spec.feature_spec[feature_name][DTYPE_SELECTOR]
for feature_name in categorical_features_list]
number_of_numerical_features = feature_spec.get_number_of_numerical_features()
number_of_categorical_features = len(categorical_features_list)
for mapping_name, dataset in zip((TRAIN_MAPPING, TEST_MAPPING),
(dataset_train, dataset_test)):
file_streams = []
label_path, numerical_path, categorical_paths = feature_spec.get_mapping_paths(mapping_name)
try:
os.makedirs(os.path.dirname(numerical_path), exist_ok=True)
numerical_f = open(numerical_path, "wb+")
file_streams.append(numerical_f)
os.makedirs(os.path.dirname(label_path), exist_ok=True)
label_f = open(label_path, 'wb+')
file_streams.append(label_f)
categorical_fs = []
for feature_name in categorical_features_list:
local_path = categorical_paths[feature_name]
os.makedirs(os.path.dirname(local_path), exist_ok=True)
fs = open(local_path, 'wb+')
categorical_fs.append(fs)
file_streams.append(fs)
pipe = iter(dataset.op())
for _ in tqdm.tqdm(
range(len(dataset)), desc=mapping_name + " dataset saving"):
(numerical, categorical), label = pipe.get_next()
categoricals = tf.split(categorical, number_of_categorical_features, axis=1)
assert (numerical.shape[-1] == number_of_numerical_features)
assert (len(categoricals) == number_of_categorical_features)
numerical_f.write(numerical.numpy().astype('float16').tobytes()) # numerical is always float16
label_f.write(label.numpy().astype('bool').tobytes()) # label is always boolean
for cat_type, cat_tensor, cat_file in zip(categorical_features_types, categoricals, categorical_fs):
cat_file.write(cat_tensor.numpy().astype(cat_type).tobytes())
finally:
for stream in file_streams:
stream.close()
feature_spec.to_yaml()
def main(argv):
tf.random.set_seed(FLAGS.seed)
number_of_entries = FLAGS.synthetic_dataset_num_entries
batch_size = FLAGS.synthetic_dataset_batch_size
number_of_batches = number_of_entries // batch_size
if FLAGS.feature_spec is not None:
fspec = FeatureSpec.from_yaml(FLAGS.feature_spec)
else:
cardinalities = [int(s) for s in FLAGS.synthetic_dataset_table_sizes]
fspec = FeatureSpec.get_default_feature_spec(number_of_numerical_features=FLAGS.num_numerical_features,
categorical_feature_cardinalities=cardinalities)
fspec.base_directory = FLAGS.synthetic_dataset_dir
fspec.check_feature_spec()
number_of_numerical_features = fspec.get_number_of_numerical_features()
categorical_feature_sizes = fspec.get_categorical_sizes()
train_dataset = SyntheticDataset(batch_size=batch_size, num_numerical_features=number_of_numerical_features,
categorical_feature_cardinalities=categorical_feature_sizes,
num_batches=number_of_batches)
test_dataset = SyntheticDataset(batch_size=batch_size, num_numerical_features=number_of_numerical_features,
categorical_feature_cardinalities=categorical_feature_sizes,
num_batches=number_of_batches)
write_dataset_to_disk(
dataset_train=train_dataset,
dataset_test=test_dataset,
feature_spec=fspec
)
if __name__ == '__main__':
app.run(main)
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading/prepare_synthetic_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import os
import argparse
from .feature_spec import FeatureSpec
from .dataloader import create_input_pipelines
from .split_tfrecords_multihot_dataset import SplitTFRecordsDataset
from .raw_binary_dataset import TfRawBinaryDataset
def parse_args():
p = argparse.ArgumentParser(description="Transcribe from one dataset format to another")
p.add_argument('--src_dataset_path', default='synthetic_dataset', type=str, help='Path to the source directory')
p.add_argument('--src_dataset_type', default='tf_raw',
choices=['tf_raw', 'synthetic', 'binary_multihot', 'tfrecords_multihot', 'nvt', 'split_tfrecords'],
help='The type of the source dataset')
p.add_argument('--src_feature_spec', default='feature_spec.yaml', type=str, help='Feature spec filename')
p.add_argument('--src_batch_size', default=65536, type=int, help='Batch size of the source dataset')
p.add_argument('--src_synthetic_dataset_use_feature_spec', action='store_true',
help='Use feature spec for the synthetic dataset')
p.add_argument('--dst_dataset_path', default='synthetic_dataset', type=str, help='Path to the destination directory')
p.add_argument('--dst_prebatch_size', default=65536, type=int, help='Prebatch size for the dst dataset')
p.add_argument('--dst_feature_spec', type=str, default='feature_spec.yaml',
help='Dst feature spec filename')
p.add_argument('--dst_dataset_type', default='split_tfrecords',
choices=['tf_raw', 'synthetic', 'binary_multihot', 'tfrecords_multihot', 'nvt', 'split_tfrecords'],
help='The type of the source dataset')
p.add_argument('--max_batches_train', default=-1, type=int,
help='Max number of train batches to transcribe. Passing -1 will transcribe all the data.')
p.add_argument('--max_batches_test', default=-1, type=int,
help='Max number of test batches to transcribe. Passing -1 will transcribe all the data.')
p.add_argument('--train_only', action='store_true', default=False, help='Only transcribe the train dataset.')
return p.parse_args()
def main():
args = parse_args()
fspec_path = os.path.join(args.src_dataset_path, args.src_feature_spec)
feature_spec = FeatureSpec.from_yaml(fspec_path)
table_ids = list(range(len(feature_spec.get_categorical_sizes())))
src_train, src_test = create_input_pipelines(dataset_type=args.src_dataset_type, dataset_path=args.src_dataset_path,
train_batch_size=args.src_batch_size,
test_batch_size=args.src_batch_size,
table_ids=table_ids, feature_spec=args.src_feature_spec,
rank=0, world_size=1)
os.makedirs(args.dst_dataset_path, exist_ok=True)
if args.dst_dataset_type == 'split_tfrecords':
SplitTFRecordsDataset.generate(src_train=src_train, src_test=src_test, feature_spec=feature_spec,
dst_dir=args.dst_dataset_path, dst_feature_spec=args.dst_feature_spec,
prebatch_size=args.dst_prebatch_size, max_batches_train=args.max_batches_train,
max_batches_test=args.max_batches_test)
elif args.dst_dataset_type == 'tf_raw':
TfRawBinaryDataset.generate(src_train=src_train, src_test=src_test, feature_spec=feature_spec,
dst_dir=args.dst_dataset_path, dst_feature_spec=args.dst_feature_spec,
max_batches_train=args.max_batches_train, max_batches_test=args.max_batches_test)
else:
raise ValueError(f'Unimplemented dst_dataset_type: {args.dst_dataset_type}')
print('Done.')
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading/transcribe.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import tensorflow as tf
import os
import glob
import json
import numpy as np
import tqdm
def serialize_composite(rt):
components = tf.nest.flatten(rt, expand_composites=True)
tensor = tf.stack([tf.io.serialize_tensor(t) for t in components])
return tf.io.serialize_tensor(tensor)
def deserialize_composite(serialized, type_spec):
data = tf.io.parse_tensor(serialized, tf.string)
component_specs = tf.nest.flatten(type_spec, expand_composites=True)
components = [tf.io.parse_tensor(data[i], out_type=spec.dtype)
for i, spec in enumerate(component_specs)]
return tf.nest.pack_sequence_as(type_spec, components, expand_composites=True)
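# Round-trip sketch (illustrative): a ragged batch survives serialization when the
# type spec matches the tensor's components.
#   rt = tf.ragged.constant([[1, 2], [3]], dtype=tf.int32)
#   spec = tf.RaggedTensorSpec(shape=[2, None], dtype=tf.int32,
#                              ragged_rank=1, row_splits_dtype=tf.int64)
#   restored = deserialize_composite(serialize_composite(rt), spec)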
def length_filename(dataset_dir):
return f'{dataset_dir}/length.json'
class PrebatchStreamWriter:
def __init__(self, dst_dir, dtype, feature_name='data', multihot=False, batches_per_file=1):
self.dst_dir = dst_dir
os.makedirs(dst_dir, exist_ok=True)
self.dtype = dtype
self.feature_name = feature_name
self.multihot = multihot
self.batches_per_file = batches_per_file
self.writer = None
self._file_idx = -1
self._batches_saved = 0
def _new_file(self):
if self.writer:
self.writer.close()
self._file_idx += 1
self.writer = tf.io.TFRecordWriter(os.path.join(self.dst_dir, f'data_{self._file_idx}.tfrecords'))
def save(self, prebatch):
if self._batches_saved % self.batches_per_file == 0:
self._new_file()
if self.multihot:
serialized = serialize_composite(tf.cast(prebatch, self.dtype)).numpy()
else:
if isinstance(prebatch, tf.RaggedTensor):
prebatch = prebatch.to_tensor()
serialized = tf.io.serialize_tensor(tf.cast(prebatch, dtype=self.dtype)).numpy()
features = tf.train.Features(feature={
self.feature_name: tf.train.Feature(bytes_list=tf.train.BytesList(value=[serialized]))
})
example = tf.train.Example(features=features)
self.writer.write(example.SerializeToString())
self._batches_saved += 1
def close(self):
self.writer.close()
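# Usage sketch (illustrative; directory and batch source are placeholders):
#   writer = PrebatchStreamWriter(dst_dir='/tmp/numerical', dtype=tf.float16, batches_per_file=2)
#   for prebatch in prebatches:
#       writer.save(prebatch)
#   writer.close()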
def create_writer(dst_dir, dtype, feature_name='data', multihot=False,
format='tfrecords', num_features=1, batches_per_file=1):
if format == 'tfrecords':
writer = PrebatchStreamWriter(dst_dir=dst_dir, dtype=dtype, multihot=multihot, batches_per_file=batches_per_file)
metadata = dict(format=format, dtype=dtype.name, multihot=multihot,
feature_name=feature_name,num_features=num_features, batches_per_file=batches_per_file)
with open(os.path.join(dst_dir, 'format.json'), 'w') as f:
json.dump(metadata, f)
return writer
else:
raise ValueError(f'Unknown feature format: {format}')
def create_reader(src_dir, batch_size, world_size=1, rank=0, data_parallel=True):
with open(os.path.join(src_dir, 'format.json')) as f:
metadata = json.load(f)
if metadata['format'] == 'tfrecords':
reader = SingleFeatureTFRecordsFileReader(dst_dir=src_dir, batch_size=batch_size,
dtype=tf.dtypes.as_dtype(metadata['dtype']),
multihot=metadata['multihot'],
feature_name=metadata['feature_name'],
num_features=metadata['num_features'],
world_size=world_size, rank=rank, data_parallel=data_parallel)
return reader
else:
raise ValueError(f'Unknown feature format: {metadata["format"]}')
class SingleFeatureTFRecordsFileReader:
def __init__(self, dst_dir, batch_size, dtype, rank=0, world_size=1,
num_features=1, feature_name='data', multihot=False,
data_parallel=True, parallel_calls=4):
self.filenames = glob.glob(os.path.join(dst_dir, 'data_*.tfrecords'))
self.feature_name = feature_name
self.multihot = multihot
self.batch_size = batch_size
self.num_features = num_features
self.dtype = dtype
self.feature_description = {self.feature_name: tf.io.FixedLenFeature([], tf.string, default_value='')}
self.data_parallel = data_parallel
self.parallel_calls = parallel_calls
self.rank = rank
self.world_size = world_size
if self.data_parallel:
local_batch_size = int(self.batch_size / world_size)
batch_sizes_per_gpu = [local_batch_size] * world_size
indices = tuple(np.cumsum([0] + list(batch_sizes_per_gpu)))
self.dp_begin_idx = indices[rank]
self.dp_end_idx = indices[rank + 1]
def __len__(self):
# The per-feature reader does not track the dataset length;
# SplitTFRecordsDataset reads it from length.json instead.
raise NotImplementedError('Length is tracked by SplitTFRecordsDataset, not by this reader')
def _data_parallel_split(self, x):
return x[self.dp_begin_idx:self.dp_end_idx, ...]
def _parse_function(self, proto):
parsed = tf.io.parse_single_example(proto, self.feature_description)
if self.multihot:
rt_spec = tf.RaggedTensorSpec(dtype=tf.int32, shape=[self.batch_size, None],
row_splits_dtype=tf.int32, ragged_rank=1)
tensor = parsed[self.feature_name]
tensor = deserialize_composite(serialized=tensor, type_spec=rt_spec)
else:
tensor = tf.io.parse_tensor(parsed[self.feature_name], out_type=self.dtype)
tensor = tf.reshape(tensor, shape=[self.batch_size, self.num_features])
if self.data_parallel:
tensor = self._data_parallel_split(tensor)
return tensor
def op(self):
num_parallel_reads = 8
dataset = tf.data.TFRecordDataset(self.filenames, num_parallel_reads=num_parallel_reads)
dataset = dataset.map(self._parse_function, num_parallel_calls=self.parallel_calls, deterministic=True)
dataset = dataset.prefetch(buffer_size=1)
dataset = dataset.repeat()
return dataset
class SplitTFRecordsDataset:
def __init__(self, dataset_dir, feature_ids, num_numerical, batch_size, world_size, rank):
self.dataset_dir = dataset_dir
self.feature_ids = feature_ids
self.num_numerical = num_numerical
self.batch_size = batch_size
self.world_size = world_size
self.rank = rank
self.numerical_reader = create_reader(src_dir=os.path.join(dataset_dir, 'numerical'),
world_size=world_size, rank=rank, batch_size=batch_size,
data_parallel=True)
self.label_reader = create_reader(src_dir=os.path.join(dataset_dir, 'label'),
world_size=world_size, rank=rank, data_parallel=True,
batch_size=batch_size)
self.categorical_readers = []
for feature_id in feature_ids:
reader = create_reader(src_dir=os.path.join(dataset_dir, f'categorical_{feature_id}'),
batch_size=batch_size, data_parallel=False)
self.categorical_readers.append(reader)
filename = length_filename(self.dataset_dir)
with open(filename) as f:
self.length = json.load(f)
def __len__(self):
return self.length
def op(self):
categorical_tf_datasets = tuple(d.op() for d in self.categorical_readers)
features_datasets = (self.numerical_reader.op(), categorical_tf_datasets)
structure_to_zip = (features_datasets, self.label_reader.op())
dataset = tf.data.Dataset.zip(structure_to_zip)
return dataset
@staticmethod
def generate(src_train, src_test, feature_spec, dst_dir, dst_feature_spec, prebatch_size, max_batches_train, max_batches_test):
local_table_sizes = feature_spec.get_categorical_sizes()
names = feature_spec.get_categorical_feature_names()
local_table_hotness = [feature_spec.feature_spec[name].get('hotness', 1) for name in names]
os.makedirs(dst_dir, exist_ok=True)
num_files = 1
feature_spec.to_yaml(output_path=os.path.join(dst_dir, dst_feature_spec))
sources = [(src_train, 'train', max_batches_train), (src_test, 'test', max_batches_test)]
for src, dst_suffix, max_batches in sources:
num_batches = min(len(src), max_batches)
if num_batches % num_files != 0:
raise ValueError('The length of the dataset must be evenly divisible by the number of TFRecords files')
dst_subdir = os.path.join(dst_dir, dst_suffix)
numerical_writer = create_writer(dst_dir=os.path.join(dst_subdir, 'numerical'), dtype=tf.float16,
num_features=feature_spec.get_number_of_numerical_features(),
batches_per_file=num_batches // num_files)
label_writer = create_writer(dst_dir=os.path.join(dst_subdir, 'label'), dtype=tf.int8,
batches_per_file=num_batches // num_files)
categorical_writers = []
for i, (hotness, cardinality) in enumerate(zip(local_table_hotness, local_table_sizes)):
# TODO: possibly optimize the dtype by using cardinality here
writer = create_writer(dst_dir=os.path.join(dst_subdir, f'categorical_{i}'), dtype=tf.int32,
multihot=hotness > 1,
batches_per_file=num_batches // num_files)
categorical_writers.append(writer)
with open(length_filename(dst_subdir), 'w') as f:
json.dump(num_batches, f)
for batch_idx, batch in tqdm.tqdm(enumerate(src.op()),
total=max_batches,
desc=f'Generating the {dst_suffix} data'):
print('writing batch: ', batch_idx)
if batch_idx == max_batches:
break
(numerical, categorical), label = batch
if label.shape[0] != prebatch_size:
raise ValueError(f'Source dataset batch size ({label.shape[0]}) '
f'different from the prebatch size ({prebatch_size}). Unsupported.')
numerical_writer.save(numerical)
label_writer.save(label)
for writer, feature in zip(categorical_writers, categorical):
writer.save(feature)
numerical_writer.close()
label_writer.close()
for writer in categorical_writers:
writer.close()
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading/split_tfrecords_multihot_dataset.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CATEGORICAL_CHANNEL = "categorical"
NUMERICAL_CHANNEL = "numerical"
LABEL_CHANNEL = "label"
SPLIT_BINARY = "split_binary"
TRAIN_MAPPING = "train"
TEST_MAPPING = "test"
DTYPE_SELECTOR = "dtype"
CARDINALITY_SELECTOR = "cardinality"
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading/defaults.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected]), Tomasz Cheda ([email protected])
import os
from .defaults import TRAIN_MAPPING, TEST_MAPPING
from .feature_spec import FeatureSpec
from .raw_binary_dataset import TfRawBinaryDataset, DatasetMetadata
from .synthetic_dataset import SyntheticDataset
from .split_tfrecords_multihot_dataset import SplitTFRecordsDataset
def get_dataset_metadata(dataset_path, feature_spec):
fspec_path = os.path.join(dataset_path, feature_spec)
feature_spec = FeatureSpec.from_yaml(fspec_path)
dataset_metadata = DatasetMetadata(num_numerical_features=feature_spec.get_number_of_numerical_features(),
categorical_cardinalities=feature_spec.get_categorical_sizes())
return dataset_metadata
def _create_pipelines_synthetic_fspec(**kwargs):
fspec_path = os.path.join(kwargs['dataset_path'], kwargs['feature_spec'])
feature_spec = FeatureSpec.from_yaml(fspec_path)
dataset_metadata = DatasetMetadata(num_numerical_features=feature_spec.get_number_of_numerical_features(),
categorical_cardinalities=feature_spec.get_categorical_sizes())
local_table_sizes = [dataset_metadata.categorical_cardinalities[i] for i in kwargs['table_ids']]
names = feature_spec.get_categorical_feature_names()
local_names = [names[i] for i in kwargs['table_ids']]
local_table_hotness = [feature_spec.feature_spec[name]["hotness"] for name in local_names]
local_table_alpha = [feature_spec.feature_spec[name]["alpha"] for name in local_names]
print('Local table sizes: ', local_table_sizes)
print('Local table hotness: ', local_table_hotness)
train_dataset = SyntheticDataset(batch_size=kwargs['train_batch_size'],
num_numerical_features=dataset_metadata.num_numerical_features,
categorical_feature_cardinalities=local_table_sizes,
categorical_feature_hotness=local_table_hotness,
categorical_feature_alpha=local_table_alpha,
num_batches=kwargs.get('synthetic_dataset_train_batches', int(1e9)),
num_workers=kwargs['world_size'],
variable_hotness=False)
test_dataset = SyntheticDataset(batch_size=kwargs['test_batch_size'],
num_numerical_features=dataset_metadata.num_numerical_features,
categorical_feature_cardinalities=local_table_sizes,
categorical_feature_hotness=local_table_hotness,
categorical_feature_alpha=local_table_alpha,
num_batches=kwargs.get('synthetic_dataset_valid_batches', int(1e9)),
num_workers=kwargs['world_size'],
variable_hotness=False)
return train_dataset, test_dataset
def _create_pipelines_tf_raw(**kwargs):
fspec_path = os.path.join(kwargs['dataset_path'], kwargs['feature_spec'])
feature_spec = FeatureSpec.from_yaml(fspec_path)
local_categorical_names = feature_spec.cat_positions_to_names(kwargs['table_ids'])
train_dataset = TfRawBinaryDataset(feature_spec=feature_spec,
instance=TRAIN_MAPPING,
batch_size=kwargs['train_batch_size'],
numerical_features_enabled=True,
local_categorical_feature_names=local_categorical_names,
rank=kwargs['rank'],
world_size=kwargs['world_size'],
concat_features=kwargs['concat_features'])
test_dataset = TfRawBinaryDataset(feature_spec=feature_spec,
instance=TEST_MAPPING,
batch_size=kwargs['test_batch_size'],
numerical_features_enabled=True,
local_categorical_feature_names=local_categorical_names,
rank=kwargs['rank'],
world_size=kwargs['world_size'],
concat_features=kwargs['concat_features'])
return train_dataset, test_dataset
def _create_pipelines_split_tfrecords(**kwargs):
fspec_path = os.path.join(kwargs['dataset_path'], kwargs['feature_spec'])
feature_spec = FeatureSpec.from_yaml(fspec_path)
train_dataset = SplitTFRecordsDataset(dataset_dir=feature_spec.base_directory + '/train/',
feature_ids=kwargs['table_ids'],
num_numerical=feature_spec.get_number_of_numerical_features(),
rank=kwargs['rank'], world_size=kwargs['world_size'],
batch_size=kwargs['train_batch_size'])
test_dataset = SplitTFRecordsDataset(dataset_dir=feature_spec.base_directory + '/test/',
feature_ids=kwargs['table_ids'],
num_numerical=feature_spec.get_number_of_numerical_features(),
rank=kwargs['rank'], world_size=kwargs['world_size'],
batch_size=kwargs['test_batch_size'])
return train_dataset, test_dataset
def create_input_pipelines(dataset_type, dataset_path, train_batch_size, test_batch_size,
table_ids, feature_spec, rank=0, world_size=1, concat_features=False):
# pass along all arguments except dataset type
kwargs = locals()
del kwargs['dataset_type']
    # the synthetic pipeline is always generated from the feature spec;
    # a non-feature-spec synthetic generator is not defined in this module
    if dataset_type == 'synthetic':
        return _create_pipelines_synthetic_fspec(**kwargs)
elif dataset_type == 'tf_raw':
return _create_pipelines_tf_raw(**kwargs)
elif dataset_type == 'split_tfrecords':
return _create_pipelines_split_tfrecords(**kwargs)
else:
raise ValueError(f'Unsupported dataset type: {dataset_type}')
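# Usage sketch (illustrative; the dataset path, table count and batch sizes below
# are assumptions):
#
#   train, test = create_input_pipelines(dataset_type='tf_raw',
#                                        dataset_path='/data/criteo',
#                                        train_batch_size=65536,
#                                        test_batch_size=65536,
#                                        table_ids=list(range(26)),
#                                        feature_spec='feature_spec.yaml')
#   for (numerical, categorical), labels in train.op():
#       ...  # one training step per batch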
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading/dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
from . import dataloader
import argparse
import os
import time
import tensorflow as tf
import horovod.tensorflow as hvd
from .feature_spec import FeatureSpec
def compute_bytes_per_batch(batch):
    # common dtypes produced by the pipelines
    bytes_per_dtype = dict(
        float16=2,
        float32=4,
        int32=4,
        int64=8,
        int8=1
    )
(numerical, categorical), label = batch
numerical_bytes = numerical.shape[0] * numerical.shape[1] * bytes_per_dtype[numerical.dtype.name]
categorical_bytes = []
for c in categorical:
if hasattr(c, 'flat_values'):
# ragged tensor
values = c.flat_values
values_bytes = values.shape[0] * bytes_per_dtype[values.dtype.name]
categorical_bytes.append(values_bytes)
else:
# dense tensor
num_bytes = c.shape[0] * c.shape[1] * bytes_per_dtype[c.dtype.name]
categorical_bytes.append(num_bytes)
categorical_bytes = sum(categorical_bytes)
label_bytes = label.shape[0] * bytes_per_dtype[label.dtype.name]
return numerical_bytes + categorical_bytes + label_bytes
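# Illustrative example (not part of the original file): estimating dataloader
# bandwidth from a measured step time; the 0.05 s figure is made up.
#
#   nbytes = compute_bytes_per_batch(batch)
#   gb_per_second = nbytes / 0.05 / 1e9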
def main():
parser = argparse.ArgumentParser(description="Benchmark a dataloader")
    parser.add_argument('--dataset_path', default='synthetic_dataset', type=str,
                        help='Path to the dataset directory')
parser.add_argument('--dataset_type', type=str, choices=['tf_raw', 'split_tfrecords'])
parser.add_argument('--batch_size', default=65536, type=int, help='Batch size')
    parser.add_argument('--xla', default=False, action='store_true', help='Enable XLA auto-clustering')
    parser.add_argument('--amp', default=False, action='store_true', help='Enable automatic mixed precision')
    parser.add_argument('--run_eagerly', default=False, action='store_true', help='Run tf.functions eagerly (for debugging)')
    parser.add_argument('--tfdata_debug', default=False, action='store_true', help='Enable tf.data debug mode')
parser.add_argument('--feature_spec', type=str, default='feature_spec.yaml',
help='Filename of the feature spec describing the dataset')
parser.add_argument('--max_batches', type=int, default=100,
help='Stop after this many batches, even if there is still some data to be read')
parser.add_argument('--warmup_steps', type=int, default=5,
help='Number of warmup steps that are not benchmarked')
parser.add_argument('--sleep', type=int, default=0,
help='Sleep for this many seconds after creating the dataloader. For debug only.')
args = parser.parse_args()
args.synthetic_dataset_use_feature_spec = False
args.valid_batch_size = args.batch_size
if args.dataset_type == 'nvt' and not args.run_eagerly:
raise ValueError('NVT dataloader does not support graph mode. Please specify --run_eagerly to use it.')
if args.xla:
os.environ['TF_XLA_FLAGS'] = '--tf_xla_auto_jit=fusible'
hvd.init()
gpus = tf.config.experimental.list_physical_devices('GPU')
if args.dataset_type != 'nvt':
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
visible_gpus = []
if gpus:
visible_gpus = gpus[hvd.local_rank()]
tf.config.experimental.set_visible_devices(visible_gpus, 'GPU')
if args.amp:
policy = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(policy)
tf.config.run_functions_eagerly(args.run_eagerly)
if args.tfdata_debug:
tf.data.experimental.enable_debug_mode()
fspec_path = os.path.join(args.dataset_path, args.feature_spec)
feature_spec = FeatureSpec.from_yaml(fspec_path)
table_ids = list(range(len(feature_spec.get_categorical_sizes())))
table_ids = table_ids[hvd.rank()::hvd.size()]
print('Creating the pipelines')
    train_pipeline, validation_pipeline = dataloader.create_input_pipelines(dataset_type=args.dataset_type,
                                                                            dataset_path=args.dataset_path,
                                                                            train_batch_size=args.batch_size,
                                                                            test_batch_size=args.batch_size,
                                                                            table_ids=table_ids,
                                                                            feature_spec=args.feature_spec,
                                                                            rank=hvd.rank(),
                                                                            world_size=hvd.size())
print('Benchmarking...')
it = iter(train_pipeline.op())
reduce_input = tf.convert_to_tensor([0], dtype=tf.float32, name='reduce_input')
@tf.function
def step():
device = '/GPU:0'
with tf.device(device):
b = next(it)
_ = hvd.allreduce(reduce_input, name='barrier')
return
    for i in range(args.warmup_steps):
        print('warmup step:', i)
        step()
rank = hvd.rank()
if args.sleep != 0:
print('sleeping...')
time.sleep(args.sleep)
begin = time.time()
current = begin
    for idx in range(args.max_batches):
        step()
        new = time.time()
        if rank == 0:
            print(f'batch: {idx}, step time: {new - current:.3f}')
        current = new
end = time.time()
print('Benchmark done')
num_batches = (idx + 1)
elapsed = (end - begin)
batches_per_second = num_batches / elapsed
samples_per_second = batches_per_second * args.batch_size
if rank == 0:
print(f'Batches per second: {batches_per_second:.2e}')
print(f'Samples per second: {samples_per_second:.2e}')
if __name__ == '__main__':
main()
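# Example invocation (illustrative; the dataset path is an assumption). The module
# uses relative imports, so it is expected to be launched with -m from the
# repository root:
#
#   python -m dataloading.dataloader_benchmark \
#       --dataset_path /data/criteo --dataset_type tf_raw --batch_size 65536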
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading/dataloader_benchmark.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import pandas as pd
import os
import numpy as np
from .defaults import NUMERICAL_CHANNEL, LABEL_CHANNEL
from .feature_spec import FeatureSpec
def parse_args():
parser = ArgumentParser()
parser.add_argument('--feature_spec_in', type=str, default='feature_spec.yaml',
help='Name of the input feature specification file')
parser.add_argument('--output', type=str, default='/data')
parser.add_argument('--size', type=int, default=1000)
return parser.parse_args()
def main():
args = parse_args()
dataset_size = args.size
fspec_in = FeatureSpec.from_yaml(args.feature_spec_in)
fspec_in.base_directory = args.output
cat_cardinalities = fspec_in.get_categorical_sizes()
cat_names = fspec_in.get_categorical_feature_names()
cardinalities = {name: cardinality for name, cardinality in zip(cat_names, cat_cardinalities)}
input_label_feature_name = fspec_in.channel_spec[LABEL_CHANNEL][0]
numerical_names_set = set(fspec_in.channel_spec[NUMERICAL_CHANNEL])
for mapping_name, mapping in fspec_in.source_spec.items():
for chunk in mapping:
assert chunk['type'] == 'csv', "Only csv files supported in this generator"
assert len(chunk['files']) == 1, "Only one file per chunk supported in this transcoder"
path_to_save = os.path.join(fspec_in.base_directory, chunk['files'][0])
data = []
for name in chunk['features']:
if name == input_label_feature_name:
                    # high is exclusive in np.random.randint: use 2 so the binary label covers {0, 1}
                    data.append(np.random.randint(0, 2, size=dataset_size))
elif name in numerical_names_set:
data.append(np.random.rand(dataset_size))
else:
local_cardinality = cardinalities[name]
data.append(np.random.randint(0, local_cardinality, size=dataset_size))
values = np.stack(data).T
to_save = pd.DataFrame(values, columns=chunk['features'])
os.makedirs(os.path.dirname(path_to_save), exist_ok=True)
to_save.to_csv(path_to_save, index=False, header=False)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading/gen_csv.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
import numpy as np
def power_law(k_min, k_max, alpha, x):
"""convert uniform distribution to power law distribution"""
gamma = 1 - alpha
y = pow(x * (pow(k_max, gamma) - pow(k_min, gamma)) + pow(k_min, gamma), 1.0 / gamma)
return y.astype(np.int32)
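# Illustrative check (not part of the original file): power_law maps uniform
# samples from [0, 1) to integers in roughly [k_min, k_max] whose frequencies
# follow a power law, so low values dominate.
#
#   samples = power_law(1, 100, 1.1, np.random.rand(10000))
#   # np.mean(samples < 10) is typically well above 0.5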
def gen_power_law_data(batch_size, hotness, num_rows, alpha, variable_hotness):
"""naive power law distribution generator
NOTE: Repetition is allowed in multi hot data.
NOTE: The resulting values are sorted by frequency, that is, the index=0 is the most frequently occurring etc.
"""
if variable_hotness:
# at least one element fetched for each feature
row_lengths = power_law(1, hotness, alpha, np.random.rand(batch_size))
total_elements = np.sum(row_lengths)
y = power_law(1, num_rows + 1, alpha, np.random.rand(total_elements)) - 1
result = tf.RaggedTensor.from_row_lengths(values=y, row_lengths=row_lengths)
else:
y = power_law(1, num_rows + 1, alpha, np.random.rand(batch_size * hotness)) - 1
row_lengths = tf.ones(shape=[batch_size], dtype=tf.int32) * hotness
result = tf.RaggedTensor.from_row_lengths(values=y, row_lengths=row_lengths)
return result
class SyntheticDataset:
def __init__(self, batch_size, num_numerical_features, categorical_feature_cardinalities,
categorical_feature_hotness, categorical_feature_alpha, num_workers, variable_hotness=True,
constant=False, num_batches=int(1e9)):
self.batch_size = batch_size
self.num_numerical_features = num_numerical_features
self.categorical_feature_cardinalities = categorical_feature_cardinalities
self.categorical_feature_hotness = categorical_feature_hotness
self.categorical_feature_alpha = categorical_feature_alpha
self.variable_hotness = variable_hotness
self.num_workers = num_workers
self.num_batches = num_batches
if len(categorical_feature_hotness) != len(categorical_feature_cardinalities):
raise ValueError("DummyDataset mismatch between cardinalities and hotness lengths."
f"Got {len(categorical_feature_cardinalities)} cardinalities and "
f"{len(categorical_feature_hotness)} hotnesses")
self.cat_features_count = len(
categorical_feature_cardinalities) if categorical_feature_cardinalities is not None else 0
self.num_features_count = num_numerical_features if num_numerical_features is not None else 0
self.constant = constant
if self.constant:
(self.numerical_features, self.categorical_features), self.labels = self._generate()
def _generate(self):
numerical_features = tf.random.uniform(shape=[self.batch_size // self.num_workers, self.num_numerical_features],
dtype=tf.float32) if self.num_features_count else -1
labels = tf.cast(tf.random.uniform(shape=[self.batch_size // self.num_workers, 1],
maxval=2, dtype=tf.int32), tf.float32)
categorical_features = []
for cardinality, hotness, alpha in zip(self.categorical_feature_cardinalities,
self.categorical_feature_hotness,
self.categorical_feature_alpha):
feature = gen_power_law_data(batch_size=self.batch_size, hotness=hotness,
num_rows=cardinality, alpha=alpha,
variable_hotness=self.variable_hotness)
categorical_features.append(feature)
return (numerical_features, categorical_features), labels
def __next__(self):
if self.constant:
return (self.numerical_features, self.categorical_features), self.labels
else:
return self._generate()
def __len__(self):
return self.num_batches
def op(self):
return self
def __iter__(self):
return self
def get_next(self):
return self.__next__()
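# Usage sketch (illustrative): two categorical tables with arbitrary
# cardinalities, hotness and alpha values.
#
#   dataset = SyntheticDataset(batch_size=8192, num_numerical_features=13,
#                              categorical_feature_cardinalities=[1000, 50000],
#                              categorical_feature_hotness=[1, 20],
#                              categorical_feature_alpha=[1.1, 1.2],
#                              num_workers=1, num_batches=10)
#   (numerical, categorical), labels = next(dataset)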
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading/synthetic_dataset.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import os
import pathlib
import base64
import tensorflow as tf
import numpy as np
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
import dataloading.feature_spec
from dataloading.dataloader import create_input_pipelines, get_dataset_metadata
from deployment.hps import constants
from deployment.hps.triton_ensemble_wrapper import NumpyToHpsInputConverter
from deployment.deployment_toolkit.core import EvaluationMode, MeasurementMode, OfflineMode
from deployment.deployment_toolkit.triton_performance_runner import TritonPerformanceRunner
LOGGER = logging.getLogger("run_performance_on_triton")
def b64_tensor(x):
return {'b64': base64.b64encode(x.flatten()).decode("utf-8")}
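# Illustrative example: wrapping a tensor in the base64 JSON form expected in
# perf_analyzer input-data files.
#
#   b64_tensor(np.zeros((2, 3), dtype=np.float32))
#   # -> {'b64': 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'}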
def create_input_data(sparse_backend, *args, **kwargs):
if sparse_backend == 'hps':
return create_input_data_hps(*args, **kwargs)
elif sparse_backend == 'tf-savedmodel':
return create_input_data_tf(*args, **kwargs)
else:
raise ValueError(f'Unknown sparse backend: {sparse_backend}')
def create_input_data_tf(batch_sizes, dataset_path, dataset_type, feature_spec,
total_benchmark_samples, fused_embedding):
fspec = dataloading.feature_spec.FeatureSpec.from_yaml(
os.path.join(dataset_path, feature_spec)
)
num_tables = len(fspec.get_categorical_sizes())
table_ids = list(range(num_tables))
filename = f"/tmp/triton_input_data_batch.json"
print("generating input data: ", filename)
_, dataloader = create_input_pipelines(dataset_type=dataset_type, dataset_path=dataset_path, train_batch_size=1,
test_batch_size=1, table_ids=table_ids, feature_spec=feature_spec,
rank=0, world_size=1)
generated = 0
samples = []
for sample in dataloader.op():
features, labels = sample
numerical_features, cat_features = features
cat_features = tf.concat(cat_features, axis=1).numpy().astype(np.int32)
numerical_features = numerical_features.numpy().astype(np.float32)
sample = {
"categorical_features": b64_tensor(cat_features),
"numerical_features": b64_tensor(numerical_features),
}
samples.append(sample)
generated += 1
if generated >= total_benchmark_samples:
break
with open(filename, "w") as f:
json.dump(obj={"data": samples}, fp=f, indent=4)
shapes = [
f"categorical_features:{cat_features.shape[1]}",
f"numerical_features:{numerical_features.shape[1]}",
]
input_data = {}
for batch_size in batch_sizes:
input_data[batch_size] = (filename, shapes)
return input_data
def create_input_data_hps(batch_sizes, dataset_path, dataset_type, feature_spec,
total_benchmark_samples, fused_embedding):
input_data = {}
for batch_size in batch_sizes:
filename = f"/tmp/triton_input_data_batch{batch_size}.json"
print("generating input data: ", filename)
shapes = create_input_data_hps_batch(batch_size=batch_size, dst_path=filename, dataset_path=dataset_path,
dataset_type=dataset_type, feature_spec=feature_spec,
total_benchmark_samples=total_benchmark_samples,
fused_embedding=fused_embedding)
input_data[batch_size] = (filename, shapes)
return input_data
def create_input_data_hps_batch(batch_size, dst_path, dataset_path, dataset_type, feature_spec,
total_benchmark_samples, fused_embedding):
fspec = dataloading.feature_spec.FeatureSpec.from_yaml(
os.path.join(dataset_path, feature_spec)
)
num_tables = len(fspec.get_categorical_sizes())
table_ids = list(range(num_tables))
converter = NumpyToHpsInputConverter(categorical_sizes=fspec.get_categorical_sizes(),
fused_embedding=fused_embedding)
_, dataloader = create_input_pipelines(dataset_type=dataset_type, dataset_path=dataset_path,
train_batch_size=batch_size, test_batch_size=batch_size,
table_ids=table_ids, feature_spec=feature_spec, rank=0, world_size=1)
generated = 0
batches = []
for batch in dataloader.op():
features, labels = batch
numerical_features, cat_features = features
key_tensor, nkey_tensor, numerical_features = converter(
numerical_features, cat_features
)
batch = {
constants.key_global_prefix: b64_tensor(key_tensor),
constants.numkey_global_prefix: b64_tensor(nkey_tensor),
constants.ens_numerical_features_name: b64_tensor(numerical_features)
}
batches.append(batch)
generated += batch_size
if generated >= total_benchmark_samples:
break
with open(dst_path, "w") as f:
json.dump(obj={"data": batches}, fp=f, indent=4)
shapes = [
f"{constants.key_global_prefix}:{key_tensor.shape[1]}",
f"{constants.numkey_global_prefix}:{nkey_tensor.shape[1]}",
f"{constants.ens_numerical_features_name}:{numerical_features.shape[1]}",
]
return shapes
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model-name",
type=str,
required=True,
help="Name of the model to test",
)
parser.add_argument(
"--result-path",
type=pathlib.Path,
required=True,
help="Path where results files is stored.",
)
parser.add_argument(
"--server-url",
type=str,
default="http://127.0.0.1:8000",
help="Url to Triton server",
)
parser.add_argument(
"--model-version",
type=str,
default=1,
help="Version of model",
)
parser.add_argument(
"--sparse-format",
type=str,
help="Target format of dense model part in ensemble.",
choices=["tf-savedmodel", "hps"],
required=True,
default="tf-savedmodel",
)
parser.add_argument(
"--fused-embedding",
action="store_true",
help="Use the fused embedding API for HPS",
)
parser.add_argument(
"--batch-sizes",
type=int,
default=[256, 512, 1024, 2048, 4096, 8192, 16384, 32768],
help="List of batch sizes to test.",
nargs="*",
)
parser.add_argument(
"--concurrency",
type=int,
default=[1],
help="List of concurrency modes.",
nargs="*",
)
parser.add_argument(
"--measurement-mode",
choices=[item.value for item in MeasurementMode],
default=MeasurementMode.COUNT_WINDOWS.value,
type=str,
help="Select measurement mode "
"'time_windows' stabilize performance on measurement window. "
"'count_windows' stabilize performance on number of samples.",
)
parser.add_argument(
"--measurement-interval",
help="Time window perf_analyzer will wait to stabilize the measurement",
default=1000,
type=int,
)
parser.add_argument(
"--measurement-request-count",
help="Number of samples on which perf_analyzer will stabilize the measurement",
default=20,
type=int,
)
parser.add_argument(
"--evaluation-mode",
choices=[item.value for item in EvaluationMode],
default=EvaluationMode.OFFLINE.value,
type=str,
help="Select evaluation mode "
"'offline' run offline analysis and use GPU memory to pass tensors. "
"'online' run online analysis and use HTTP protocol.",
)
parser.add_argument(
"--offline-mode",
choices=[item.value for item in OfflineMode],
default=OfflineMode.SYSTEM.value,
type=str,
help="Select offline mode "
"'system' pass tensors through CPU RAM memory. "
"'cuda' pass tensors through GPU RAM memory.",
)
parser.add_argument(
"--output-shared-memory-size",
default=524288,
type=int,
help="Size of memory buffer allocated for output with dynamic shapes in bytes. "
"Has to be equal to maximal size of output tensor.",
)
parser.add_argument(
"--warmup",
help="Enable model warmup before performance test",
action="store_true",
default=False,
)
parser.add_argument(
"--timeout",
help="Timeout for performance analysis",
type=int,
default=None,
required=False,
)
parser.add_argument(
"-v",
"--verbose",
help="Verbose logs",
action="store_true",
default=False,
)
# dataset and dataloading settings
parser.add_argument(
"--dataset_path", default=None, required=True, help="Path to dataset directory"
)
parser.add_argument(
"--feature_spec",
default="feature_spec.yaml",
help="Name of the feature spec file in the dataset directory",
)
parser.add_argument(
"--dataset_type",
default="tf_raw",
choices=["tf_raw", "synthetic", "split_tfrecords"],
help="The type of the dataset to use",
)
    parser.add_argument(
        "--num-benchmark-samples",
        default=2**18,
        type=int,
        help="Number of samples to use for the benchmark",
    )
args = parser.parse_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
input_data = create_input_data(sparse_backend=args.sparse_format,
batch_sizes=args.batch_sizes, dataset_path=args.dataset_path,
dataset_type=args.dataset_type, feature_spec=args.feature_spec,
total_benchmark_samples=args.num_benchmark_samples,
fused_embedding=args.fused_embedding)
runner = TritonPerformanceRunner(
server_url=args.server_url,
model_name=args.model_name,
input_data=input_data,
batch_sizes=args.batch_sizes,
measurement_mode=MeasurementMode(args.measurement_mode),
measurement_interval=args.measurement_interval,
measurement_request_count=args.measurement_request_count,
concurrency=args.concurrency,
evaluation_mode=EvaluationMode(args.evaluation_mode),
offline_mode=OfflineMode(args.offline_mode),
output_shared_memory_size=args.output_shared_memory_size,
result_path=args.result_path,
warmup=args.warmup,
timeout=args.timeout,
verbose=args.verbose,
flattened_input=args.sparse_format == 'hps'
)
runner.run()
for _, (filename, _) in input_data.items():
if os.path.exists(filename):
os.remove(filename)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/evaluate_latency.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import argparse
import os
import tensorflow as tf
import horovod.tensorflow as hvd
import deployment.tf
import deployment.hps
def clear_and_create_directory(repo_path):
    # creates the directory if it does not exist; existing contents are left in place
    print("creating directory:", repo_path)
    os.makedirs(repo_path, exist_ok=True)
def create_model_repo(dst, sparse_model_name, dense_model_name, ensemble_name):
clear_and_create_directory(dst)
created = []
for name in sparse_model_name, dense_model_name, ensemble_name:
d = os.path.join(dst, name)
clear_and_create_directory(d)
created.append(d)
return created
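# Usage sketch (illustrative; the destination path and model names are
# assumptions):
#
#   sparse_dst, dense_dst, ensemble_dst = create_model_repo(
#       dst='/models', sparse_model_name='sparse',
#       dense_model_name='dense', ensemble_name='dlrm')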
def set_tf_memory_growth():
physical_devices = tf.config.list_physical_devices("GPU")
for d in physical_devices:
tf.config.experimental.set_memory_growth(d, True)
def main():
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--checkpoint-dir", type=str, help="Source directory with a checkpoint"
)
parser.add_argument(
"--model-repository-path",
type=str,
help="Destination directory with Triton model repository",
)
parser.add_argument(
"--model-name",
type=str,
help="The name of the model used for inference.",
required=True,
)
    parser.add_argument(
        "--sparse-model-name",
        type=str,
        default='sparse',
        help="Name of the sparse model part in the repository.",
    )
    parser.add_argument(
        "--dense-model-name",
        type=str,
        default='dense',
        help="Name of the dense model part in the repository.",
    )
parser.add_argument(
"--model-version",
type=int,
help="The version of the model used for inference.",
required=False,
default=1,
)
parser.add_argument(
"--dense-format",
type=str,
help="Target format of dense model part in ensemble.",
choices=["tf-savedmodel", "onnx", "trt"],
required=True,
default="tf-savedmodel",
)
parser.add_argument(
"--sparse-format",
type=str,
help="Target format of dense model part in ensemble.",
choices=["tf-savedmodel", "hps"],
required=True,
default="tf-savedmodel",
)
parser.add_argument(
"--model-precision",
type=str,
help="Target precision of dense model part in ensemble.",
choices=["fp16", "fp32"],
required=True,
default="fp32",
)
parser.add_argument(
"--max-batch-size",
type=int,
help="The maximal batch size for deployed model.",
required=False,
default=32768,
)
parser.add_argument(
"--trt-optimal-batch-size",
type=int,
help="Batch size to optimize TensorRT performance for.",
required=False,
default=1024,
)
parser.add_argument(
"--memory-threshold-gb",
type=int,
help="Amount of memory in GB after reaching which CPU offloading will be used",
required=False,
default=70,
)
parser.add_argument(
"--engine-count-per-device",
type=int,
default=1,
help="Number of model instances per GPU",
)
parser.add_argument(
"--num_gpus",
type=int,
default=1,
help="Number of GPUs to deploy HPS onto",
)
parser.add_argument(
"--fused_embedding",
action="store_true",
default=False,
help="Fuse the embedding table together for better GPU utilization.",
)
parser.add_argument(
"--hps_gpucacheper",
type=float,
default=0.25,
help="Fraction of the embeddings to store in GPU cache.",
)
parser.add_argument(
"--server-url",
type=str,
default="grpc://127.0.0.1:8001",
help="Url of Triton Inference Server",
required=False,
)
parser.add_argument(
"--load-model",
action="store_true",
default=False,
help="Call load model Triton endpoint after creating model store.",
)
parser.add_argument(
"--load-model-timeout-s",
type=int,
default=120,
help="Timeout of load model operation.",
required=False,
)
parser.add_argument(
"--verbose",
action="store_true",
default=False,
help="Enable verbose logging",
)
parser.add_argument(
"--cpu",
action="store_true",
default=False,
help="Run the entire model on CPU",
)
parser.add_argument(
"--monolithic",
action="store_true",
default=False,
help="Don't use the ensemble paradigm. Instead, save everything into a single large SavedModel file",
)
args = parser.parse_args()
hvd.init()
set_tf_memory_growth()
deployment_package = deployment.hps if args.sparse_format == 'hps' else deployment.tf
if args.monolithic:
deployment_package.deploy_monolithic(sparse_src=os.path.join(args.checkpoint_dir, "sparse"),
dense_src=os.path.join(args.checkpoint_dir, "dense"),
dst=args.model_repository_path,
model_name='dlrm',
max_batch_size=65536,
engine_count_per_device=1,
num_gpus=1,
version="1",
cpu=args.cpu,
model_precision='fp32')
return
sparse_dst, dense_dst, ensemble_dst = create_model_repo(
dst=args.model_repository_path, ensemble_name=args.model_name,
sparse_model_name=args.sparse_model_name, dense_model_name=args.dense_model_name
)
num_numerical_features = deployment_package.deploy_dense(
src=os.path.join(args.checkpoint_dir, "dense"),
dst=dense_dst,
model_name=args.dense_model_name,
model_format=args.dense_format,
model_precision=args.model_precision,
max_batch_size=args.max_batch_size,
trt_optimal_batch_size=args.trt_optimal_batch_size,
engine_count_per_device=args.engine_count_per_device,
)
num_cat_features = deployment_package.deploy_sparse(
src=os.path.join(args.checkpoint_dir, "sparse"),
dst=sparse_dst,
model_name=args.sparse_model_name,
num_gpus=args.num_gpus,
fused=args.fused_embedding,
max_batch_size=args.max_batch_size,
gpucacheper=args.hps_gpucacheper,
engine_count_per_device=args.engine_count_per_device,
memory_threshold_gb=args.memory_threshold_gb
)
deployment_package.deploy_ensemble(
dst=ensemble_dst,
model_name=args.model_name,
sparse_model_name=args.sparse_model_name,
dense_model_name=args.dense_model_name,
num_cat_features=num_cat_features,
num_numerical_features=num_numerical_features,
version=args.model_version,
max_batch_size=args.max_batch_size,
)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deploy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import dataloading.feature_spec
import os
import numpy as np
import argparse
import dllogger
from dataloading.dataloader import create_input_pipelines
from nn.evaluator import Evaluator
from utils.logging import IterTimer, init_logging
import deployment.tf.triton_ensemble_wrapper
import deployment.hps.triton_ensemble_wrapper
def log_results(auc, test_loss, latencies, batch_size, compute_latencies=False, warmup_steps=10):
# don't benchmark the first few warmup steps
latencies = latencies[warmup_steps:]
result_data = {
'mean_inference_throughput': batch_size / np.mean(latencies),
'mean_inference_latency': np.mean(latencies)
}
if compute_latencies:
for percentile in [90, 95, 99]:
result_data[f'p{percentile}_inference_latency'] = np.percentile(latencies, percentile)
result_data['auc'] = auc
result_data['test_loss'] = test_loss
dllogger.log(data=result_data, step=tuple())
def parse_args():
    parser = argparse.ArgumentParser(description='Evaluate the accuracy of a model deployed on Triton')
    parser.add_argument('--dataset_path', type=str, required=True, help='Path to the dataset directory')
    parser.add_argument('--dataset_type', default='tf_raw', type=str, help='The type of the dataset to use')
    parser.add_argument('--feature_spec', default='feature_spec.yaml', type=str,
                        help='Name of the feature spec file in the dataset directory')
    parser.add_argument('--batch_size', type=int, default=32768, help='Batch size')
    parser.add_argument('--auc_thresholds', type=int, default=8000,
                        help='Number of thresholds for the AUC computation')
    parser.add_argument('--max_steps', type=int, default=None,
                        help='Stop after this many validation steps')
    parser.add_argument('--print_freq', type=int, default=10, help='Print progress every this many steps')
    parser.add_argument('--log_path', type=str, default='dlrm_tf_log.json',
                        help='Path to the JSON file with the resulting log')
    parser.add_argument('--verbose', action='store_true', default=False, help='Enable verbose logging')
parser.add_argument('--test_on_train', action='store_true', default=False,
help='Run validation on the training set.')
parser.add_argument('--fused_embedding', action='store_true', default=False,
help='Fuse the embedding table together for better GPU utilization.')
parser.add_argument("--model_name", type=str, help="The name of the model used for inference.", required=True)
parser.add_argument("--sparse_input_format", type=str, choices=["tf-savedmodel", "hps"],
required=True, default="tf-savedmodel")
args = parser.parse_args()
return args
def main():
args = parse_args()
init_logging(log_path=args.log_path, params_dict=args.__dict__)
fspec = dataloading.feature_spec.FeatureSpec.from_yaml(os.path.join(args.dataset_path, args.feature_spec))
num_tables = len(fspec.get_categorical_sizes())
table_ids = list(range(num_tables)) # possibly wrong ordering, to be tested
train_pipeline, validation_pipeline = create_input_pipelines(dataset_type=args.dataset_type,
dataset_path=args.dataset_path,
train_batch_size=args.batch_size,
test_batch_size=args.batch_size,
table_ids=table_ids,
feature_spec=args.feature_spec,
rank=0, world_size=1)
if args.test_on_train:
validation_pipeline = train_pipeline
if args.sparse_input_format == 'hps':
wrapper_cls = deployment.hps.triton_ensemble_wrapper.RecsysTritonEnsemble
else:
wrapper_cls = deployment.tf.triton_ensemble_wrapper.RecsysTritonEnsemble
model = wrapper_cls(model_name=args.model_name, num_tables=num_tables, verbose=args.verbose,
categorical_sizes=fspec.get_categorical_sizes(), fused_embedding=args.fused_embedding)
timer = IterTimer(train_batch_size=args.batch_size, test_batch_size=args.batch_size,
optimizer=None, print_freq=args.print_freq, enabled=True)
evaluator = Evaluator(model=model, timer=timer, auc_thresholds=args.auc_thresholds,
max_steps=args.max_steps, cast_dtype=None)
auc, test_loss, latencies = evaluator(validation_pipeline=validation_pipeline)
log_results(auc, test_loss, latencies, batch_size=args.batch_size)
print('DONE')
if __name__ == '__main__':
main() | DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/evaluate_accuracy.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import time
from enum import Enum
from typing import Any, Dict, Optional
# pytype: disable=import-error
from .utils import parse_server_url
try:
import tritonclient.grpc as grpc_client
from tritonclient import utils as client_utils # noqa: F401
except ImportError:
try:
import tritonclientutils as client_utils # noqa: F401
import tritongrpcclient as grpc_client
except ImportError:
client_utils = None
grpc_client = None
try:
import tritonclient.http as http_client
except (ImportError, RuntimeError):
try:
import tritonhttpclient as http_client
except (ImportError, RuntimeError):
http_client = None
# pytype: enable=import-error
LOGGER = logging.getLogger(__name__)
class TritonServerNotReadyException(Exception):
pass
# TODO: in which state "native" warm-up takes place?
class ModelState(Enum):
"""Describe model state in Triton.
Attributes:
LOADING: Loading of model
UNLOADING: Unloading of model
UNAVAILABLE: Model is missing or could not be loaded
READY: Model is ready for inference
"""
LOADING = "LOADING"
UNLOADING = "UNLOADING"
UNAVAILABLE = "UNAVAILABLE"
READY = "READY"
class TritonClientProtocol(Enum):
"""Describe protocol with which client communicates with Triton"""
GRPC = "grpc"
HTTP = "http"
# TODO: How to obtain models that are available but not loaded yet?
# TODO: encode model_name and model_version as for ex. model_name/model_version (here and in many other places)
# TODO: How to obtain server model loading mode
class TritonClient:
"""Provide high-level API for communicating with Triton.
Usage:
>>> client = TritonClient("grpc://127.0.0.1:8001")
>>> client.load_model("ResNet50")
    The sample above loads a model on Triton so that it can serve inference requests.
Args:
server_url: url where Triton is binded in format `<protocol>://<address/hostname>:<port>`
verbose: provide verbose logs from tritonclient library
Attributes:
client: handle to low-level API client obtained from tritonclient python package
Raises:
RuntimeError: in case of missing tritonclient library for selected protocol
or problems with connecting to Triton or its not in ready state yet.
ValueError: in case of errors in parsing provided server_url. Example source of errors are: missing protocol unknown protocol was requested.
InferenceServerClient: in case of error in processing initial requests on server side
"""
def __init__(self, server_url: str, *, verbose: bool = False):
self.server_url = server_url
self._verbose = verbose
self.client = self._create_client(server_url=server_url, verbose=verbose)
def wait_for_server_ready(self, timeout: int):
"""
Parameters
----------
timeout : int
timeout in seconds to send a ready status
request to the server before raising
an exception
Raises
------
TritonModelAnalyzerException
If server readiness could not be
determined in given num_retries
"""
        retries = timeout
        while retries > 0:
            try:
                if self.client.is_server_ready() and self.client.is_server_live():
                    return
                else:
                    time.sleep(1)
                    retries -= 1
            except Exception as e:
                time.sleep(1)
                retries -= 1
                if retries == 0:
                    raise TritonServerNotReadyException(e)
        raise TritonServerNotReadyException(
            "Could not determine server readiness. Number of retries exceeded."
        )
def get_server_metadata(self):
"""Returns `server metadata <https://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#server-metadata-response-json-object>`_.
>>> client.get_server_metadata()
{name: "triton", version: "2.5.0", extensions: ["classification", "sequence", "model_repository", "schedule_policy", "model_configuration", "system_shared_memory", "cuda_shared_memory", "binary_tensor_data", "statistics"}
Returns:
Dictionary with server metadata.
Raises:
InferenceServerClient: in case of error in processing request on server side
"""
server_metadata = self.client.get_server_metadata()
server_metadata = self._format_response(server_metadata)
return server_metadata
def get_model_metadata(self, model_name: str, model_version: Optional[str] = None):
"""Returns `model metadata <https://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#model-metadata>`_.
Args:
model_name: name of the model which metadata is requested to obtain.
model_version: version of the model which metadata is requested to obtain.
Returns:
Dictionary with model metadata.
Raises:
InferenceServerClient: in case of error in processing request on server side.
"""
model_metadata = self.client.get_model_metadata(model_name, model_version)
model_metadata = self._format_response(model_metadata)
return model_metadata
def load_model(self, model_name: str) -> None:
"""Requests that a model be loaded into Triton, or reloaded if the model is already loaded.
Args:
model_name: name of the model to load
Raises:
InferenceServerException: in case of error in processing request on server side.
"""
self.client.load_model(model_name)
def wait_for_model(
self,
*,
model_name: str,
model_version: str,
timeout_s: int = 120,
check_interval_s: int = 5,
) -> Dict[str, Any]:
"""Iteratively check for model state until model is ready or unavailable.
Args:
model_name: name of the model to wait for
model_version: version of the model to wait for
timeout_s: how long in seconds to wait till model is in ready or in unavailable state
check_interval_s: time intervals in seconds at which state of model is should be checked
Returns:
Dictionary with model metadata.
Raises:
RuntimeError: in case model is not ready yet (is marked unavailable or timeout has been reached)
InferenceServerException: in case of error in processing request on server side.
"""
def _shall_wait(model_state: ModelState) -> bool:
return model_state not in [ModelState.UNAVAILABLE, ModelState.READY]
elapsed_time_s = 0
start_time_s = time.time()
state = self.get_model_state(model_name, model_version)
while elapsed_time_s < timeout_s and _shall_wait(state):
LOGGER.info(
f"waiting for model... {elapsed_time_s:.0f}/{timeout_s} state={state}"
)
time.sleep(check_interval_s)
state = self.get_model_state(model_name, model_version)
elapsed_time_s = time.time() - start_time_s
if not self.client.is_model_ready(model_name):
raise RuntimeError(
f"Model {model_name} requested to be loaded, but is not ready"
)
model_metadata = self.client.get_model_metadata(model_name)
model_metadata = self._format_response(model_metadata)
return model_metadata
def get_model_state(self, model_name: str, model_version: str) -> ModelState:
"""Obtains the state of a model on Triton.
Args:
model_name: name of the model which state is requested to obtain.
model_version: version of the model which state is requested to obtain.
Returns:
Requested model state.
Raises:
InferenceServerException: in case of error in processing request on server side.
"""
def handle_http_response(models):
models_states = {}
for model in models:
if not model.get("version"):
continue
model_state = (
ModelState(model["state"])
if model.get("state")
else ModelState.LOADING
)
models_states[(model["name"], model["version"])] = model_state
return models_states
def handle_grpc_response(models):
models_states = {}
for model in models:
if not model.version:
continue
model_state = (
ModelState(model.state) if model.state else ModelState.LOADING
)
models_states[(model.name, model.version)] = model_state
return models_states
repository_index = self.client.get_model_repository_index()
if isinstance(repository_index, list):
models_states = handle_http_response(models=repository_index)
else:
models_states = handle_grpc_response(models=repository_index.models)
return models_states.get((model_name, model_version), ModelState.UNAVAILABLE)
def _format_response(self, response):
if not isinstance(response, dict):
response = json.loads(
grpc_client.MessageToJson(response, preserving_proto_field_name=True)
)
return response
def _create_client(self, server_url: str, verbose: bool):
protocol, host, port = parse_server_url(server_url)
if protocol == TritonClientProtocol.HTTP and http_client is None:
raise RuntimeError(
"Could not obtain Triton HTTP client. Install extras while installing tritonclient wheel. "
"Example installation call: "
"find /workspace/install/python/ -iname triton*manylinux*.whl -exec pip install {}[all] \\;"
)
LOGGER.debug(f"Connecting to {server_url}")
client_lib = {
TritonClientProtocol.HTTP.value: http_client,
TritonClientProtocol.GRPC.value: grpc_client,
}[protocol.value]
server_url = f"{host}:{port}"
# pytype: disable=attribute-error
client = client_lib.InferenceServerClient(url=server_url, verbose=verbose)
# pytype: enable=attribute-error
return client
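# Usage sketch (illustrative; the server URL and model name are assumptions):
#
#   client = TritonClient("grpc://127.0.0.1:8001")
#   client.wait_for_server_ready(timeout=120)
#   client.load_model("ensemble_model")
#   client.wait_for_model(model_name="ensemble_model", model_version="1")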
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/triton_client.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import importlib
import logging
import os
import time
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
LOGGER = logging.getLogger(__name__)
DATALOADER_FN_NAME = "get_dataloader_fn"
GET_MODEL_FN_NAME = "get_model"
GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn"
GET_ARGPARSER_FN_NAME = "update_argparser"
def load_from_file(file_path, label, target):
spec = importlib.util.spec_from_file_location(name=label, location=file_path)
my_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
return getattr(my_module, target, None)
class BaseMetricsCalculator(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
"""
Calculates error/accuracy metrics
Args:
ids: List of ids identifying each sample in the batch
y_pred: model output as dict where key is output name and value is output value
x: model input as dict where key is input name and value is input value
y_real: input ground truth as dict where key is output name and value is output value
Returns:
dictionary where key is metric name and value is its value
"""
        self.update(ids=ids, y_pred=y_pred, x=x, y_real=y_real)
        return self.metrics
@abc.abstractmethod
def update(
self,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
):
pass
@property
@abc.abstractmethod
def metrics(self) -> Dict[str, Any]:
pass
class ShapeSpec(NamedTuple):
min: Tuple
opt: Tuple
max: Tuple
class MeasurementMode(Enum):
"""
Available measurement stabilization modes
"""
COUNT_WINDOWS = "count_windows"
TIME_WINDOWS = "time_windows"
class EvaluationMode(Enum):
"""
Available evaluation modes
"""
OFFLINE = "offline"
ONLINE = "online"
class OfflineMode(Enum):
"""
Available offline mode for memory
"""
SYSTEM = "system"
CUDA = "cuda"
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/core.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import json
import pickle
import threading
from pathlib import Path
from typing import Dict, Iterator, List, Union
import numpy as np
MB2B = 2 ** 20
B2MB = 1 / MB2B
FLUSH_THRESHOLD_B = 256 * MB2B
def _validate_batch(name: str, value: Union[list, np.ndarray]):
if not isinstance(value, (list, np.ndarray)):
raise ValueError(f"Values shall be lists or np.ndarrays; current type {type(value)}")
def _validate_prefix_data(prefix_data: Dict[str, List[np.ndarray]]):
batch_sizes_per_io_name = {name: [len(batch) for batch in batches] for name, batches in prefix_data.items()}
names = list(batch_sizes_per_io_name)
for io_name in names:
for batch_idx, batch_size in enumerate(batch_sizes_per_io_name[io_name]):
if not all([batch_sizes_per_io_name[other_name][batch_idx] == batch_size for other_name in names]):
non_equal_batch_sizes = {
other_name: batch_sizes_per_io_name[other_name][batch_idx] for other_name in names
}
non_equal_batch_sizes_str = ", ".join(
[f"{name}={batch_size}" for name, batch_size in non_equal_batch_sizes.items()]
)
raise ValueError(
"All inputs/outputs should have same number of batches with equal batch_size. "
f"At batch_idx={batch_idx} there are batch_sizes: {non_equal_batch_sizes_str}"
)
# the helper below assumes each io has the same number of batches, each of equal batch size
def _get_nitems_and_batches(prefix_data: Dict[str, List[np.ndarray]]):
nitems = 0
nbatches = 0
if prefix_data:
nitems_per_io_name = {name: sum(len(batch) for batch in batches) for name, batches in prefix_data.items()}
nbatches_per_io_name = {name: len(batches) for name, batches in prefix_data.items()}
nitems = list(nitems_per_io_name.values())[0]
nbatches = list(nbatches_per_io_name.values())[0]
return nitems, nbatches
class BaseDumpWriter(abc.ABC):
FILE_SUFFIX = ".abstract"
def __init__(self, output_dir: Union[str, Path]):
self._output_dir = Path(output_dir)
# outer dict key is prefix (i.e. input/output/labels/...), inner dict key is input/output name
# list is list of batches
self._items_cache: Dict[str, Dict[str, List[np.ndarray]]] = {}
# key is prefix
self._items_counters: Dict[str, int] = {}
self._cache_lock = threading.RLock()
self._flush_threshold_b = FLUSH_THRESHOLD_B
@property
def cache_size(self):
def _get_bytes_size(name, batch):
_validate_batch(name, batch)
if not isinstance(batch, np.ndarray):
                batch = np.array(batch)
return batch.nbytes
with self._cache_lock:
return {
prefix: sum(_get_bytes_size(name, batch) for name, batches in data.items() for batch in batches)
for prefix, data in self._items_cache.items()
}
def _append_to_cache(self, prefix, prefix_data):
if prefix_data is None:
return
if not isinstance(prefix_data, dict):
raise ValueError(f"{prefix} data to store shall be dict")
with self._cache_lock:
cached_prefix_data = self._items_cache.setdefault(prefix, {})
for name, batch in prefix_data.items():
_validate_batch(name, batch)
if not isinstance(batch, np.ndarray):
batch = np.array(batch)
cached_batches = cached_prefix_data.setdefault(name, [])
cached_batches += [batch]
def write(self, **kwargs):
with self._cache_lock:
for prefix, prefix_data in kwargs.items():
self._append_to_cache(prefix, prefix_data)
biggest_prefix_data_size = max(self.cache_size.values())
if biggest_prefix_data_size > self._flush_threshold_b:
self.flush()
def flush(self):
with self._cache_lock:
for prefix, prefix_data in self._items_cache.items():
_validate_prefix_data(prefix_data)
output_path = self._output_dir / self._get_filename(prefix)
self._dump(prefix_data, output_path)
nitems, nbatches = _get_nitems_and_batches(prefix_data)
self._items_counters[prefix] += nitems
self._items_cache = {}
def _get_filename(self, prefix):
idx = self._items_counters.setdefault(prefix, 0)
return f"{prefix}-{idx:012d}{self.FILE_SUFFIX}"
@abc.abstractmethod
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
pass
def __enter__(self):
if self._output_dir.exists() and len(list(self._output_dir.iterdir())):
raise ValueError(f"{self._output_dir.as_posix()} is not empty")
self._output_dir.mkdir(parents=True, exist_ok=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush()
class PickleDumpWriter(BaseDumpWriter):
FILE_SUFFIX = ".pkl"
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("wb") as pickle_file:
pickle.dump(prefix_data, pickle_file)
class JsonDumpWriter(BaseDumpWriter):
FILE_SUFFIX = ".json"
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
repacked_prefix_data = self._format_data(prefix_data)
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("w") as json_file:
json.dump(repacked_prefix_data, json_file)
def _format_data(self, prefix_data: Dict[str, List[np.ndarray]]) -> Dict:
def _format_batch_for_perf_analyzer_json_format(batch: np.ndarray):
return {
"content": batch.flatten().tolist(),
"shape": list(batch.shape),
"dtype": str(batch.dtype),
}
_, nbatches = _get_nitems_and_batches(prefix_data)
batches = [{} for _ in range(nbatches)]
for io_name, batches_per_io in prefix_data.items():
for batch_idx, batch in enumerate(batches_per_io):
batches[batch_idx][io_name] = _format_batch_for_perf_analyzer_json_format(batch)
return {"data": batches}
class BaseDumpReader(abc.ABC):
FILE_SUFFIX = ".abstract"
def __init__(self, dump_dir: Union[Path, str]):
self._dump_dir = Path(dump_dir)
def get(self, prefix: str) -> Iterator[Dict[str, np.ndarray]]:
dump_files_paths = sorted(self._dump_dir.glob(f"{prefix}*{self.FILE_SUFFIX}"))
for dump_file_path in dump_files_paths:
prefix_data = self._load_file(dump_file_path)
nitems, nbatches = _get_nitems_and_batches(prefix_data)
for batch_idx in range(nbatches):
yield {io_name: prefix_data[io_name][batch_idx] for io_name in prefix_data}
@abc.abstractmethod
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
pass
def iterate_over(self, prefix_list: List[str]) -> Iterator:
iterators = [self.get(prefix) for prefix in prefix_list]
empty_iterators = [False] * len(iterators)
while not all(empty_iterators):
values = [None] * len(iterators)
for idx, iterator in enumerate(iterators):
if empty_iterators[idx]:
continue
try:
values[idx] = next(iterator)
except StopIteration:
empty_iterators[idx] = True
if all(empty_iterators):
break
if not all(empty_iterators):
yield values
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class PickleDumpReader(BaseDumpReader):
FILE_SUFFIX = ".pkl"
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
with dump_file_path.open("rb") as pickle_file:
return pickle.load(pickle_file)
class JsonDumpReader(BaseDumpReader):
FILE_SUFFIX = ".json"
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
with dump_file_path.open("rb") as json_file:
data = json.load(json_file)
return self._repack_data(data)
def _repack_data(self, data: Dict) -> Dict[str, List[np.ndarray]]:
result: Dict[str, List[np.ndarray]] = {}
batches = data["data"]
for batch in batches:
for io_name, batch_as_dict in batch.items():
io_batches = result.setdefault(io_name, [])
flat_array = batch_as_dict["content"]
shape = batch_as_dict["shape"]
dtype = batch_as_dict["dtype"]
batch_as_array = np.array(flat_array).reshape(shape).astype(dtype)
io_batches.append(batch_as_array)
return result
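# Usage sketch (illustrative; the dump directory is an assumption):
#
#   with JsonDumpWriter("/tmp/dumps") as writer:
#       writer.write(outputs={"scores": np.zeros((4, 1), dtype=np.float32)})
#   reader = JsonDumpReader("/tmp/dumps")
#   for batch in reader.get("outputs"):
#       print(batch["scores"].shape)  # (4, 1)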
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/dump.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from enum import Enum
from typing import Any, Dict, Tuple
LOGGER = logging.getLogger(__name__)
class TritonClientProtocol(Enum):
"""Describe protocol with which client communicates with Triton"""
GRPC = "grpc"
HTTP = "http"
def parse_server_url(server_url: str) -> Tuple[TritonClientProtocol, str, int]:
DEFAULT_PORTS = {
TritonClientProtocol.HTTP: 8000,
TritonClientProtocol.GRPC: 8001,
}
# extract protocol
server_url_items = server_url.split("://")
if len(server_url_items) != 2:
raise ValueError("Prefix server_url with protocol ex.: grpc://127.0.0.1:8001")
requested_protocol, server_url = server_url_items
requested_protocol = TritonClientProtocol(requested_protocol.lower())
if requested_protocol not in DEFAULT_PORTS:
raise ValueError(f"Unsupported protocol: {requested_protocol}")
# extract host and port
default_port = DEFAULT_PORTS[requested_protocol]
server_url_items = server_url.split(":")
if len(server_url_items) == 1:
host, port = server_url, default_port
elif len(server_url_items) == 2:
host, port = server_url_items
port = int(port)
if port != default_port:
LOGGER.warning(
f"Current server URL is {server_url} while default {requested_protocol} port is {default_port}"
)
else:
raise ValueError(f"Could not parse {server_url}. Example of correct server URL: grpc://127.0.0.1:8001")
return requested_protocol, host, port
def log_dict(title: str, dict_: Dict[str, Any]):
LOGGER.info(title)
for key, value in dict_.items():
LOGGER.info(f"\t{key} = {value}")
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import logging
from typing import Callable, Dict, List, Optional, Union
from .core import GET_ARGPARSER_FN_NAME, load_from_file
LOGGER = logging.getLogger(__name__)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def filter_fn_args(args: Union[dict, argparse.Namespace], fn: Callable) -> dict:
signature = inspect.signature(fn)
parameters_names = list(signature.parameters)
if isinstance(args, argparse.Namespace):
args = vars(args)
args = {k: v for k, v in args.items() if k in parameters_names}
return args
def add_args_for_fn_signature(parser, fn) -> argparse.ArgumentParser:
parser.conflict_handler = "resolve"
signature = inspect.signature(fn)
for parameter in signature.parameters.values():
if parameter.name in ["self", "args", "kwargs"]:
continue
argument_kwargs = {}
if parameter.annotation != inspect.Parameter.empty:
is_optional = is_optional_generic(parameter.annotation)
if is_optional:
annotation = parameter.annotation.__args__[
0
] # Optional[cls] will be changed into Union[cls, None]
else:
annotation = parameter.annotation
is_list = is_list_generic(annotation)
is_dict = is_dict_generic(annotation)
if parameter.annotation == bool:
argument_kwargs["type"] = str2bool
argument_kwargs["choices"] = [0, 1]
elif is_list:
argument_kwargs["type"] = annotation.__args__[0] # List[cls] -> cls
elif is_dict:
raise RuntimeError(
f"Could not prepare argument parser for {parameter.name}: {parameter.annotation} in {fn}"
)
else:
argument_kwargs["type"] = annotation
if parameter.default != inspect.Parameter.empty:
if parameter.annotation == bool:
argument_kwargs["default"] = str2bool(parameter.default)
else:
argument_kwargs["default"] = parameter.default
else:
argument_kwargs["required"] = True
name = parameter.name.replace("_", "-")
LOGGER.debug(f"Adding argument {name} with {argument_kwargs}")
parser.add_argument(f"--{name}", **argument_kwargs)
return parser
class ArgParserGenerator:
def __init__(self, cls_or_fn, module_path: Optional[str] = None):
self._cls_or_fn = cls_or_fn
init_method_name = "__init__"
self._handle = (
cls_or_fn
if inspect.isfunction(cls_or_fn)
else getattr(cls_or_fn, init_method_name, None)
)
input_is_python_file = module_path and module_path.endswith(".py")
self._input_path = module_path if input_is_python_file else None
self._required_fn_name_for_signature_parsing = getattr(
cls_or_fn, "required_fn_name_for_signature_parsing", None
)
def update_argparser(self, parser):
name = self._handle.__name__
group_parser = parser.add_argument_group(name)
add_args_for_fn_signature(group_parser, fn=self._handle)
self._update_argparser(group_parser)
def get_args(self, args: argparse.Namespace):
filtered_args = filter_fn_args(args, fn=self._handle)
tmp_parser = argparse.ArgumentParser(allow_abbrev=False)
self._update_argparser(tmp_parser)
custom_names = [
p.dest.replace("-", "_")
for p in tmp_parser._actions
if not isinstance(p, argparse._HelpAction)
]
custom_params = {n: getattr(args, n) for n in custom_names}
filtered_args = {**filtered_args, **custom_params}
return filtered_args
def from_args(self, args: Union[argparse.Namespace, Dict]):
args = self.get_args(args)
LOGGER.info(f"Initializing {self._cls_or_fn.__name__}({args})")
return self._cls_or_fn(**args)
def _update_argparser(self, parser):
label = "argparser_update"
if self._input_path:
update_argparser_handle = load_from_file(
self._input_path, label=label, target=GET_ARGPARSER_FN_NAME
)
if update_argparser_handle:
update_argparser_handle(parser)
elif self._required_fn_name_for_signature_parsing:
fn_handle = load_from_file(
self._input_path,
label=label,
target=self._required_fn_name_for_signature_parsing,
)
if fn_handle:
add_args_for_fn_signature(parser, fn_handle)
def is_optional_generic(type_):
from typing_inspect import is_optional_type
return is_optional_type(type_)
def is_list_generic(type_):
from typing_inspect import get_args, get_origin, is_generic_type
is_optional = is_optional_generic(type_)
if is_optional:
type_, _ = get_args(type_, evaluate=True)
return is_generic_type(type_) and get_origin(type_) in [list, List]
def is_dict_generic(type_):
from typing_inspect import get_args, get_origin, is_generic_type
is_optional = is_optional_generic(type_)
if is_optional:
type_, _ = get_args(type_, evaluate=True)
return is_generic_type(type_) and get_origin(type_) in [dict, Dict]
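# Usage sketch (not part of the original module; `train_fn` is hypothetical).
# Because of the relative import of .core above, run this as part of the package;
# `typing_inspect` must be installed. ArgParserGenerator derives CLI flags from the
# function signature and then calls the function with the parsed values.
if __name__ == "__main__":
    def train_fn(batch_size: int = 32, precision: str = "fp16", verbose: bool = False):
        return {"batch_size": batch_size, "precision": precision, "verbose": verbose}
    parser = argparse.ArgumentParser()
    generator = ArgParserGenerator(train_fn)
    generator.update_argparser(parser)
    cli_args = parser.parse_args(["--batch-size", "64", "--verbose", "true"])
    print(generator.from_args(cli_args))  # {'batch_size': 64, 'precision': 'fp16', 'verbose': True}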
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/args.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import re
from typing import Dict, List
from natsort import natsorted
from tabulate import tabulate
def sort_results(results: List):
results = natsorted(results, key=lambda item: [item[key] for key in item.keys()])
return results
def save_results(filename: str, data: List, formatted: bool = False):
data = format_data(data=data) if formatted else data
with open(filename, "a") as csvfile:
fieldnames = data[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
def format_data(data: List[Dict]) -> List[Dict]:
formatted_data = list()
for item in data:
formatted_item = format_keys(data=item)
formatted_data.append(formatted_item)
return formatted_data
def format_keys(data: Dict) -> Dict:
keys = {format_key(key=key): value for key, value in data.items()}
return keys
def format_key(key: str) -> str:
key = " ".join([k.capitalize() for k in re.split("_| ", key)])
return key
def show_results(results: List[Dict]):
headers = list(results[0].keys())
summary = map(lambda x: list(map(lambda item: item[1], x.items())), results)
print(tabulate(summary, headers=headers))
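# Illustrative sketch (not part of the original module): the helpers compose into a
# small reporting pipeline - natural sort, prettified column names, tabulated output.
if __name__ == "__main__":
    rows = [
        {"batch_size": 2, "avg latency": 1.8},
        {"batch_size": 1, "avg latency": 1.1},
    ]
    show_results(format_data(sort_results(rows)))  # prints a table with "Batch Size" / "Avg Latency" headers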
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/report.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
from typing import List, Optional, Dict, Tuple
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name
from ..core import EvaluationMode, MeasurementMode, OfflineMode
from .perf_analyzer import PerfAnalyzerRunner, PerfAnalyzerWarmupRunner
LOGGER = logging.getLogger("triton_performance_runner")
class TritonPerformanceRunner:
def __init__(
self,
server_url: str,
model_name: str,
input_data: Dict[int, Tuple],
batch_sizes: List[int],
concurrency: List[int],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
output_shared_memory_size: int,
result_path: pathlib.Path,
warmup: bool,
timeout: Optional[int],
verbose: bool,
flattened_input: bool,
):
self._warmup_runner = None
if warmup:
LOGGER.info("Running warmup before the main test")
self._warmup_runner = PerfAnalyzerWarmupRunner(
server_url=server_url,
model_name=model_name,
input_data=input_data,
batch_sizes=batch_sizes,
concurrency=concurrency,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
timeout=timeout,
flattened_input=flattened_input
)
LOGGER.info("Using Perf Analyzer for performance evaluation")
self._runner = PerfAnalyzerRunner(
server_url=server_url,
model_name=model_name,
input_data=input_data,
batch_sizes=batch_sizes,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
concurrency=concurrency,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
result_path=result_path,
timeout=timeout,
verbose=verbose,
flattened_input=flattened_input
)
def run(self):
if self._warmup_runner:
self._warmup_runner.run()
self._runner.run()
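# Construction sketch (illustration only; the model name and paths are hypothetical,
# and the enum members are assumed from this toolkit's core module). Calling run()
# additionally requires a live Triton server and the perf_analyzer binary:
#
#     runner = TritonPerformanceRunner(
#         server_url="grpc://localhost:8001",
#         model_name="dlrm",
#         input_data={1: ("input_data.json", [])},
#         batch_sizes=[1],
#         concurrency=[1],
#         measurement_mode=MeasurementMode.COUNT_WINDOWS,
#         measurement_interval=5000,
#         measurement_request_count=50,
#         evaluation_mode=EvaluationMode.ONLINE,
#         offline_mode=OfflineMode.SYSTEM,
#         output_shared_memory_size=102400,
#         result_path=pathlib.Path("results.csv"),
#         warmup=False,
#         timeout=None,
#         verbose=False,
#         flattened_input=False,
#     )
#     runner.run()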
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/triton_performance_runner/runner.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .runner import TritonPerformanceRunner # noqa: F401
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/triton_performance_runner/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import logging
import os
import pathlib
import sys
from distutils.version import LooseVersion
from typing import Dict, List, Optional, Tuple
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...core import EvaluationMode, MeasurementMode, OfflineMode
from ...report import save_results, show_results, sort_results
from ...utils import log_dict, parse_server_url
from .perf_analyzer import PerfAnalyzer
from .perf_config import PerfAnalyzerConfig
if LooseVersion(sys.version) >= LooseVersion("3.8.0"):
from importlib.metadata import version
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
else:
import pkg_resources
TRITON_CLIENT_VERSION = LooseVersion(
pkg_resources.get_distribution("tritonclient").version
)
LOGGER = logging.getLogger("triton_performance_runner.perf_analyzer")
class PerfAnalyzerRunner:
def __init__(
self,
server_url: str,
model_name: str,
input_data: Dict[int, Tuple],
batch_sizes: List[int],
concurrency: List[int],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
result_path: pathlib.Path,
output_shared_memory_size: int = 102400,
timeout: Optional[int] = None,
verbose: bool = False,
flattened_input: bool = False,
):
log_dict(
"Selected configuration",
{
"server_url": server_url,
"model_name": model_name,
"input_data": input_data,
"batch_sizes": batch_sizes,
"concurrency": concurrency,
"measurement_mode": measurement_mode,
"measurement_interval": measurement_interval,
"measurement_request_count": measurement_request_count,
"evaluation_mode": evaluation_mode,
"offline_mode": offline_mode,
"output_shared_memory_size": output_shared_memory_size,
"result_path": result_path,
"timeout": timeout,
"verbose": verbose,
},
)
if result_path.suffix != ".csv":
raise ValueError(
"Results path for Perf Analyzer is invalid. Please, provide the CSV file name. Example: results.csv"
)
self._server_url = server_url
self._model_name = model_name
self._input_data = input_data
self._batch_sizes = batch_sizes
self._concurrency = concurrency
self._measurement_mode = measurement_mode
self._measurement_interval = measurement_interval
self._measurement_request_count = measurement_request_count
self._evaluation_mode = evaluation_mode
self._offline_mode = offline_mode
self._result_path = result_path
self._output_shared_memory_size = output_shared_memory_size
self._timeout = timeout
self._verbose = verbose
self._protocol, self._host, self._port = parse_server_url(server_url)
self._flattened_input = flattened_input
def run(self):
results: List[Dict] = []
for batch_size in self._batch_sizes:
print("Measuring inference performance ")
input_data_filename, shapes = self._input_data[batch_size]
concurrency = 1
performance_partial_file = f"{self._evaluation_mode.value.lower()}_partial_{batch_size}_{concurrency}.csv"
perf_analyzer_batch_size = 1 if self._flattened_input else batch_size
params = {
"model-name": self._model_name,
"model-version": 1,
"batch-size": perf_analyzer_batch_size,
"url": f"{self._host}:{self._port}",
"protocol": self._protocol.value,
"input-data": input_data_filename,
"measurement-interval": self._measurement_interval,
"concurrency-range": f"{concurrency}:{concurrency}:1",
"latency-report-file": performance_partial_file,
}
if self._verbose:
params["extra-verbose"] = True
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
params["measurement-mode"] = self._measurement_mode.value
params["measurement-request-count"] = self._measurement_request_count
if self._evaluation_mode == EvaluationMode.OFFLINE:
params["shared-memory"] = self._offline_mode.value
params["output-shared-memory-size"] = self._output_shared_memory_size
if self._verbose:
log_dict(
f"Perf Analyzer config for batch_size: {batch_size} and concurrency: {concurrency}",
params,
)
config = PerfAnalyzerConfig()
for param, value in params.items():
config[param] = value
for shape in shapes:
config["shape"] = shape
perf_analyzer = PerfAnalyzer(config=config, timeout=self._timeout)
perf_analyzer.run()
self._update_performance_data(results, batch_size, performance_partial_file)
os.remove(performance_partial_file)
results = sort_results(results=results)
save_results(filename=self._result_path.as_posix(), data=results)
show_results(results=results)
def _calculate_average_latency(self, r):
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum(int(r.get(f, 0)) for f in avg_sum_fields)
return avg_latency
def _update_performance_data(
self, results: List, batch_size: int, performance_partial_file: str
):
row: Dict = {"Batch": batch_size}
with open(performance_partial_file) as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
avg_latency = self._calculate_average_latency(r)
row = {**row, **r, "avg latency": avg_latency}
if self._flattened_input:
# correction necessary because "formally" this is run with batch_size=1
row["Inferences/Second"] = str(
float(row["Inferences/Second"]) * batch_size
)
results.append(row)
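# Note (illustration, not part of the original module): _calculate_average_latency
# sums the per-stage latency columns of a single perf_analyzer CSV row. For a
# hypothetical row {"Server Queue": "24", "Server Compute": "1955", "Client Recv": "12"}
# it returns 24 + 1955 + 12 = 1991, since fields missing from the row default to 0.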
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/triton_performance_runner/perf_analyzer/runner.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .runner import PerfAnalyzerRunner # noqa: F401
from .warmup import PerfAnalyzerWarmupRunner # noqa: F401
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/triton_performance_runner/perf_analyzer/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
from distutils.version import LooseVersion
from importlib.metadata import version
from typing import Dict, List, Optional, Tuple
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...core import EvaluationMode, MeasurementMode, OfflineMode
from ...utils import parse_server_url
from .perf_analyzer import PerfAnalyzer
from .perf_config import PerfAnalyzerConfig
LOGGER = logging.getLogger("warmup")
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
class PerfAnalyzerWarmupRunner:
def __init__(
self,
server_url: str,
model_name: str,
batch_sizes: List[int],
concurrency: List[int],
input_data: Dict[int, Tuple],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
offline_mode: OfflineMode,
evaluation_mode: EvaluationMode,
output_shared_memory_size: int,
timeout: Optional[int],
flattened_input: bool = False,
):
self._model_name = model_name
self._input_data = input_data
self._measurement_mode = measurement_mode
self._offline_mode = offline_mode
self._evaluation_mode = evaluation_mode
self._output_shared_memory_size = output_shared_memory_size
self._protocol, self._host, self._port = parse_server_url(server_url)
self._measurement_interval = 2 * measurement_interval
self._measurement_request_count = 2 * measurement_request_count
self._batch_sizes = [min(batch_sizes)]
self._concurrency = [max(concurrency)]
self._timeout = timeout
self._flattened_input = flattened_input
def run(self):
for batch_size in self._batch_sizes:
input_data_filename, shapes = self._input_data[batch_size]
perf_analyzer_batch_size = 1 if self._flattened_input else batch_size
concurrency = 1
params = {
"model-name": self._model_name,
"model-version": 1,
"batch-size": perf_analyzer_batch_size,
"url": f"{self._host}:{self._port}",
"protocol": self._protocol.value,
"input-data": input_data_filename,
"measurement-interval": self._measurement_interval,
"concurrency-range": f"{concurrency}:{concurrency}:1",
"verbose": True,
}
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
params["measurement-mode"] = self._measurement_mode.value
params["measurement-request-count"] = self._measurement_request_count
if self._evaluation_mode == EvaluationMode.OFFLINE:
params["shared-memory"] = self._offline_mode.value
params["output-shared-memory-size"] = self._output_shared_memory_size
config = PerfAnalyzerConfig()
for param, value in params.items():
config[param] = value
for shape in shapes:
config["shape"] = shape
perf_analyzer = PerfAnalyzer(config=config, timeout=self._timeout)
perf_analyzer.run()
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/triton_performance_runner/perf_analyzer/warmup.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from .exceptions import PerfAnalyzerException
class PerfAnalyzerConfig:
"""
A config class to set arguments to the perf_analyzer.
An argument set to None will use the perf_analyzer's default.
"""
perf_analyzer_args = [
"async",
"sync",
"measurement-interval",
"measurement-mode",
"measurement-request-count",
"concurrency-range",
"request-rate-range",
"request-distribution",
"request-intervals",
"binary-search",
"num-of-sequence",
"latency-threshold",
"max-threads",
"stability-percentage",
"max-trials",
"percentile",
"input-data",
"shared-memory",
"output-shared-memory-size",
"sequence-length",
"string-length",
"string-data",
]
perf_analyzer_multiple_args = [
"shape",
]
input_to_options = [
"model-name",
"model-version",
"batch-size",
"url",
"protocol",
"latency-report-file",
"streaming",
]
input_to_verbose = ["verbose", "extra-verbose"]
def __init__(self):
"""
Construct a PerfAnalyzerConfig
"""
self._args = {k: None for k in self.perf_analyzer_args}
self._multiple_args = {k: [] for k in self.perf_analyzer_multiple_args}
self._options = {
"-m": None,
"-x": None,
"-b": None,
"-u": None,
"-i": None,
"-f": None,
"-H": None,
"-c": None,
"-t": None,
}
self._verbose = {"-v": None, "-v -v": None}
self._input_to_options = {
"model-name": "-m",
"model-version": "-x",
"batch-size": "-b",
"url": "-u",
"protocol": "-i",
"latency-report-file": "-f",
"streaming": "-H",
"concurrency": "-c",
"threads": "-t",
}
self._input_to_verbose = {"verbose": "-v", "extra-verbose": "-v -v"}
@classmethod
def allowed_keys(cls):
"""
Returns
-------
list of str
The keys that are allowed to be
passed into perf_analyzer
"""
return (
list(cls.perf_analyzer_args)
+ list(cls.perf_analyzer_multiple_args)
+ list(cls.input_to_options)
+ list(cls.input_to_verbose)
)
def update_config(self, params=None):
"""
Allows setting values from a
params dict
Parameters
----------
params: dict
keys are allowed args to perf_analyzer
"""
if params:
for key in params:
self[key] = params[key]
def to_cli_string(self):
"""
Utility function to convert a config into a
string of arguments to the perf_analyzer with CLI.
Returns
-------
str
cli command string consisting of all arguments
to the perf_analyzer set in the config, without
the executable name.
"""
# single dashed options, then verbose flags, then main args
args = [f"{k} {v}" for k, v in self._options.items() if v]
args += [k for k, v in self._verbose.items() if v]
args += [f"--{k}={v}" for k, v in self._args.items() if v]
for k, v in self._multiple_args.items():
for item in v:
args.append(f"--{k}={item}")
return " ".join(args)
def __getitem__(self, key: str):
"""
Gets an arguments value in config
Parameters
----------
key : str
The name of the argument to the perf_analyzer
Returns
-------
The value that the argument is set to in this config
Raises
------
        PerfAnalyzerException
If argument not found in the config
"""
if key in self._args:
return self._args[key]
elif key in self._multiple_args:
return self._multiple_args[key]
elif key in self._input_to_options:
return self._options[self._input_to_options[key]]
elif key in self._input_to_verbose:
return self._verbose[self._input_to_verbose[key]]
else:
raise PerfAnalyzerException(f"'{key}' Key not found in config")
def __setitem__(self, key: str, value: Any):
"""
Sets an arguments value in config
after checking if defined/supported.
Parameters
----------
key : str
The name of the argument to the perf_analyzer
value : (any)
The value to which the argument is being set
Raises
------
        PerfAnalyzerException
If key is unsupported or undefined in the
config class
"""
if key in self._args:
self._args[key] = value
elif key in self._multiple_args:
self._multiple_args[key].append(value)
elif key in self._input_to_options:
self._options[self._input_to_options[key]] = value
elif key in self._input_to_verbose:
self._verbose[self._input_to_verbose[key]] = value
else:
raise PerfAnalyzerException(
f"The argument '{key}' to the perf_analyzer " "is not supported by the model analyzer."
)
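# Usage sketch (not part of the original module; values are illustrative). Run as
# part of the package because of the relative import above.
if __name__ == "__main__":
    config = PerfAnalyzerConfig()
    config.update_config({
        "model-name": "dlrm",
        "batch-size": 64,
        "measurement-mode": "count_windows",
    })
    config["shape"] = "categorical_features:26"  # repeatable argument
    print(config.to_cli_string())
    # -m dlrm -b 64 --measurement-mode=count_windows --shape=categorical_features:26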
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/triton_performance_runner/perf_analyzer/perf_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class PerfAnalyzerException(Exception):
def __init__(self, message: str):
self._message = message
def __str__(self):
"""
Get the exception string representation.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
@property
def message(self):
"""
Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/triton_performance_runner/perf_analyzer/exceptions.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
from subprocess import PIPE, CalledProcessError, Popen
from typing import List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name
from .exceptions import PerfAnalyzerException
MAX_INTERVAL_CHANGES = 10
COUNT_INTERVAL_DELTA = 50
TIME_INTERVAL_DELTA = 2000
LOGGER = logging.getLogger(__name__)
class PerfAnalyzer:
"""
This class provides an interface for running workloads
with perf_analyzer.
"""
def __init__(self, config, timeout: Optional[int]):
"""
Parameters
----------
config : PerfAnalyzerConfig
keys are names of arguments to perf_analyzer,
values are their values.
"""
self.bin_path = "perf_analyzer"
self._config = config
self._output = ""
self._timeout = timeout
def run(self):
"""
Runs the perf analyzer with the
initialized configuration
Returns
-------
List of Records
List of the metrics obtained from this
run of perf_analyzer
Raises
------
PerfAnalyzerException
If subprocess throws CalledProcessError
"""
self._output = ""
for _ in range(MAX_INTERVAL_CHANGES):
command = [self.bin_path]
command += self._config.to_cli_string().replace("=", " ").split()
LOGGER.debug(f"Perf Analyze command: {command}")
if not self._timeout:
LOGGER.debug("Perf Analyze command timeout not set")
else:
LOGGER.debug(f"Perf Analyze command timeout: {self._timeout} [s]")
try:
self._run_with_stream(command=command)
return
except CalledProcessError as e:
                if self._failed_with_measurement_interval(e.output):
if self._config["measurement-mode"] is None or self._config["measurement-mode"] == "count_windows":
self._increase_request_count()
else:
self._increase_time_interval()
else:
raise PerfAnalyzerException(
f"Running perf_analyzer with {e.cmd} failed with" f" exit status {e.returncode} : {e.output}"
)
raise PerfAnalyzerException(f"Ran perf_analyzer {MAX_INTERVAL_CHANGES} times, but no valid requests recorded.")
def output(self):
"""
Returns
-------
The stdout output of the
last perf_analyzer run
"""
if self._output:
return self._output
raise PerfAnalyzerException("Attempted to get perf_analyzer output" "without calling run first.")
def _run_with_stream(self, command: List[str]):
commands_lst = []
if self._timeout:
commands_lst = ["timeout", str(self._timeout)]
commands_lst.extend(command)
LOGGER.debug(f"Run with stream: {commands_lst}")
process = Popen(commands_lst, start_new_session=True, stdout=PIPE, encoding="utf-8")
streamed_output = ""
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
streamed_output += output
print(output.rstrip())
self._output += streamed_output
result = process.poll()
LOGGER.debug(f"Perf Analyzer process exited with result: {result}")
# WAR for Perf Analyzer exit code 0 when stabilization failed
        if result == 0 and self._failed_with_measurement_interval(streamed_output):
LOGGER.debug("Perf Analyzer finished with exit status 0, however measurement stabilization failed.")
result = 1
if result != 0:
raise CalledProcessError(returncode=result, cmd=commands_lst, output=streamed_output)
    def _failed_with_measurement_interval(self, output: str):
checks = [
output.find("Failed to obtain stable measurement"),
output.find("Please use a larger time window"),
]
result = any([status != -1 for status in checks])
LOGGER.debug(f"Measurement stability message validation: {checks}. Result: {result}.")
return result
def _increase_request_count(self):
self._config["measurement-request-count"] += COUNT_INTERVAL_DELTA
LOGGER.debug(
"perf_analyzer's measurement request count is too small, "
f"increased to {self._config['measurement-request-count']}."
)
def _increase_time_interval(self):
self._config["measurement-interval"] += TIME_INTERVAL_DELTA
LOGGER.debug(
"perf_analyzer's measurement window is too small, "
f"increased to {self._config['measurement-interval']} ms."
)
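# Minimal sketch (not part of the original module): the stabilization check only
# scans perf_analyzer's textual output, so it can be exercised without the binary
# (config is unused here, hence None). Run as part of the package.
if __name__ == "__main__":
    analyzer = PerfAnalyzer(config=None, timeout=None)
    print(analyzer._failed_with_measurement_interval("Failed to obtain stable measurement"))  # True
    print(analyzer._failed_with_measurement_interval("All good"))  # False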
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/triton_performance_runner/perf_analyzer/perf_analyzer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
emb_output_name = "OUTPUT0"
ens_lookup_tensors_name = "LOOKUP_VECTORS"
dense_input1_name = "args_1"
ens_numerical_features_name = "numerical_features"
dense_numerical_features_name = "args_0"
dense_output_name = "output_1"
ens_output_name = "DENSE_OUTPUT"
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/tf/constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
from deployment.tf.deploy_dense import deploy_dense
from deployment.tf.deploy_ensemble import deploy_ensemble
from deployment.tf.deploy_sparse import deploy_sparse
from deployment.tf.deploy_monolithic import deploy_monolithic
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/tf/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import json
import os
import tensorflow as tf
from tensorflow.python.saved_model import save_options
from nn.embedding import DualEmbeddingGroup
from nn.dense_model import DenseModel
class SparseModel(tf.keras.Model):
def __init__(self, cardinalities, output_dim, memory_threshold):
super().__init__()
self.cardinalities = cardinalities
self.output_dim = output_dim
self.embedding = DualEmbeddingGroup(cardinalities, output_dim, memory_threshold, use_mde_embeddings=False)
@tf.function
def call(self, x):
x = self.embedding(x)
x = tf.reshape(x, [-1, len(self.cardinalities) * self.output_dim])
return x
class Model(tf.keras.Model):
def __init__(self, sparse_submodel, dense_submodel, cpu):
super().__init__()
self.sparse_submodel = sparse_submodel
self.dense_submodel = dense_submodel
self.cpu = cpu
def call(self, numerical_features, cat_features):
device = '/CPU:0' if self.cpu else '/GPU:0'
with tf.device(device):
embedding_outputs = self.sparse_submodel(cat_features)
y = self.dense_submodel(numerical_features, embedding_outputs)
return y
def load_dense(src, model_precision, model_format):
dense_model = DenseModel.from_config(os.path.join(src, "config.json"))
if dense_model.amp and model_precision == "fp16" and model_format == 'tf-savedmodel':
policy = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(policy)
if dense_model.interaction == 'dot_custom_cuda':
dense_model.interaction = 'dot_tensorflow'
dense_model._create_interaction_op()
dense_model.load_weights(os.path.join(src, "dense"))
dense_model.transpose = False
dense_model.force_initialization(training=False)
return dense_model
def deploy_monolithic(
sparse_src,
dense_src,
dst,
model_name,
max_batch_size,
engine_count_per_device,
num_gpus=1,
version="1",
cpu=False,
model_precision='fp32'
):
if model_precision == 'fp16':
policy = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(policy)
dense_model = load_dense(src=dense_src, model_precision=model_precision, model_format='tf-savedmodel')
print("deploy monolithic dst: ", dst)
with open(os.path.join(sparse_src, "config.json")) as f:
src_config = json.load(f)
num_cat_features = len(src_config["categorical_cardinalities"])
src_paths = [os.path.join(sparse_src, f"feature_{i}.npy") for i in range(num_cat_features)]
sparse_model = SparseModel(cardinalities=src_config["categorical_cardinalities"],
output_dim=src_config['embedding_dim'][0],
memory_threshold=75 if not cpu else 0)
model = Model(sparse_submodel=sparse_model, dense_submodel=dense_model, cpu=cpu)
dummy_batch_size = 65536
dummy_categorical = tf.zeros(shape=(dummy_batch_size, len(src_config["categorical_cardinalities"])), dtype=tf.int32)
dummy_numerical = tf.zeros(shape=(dummy_batch_size, dense_model.num_numerical_features), dtype=tf.float32)
_ = model(numerical_features=dummy_numerical, cat_features=dummy_categorical)
options = save_options.SaveOptions(experimental_variable_policy=save_options.VariablePolicy.SAVE_VARIABLE_DEVICES)
savedmodel_dir = os.path.join(dst, model_name, version, 'model.savedmodel')
os.makedirs(savedmodel_dir)
tf.keras.models.save_model(model=model, filepath=savedmodel_dir, overwrite=True, options=options)
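# Example invocation (illustration only; paths are hypothetical and the sparse and
# dense checkpoints must already exist on disk):
#
#     deploy_monolithic(
#         sparse_src="/checkpoints/dlrm/sparse",
#         dense_src="/checkpoints/dlrm/dense",
#         dst="/models",
#         model_name="dlrm_monolithic",
#         max_batch_size=65536,
#         engine_count_per_device=1,
#     )
#
# The result is a single Triton-servable SavedModel under
# /models/dlrm_monolithic/1/model.savedmodel.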
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/tf/deploy_monolithic.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import tritonclient.utils
import tritonclient.http
import numpy as np
import tensorflow as tf
import deployment.tf.constants as c
class RecsysTritonEnsemble:
def __init__(self, model_name, num_tables, verbose, categorical_sizes, fused_embedding=True):
self.model_name = model_name
self.triton_client = tritonclient.http.InferenceServerClient(url="localhost:8000", verbose=verbose)
if not self.triton_client.is_server_live():
raise ValueError('Triton server is not live!')
print('triton model repo: ', self.triton_client.get_model_repository_index())
def __call__(self, inputs, sigmoid=False, training=False):
numerical_features, cat_features = list(inputs.values())
batch_size = cat_features[0].shape[0]
cat_features = tf.concat(cat_features, axis=1).numpy().astype(np.int32)
numerical_features = numerical_features.numpy().astype(np.float32)
inputs = [
tritonclient.http.InferInput("categorical_features",
cat_features.shape,
tritonclient.utils.np_to_triton_dtype(np.int32)),
tritonclient.http.InferInput("numerical_features",
numerical_features.shape,
tritonclient.utils.np_to_triton_dtype(np.float32)),
]
inputs[0].set_data_from_numpy(cat_features)
inputs[1].set_data_from_numpy(numerical_features)
outputs = [tritonclient.http.InferRequestedOutput(c.ens_output_name)]
response = self.triton_client.infer(self.model_name, inputs, outputs=outputs)
result_np = response.as_numpy(c.ens_output_name)
result_np = result_np.reshape([batch_size])
return result_np
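# Usage sketch (illustration only; the model name is hypothetical and a Triton
# server must already be serving the ensemble on localhost:8000). The wrapper takes
# the same inputs dict as the in-process model: numerical features first, then the
# list of per-table categorical tensors.
#
#     ensemble = RecsysTritonEnsemble(model_name="dlrm_ens", num_tables=26,
#                                     verbose=False, categorical_sizes=[...])
#     predictions = ensemble({"numerical_features": numerical, "cat_features": cats})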
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/tf/triton_ensemble_wrapper.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import argparse
import logging
import os
import pathlib
import shutil
import subprocess
import tempfile
import textwrap
from typing import List
import numpy as np
import tensorflow as tf
from nn.dense_model import DenseModel
from . import constants as c
LOGGER = logging.getLogger(__name__)
_dense_model_config_template = r"""name: "{model_name}"
{backend_type}: "{backend_runtime}"
max_batch_size: 0
input [
{{
name: "{input1}"
data_type: TYPE_FP32
dims: [-1, {input1_dim}]
}},
{{
name: "{input2}"
data_type: TYPE_FP32
dims: [-1, {input2_dim}]
}}
]
output [
{{
name: "{output1}"
data_type: TYPE_FP32
dims: [-1,1]
}}
]
version_policy: {{
specific:{{versions: 1}}
}},
instance_group [
{{
count: {engine_count_per_device}
kind : KIND_GPU
gpus: [0]
}}
]
"""
def _execute_cmd(cmd: List, verbose: bool = False):
"""Execute command as subprocess.
Args:
cmd: A command definition
verbose: Stream command output
Raises:
OSError when command execution failed
"""
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8"
)
if verbose:
LOGGER.info("Command output:")
stream_output = ""
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
stream_output += output
if verbose:
print(textwrap.indent(output.rstrip(), " ")) # noqa: T201
result = process.poll()
if result != 0:
raise OSError(
f"Processes exited with error code:{result}. Command to reproduce error:\n{' '.join(cmd)}"
)
def _savedmodel2onnx(source_model_path, dst_model_path, opset=11, verbose=False):
convert_cmd = [
"python",
"-m",
"tf2onnx.convert",
"--saved-model",
source_model_path.as_posix(),
"--output",
dst_model_path.as_posix(),
"--opset",
str(opset),
"--verbose",
]
_execute_cmd(convert_cmd, verbose=verbose)
def _onnx2trt(
model,
source_model_path,
dst_model_path,
precision,
optimal_batch_size,
max_batch_size,
verbose=False,
):
min_batch = np.array([model.num_numerical_features, sum(model.embedding_dim)])
optimal_batch = min_batch * optimal_batch_size
max_batch = min_batch * max_batch_size
print(
f"min batch {min_batch}, optimal_batch: {optimal_batch}, max_batch: {max_batch}"
)
convert_cmd = [
"trtexec",
f"--onnx={source_model_path.as_posix()}",
"--buildOnly",
f"--saveEngine={dst_model_path.as_posix()}",
f"--minShapes=args_0:1x{min_batch[0]},args_1:1x{min_batch[1]}",
f"--optShapes=args_0:{optimal_batch_size}x{min_batch[0]},args_1:{optimal_batch_size}x{min_batch[1]}",
f"--maxShapes=args_0:{max_batch_size}x{min_batch[0]},args_1:{max_batch_size}x{min_batch[1]}"
]
if precision == "fp16":
convert_cmd += ["--fp16"]
_execute_cmd(convert_cmd, verbose=True)
def _convert2onnx(source_model_path, workdir, verbose=False):
model_path = workdir / "model.onnx"
_savedmodel2onnx(
source_model_path=source_model_path,
dst_model_path=model_path,
verbose=verbose,
)
return model_path
def _convert2trt(
model,
source_model_path,
precision,
workdir,
optimal_batch_size,
max_batch_size,
verbose=False,
):
onnx_model_path = _convert2onnx(
source_model_path=source_model_path,
workdir=workdir,
verbose=verbose,
)
trt_model_path = workdir / "model.plan"
_onnx2trt(
model=model,
source_model_path=onnx_model_path,
dst_model_path=trt_model_path,
precision=precision,
verbose=verbose,
optimal_batch_size=optimal_batch_size,
max_batch_size=max_batch_size,
)
return trt_model_path
def deploy_dense(
src,
dst,
model_name,
model_format,
model_precision,
max_batch_size,
engine_count_per_device,
trt_optimal_batch_size,
version="1",
):
print("deploy dense dst: ", dst)
os.makedirs(dst, exist_ok=True)
dense_model = DenseModel.from_config(os.path.join(src, "config.json"))
if model_precision == "fp16" and model_format == 'tf-savedmodel':
policy = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(policy)
# Currently, there's no support for custom kernels deployment.
# Use pure tensorflow implementation instead on the inference side.
if dense_model.interaction == 'dot_custom_cuda':
dense_model.interaction = 'dot_tensorflow'
dense_model._create_interaction_op()
dense_model.load_weights(os.path.join(src, "dense"))
dense_model.transpose = False
dense_model.force_initialization(training=False, flattened_input=False)
tempdir_path = '/tmp/deploy_recsys'
shutil.rmtree(tempdir_path, ignore_errors=True)
os.makedirs(tempdir_path, exist_ok=True)
tempdir = pathlib.Path(tempdir_path)
model_path = tempdir / "model.savedmodel"
dense_model.save_model(model_path.as_posix(), save_input_signature=False)
model_store = pathlib.Path(dst) / str(version)
model_store.mkdir(parents=True, exist_ok=True)
if model_format == "tf-savedmodel":
backend_type = "platform"
backend_runtime = "tensorflow_savedmodel"
shutil.copytree(model_path, model_store / "model.savedmodel")
elif model_format == "onnx":
backend_type = "backend"
backend_runtime = "onnxruntime"
model_path = _convert2onnx(model_path, workdir=tempdir)
shutil.copy(model_path, model_store / "model.onnx")
elif model_format == "trt":
backend_type = "backend"
backend_runtime = "tensorrt"
model_path = _convert2trt(
dense_model,
model_path,
precision=model_precision,
workdir=tempdir,
optimal_batch_size=trt_optimal_batch_size,
max_batch_size=max_batch_size,
)
shutil.copy(model_path, model_store / "model.plan")
else:
raise ValueError(f"Unsupported format: {model_format}")
shutil.rmtree(tempdir_path)
with open(os.path.join(dst, "config.pbtxt"), "w") as f:
s = _dense_model_config_template.format(
backend_type=backend_type,
backend_runtime=backend_runtime,
model_name=model_name,
input1=c.dense_input1_name,
input1_dim=sum(dense_model.embedding_dim),
input2=c.dense_numerical_features_name,
input2_dim=dense_model.num_numerical_features,
output1=c.dense_output_name,
max_batch_size=max_batch_size,
engine_count_per_device=engine_count_per_device,
)
f.write(s)
return dense_model.num_numerical_features
if __name__ == "__main__":
main()
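# Example invocation (illustration only; paths are hypothetical and a trained dense
# checkpoint must exist under `src`):
#
#     deploy_dense(
#         src="/checkpoints/dlrm/dense",
#         dst="/models/dlrm_dense",
#         model_name="dlrm_dense",
#         model_format="tf-savedmodel",  # or "onnx" / "trt"
#         model_precision="fp16",
#         max_batch_size=65536,
#         engine_count_per_device=1,
#         trt_optimal_batch_size=2048,
#     )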
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/tf/deploy_dense.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import json
import os
import tensorflow as tf
from tensorflow.python.saved_model import save_options
from nn.embedding import DualEmbeddingGroup
class Model(tf.keras.Model):
def __init__(self, cardinalities, output_dim, memory_threshold):
super().__init__()
self.cardinalities = cardinalities
self.output_dim = output_dim
self.embedding = DualEmbeddingGroup(cardinalities, output_dim, memory_threshold, use_mde_embeddings=False)
@tf.function
def call(self, x):
x = self.embedding(x)
x = tf.reshape(x, [-1, len(self.cardinalities) * self.output_dim])
return x
_sparse_model_config_template = r"""name: "{model_name}"
platform: "tensorflow_savedmodel"
max_batch_size:{max_batch_size}
optimization {{
execution_accelerators {{
gpu_execution_accelerator {{
name: "gpu_io"
}}
}}
}}
version_policy: {{
specific:{{versions: {version}}}
}},
instance_group [
{{
count: {engine_count_per_device}
kind : KIND_GPU
gpus : [0]
}}
]"""
def save_triton_config(
dst_path, model_name, version, max_batch_size, engine_count_per_device
):
config_str = _sparse_model_config_template.format(
model_name=model_name,
max_batch_size=max_batch_size,
version=version,
engine_count_per_device=engine_count_per_device,
)
with open(dst_path, "w") as f:
f.write(config_str)
print("Wrote sparse model Triton config to:", dst_path)
def deploy_sparse(
src,
dst,
model_name,
max_batch_size,
engine_count_per_device,
memory_threshold_gb,
num_gpus=1,
version="1",
**kwargs,
):
print("deploy sparse dst: ", dst)
with open(os.path.join(src, "config.json")) as f:
src_config = json.load(f)
model = Model(cardinalities=src_config["categorical_cardinalities"],
output_dim=src_config['embedding_dim'][0],
memory_threshold=memory_threshold_gb)
x = tf.zeros(shape=(65536, len(src_config["categorical_cardinalities"])), dtype=tf.int32)
_ = model(x)
model.embedding.restore_checkpoint(src)
options = save_options.SaveOptions(experimental_variable_policy=save_options.VariablePolicy.SAVE_VARIABLE_DEVICES)
savedmodel_dir = os.path.join(dst, '1', 'model.savedmodel')
os.makedirs(savedmodel_dir)
tf.keras.models.save_model(model=model, filepath=savedmodel_dir, overwrite=True, options=options)
save_triton_config(
dst_path=os.path.join(dst, "config.pbtxt"),
model_name=model_name,
version=version,
max_batch_size=max_batch_size,
engine_count_per_device=engine_count_per_device,
)
return len(src_config["categorical_cardinalities"])
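# Minimal sketch (not part of the original module; assumes the repository's
# dependencies are importable): save_triton_config only renders the pbtxt template,
# so it can be exercised on its own against a temporary directory.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        save_triton_config(
            dst_path=os.path.join(tmp, "config.pbtxt"),
            model_name="sparse_demo",
            version=1,
            max_batch_size=65536,
            engine_count_per_device=1,
        )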
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/tf/deploy_sparse.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import os
_config_template = '''
name: "{ensemble_name}"
platform: "ensemble"
max_batch_size: {max_batch_size}
input [
{{
name: "categorical_features"
data_type: TYPE_INT32
dims: [{num_cat_features}]
}},
{{
name: "numerical_features"
data_type: TYPE_FP32
dims: [{num_numerical_features}]
}}
]
output [
{{
name: "DENSE_OUTPUT"
data_type: TYPE_FP32
dims: [1]
}}
]
ensemble_scheduling {{
step [
{{
model_name: "{sparse_model_name}"
model_version: -1
input_map {{
key: "input_1"
value: "categorical_features"
}},
output_map {{
key: "output_1"
value: "LOOKUP_VECTORS"
}}
}},
{{
model_name: "{dense_model_name}"
model_version: -1
input_map {{
key: "args_1"
value: "LOOKUP_VECTORS"
}},
input_map {{
key: "args_0"
value: "numerical_features"
}},
output_map {{
key: "output_1"
value: "DENSE_OUTPUT"
}}
}}
]
}}
'''
def deploy_ensemble(dst, model_name, sparse_model_name, dense_model_name,
num_cat_features, num_numerical_features, max_batch_size, version):
config_str = _config_template.format(
ensemble_name=model_name,
dense_model_name=dense_model_name,
sparse_model_name=sparse_model_name,
num_cat_features=num_cat_features,
num_numerical_features=num_numerical_features,
max_batch_size=max_batch_size
)
with open(os.path.join(dst, "config.pbtxt"), "w") as f:
f.write(config_str)
os.mkdir(os.path.join(dst, str(version)))
print("Ensemble configuration:")
print(config_str)
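# Minimal sketch (not part of the original module; names and sizes are illustrative):
# the ensemble step only writes config.pbtxt and a version directory, so it can be
# tried end-to-end in a temporary location.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        deploy_ensemble(
            dst=tmp,
            model_name="dlrm_ens",
            sparse_model_name="sparse",
            dense_model_name="dense",
            num_cat_features=26,
            num_numerical_features=13,
            max_batch_size=65536,
            version=1,
        )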
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/tf/deploy_ensemble.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
key_local_prefix = "KEYS"
numkey_local_prefix = "NUMKEYS"
key_global_prefix = "EMB_KEY"
numkey_global_prefix = "EMB_N_KEY"
emb_output_name = "OUTPUT0"
ens_lookup_tensors_name = "LOOKUP_VECTORS"
dense_input1_name = "args_1"
ens_numerical_features_name = "numerical_features"
dense_numerical_features_name = "args_0"
dense_output_name = "output_1"
ens_output_name = "DENSE_OUTPUT"
hps_model_name = "hps_embedding"
dense_model_name = "tf_reshape_dense_model"
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/hps/constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
from deployment.hps.constants import dense_model_name, hps_model_name
from deployment.hps.deploy_dense import deploy_dense
from deployment.hps.deploy_ensemble import deploy_ensemble
from deployment.hps.deploy_sparse import deploy_sparse
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/hps/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import tritonclient.utils
import tritonclient.http
import numpy as np
import deployment.hps.constants as c
class NumpyToHpsInputConverter:
def __init__(self, categorical_sizes, fused_embedding=True):
self.offsets = np.cumsum([0] + categorical_sizes)[:-1]
self.fused_embedding = fused_embedding
def __call__(self, numerical_features, cat_features):
batch_size = cat_features[0].shape[0]
cat_features = [f.numpy().flatten() for f in cat_features]
# add the offsets
if self.fused_embedding:
cat_features = [f + o for f, o in zip(cat_features, self.offsets)]
key_tensor = np.concatenate(cat_features, axis=0).astype(np.int64).reshape([1, -1])
if self.fused_embedding:
nkey_tensor = np.full(shape=(1, 1), fill_value=batch_size * len(cat_features), dtype=np.int32)
else:
nkey_tensor = np.full(shape=(1, len(cat_features)), fill_value=batch_size, dtype=np.int32)
numerical_features = numerical_features.numpy().astype(np.float32).reshape([1, -1])
return key_tensor, nkey_tensor, numerical_features
class RecsysTritonEnsemble:
def __init__(self, model_name, num_tables, verbose, categorical_sizes, fused_embedding=True):
self.input_converter = NumpyToHpsInputConverter(categorical_sizes, fused_embedding)
self.model_name = model_name
self.triton_client = tritonclient.http.InferenceServerClient(url="localhost:8000", verbose=verbose)
if not self.triton_client.is_server_live():
raise ValueError('Triton server is not live!')
print('triton model repo: ', self.triton_client.get_model_repository_index())
def __call__(self, inputs, sigmoid=False, training=False):
numerical_features, cat_features = list(inputs.values())
batch_size = cat_features[0].shape[0]
key_tensor, nkey_tensor, numerical_features = self.input_converter(numerical_features, cat_features)
inputs = [
tritonclient.http.InferInput(c.key_global_prefix,
key_tensor.shape,
tritonclient.utils.np_to_triton_dtype(np.int64)),
tritonclient.http.InferInput(c.numkey_global_prefix,
nkey_tensor.shape,
tritonclient.utils.np_to_triton_dtype(np.int32)),
tritonclient.http.InferInput(c.ens_numerical_features_name,
numerical_features.shape,
tritonclient.utils.np_to_triton_dtype(np.float32)),
]
inputs[0].set_data_from_numpy(key_tensor)
inputs[1].set_data_from_numpy(nkey_tensor)
inputs[2].set_data_from_numpy(numerical_features)
outputs = [tritonclient.http.InferRequestedOutput(c.ens_output_name)]
response = self.triton_client.infer(self.model_name, inputs, outputs=outputs)
result_np = response.as_numpy(c.ens_output_name)
result_np = result_np.reshape([batch_size])
return result_np
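# Minimal usage sketch (assumptions: a Triton server with the deployed
# ensemble is live on localhost:8000; the model name and table sizes below
# are hypothetical). Note that __call__ relies only on the order of the two
# dict values, not on the key names:
#
#     import tensorflow as tf
#     sizes = [1000, 1000, 1000]
#     ensemble = RecsysTritonEnsemble(
#         model_name="ensemble_model", num_tables=len(sizes), verbose=False,
#         categorical_sizes=sizes, fused_embedding=True,
#     )
#     batch = {
#         "numerical_features": tf.random.uniform([64, 13]),
#         "categorical_features": [
#             tf.random.uniform([64, 1], maxval=s, dtype=tf.int64) for s in sizes
#         ],
#     }
#     predictions = ensemble(batch)  # numpy array of shape [64]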
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/hps/triton_ensemble_wrapper.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import logging
import os
import pathlib
import shutil
import subprocess
import tempfile
import textwrap
from typing import List
import numpy as np
import tensorflow as tf
from nn.dense_model import DenseModel
from . import constants as c
LOGGER = logging.getLogger(__name__)
_dense_model_config_template = r"""name: "{model_name}"
{backend_type}: "{backend_runtime}"
max_batch_size: 0
input [
{{
name: "{input1}"
data_type: TYPE_FP32
dims: [-1]
}},
{{
name: "{input2}"
data_type: TYPE_FP32
dims: [-1]
}}
]
output [
{{
name: "{output1}"
data_type: TYPE_FP32
dims: [-1,1]
}}
]
version_policy: {{
specific:{{versions: 1}}
}},
instance_group [
{{
count: {engine_count_per_device}
kind : KIND_GPU
gpus: [0]
}}
]
"""
def _execute_cmd(cmd: List, verbose: bool = False):
"""Execute command as subprocess.
Args:
cmd: A command definition
verbose: Stream command output
Raises:
OSError when command execution failed
"""
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8"
)
if verbose:
LOGGER.info("Command output:")
stream_output = ""
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
stream_output += output
if verbose:
print(textwrap.indent(output.rstrip(), " ")) # noqa: T201
result = process.poll()
if result != 0:
raise OSError(
f"Processes exited with error code:{result}. Command to reproduce error:\n{' '.join(cmd)}"
)
def _savedmodel2onnx(source_model_path, dst_model_path, opset=11, verbose=False):
convert_cmd = [
"python",
"-m",
"tf2onnx.convert",
"--saved-model",
source_model_path.as_posix(),
"--output",
dst_model_path.as_posix(),
"--opset",
str(opset),
"--verbose",
]
_execute_cmd(convert_cmd, verbose=verbose)
def _onnx2trt(
model,
source_model_path,
dst_model_path,
precision,
optimal_batch_size,
max_batch_size,
verbose=False,
):
    # Per-sample element counts of the two flattened inputs: args_0 carries
    # the numerical features, args_1 the concatenated embedding vectors.
    # The TRT min/opt/max shapes below scale these by the batch size.
    min_batch = np.array([model.num_numerical_features, sum(model.embedding_dim)])
optimal_batch = min_batch * optimal_batch_size
max_batch = min_batch * max_batch_size
print(
f"min batch {min_batch}, optimal_batch: {optimal_batch}, max_batch: {max_batch}"
)
convert_cmd = [
"trtexec",
f"--onnx={source_model_path.as_posix()}",
"--buildOnly",
f"--saveEngine={dst_model_path.as_posix()}",
f"--minShapes=args_0:{min_batch[0]},args_1:{min_batch[1]}",
f"--optShapes=args_0:{optimal_batch[0]},args_1:{optimal_batch[1]}",
f"--maxShapes=args_0:{max_batch[0]},args_1:{max_batch[1]}",
]
if precision == "fp16":
convert_cmd += ["--fp16"]
_execute_cmd(convert_cmd, verbose=verbose)
def _convert2onnx(source_model_path, workdir, verbose=False):
model_path = workdir / "model.onnx"
_savedmodel2onnx(
source_model_path=source_model_path,
dst_model_path=model_path,
verbose=verbose,
)
return model_path
def _convert2trt(
model,
source_model_path,
precision,
workdir,
optimal_batch_size,
max_batch_size,
verbose=False,
):
onnx_model_path = _convert2onnx(
source_model_path=source_model_path,
workdir=workdir,
verbose=verbose,
)
trt_model_path = workdir / "model.plan"
_onnx2trt(
model=model,
source_model_path=onnx_model_path,
dst_model_path=trt_model_path,
precision=precision,
verbose=verbose,
optimal_batch_size=optimal_batch_size,
max_batch_size=max_batch_size,
)
return trt_model_path
def _set_tf_memory_growth():
    # Keep TensorFlow from pre-allocating the entire GPU, so that other
    # processes sharing the device (e.g. the conversion tools invoked as
    # subprocesses) are not starved of memory.
    physical_devices = tf.config.list_physical_devices("GPU")
    for d in physical_devices:
        tf.config.experimental.set_memory_growth(d, True)
def deploy_dense(
src,
dst,
model_name,
model_format,
model_precision,
max_batch_size,
engine_count_per_device,
trt_optimal_batch_size,
version="1",
):
print("deploy dense dst: ", dst)
_set_tf_memory_growth()
os.makedirs(dst, exist_ok=True)
dense_model = DenseModel.from_config(os.path.join(src, "config.json"))
    # Mixed precision is applied here only for the native SavedModel path;
    # for TensorRT, fp16 is instead requested at engine-build time (--fp16).
    if model_precision == "fp16" and model_format == 'tf-savedmodel':
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
# Currently, there's no support for custom kernels deployment.
# Use pure tensorflow implementation instead on the inference side.
if dense_model.interaction == 'dot_custom_cuda':
dense_model.interaction = 'dot_tensorflow'
dense_model._create_interaction_op()
dense_model.load_weights(os.path.join(src, "dense"))
# transpose needed here because HPS expects a table-major format vs TensorFlow uses batch-major
dense_model.transpose = True
dense_model.force_initialization(training=False, flattened_input=True)
with tempfile.TemporaryDirectory() as tempdir:
tempdir = pathlib.Path(tempdir)
model_path = tempdir / "model.savedmodel"
dense_model.save_model(model_path.as_posix(), save_input_signature=False)
model_store = pathlib.Path(dst) / str(version)
model_store.mkdir(parents=True, exist_ok=True)
if model_format == "tf-savedmodel":
backend_type = "platform"
backend_runtime = "tensorflow_savedmodel"
shutil.copytree(model_path, model_store / "model.savedmodel")
elif model_format == "onnx":
backend_type = "backend"
backend_runtime = "onnxruntime"
model_path = _convert2onnx(model_path, workdir=tempdir)
shutil.copy(model_path, model_store / "model.onnx")
elif model_format == "trt":
backend_type = "backend"
backend_runtime = "tensorrt"
model_path = _convert2trt(
dense_model,
model_path,
precision=model_precision,
workdir=tempdir,
optimal_batch_size=trt_optimal_batch_size,
max_batch_size=max_batch_size,
)
shutil.copy(model_path, model_store / "model.plan")
else:
raise ValueError(f"Unsupported format: {model_format}")
with open(os.path.join(dst, "config.pbtxt"), "w") as f:
s = _dense_model_config_template.format(
backend_type=backend_type,
backend_runtime=backend_runtime,
model_name=model_name,
input1=c.dense_input1_name,
input2=c.dense_numerical_features_name,
output1=c.dense_output_name,
max_batch_size=max_batch_size,
engine_count_per_device=engine_count_per_device,
)
f.write(s)
print(f"{model_name} configuration:")
print(s)
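# Example invocation (a sketch; the paths and batch sizes are assumptions,
# the model name comes from deployment.hps.constants):
#
#     deploy_dense(
#         src="/checkpoint/dlrm",
#         dst="/model_repository/tf_reshape_dense_model",
#         model_name=c.dense_model_name,
#         model_format="trt",
#         model_precision="fp16",
#         max_batch_size=65536,
#         engine_count_per_device=1,
#         trt_optimal_batch_size=2048,
#     )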
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/hps/deploy_dense.py |