python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .perf_analyzer import PerfAnalyzer # noqa: F401
from .perf_config import PerfAnalyzerConfig # noqa: F401
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/perf_analyzer/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from .exceptions import PerfAnalyzerException
class PerfAnalyzerConfig:
"""
A config class to set arguments to the perf_analyzer.
An argument set to None will use the perf_analyzer's default.
"""
perf_analyzer_args = [
"async",
"sync",
"measurement-interval",
"measurement-mode",
"measurement-request-count",
"concurrency-range",
"request-rate-range",
"request-distribution",
"request-intervals",
"binary-search",
"num-of-sequence",
"latency-threshold",
"max-threads",
"stability-percentage",
"max-trials",
"percentile",
"input-data",
"shared-memory",
"output-shared-memory-size",
"sequence-length",
"string-length",
"string-data",
]
perf_analyzer_multiple_args = [
"shape",
]
input_to_options = [
"model-name",
"model-version",
"batch-size",
"url",
"protocol",
"latency-report-file",
"streaming",
]
input_to_verbose = ["verbose", "extra-verbose"]
def __init__(self):
"""
Construct a PerfAnalyzerConfig
"""
self._args = {k: None for k in self.perf_analyzer_args}
self._multiple_args = {k: [] for k in self.perf_analyzer_multiple_args}
self._options = {
"-m": None,
"-x": None,
"-b": None,
"-u": None,
"-i": None,
"-f": None,
"-H": None,
"-c": None,
"-t": None,
}
self._verbose = {"-v": None, "-v -v": None}
self._input_to_options = {
"model-name": "-m",
"model-version": "-x",
"batch-size": "-b",
"url": "-u",
"protocol": "-i",
"latency-report-file": "-f",
"streaming": "-H",
"concurrency": "-c",
"threads": "-t",
}
self._input_to_verbose = {"verbose": "-v", "extra-verbose": "-v -v"}
@classmethod
def allowed_keys(cls):
"""
Returns
-------
list of str
The keys that are allowed to be
passed into perf_analyzer
"""
return (
list(cls.perf_analyzer_args)
+ list(cls.perf_analyzer_multiple_args)
+ list(cls.input_to_options)
+ list(cls.input_to_verbose)
)
def update_config(self, params=None):
"""
Allows setting values from a
params dict
Parameters
----------
params: dict
keys are allowed args to perf_analyzer
"""
if params:
for key in params:
self[key] = params[key]
def to_cli_string(self):
"""
Utility function to convert a config into a
string of CLI arguments for the perf_analyzer.
Returns
-------
str
cli command string consisting of all arguments
to the perf_analyzer set in the config, without
the executable name.
"""
# single dashed options, then verbose flags, then main args
args = [f"{k} {v}" for k, v in self._options.items() if v]
args += [k for k, v in self._verbose.items() if v]
args += [f"--{k}={v}" for k, v in self._args.items() if v]
for k, v in self._multiple_args.items():
for item in v:
args.append(f"--{k}={item}")
return " ".join(args)
def __getitem__(self, key: str):
"""
Gets an argument's value from the config
Parameters
----------
key : str
The name of the argument to the perf_analyzer
Returns
-------
The value that the argument is set to in this config
Raises
------
PerfAnalyzerException
If argument not found in the config
"""
if key in self._args:
return self._args[key]
elif key in self._multiple_args:
return self._multiple_args[key]
elif key in self._input_to_options:
return self._options[self._input_to_options[key]]
elif key in self._input_to_verbose:
return self._verbose[self._input_to_verbose[key]]
else:
raise PerfAnalyzerException(f"'{key}' Key not found in config")
def __setitem__(self, key: str, value: Any):
"""
Sets an argument's value in the config
after checking that it is defined/supported.
Parameters
----------
key : str
The name of the argument to the perf_analyzer
value : (any)
The value to which the argument is being set
Raises
------
PerfAnalyzerException
If key is unsupported or undefined in the
config class
"""
if key in self._args:
self._args[key] = value
elif key in self._multiple_args:
self._multiple_args[key].append(value)
elif key in self._input_to_options:
self._options[self._input_to_options[key]] = value
elif key in self._input_to_verbose:
self._verbose[self._input_to_verbose[key]] = value
else:
raise PerfAnalyzerException(
f"The argument '{key}' to the perf_analyzer " "is not supported by the model analyzer."
)
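# --- Usage sketch (illustrative, not part of the original module). The
# parameter values below are hypothetical; they only demonstrate the
# dict-style interface and the rendered CLI string. ---
if __name__ == "__main__":
    config = PerfAnalyzerConfig()
    config.update_config({
        "model-name": "my_model",        # mapped to the -m option
        "batch-size": 8,                 # mapped to the -b option
        "measurement-mode": "count_windows",
        "shape": "input__0:3,224,224",   # multiple-value arg: appended, not overwritten
    })
    # Prints: -m my_model -b 8 --measurement-mode=count_windows --shape=input__0:3,224,224
    print(config.to_cli_string())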
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/perf_analyzer/perf_config.py |
class PerfAnalyzerException(Exception):
def __init__(self, message: str):
self._message = message
def __str__(self):
"""
Get the exception string representation.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
@property
def message(self):
"""
Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/perf_analyzer/exceptions.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
from subprocess import PIPE, CalledProcessError, Popen
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .exceptions import PerfAnalyzerException
MAX_INTERVAL_CHANGES = 10
COUNT_INTERVAL_DELTA = 50
TIME_INTERVAL_DELTA = 2000
LOGGER = logging.getLogger(__name__)
class PerfAnalyzer:
"""
This class provides an interface for running workloads
with perf_analyzer.
"""
def __init__(self, config):
"""
Parameters
----------
config : PerfAnalyzerConfig
keys are names of arguments to perf_analyzer,
values are their values.
"""
self.bin_path = "perf_analyzer"
self._config = config
self._output = str()
def run(self):
"""
Runs the perf analyzer with the
initialized configuration
Returns
-------
List of Records
List of the metrics obtained from this
run of perf_analyzer
Raises
------
PerfAnalyzerException
If subprocess throws CalledProcessError
"""
for _ in range(MAX_INTERVAL_CHANGES):
command = [self.bin_path]
command += self._config.to_cli_string().replace("=", " ").split()
LOGGER.debug(f"Perf Analyze command: {command}")
try:
process = Popen(command, start_new_session=True, stdout=PIPE, encoding="utf-8")
streamed_output = ""
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
streamed_output += output
print(output.rstrip())
self._output += streamed_output
result = process.poll()
if result != 0:
raise CalledProcessError(returncode=result, cmd=command, output=streamed_output)
return
except CalledProcessError as e:
if self._failed_with_measurement_interval(e.output):
if self._config["measurement-mode"] is None or self._config["measurement-mode"] == "count_windows":
self._increase_request_count()
else:
self._increase_time_interval()
else:
raise PerfAnalyzerException(
f"Running perf_analyzer with {e.cmd} failed with" f" exit status {e.returncode} : {e.output}"
)
raise PerfAnalyzerException(f"Ran perf_analyzer {MAX_INTERVAL_CHANGES} times, but no valid requests recorded.")
def output(self):
"""
Returns
-------
The stdout output of the
last perf_analyzer run
"""
if self._output:
return self._output
raise PerfAnalyzerException("Attempted to get perf_analyzer output" "without calling run first.")
def _failed_with_measurement_interval(self, output: str):
return (
output.find("Failed to obtain stable measurement") != -1
or output.find("Please use a larger time window") != -1
)
def _increase_request_count(self):
current = self._config["measurement-request-count"] or 0  # may still be unset (None)
self._config["measurement-request-count"] = current + COUNT_INTERVAL_DELTA
LOGGER.debug(
"perf_analyzer's measurement request count is too small, "
f"increased to {self._config['measurement-request-count']}."
)
def _increase_time_interval(self):
current = self._config["measurement-interval"] or 0  # may still be unset (None)
self._config["measurement-interval"] = current + TIME_INTERVAL_DELTA
LOGGER.debug(
"perf_analyzer's measurement window is too small, "
f"increased to {self._config['measurement-interval']} ms."
)
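# --- Usage sketch (illustrative; assumes the perf_analyzer binary is on PATH
# and a Triton server is reachable; the model name is hypothetical). ---
if __name__ == "__main__":
    from .perf_config import PerfAnalyzerConfig
    logging.basicConfig(level=logging.DEBUG)
    config = PerfAnalyzerConfig()
    config.update_config({
        "model-name": "my_model",
        "measurement-mode": "count_windows",
        "measurement-request-count": 50,   # starting point for the retry logic
    })
    analyzer = PerfAnalyzer(config)
    analyzer.run()        # retries with a larger window on unstable measurements
    print(analyzer.output())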
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/perf_analyzer/perf_analyzer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
class TrivialModel(nn.Module):
def __init__(self, config):
super().__init__()
self.bias = nn.Parameter(torch.zeros(1))
self.encoder_length = config.encoder_length
self.example_length = config.example_length
self.predict_steps = self.example_length - self.encoder_length
self.output_dim = len(config.get("quantiles", [""]))
def forward(self, batch):
t = next(t for t in batch.values() if t is not None)
bs = t.shape[0]
return torch.ones([bs, self.example_length - self.encoder_length, self.output_dim]).to(device=t.device) + self.bias
def predict(self, batch):
targets = batch["target"].clone()
prev_predictions = targets.roll(1, 1)
return prev_predictions[:, -self.predict_steps :, :]
# TODO: reenable usage of such functions
def test_with_last(self, batch):
bs = max([tensor.shape[0] if tensor is not None else 0 for tensor in batch.values()])
values = (
# TODO: this will become dysfunctional after removing "target_masked" from the dataset. See comment in data_utils.py
batch["target_masked"]
.clone()[:, -1, :]
.reshape((bs, 1, self.output_dim))
)
return torch.cat((self.example_length - self.encoder_length) * [values], dim=1)
def test_with_previous_window(self, batch):
targets = batch["target"].clone()
prev_predictions = targets.roll(self.predict_steps, 1)
return prev_predictions[:, -self.predict_steps :, :]
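# --- Shape-check sketch (illustrative; the config stub below is hypothetical
# and only mimics the attributes/get() accessor the model actually uses). ---
if __name__ == "__main__":
    class _Cfg:
        encoder_length = 168
        example_length = 192
        @staticmethod
        def get(key, default=None):
            return {"quantiles": [0.1, 0.5, 0.9]}.get(key, default)
    model = TrivialModel(_Cfg)
    batch = {"target": torch.randn(4, 192, 1)}
    print(model(batch).shape)          # constant forecast: [4, 24, 3]
    print(model.predict(batch).shape)  # previous-window persistence: [4, 24, 1]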
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/trivial_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
import pandas as pd
import pynvml
import numpy as np
import xgboost as xgb
import os
import glob
import dask_cudf
from distributed_utils import create_client
class TSPPXGBoost():
def __init__(self, config):
self.config = config
self.models = []
def fit(self, train, label, valid, valid_label, **kwargs):
X = xgb.DeviceQuantileDMatrix(cudf.from_pandas(train), label=cudf.from_pandas(label))
V = xgb.DMatrix(cudf.from_pandas(valid), label=cudf.from_pandas(valid_label))
model = xgb.train(params=self.config,
dtrain=X,
num_boost_round=self.config.n_rounds,
evals=[(X, 'train'), (V, 'valid')],
early_stopping_rounds=kwargs.get('patience', 5),
verbose_eval=kwargs.get("log_interval", 25),
)
self.models.append(model)
def predict(self, test, i):
model = self.models[i]
X = xgb.DMatrix(cudf.from_pandas(test))
return model.predict(X)
def save(self, path):
os.makedirs(os.path.join(path, 'checkpoints'), exist_ok=True)
for i in range(len(self.models)):
model = self.models[i]
model.save_model(os.path.join(path, f'checkpoints/xgb_{i+1}.model'))
def load(self, path):
self.models = []
for i in range(self.config.example_length - self.config.encoder_length):
p = os.path.join(path, f'checkpoints/xgb_{i+1}.model')
model = xgb.Booster()
model.load_model(p)
self.models.append(model)
class TSPPDaskXGBoost():
def __init__(self, config):
self.config = config
self.models = []
self.client = create_client(config)
self.npartitions = self.config.cluster.npartitions
def fit(self, train, label, valid, valid_label, **kwargs):
X = xgb.dask.DaskDeviceQuantileDMatrix(self.client,
dask_cudf.from_cudf(cudf.from_pandas(train), npartitions=self.npartitions),
label=dask_cudf.from_cudf(cudf.from_pandas(label), npartitions=self.npartitions))
V = xgb.dask.DaskDMatrix(self.client,
dask_cudf.from_cudf(cudf.from_pandas(valid), npartitions=self.npartitions),
label=dask_cudf.from_cudf(cudf.from_pandas(valid_label), npartitions=self.npartitions))
model = xgb.dask.train(client=self.client,
params=self.config,
dtrain=X,
num_boost_round=self.config.n_rounds,
evals=[(X, 'train'), (V, 'valid')],
early_stopping_rounds=kwargs.get('patience', 5),
verbose_eval=kwargs.get("log_interval", 25),
)
self.models.append(model)
self.client.restart()
def predict(self, test, i):
test = test.reset_index(drop=True)
model = self.models[i]
test = dask_cudf.from_cudf(cudf.from_pandas(test), npartitions=self.npartitions)
test = xgb.dask.DaskDMatrix(self.client, test)
out = xgb.dask.predict(self.client, model, test)
return out.compute()
def save(self, path):
os.makedirs(os.path.join(path, 'checkpoints'), exist_ok=True)
for i in range(len(self.models)):
model = self.models[i]
model['booster'].save_model(os.path.join(path, f'checkpoints/xgb_{i+1}.model'))
def load(self, path):
self.models = []
for i in range(self.config.example_length - self.config.encoder_length):
p = os.path.join(path, f'checkpoints/xgb_{i+1}.model')
model = {'booster': xgb.dask.Booster()}
model['booster'].load_model(p)
self.models.append(model)
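# --- Usage sketch (illustrative). The TSPP trains one booster per forecast
# step, so fit() is called once per horizon offset; `make_step_label` below is
# a hypothetical helper returning the i-step-ahead target for each row. ---
def _fit_all_steps(model, train_df, valid_df, make_step_label, horizon):
    for i in range(horizon):
        model.fit(train_df, make_step_label(train_df, i),
                  valid_df, make_step_label(valid_df, i))
    # Predictions are then gathered step by step:
    # preds = [model.predict(test_df, i) for i in range(horizon)]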
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tspp_xgboost.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from apex.normalization.fused_layer_norm import FusedLayerNorm
from torch import Tensor
from models.tft_pyt.modeling import *
class LSTM(nn.Module):
"""
Implementation of the LSTM portion of https://arxiv.org/abs/1912.09363
"""
def __init__(self, config):
super().__init__()
self.encoder_steps = config.encoder_length # determines how far into the past the model looks
self.mask_nans = config.missing_data_strategy == "mask"
self.embedding = TFTEmbedding(config)
self.static_encoder = StaticCovariateEncoder(config)
self.history_vsn = VariableSelectionNetwork(config, config.num_historic_vars)
self.history_encoder = nn.LSTM(config.hidden_size, config.hidden_size, batch_first=True)
self.future_vsn = VariableSelectionNetwork(config, config.num_future_vars)
self.future_encoder = nn.LSTM(config.hidden_size, config.hidden_size, batch_first=True)
self.output_proj = nn.Linear(config.hidden_size, 1)
def forward(self, x: Tensor) -> Tensor:
s_inp, t_known_inp, t_observed_inp, t_observed_tgt = self.embedding(x)
# Static context
cs, ce, ch, cc = self.static_encoder(s_inp)
ch, cc = ch.unsqueeze(0), cc.unsqueeze(0) # lstm initial states
# Temporal input
_historical_inputs = [t_known_inp[:, : self.encoder_steps, :], t_observed_tgt[:, : self.encoder_steps, :]]
if t_observed_inp is not None:
_historical_inputs.insert(0, t_observed_inp[:, : self.encoder_steps, :])
historical_inputs = torch.cat(_historical_inputs, dim=-2)
future_inputs = t_known_inp[:, self.encoder_steps :]
# Encoders
historical_features, _ = self.history_vsn(historical_inputs, cs)
history, state = self.history_encoder(historical_features, (ch, cc))
future_features, _ = self.future_vsn(future_inputs, cs)
future, _ = self.future_encoder(future_features, state)
output = self.output_proj(future)
return output
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/lstm.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
import os
import pmdarima as pm
# import cuml
import numpy as np
from cuml.tsa.auto_arima import AutoARIMA as cuMLAutoArima
import pickle as pkl
class StatModel(ABC):
def __init__(self, config):
self.horizon = config.example_length - config.encoder_length
self.config = config
def fit(self, label, data):
return
def predict(self, data, i):
return
def save(self):
return
def load(self, path):
return
class AutoARIMA(StatModel):
def __init__(self, config):
super().__init__(config)
self.models = []
def fit(self, label, data):
self.model = pm.auto_arima(label, X=data)
self.models.append(self.model)
def predict(self, data, i):
model = self.models[i]
return model.predict(self.horizon, X=data)
def save(self):
with open('arima.pkl', 'wb') as f:
pkl.dump(self.models, f)
def load(self, path):
with open(os.path.join(path, 'arima.pkl'), 'rb') as f:
self.models = pkl.load(f)
class CUMLAutoARIMA(StatModel):
def __init__(self, config):
super().__init__(config)
self.models = []
def fit(self, label, data):
self.model = cuMLAutoArima(label.astype(np.float64))
self.model.search()
self.model.fit()
self.models.append(self.model)
def predict(self, data, i):
model = self.models[i]
return model.forecast(self.horizon).get()
def save(self):
with open('arima.pkl', 'wb') as f:
pkl.dump(self.models, f)
def load(self, path):
with open(os.path.join(path, 'arima.pkl'), 'rb') as f:
self.models = pkl.load(f)
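# --- Usage sketch (illustrative; the config stub and toy series are
# hypothetical). One AutoARIMA model is fitted per time series. ---
if __name__ == "__main__":
    from types import SimpleNamespace
    config = SimpleNamespace(example_length=192, encoder_length=168)
    model = AutoARIMA(config)                  # horizon = 192 - 168 = 24
    y = np.sin(np.arange(200) / 10.0)          # toy univariate series
    model.fit(y[:168], None)                   # no exogenous regressors
    forecast = model.predict(None, 0)          # 24-step-ahead forecast
    model.save()                               # writes arima.pkl in the CWD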
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/stat_models.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from data_utils import InputTypes, DataTypes, FeatureSpec
import datetime
class ElectricityConfig():
def __init__(self):
self.features = [
FeatureSpec('id', InputTypes.ID, DataTypes.CATEGORICAL),
FeatureSpec('hours_from_start', InputTypes.TIME, DataTypes.CONTINUOUS),
FeatureSpec('power_usage', InputTypes.TARGET, DataTypes.CONTINUOUS),
FeatureSpec('hour', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('day_of_week', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('hours_from_start', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('categorical_id', InputTypes.STATIC, DataTypes.CATEGORICAL),
]
# Dataset split boundaries
self.time_ids = 'days_from_start' # This column contains time indices across which we split the data
self.train_range = (1096, 1315)
self.valid_range = (1308, 1339)
self.test_range = (1332, 1346)
self.dataset_stride = 1 #how many timesteps between examples
self.scale_per_id = True
self.missing_id_strategy = None
self.missing_cat_data_strategy='encode_all'
# Feature sizes
self.static_categorical_inp_lens = [369]
self.temporal_known_categorical_inp_lens = []
self.temporal_observed_categorical_inp_lens = []
self.quantiles = [0.1, 0.5, 0.9]
self.example_length = 8 * 24
self.encoder_length = 7 * 24
self.n_head = 4
self.hidden_size = 128
self.dropout = 0.1
self.attn_dropout = 0.0
#### Derived variables ####
self.temporal_known_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.KNOWN and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_observed_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.OBSERVED and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_target_size = len([x for x in self.features if x.feature_type == InputTypes.TARGET])
self.static_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.STATIC and x.feature_embed_type == DataTypes.CONTINUOUS])
self.num_static_vars = self.static_continuous_inp_size + len(self.static_categorical_inp_lens)
self.num_future_vars = self.temporal_known_continuous_inp_size + len(self.temporal_known_categorical_inp_lens)
self.num_historic_vars = sum([self.num_future_vars,
self.temporal_observed_continuous_inp_size,
self.temporal_target_size,
len(self.temporal_observed_categorical_inp_lens),
])
class VolatilityConfig():
def __init__(self):
self.features = [
FeatureSpec('Symbol', InputTypes.ID, DataTypes.CATEGORICAL),
FeatureSpec('days_from_start', InputTypes.TIME, DataTypes.CONTINUOUS),
FeatureSpec('log_vol', InputTypes.TARGET, DataTypes.CONTINUOUS),
FeatureSpec('open_to_close', InputTypes.OBSERVED, DataTypes.CONTINUOUS),
FeatureSpec('days_from_start', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('day_of_week', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('day_of_month', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('week_of_year', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('month', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('Region', InputTypes.STATIC, DataTypes.CATEGORICAL),
]
# Dataset split boundaries
self.time_ids = 'date' # This column contains time indices across which we split the data
self.train_range = ('2000-01-01', '2016-01-01')
self.valid_range = ('2016-01-01', '2018-01-01')
self.test_range = ('2018-01-01', '2019-06-28')
self.dataset_stride = 1 #how many timesteps between examples
self.scale_per_id = False
self.missing_id_strategy = None
self.missing_cat_data_strategy='encode_all'
# Feature sizes
self.static_categorical_inp_lens = [4]
self.temporal_known_categorical_inp_lens = [7,31,53,12]
self.temporal_observed_categorical_inp_lens = []
self.quantiles = [0.1, 0.5, 0.9]
self.example_length = 257
self.encoder_length = 252
self.n_head = 4
self.hidden_size = 96
self.dropout = 0.4
self.attn_dropout = 0.0
#### Derived variables ####
self.temporal_known_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.KNOWN and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_observed_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.OBSERVED and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_target_size = len([x for x in self.features if x.feature_type == InputTypes.TARGET])
self.static_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.STATIC and x.feature_embed_type == DataTypes.CONTINUOUS])
self.num_static_vars = self.static_continuous_inp_size + len(self.static_categorical_inp_lens)
self.num_future_vars = self.temporal_known_continuous_inp_size + len(self.temporal_known_categorical_inp_lens)
self.num_historic_vars = sum([self.num_future_vars,
self.temporal_observed_continuous_inp_size,
self.temporal_target_size,
len(self.temporal_observed_categorical_inp_lens),
])
class TrafficConfig():
def __init__(self):
self.features = [
FeatureSpec('id', InputTypes.ID, DataTypes.CATEGORICAL),
FeatureSpec('hours_from_start', InputTypes.TIME, DataTypes.CONTINUOUS),
FeatureSpec('values', InputTypes.TARGET, DataTypes.CONTINUOUS),
FeatureSpec('time_on_day', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('day_of_week', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('hours_from_start', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('categorical_id', InputTypes.STATIC, DataTypes.CATEGORICAL),
]
# Dataset split boundaries
self.time_ids = 'sensor_day' # This column contains time indices across which we split the data
self.train_range = (0, 151)
self.valid_range = (144, 166)
self.test_range = (159, float('inf'))
self.dataset_stride = 1 #how many timesteps between examples
self.scale_per_id = False
self.missing_id_strategy = None
self.missing_cat_data_strategy='encode_all'
# Feature sizes
self.static_categorical_inp_lens = [963]
self.temporal_known_categorical_inp_lens = []
self.temporal_observed_categorical_inp_lens = []
self.quantiles = [0.1, 0.5, 0.9]
self.example_length = 8 * 24
self.encoder_length = 7 * 24
self.n_head = 4
self.hidden_size = 128
self.dropout = 0.3
self.attn_dropout = 0.0
#### Derived variables ####
self.temporal_known_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.KNOWN and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_observed_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.OBSERVED and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_target_size = len([x for x in self.features if x.feature_type == InputTypes.TARGET])
self.static_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.STATIC and x.feature_embed_type == DataTypes.CONTINUOUS])
self.num_static_vars = self.static_continuous_inp_size + len(self.static_categorical_inp_lens)
self.num_future_vars = self.temporal_known_continuous_inp_size + len(self.temporal_known_categorical_inp_lens)
self.num_historic_vars = sum([self.num_future_vars,
self.temporal_observed_continuous_inp_size,
self.temporal_target_size,
len(self.temporal_observed_categorical_inp_lens),
])
class FavoritaConfig():
def __init__(self):
self.features = [
FeatureSpec('traj_id', InputTypes.ID, DataTypes.CATEGORICAL),
#FeatureSpec('days_from_start', InputTypes.TIME, DataTypes.CONTINUOUS),
FeatureSpec('date', InputTypes.TIME, DataTypes.DATE),
FeatureSpec('log_sales', InputTypes.TARGET, DataTypes.CONTINUOUS),
# XXX for no apparent reason TF implementation doesn't scale day_of_month
# and month variables. We probably should set them to be categorical
FeatureSpec('day_of_month', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('month', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('onpromotion', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('day_of_week', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('national_hol', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('regional_hol', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('local_hol', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('open', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('transactions', InputTypes.OBSERVED, DataTypes.CONTINUOUS),
FeatureSpec('oil', InputTypes.OBSERVED, DataTypes.CONTINUOUS),
FeatureSpec('categorical_id', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('item_nbr', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('store_nbr', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('city', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('state', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('type', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('cluster', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('family', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('class', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('perishable', InputTypes.STATIC, DataTypes.CATEGORICAL)
]
# Dataset split boundaries
self.time_ids = 'date' # This column contains time indices across which we split the data
# When a relative split is set, it is necessary to provide a valid boundary.
# The valid split is shifted from the train split by the number of forecast steps into the future.
# The test split is shifted by the number of forecast steps from the valid split.
self.relative_split = True
self.valid_boundary = str(datetime.datetime(2015, 12, 1))
self.train_range = None
self.valid_range = None
self.test_range = None
self.dataset_stride = 1 #how many timesteps between examples
self.scale_per_id = True
self.missing_cat_data_strategy='encode_all'
self.missing_id_strategy = 'drop'
# Feature sizes
self.static_categorical_inp_lens = [90200, 3426, 53, 22, 16, 5, 17, 32, 313, 2]
self.temporal_known_categorical_inp_lens = [2, 7, 55, 5, 25]
self.temporal_observed_categorical_inp_lens = []
self.quantiles = [0.1, 0.5, 0.9]
self.example_length = 120
self.encoder_length = 90
self.n_head = 4
self.hidden_size = 240
self.dropout = 0.1
self.attn_dropout = 0.0
#### Derived variables ####
self.temporal_known_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.KNOWN and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_observed_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.OBSERVED and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_target_size = len([x for x in self.features if x.feature_type == InputTypes.TARGET])
self.static_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.STATIC and x.feature_embed_type == DataTypes.CONTINUOUS])
self.num_static_vars = self.static_continuous_inp_size + len(self.static_categorical_inp_lens)
self.num_future_vars = self.temporal_known_continuous_inp_size + len(self.temporal_known_categorical_inp_lens)
self.num_historic_vars = sum([self.num_future_vars,
self.temporal_observed_continuous_inp_size,
self.temporal_target_size,
len(self.temporal_observed_categorical_inp_lens),
])
CONFIGS = {'electricity': ElectricityConfig,
'volatility': VolatilityConfig,
'traffic': TrafficConfig,
'favorita': FavoritaConfig,
}
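# --- Usage sketch (illustrative): dataset configs are looked up by name and
# instantiated on demand; the derived sizes are computed in __init__. ---
if __name__ == "__main__":
    config = CONFIGS['electricity']()
    print(config.encoder_length, config.num_historic_vars, config.quantiles)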
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/configuration.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import math
import os
import pathlib
import re
import pynvml
pynvml.nvmlInit()
def systemGetDriverVersion():
return pynvml.nvmlSystemGetDriverVersion()
def deviceGetCount():
return pynvml.nvmlDeviceGetCount()
class device:
# assume nvml returns list of 64 bit ints
_nvml_affinity_elements = math.ceil(os.cpu_count() / 64)
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def getName(self):
return pynvml.nvmlDeviceGetName(self.handle)
def getCpuAffinity(self):
affinity_string = ''
for j in pynvml.nvmlDeviceGetCpuAffinity(
self.handle, device._nvml_affinity_elements
):
# assume nvml returns list of 64 bit ints
affinity_string = '{:064b}'.format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
ret = [i for i, e in enumerate(affinity_list) if e != 0]
return ret
def set_socket_affinity(gpu_id):
dev = device(gpu_id)
affinity = dev.getCpuAffinity()
os.sched_setaffinity(0, affinity)
def set_single_affinity(gpu_id):
dev = device(gpu_id)
affinity = dev.getCpuAffinity()
os.sched_setaffinity(0, affinity[:1])
def set_single_unique_affinity(gpu_id, nproc_per_node):
devices = [device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.getCpuAffinity() for dev in devices]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
affinities = []
assigned = []
for socket_affinity in socket_affinities:
for core in socket_affinity:
if core not in assigned:
affinities.append([core])
assigned.append(core)
break
os.sched_setaffinity(0, affinities[gpu_id])
def set_socket_unique_affinity(gpu_id, nproc_per_node, mode):
device_ids = [device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.getCpuAffinity() for dev in device_ids]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
socket_affinities_to_device_ids = collections.defaultdict(list)
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities_to_device_ids[tuple(socket_affinity)].append(idx)
for socket_affinity, device_ids in socket_affinities_to_device_ids.items():
devices_per_group = len(device_ids)
cores_per_device = len(socket_affinity) // devices_per_group
for group_id, device_id in enumerate(device_ids):
if device_id == gpu_id:
if mode == 'interleaved':
affinity = list(socket_affinity[group_id::devices_per_group])
elif mode == 'continuous':
affinity = list(socket_affinity[group_id*cores_per_device:(group_id+1)*cores_per_device])
else:
raise RuntimeError('Unknown set_socket_unique_affinity mode')
# reintroduce siblings
affinity += [siblings_dict[aff] for aff in affinity if aff in siblings_dict]
os.sched_setaffinity(0, affinity)
def get_thread_siblings_list():
path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list'
thread_siblings_list = []
pattern = re.compile(r'(\d+)\D(\d+)')
for fname in pathlib.Path(path[0]).glob(path[1:]):
with open(fname) as f:
content = f.read().strip()
res = pattern.findall(content)
if res:
pair = tuple(map(int, res[0]))
thread_siblings_list.append(pair)
return thread_siblings_list
def set_affinity(gpu_id, nproc_per_node, mode='socket'):
if mode == 'socket':
set_socket_affinity(gpu_id)
elif mode == 'single':
set_single_affinity(gpu_id)
elif mode == 'single_unique':
set_single_unique_affinity(gpu_id, nproc_per_node)
elif mode == 'socket_unique_interleaved':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'interleaved')
elif mode == 'socket_unique_continuous':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'continuous')
else:
raise RuntimeError('Unknown affinity mode')
affinity = os.sched_getaffinity(0)
return affinity
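# --- Usage sketch (illustrative): typically called once per worker, right
# after the local rank is known and before any heavy CPU work starts. ---
if __name__ == "__main__":
    nproc = deviceGetCount()
    affinity = set_affinity(0, nproc, mode='socket')  # pin to GPU 0's socket
    print(f'Process bound to {len(affinity)} CPU cores')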
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/gpu_affinity.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class QuantileLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.register_buffer('q', torch.tensor(config.quantiles))
def forward(self, predictions, targets):
diff = predictions - targets
ql = (1-self.q)*F.relu(diff) + self.q*F.relu(-diff)
losses = ql.view(-1, ql.shape[-1]).mean(0)
return losses
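# --- Worked example (illustrative). For quantile q the pinball loss is
# q*(y - y_hat) when under-predicting and (1-q)*(y_hat - y) when
# over-predicting; the two relu terms above implement exactly that, and the
# result is averaged separately per quantile. ---
if __name__ == "__main__":
    from types import SimpleNamespace
    loss_fn = QuantileLoss(SimpleNamespace(quantiles=[0.1, 0.5, 0.9]))
    predictions = torch.zeros(2, 4, 3)   # [batch, time, n_quantiles]
    targets = torch.ones(2, 4, 1)        # broadcast across the quantile dim
    # Every prediction under-shoots by 1, so the loss per quantile is q itself:
    print(loss_fn(predictions, targets)) # tensor([0.1000, 0.5000, 0.9000])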
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/criterions.py |
"""
Exponential Moving Average (EMA) of model updates
"""
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
class ModelEma(nn.Module):
""" Model Exponential Moving Average V2
Keep a moving average of everything in the model state_dict (parameters and buffers).
V2 of this module is simpler, it does not match params/buffers based on name but simply
iterates in order. It works with torchscript (JIT of full model).
"""
def __init__(self, model, decay=0.999, device=None):
super().__init__()
# make a copy of the model for accumulating moving average of weights
self.module = deepcopy(model)
self.module.eval()
self.decay = decay
self.device = device # perform ema on different device from model if set
if self.device is not None:
self.module.to(device=device)
def update(self, model):
update_fn=lambda ema_v, model_v: self.decay * ema_v + (1. - self.decay) * model_v
with torch.no_grad():
for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
if self.device is not None:
model_v = model_v.to(device=self.device)
ema_v.copy_(update_fn(ema_v, model_v))
def set(self, model):
with torch.no_grad():
for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
if self.device is not None:
model_v = model_v.to(device=self.device)
ema_v.copy_( model_v )
def forward(self, x):
return self.module(x)
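# --- Usage sketch (illustrative): the EMA copy shadows the online model and
# is updated after every optimizer step; evaluation uses the EMA weights. ---
if __name__ == "__main__":
    net = nn.Linear(4, 2)
    ema = ModelEma(net, decay=0.9)
    opt = torch.optim.SGD(net.parameters(), lr=0.1)
    for _ in range(3):  # toy training loop
        loss = net(torch.randn(8, 4)).pow(2).mean()
        opt.zero_grad()
        loss.backward()
        opt.step()
        ema.update(net)  # shadow weights track the live weights
    with torch.no_grad():
        print(ema(torch.randn(1, 4)))  # forward pass through the EMA copy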
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/ema.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import math
import pickle
import enum
import datetime
from collections import namedtuple, OrderedDict
import sklearn.preprocessing
from sklearn.impute import SimpleImputer
import pandas as pd
import numpy as np
from bisect import bisect
import torch
from torch.utils.data import Dataset,IterableDataset,DataLoader
class DataTypes(enum.IntEnum):
"""Defines numerical types of each column."""
CONTINUOUS = 0
CATEGORICAL = 1
DATE = 2
STR = 3
class InputTypes(enum.IntEnum):
"""Defines input types of each column."""
TARGET = 0
OBSERVED = 1
KNOWN = 2
STATIC = 3
ID = 4 # Single column used as an entity identifier
TIME = 5 # Single column exclusively used as a time index
FeatureSpec = namedtuple('FeatureSpec', ['name', 'feature_type', 'feature_embed_type'])
DTYPE_MAP = {
DataTypes.CONTINUOUS : np.float32,
DataTypes.CATEGORICAL : np.int64,
DataTypes.DATE:'datetime64[ns]',
DataTypes.STR: str
}
FEAT_ORDER = [
(InputTypes.STATIC, DataTypes.CATEGORICAL),
(InputTypes.STATIC, DataTypes.CONTINUOUS),
(InputTypes.KNOWN, DataTypes.CATEGORICAL),
(InputTypes.KNOWN, DataTypes.CONTINUOUS),
(InputTypes.OBSERVED, DataTypes.CATEGORICAL),
(InputTypes.OBSERVED, DataTypes.CONTINUOUS),
(InputTypes.TARGET, DataTypes.CONTINUOUS),
(InputTypes.ID, DataTypes.CATEGORICAL)
]
FEAT_NAMES = ['s_cat' , 's_cont' , 'k_cat' , 'k_cont' , 'o_cat' , 'o_cont' , 'target', 'id']
DEFAULT_ID_COL = 'id'
class TFTBinaryDataset(Dataset):
def __init__(self, path, config):
super().__init__()
self.features = [x for x in config.features if x.feature_embed_type != DataTypes.DATE]
self.example_length = config.example_length
self.stride = config.dataset_stride
self.grouped = pickle.load(open(path, 'rb'))
self.grouped = [x for x in self.grouped if x.shape[0] >= self.example_length]
self._cum_examples_in_group = np.cumsum([(g.shape[0] - self.example_length + 1)//self.stride for g in self.grouped])
self.feature_type_col_map = [[i for i,f in enumerate(self.features) if (f.feature_type, f.feature_embed_type) == x] for x in FEAT_ORDER]
# The list comprehension below is an elaborate way of rearranging data into correct order,
# simultaneously doing casting to proper types. Probably can be written neater
self.grouped = [
[
arr[:, idxs].view(dtype=np.float32).astype(DTYPE_MAP[t[1]])
for t, idxs in zip(FEAT_ORDER, self.feature_type_col_map)
]
for arr in self.grouped
]
def __len__(self):
return self._cum_examples_in_group[-1] if len(self._cum_examples_in_group) else 0
def __getitem__(self, idx):
g_idx = bisect(self._cum_examples_in_group, idx)
e_idx = idx - self._cum_examples_in_group[g_idx-1] if g_idx else idx
group = self.grouped[g_idx]
tensors = [
torch.from_numpy(feat[e_idx * self.stride:e_idx*self.stride + self.example_length])
if feat.size else torch.empty(0)
for feat in group
]
return OrderedDict(zip(FEAT_NAMES, tensors))
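# --- Usage sketch (illustrative, kept as comments because this is library
# code; 'processed/train.bin' is a hypothetical output path of preprocess()):
#   dataset = TFTBinaryDataset('processed/train.bin', config)
#   loader = DataLoader(dataset, batch_size=64, shuffle=True, num_workers=4)
#   batch = next(iter(loader))  # OrderedDict of tensors keyed by FEAT_NAMES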
class TFTDataset(Dataset):
def __init__(self, path, config):
super().__init__()
self.features = config.features
self.data = pd.read_csv(path, index_col=0)
self.example_length = config.example_length
self.stride = config.dataset_stride
# name field is a column name.
# there can be multiple entries with the same name because one column can be interpreted in many ways
time_col_name = next(x.name for x in self.features if x.feature_type==InputTypes.TIME)
id_col_name = next(x.name for x in self.features if x.feature_type==InputTypes.ID)
if not id_col_name in self.data.columns:
id_col_name = DEFAULT_ID_COL
self.features = [x for x in self.features if x.feature_type!=InputTypes.ID]
self.features.append(FeatureSpec(DEFAULT_ID_COL, InputTypes.ID, DataTypes.CATEGORICAL))
col_dtypes = {v.name:DTYPE_MAP[v.feature_embed_type] for v in self.features}
self.data.sort_values(time_col_name,inplace=True)
self.data = self.data[list(set(x.name for x in self.features))] # leave only relevant columns
self.data = self.data.astype(col_dtypes)
self.data = self.data.groupby(id_col_name).filter(lambda group: len(group) >= self.example_length)
self.grouped = list(self.data.groupby(id_col_name))
self._cum_examples_in_group = np.cumsum([(len(g[1]) - self.example_length + 1)//self.stride for g in self.grouped])
def __len__(self):
return self._cum_examples_in_group[-1]
def __getitem__(self, idx):
g_idx = len([x for x in self._cum_examples_in_group if x <= idx])
e_idx = idx - self._cum_examples_in_group[g_idx-1] if g_idx else idx
group = self.grouped[g_idx][1]
sliced = group.iloc[e_idx * self.stride:e_idx*self.stride + self.example_length]
# We need to be sure that tensors are returned in the correct order
tensors = tuple([] for _ in range(8))
for v in self.features:
if v.feature_type == InputTypes.STATIC and v.feature_embed_type == DataTypes.CATEGORICAL:
tensors[0].append(torch.from_numpy(sliced[v.name].to_numpy()))
elif v.feature_type == InputTypes.STATIC and v.feature_embed_type == DataTypes.CONTINUOUS:
tensors[1].append(torch.from_numpy(sliced[v.name].to_numpy()))
elif v.feature_type == InputTypes.KNOWN and v.feature_embed_type == DataTypes.CATEGORICAL:
tensors[2].append(torch.from_numpy(sliced[v.name].to_numpy()))
elif v.feature_type == InputTypes.KNOWN and v.feature_embed_type == DataTypes.CONTINUOUS:
tensors[3].append(torch.from_numpy(sliced[v.name].to_numpy()))
elif v.feature_type == InputTypes.OBSERVED and v.feature_embed_type == DataTypes.CATEGORICAL:
tensors[4].append(torch.from_numpy(sliced[v.name].to_numpy()))
elif v.feature_type == InputTypes.OBSERVED and v.feature_embed_type == DataTypes.CONTINUOUS:
tensors[5].append(torch.from_numpy(sliced[v.name].to_numpy()))
elif v.feature_type == InputTypes.TARGET:
tensors[6].append(torch.from_numpy(sliced[v.name].to_numpy()))
elif v.feature_type == InputTypes.ID:
tensors[7].append(torch.from_numpy(sliced[v.name].to_numpy()))
tensors = [torch.stack(x, dim=-1) if x else torch.empty(0) for x in tensors]
return OrderedDict(zip(FEAT_NAMES, tensors))
def get_dataset_splits(df, config):
if hasattr(config, 'relative_split') and config.relative_split:
forecast_len = config.example_length - config.encoder_length
# The valid split is shifted from the train split by the number of forecast steps into the future.
# The test split is shifted by the number of forecast steps from the valid split.
train = []
valid = []
test = []
for _, group in df.groupby(DEFAULT_ID_COL):
index = group[config.time_ids]
_train = group.loc[index < config.valid_boundary]
_valid = group.iloc[(len(_train) - config.encoder_length):(len(_train) + forecast_len)]
_test = group.iloc[(len(_train) - config.encoder_length + forecast_len):(len(_train) + 2*forecast_len)]
train.append(_train)
valid.append(_valid)
test.append(_test)
train = pd.concat(train, axis=0)
valid = pd.concat(valid, axis=0)
test = pd.concat(test, axis=0)
else:
index = df[config.time_ids]
train = df.loc[(index >= config.train_range[0]) & (index < config.train_range[1])]
valid = df.loc[(index >= config.valid_range[0]) & (index < config.valid_range[1])]
test = df.loc[(index >= config.test_range[0]) & (index < config.test_range[1])]
return train, valid, test
def flatten_ids(df, config):
if config.missing_id_strategy == 'drop':
if hasattr(config, 'combine_ids') and config.combine_ids:
index = np.logical_or.reduce([df[c].isna() for c in config.combine_ids])
else:
id_col = next(x.name for x in config.features if x.feature_type == InputTypes.ID)
index = df[id_col].isna()
index = index[index].index # Extract indices of NaNs
df.drop(index, inplace=True)
if not (hasattr(config, 'combine_ids') and config.combine_ids):
id_col = next(x.name for x in config.features if x.feature_type == InputTypes.ID)
ids = df[id_col].apply(str)
df.drop(id_col, axis=1, inplace=True)
encoder = sklearn.preprocessing.LabelEncoder().fit(ids.values)
df[DEFAULT_ID_COL] = encoder.transform(ids)
encoders = OrderedDict({id_col: encoder})
else:
encoders = {c:sklearn.preprocessing.LabelEncoder().fit(df[c].values) for c in config.combine_ids}
encoders = OrderedDict(encoders)
lens = [len(v.classes_) for v in encoders.values()]
clens = np.roll(np.cumprod(lens), 1)
clens[0] = 1
# this is very slow. Probably it would be better to create 2 dummy columns
df[DEFAULT_ID_COL] = df.apply(lambda row: sum([encoders[c].transform([row[c]])[0]*clens[i] for i,c in enumerate(encoders.keys())]), axis=1)
df.drop(config.combine_ids, axis=1, inplace=True)
return DEFAULT_ID_COL, encoders
def impute(df, config):
#XXX This ensures that our scaling will have the same mean. We still need to check the variance
if not hasattr(config, 'missing_data_label'):
return df, None
else:
imp = SimpleImputer(missing_values=config.missing_data_label, strategy='mean')
mask = df.applymap(lambda x: True if x == config.missing_data_label else False)
data = df.values
col_mask = (data == config.missing_data_label).all(axis=0)
data[:,~col_mask] = imp.fit_transform(data)
return data, mask
def normalize_reals(train, valid, test, config, id_col=DEFAULT_ID_COL):
tgt_cols = [x.name for x in config.features if x.feature_type == InputTypes.TARGET]
real_cols = list(set(v.name for v in config.features if v.feature_embed_type == DataTypes.CONTINUOUS).difference(set(tgt_cols)))
real_scalers = {}
tgt_scalers = {}
def apply_scalers(df, name=None):
if name is None:
name = df.name
mask = df.applymap(lambda x: True if x == config.missing_data_label else False) if hasattr(config, 'missing_data_label') else None
df[real_cols] = real_scalers[name].transform(df[real_cols])
if mask is not None and mask.any().any():
df[real_cols] = df[real_cols].mask(mask, 10**9)
df[tgt_cols] = tgt_scalers[name].transform(df[tgt_cols])
return df
if config.scale_per_id:
for identifier, sliced in train.groupby(id_col):
data = sliced[real_cols]
data, _ = impute(data, config)
real_scalers[identifier] = sklearn.preprocessing.StandardScaler().fit(data)
# XXX We should probably remove examples that contain NaN as a target
target = sliced[tgt_cols]
tgt_scalers[identifier] = sklearn.preprocessing.StandardScaler().fit(target)
train = train.groupby(id_col).apply(apply_scalers)
# For valid and testing leave only timeseries previously present in train subset
# XXX for proper data science we should consider encoding unseen timeseries as a special case, not throwing them away
valid = valid.loc[valid[id_col].isin(real_scalers.keys())]
valid = valid.groupby(id_col).apply(apply_scalers)
test = test.loc[test[id_col].isin(real_scalers.keys())]
test = test.groupby(id_col).apply(apply_scalers)
else:
data, _ = impute(train[real_cols], config)
real_scalers[''] = sklearn.preprocessing.StandardScaler().fit(data)
tgt_scalers[''] = sklearn.preprocessing.StandardScaler().fit(train[tgt_cols])
train = apply_scalers(train, name='')
valid = apply_scalers(valid, name='')
test = apply_scalers(test, name='')
return train, valid, test, real_scalers, tgt_scalers
def encode_categoricals(train, valid, test, config):
cat_encodings = {}
cat_cols = list(set(v.name for v in config.features if v.feature_embed_type == DataTypes.CATEGORICAL and v.feature_type != InputTypes.ID))
num_classes = [] #XXX Maybe we should modify config based on this value? Or send a warning?
# For Tensor Core performance we might want num_classes[i] to be divisible by 8
# Train categorical encoders
for c in cat_cols:
if config.missing_cat_data_strategy == 'special_token':
#XXX this will probably require some data augmentation
unique = train[c].unique()
valid[c].loc[~valid[c].isin(unique)] = '<UNK>'
test[c].loc[~test[c].isin(unique)] = '<UNK>'
if config.missing_cat_data_strategy == 'encode_all' or \
config.missing_cat_data_strategy == 'special_token':
srs = pd.concat([train[c], valid[c], test[c]]).apply(str)
cat_encodings[c] = sklearn.preprocessing.LabelEncoder().fit(srs.values)
elif config.missing_cat_data_strategy == 'drop':
# TODO: implement this. In addition to dropping rows this has to split specific time series in chunks
# to prevent data from having temporal gaps
pass
num_classes.append(srs.nunique())
print('Categorical variables encodings lens: ', num_classes)
for split in [train, valid, test]:
for c in cat_cols:
srs = split[c].apply(str)
split[c] = srs
split.loc[:,c] = cat_encodings[c].transform(srs)
return cat_encodings
def preprocess(src_path, dst_path, config):
df = pd.read_csv(src_path, index_col=0)
for c in config.features:
if c.feature_embed_type == DataTypes.DATE:
df[c.name] = pd.to_datetime(df[c.name])
# Leave only columns relevant to preprocessing
relevant_columns = list(set([f.name for f in config.features] + [config.time_ids]))
df = df[relevant_columns]
id_col, id_encoders = flatten_ids(df, config)
df = df.reindex(sorted(df.columns), axis=1)
train, valid, test = get_dataset_splits(df, config)
# Length filter the data (all timeseries shorter than example len will be dropped)
#for df in [train, valid, test]:
# df.groupby(id_col).filter(lambda x: len(x) >= config.example_length)
train = pd.concat([x[1] for x in train.groupby(id_col) if len(x[1]) >= config.example_length])
valid = pd.concat([x[1] for x in valid.groupby(id_col) if len(x[1]) >= config.example_length])
test = pd.concat([x[1] for x in test.groupby(id_col) if len(x[1]) >= config.example_length])
train, valid, test, real_scalers, tgt_scalers = normalize_reals(train, valid, test, config, id_col)
cat_encodings = encode_categoricals(train, valid, test, config)
os.makedirs(dst_path, exist_ok=True)
train.to_csv(os.path.join(dst_path, 'train.csv'))
valid.to_csv(os.path.join(dst_path, 'valid.csv'))
test.to_csv(os.path.join(dst_path, 'test.csv'))
# Save relevant columns in binary form for faster dataloading
# IMPORTANT: We always expect id to be a single column identifying the complete timeseries.
# We also expect a copy of id in the form of a static categorical input!!!
col_names = [id_col] + [x.name for x in config.features if x.feature_embed_type != DataTypes.DATE and x.feature_type != InputTypes.ID]
grouped_train = [x[1][col_names].values.astype(np.float32).view(dtype=np.int32) for x in train.groupby(id_col)]
grouped_valid = [x[1][col_names].values.astype(np.float32).view(dtype=np.int32) for x in valid.groupby(id_col)]
grouped_test = [x[1][col_names].values.astype(np.float32).view(dtype=np.int32) for x in test.groupby(id_col)]
pickle.dump(grouped_train, open(os.path.join(dst_path, 'train.bin'), 'wb'))
pickle.dump(grouped_valid, open(os.path.join(dst_path, 'valid.bin'), 'wb'))
pickle.dump(grouped_test, open(os.path.join(dst_path, 'test.bin'), 'wb'))
with open(os.path.join(dst_path, 'real_scalers.bin'), 'wb') as f:
pickle.dump(real_scalers, f)
with open(os.path.join(dst_path, 'tgt_scalers.bin'), 'wb') as f:
pickle.dump(tgt_scalers, f)
with open(os.path.join(dst_path, 'cat_encodings.bin'), 'wb') as f:
pickle.dump(cat_encodings, f)
with open(os.path.join(dst_path, 'id_encoders.bin'), 'wb') as f:
pickle.dump(id_encoders, f)
def sample_data(dataset, num_samples):
if num_samples < 0:
return dataset
else:
return torch.utils.data.Subset(dataset, np.random.choice(np.arange(len(dataset)), size=num_samples, replace=False))
def standarize_electricity(path):
"""Code taken from https://github.com/google-research/google-research/blob/master/tft/script_download_data.py"""
df = pd.read_csv(os.path.join(path, 'LD2011_2014.txt'), index_col=0, sep=';', decimal=',')
df.index = pd.to_datetime(df.index)
df.sort_index(inplace=True)
# Used to determine the start and end dates of a series
output = df.resample('1h').mean().replace(0., np.nan)
earliest_time = output.index.min()
df_list = []
for label in output:
print('Processing {}'.format(label))
srs = output[label]
start_date = min(srs.fillna(method='ffill').dropna().index)
end_date = max(srs.fillna(method='bfill').dropna().index)
active_range = (srs.index >= start_date) & (srs.index <= end_date)
srs = srs[active_range].fillna(0.)
tmp = pd.DataFrame({'power_usage': srs})
date = tmp.index
tmp['t'] = (date - earliest_time).seconds / 60 / 60 + (
date - earliest_time).days * 24
tmp['days_from_start'] = (date - earliest_time).days
tmp['categorical_id'] = label
tmp['date'] = date
tmp['id'] = label
tmp['hour'] = date.hour
tmp['day'] = date.day
tmp['day_of_week'] = date.dayofweek
tmp['month'] = date.month
df_list.append(tmp)
output = pd.concat(df_list, axis=0, join='outer').reset_index(drop=True)
output['categorical_id'] = output['id'].copy()
output['hours_from_start'] = output['t']
output['categorical_day_of_week'] = output['day_of_week'].copy()
output['categorical_hour'] = output['hour'].copy()
output.to_csv(os.path.join(path, 'standarized.csv'))
def standarize_volatility(path):
df = pd.read_csv(os.path.join(path, 'oxfordmanrealizedvolatilityindices.csv'), index_col=0) # no explicit index
# Adds additional date/day fields
    idx = [str(s).split('+')[0] for s in df.index]  # ignore timezones, we don't need them
dates = pd.to_datetime(idx)
df['date'] = dates
    df['days_from_start'] = (dates - pd.Timestamp(2000, 1, 3)).days
    df['day_of_week'] = dates.dayofweek
    df['day_of_month'] = dates.day
    df['week_of_year'] = dates.isocalendar().week.values  # DatetimeIndex.weekofyear was removed from pandas
df['month'] = dates.month
df['year'] = dates.year
df['categorical_id'] = df['Symbol'].copy()
# Processes log volatility
vol = df['rv5_ss'].copy()
vol.loc[vol == 0.] = np.nan
df['log_vol'] = np.log(vol)
# Adds static information
symbol_region_mapping = {
'.AEX': 'EMEA',
'.AORD': 'APAC',
'.BFX': 'EMEA',
'.BSESN': 'APAC',
'.BVLG': 'EMEA',
'.BVSP': 'AMER',
'.DJI': 'AMER',
'.FCHI': 'EMEA',
'.FTMIB': 'EMEA',
'.FTSE': 'EMEA',
'.GDAXI': 'EMEA',
'.GSPTSE': 'AMER',
'.HSI': 'APAC',
'.IBEX': 'EMEA',
'.IXIC': 'AMER',
'.KS11': 'APAC',
'.KSE': 'APAC',
'.MXX': 'AMER',
        '.N225': 'APAC',
'.NSEI': 'APAC',
'.OMXC20': 'EMEA',
'.OMXHPI': 'EMEA',
'.OMXSPI': 'EMEA',
'.OSEAX': 'EMEA',
'.RUT': 'EMEA',
'.SMSI': 'EMEA',
'.SPX': 'AMER',
'.SSEC': 'APAC',
'.SSMI': 'EMEA',
'.STI': 'APAC',
'.STOXX50E': 'EMEA'
}
df['Region'] = df['Symbol'].apply(lambda k: symbol_region_mapping[k])
# Performs final processing
output_df_list = []
for grp in df.groupby('Symbol'):
sliced = grp[1].copy()
sliced.sort_values('days_from_start', inplace=True)
# Impute log volatility values
sliced['log_vol'].fillna(method='ffill', inplace=True)
        sliced = sliced.dropna()  # the original bare dropna() discarded its result (a no-op)
output_df_list.append(sliced)
df = pd.concat(output_df_list, axis=0)
df.to_csv(os.path.join(path, 'standarized.csv'))
def standarize_traffic(path):
def process_list(s, variable_type=int, delimiter=None):
"""Parses a line in the PEMS format to a list."""
if delimiter is None:
l = [
variable_type(i) for i in s.replace('[', '').replace(']', '').split()
]
else:
l = [
variable_type(i)
for i in s.replace('[', '').replace(']', '').split(delimiter)
]
return l
def read_single_list(filename):
"""Returns single list from a file in the PEMS-custom format."""
with open(os.path.join(path, filename), 'r') as dat:
l = process_list(dat.readlines()[0])
return l
def read_matrix(filename):
"""Returns a matrix from a file in the PEMS-custom format."""
array_list = []
with open(os.path.join(path, filename), 'r') as dat:
lines = dat.readlines()
for i, line in enumerate(lines):
if (i + 1) % 50 == 0:
print('Completed {} of {} rows for {}'.format(i + 1, len(lines),
filename))
array = [
process_list(row_split, variable_type=float, delimiter=None)
for row_split in process_list(
line, variable_type=str, delimiter=';')
]
array_list.append(array)
return array_list
shuffle_order = np.array(read_single_list('randperm')) - 1 # index from 0
train_dayofweek = read_single_list('PEMS_trainlabels')
train_tensor = read_matrix('PEMS_train')
test_dayofweek = read_single_list('PEMS_testlabels')
test_tensor = read_matrix('PEMS_test')
    # Invert the shuffle-order permutation
print('Shuffling')
inverse_mapping = {
new_location: previous_location
for previous_location, new_location in enumerate(shuffle_order)
}
reverse_shuffle_order = np.array([
inverse_mapping[new_location]
for new_location, _ in enumerate(shuffle_order)
])
    # Group and reorder based on permutation matrix
    print('Reordering')
day_of_week = np.array(train_dayofweek + test_dayofweek)
combined_tensor = np.array(train_tensor + test_tensor)
day_of_week = day_of_week[reverse_shuffle_order]
combined_tensor = combined_tensor[reverse_shuffle_order]
# Put everything back into a dataframe
print('Parsing as dataframe')
labels = ['traj_{}'.format(i) for i in read_single_list('stations_list')]
hourly_list = []
for day, day_matrix in enumerate(combined_tensor):
# Hourly data
hourly = pd.DataFrame(day_matrix.T, columns=labels)
        hourly['hour_on_day'] = [int(i / 6) for i in hourly.index]  # sampled at 10 min intervals
if hourly['hour_on_day'].max() > 23 or hourly['hour_on_day'].min() < 0:
raise ValueError('Invalid hour! {}-{}'.format(
hourly['hour_on_day'].min(), hourly['hour_on_day'].max()))
hourly = hourly.groupby('hour_on_day', as_index=True).mean()[labels]
hourly['sensor_day'] = day
hourly['time_on_day'] = hourly.index
hourly['day_of_week'] = day_of_week[day]
hourly_list.append(hourly)
hourly_frame = pd.concat(hourly_list, axis=0, ignore_index=True, sort=False)
    # Flatten such that each entity uses one row in the dataframe
store_columns = [c for c in hourly_frame.columns if 'traj' in c]
other_columns = [c for c in hourly_frame.columns if 'traj' not in c]
flat_df = pd.DataFrame(columns=['values', 'prev_values', 'next_values'] +
other_columns + ['id'])
for store in store_columns:
print('Processing {}'.format(store))
sliced = hourly_frame[[store] + other_columns].copy()
sliced.columns = ['values'] + other_columns
sliced['id'] = int(store.replace('traj_', ''))
# Sort by Sensor-date-time
key = sliced['id'].apply(str) \
+ sliced['sensor_day'].apply(lambda x: '_{:03d}'.format(x)) \
+ sliced['time_on_day'].apply(lambda x: '_{:03d}'.format(x))
sliced = sliced.set_index(key).sort_index()
sliced['values'] = sliced['values'].fillna(method='ffill')
sliced['prev_values'] = sliced['values'].shift(1)
sliced['next_values'] = sliced['values'].shift(-1)
        flat_df = pd.concat([flat_df, sliced.dropna()], ignore_index=True, sort=False)  # DataFrame.append was removed in pandas 2.0
# Filter to match range used by other academic papers
index = flat_df['sensor_day']
flat_df = flat_df[index < 173].copy()
    # Create columns for categorical inputs
flat_df['categorical_id'] = flat_df['id'].copy()
flat_df['hours_from_start'] = flat_df['time_on_day'] \
+ flat_df['sensor_day']*24.
flat_df['categorical_day_of_week'] = flat_df['day_of_week'].copy()
flat_df['categorical_time_on_day'] = flat_df['time_on_day'].copy()
flat_df.to_csv(os.path.join(path, 'standarized.csv'))
# XXX needs rework
def standarize_favorita(data_folder):
import gc
# Extract only a subset of data to save/process for efficiency
    start_date = pd.Timestamp(2015, 1, 1)  # pd.datetime was removed from pandas; Timestamp is the replacement
    end_date = pd.Timestamp(2016, 6, 1)
print('Regenerating data...')
# load temporal data
temporal = pd.read_csv(os.path.join(data_folder, 'train.csv'), index_col=0)
store_info = pd.read_csv(os.path.join(data_folder, 'stores.csv'), index_col=0)
oil = pd.read_csv(
os.path.join(data_folder, 'oil.csv'), index_col=0).iloc[:, 0]
holidays = pd.read_csv(os.path.join(data_folder, 'holidays_events.csv'))
items = pd.read_csv(os.path.join(data_folder, 'items.csv'), index_col=0)
transactions = pd.read_csv(os.path.join(data_folder, 'transactions.csv'))
    # Restrict to the selected date range
temporal['date'] = pd.to_datetime(temporal['date'])
# Filter dates to reduce storage space requirements
if start_date is not None:
temporal = temporal[(temporal['date'] >= start_date)]
if end_date is not None:
temporal = temporal[(temporal['date'] < end_date)]
dates = temporal['date'].unique()
# Add trajectory identifier
temporal['traj_id'] = temporal['store_nbr'].apply(
str) + '_' + temporal['item_nbr'].apply(str)
temporal['unique_id'] = temporal['traj_id'] + '_' + temporal['date'].apply(
str)
# Remove all IDs with negative returns
print('Removing returns data')
min_returns = temporal['unit_sales'].groupby(temporal['traj_id']).min()
valid_ids = set(min_returns[min_returns >= 0].index)
selector = temporal['traj_id'].apply(lambda traj_id: traj_id in valid_ids)
new_temporal = temporal[selector].copy()
del temporal
gc.collect()
temporal = new_temporal
temporal['open'] = 1
# Resampling
print('Resampling to regular grid')
resampled_dfs = []
for traj_id, raw_sub_df in temporal.groupby('traj_id'):
print('Resampling', traj_id)
sub_df = raw_sub_df.set_index('date', drop=True).copy()
sub_df = sub_df.resample('1d').last()
sub_df['date'] = sub_df.index
sub_df[['store_nbr', 'item_nbr', 'onpromotion']] \
= sub_df[['store_nbr', 'item_nbr', 'onpromotion']].fillna(method='ffill')
sub_df['open'] = sub_df['open'].fillna(
0) # flag where sales data is unknown
sub_df['log_sales'] = np.log(sub_df['unit_sales'])
resampled_dfs.append(sub_df.reset_index(drop=True))
new_temporal = pd.concat(resampled_dfs, axis=0)
del temporal
gc.collect()
temporal = new_temporal
print('Adding oil')
oil.name = 'oil'
oil.index = pd.to_datetime(oil.index)
    #XXX the lines below match the oil value on a given date with the rest of the timeseries:
    # missing values in the oil series are copied from the preceding index, then the oil series
    # is joined with temporal. Some dates present in temporal are absent from oil; for those the
    # oil value is substituted with -1. WHY?!
    #TODO: check how many NaNs remain after the first step. Previously the oil series was extended
    # by the dates present in the `dates` variable with NaN values, which were then forward-filled.
    # This behavior is no longer supported by pandas, so we switched to the DataFrame.isin method.
    # This leaves more NaNs after the first step than before. To recover the previous behavior
    # we have to join the series before filling NaNs.
temporal = temporal.join(
#oil.loc[oil.index.isin(dates)].fillna(method='ffill'), on='date', how='left')
oil.loc[oil.index.isin(dates)], on='date', how='left')
temporal['oil'] = temporal['oil'].fillna(method='ffill')
temporal['oil'] = temporal['oil'].fillna(-1)
print('Adding store info')
temporal = temporal.join(store_info, on='store_nbr', how='left')
print('Adding item info')
temporal = temporal.join(items, on='item_nbr', how='left')
transactions['date'] = pd.to_datetime(transactions['date'])
temporal = temporal.merge(
transactions,
left_on=['date', 'store_nbr'],
right_on=['date', 'store_nbr'],
how='left')
temporal['transactions'] = temporal['transactions'].fillna(-1)
# Additional date info
temporal['day_of_week'] = pd.to_datetime(temporal['date'].values).dayofweek
temporal['day_of_month'] = pd.to_datetime(temporal['date'].values).day
temporal['month'] = pd.to_datetime(temporal['date'].values).month
# Add holiday info
print('Adding holidays')
holiday_subset = holidays[holidays['transferred'].apply(
lambda x: not x)].copy()
holiday_subset.columns = [
s if s != 'type' else 'holiday_type' for s in holiday_subset.columns
]
holiday_subset['date'] = pd.to_datetime(holiday_subset['date'])
local_holidays = holiday_subset[holiday_subset['locale'] == 'Local']
regional_holidays = holiday_subset[holiday_subset['locale'] == 'Regional']
national_holidays = holiday_subset[holiday_subset['locale'] == 'National']
temporal['national_hol'] = temporal.merge(
national_holidays, left_on=['date'], right_on=['date'],
how='left')['description'].fillna('')
temporal['regional_hol'] = temporal.merge(
regional_holidays,
left_on=['state', 'date'],
right_on=['locale_name', 'date'],
how='left')['description'].fillna('')
temporal['local_hol'] = temporal.merge(
local_holidays,
left_on=['city', 'date'],
right_on=['locale_name', 'date'],
how='left')['description'].fillna('')
temporal.sort_values('unique_id', inplace=True)
# Transform date to integer index
start_date = pd.to_datetime(min(temporal['date']))
dates = temporal['date'].apply(pd.to_datetime)
temporal['days_from_start'] = (dates - start_date).dt.days
temporal['categorical_id'] = temporal['traj_id'].copy()
print('Saving processed file to {}'.format(os.path.join(data_folder, 'standarized.csv')))
temporal.to_csv(os.path.join(data_folder, 'standarized.csv'))
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/data_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
class PerformanceMeter():
def __init__(self):
self.reset()
def reset(self):
self.avg = 0
self.count = 0
self.total_time = 0
self.last_update_time = time.time()
self.intervals = []
def update(self, n, exclude_from_total=False):
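        # Measure the time since the last lap, fold it into the running totals (unless excluded,
        # e.g. for warm-up steps), and return this lap's throughput in items/s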
delta = time.time() - self.last_update_time
self.intervals.append(delta)
if not exclude_from_total:
self.total_time += delta
self.count += n
self.avg = self.count / self.total_time
self.last_update_time = time.time()
return n/delta
def reset_current_lap(self):
self.last_update_time = time.time()
def p(self, i):
assert i <= 100
        idx = min(int(len(self.intervals) * i / 100), len(self.intervals) - 1)  # clamp so p(100) does not index out of range
return sorted(self.intervals)[idx]
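# A minimal usage sketch (illustrative; `loader` and `process` are hypothetical stand-ins):
#     meter = PerformanceMeter()
#     for batch in loader:
#         process(batch)
#         ips = meter.update(len(batch))  # items/s for this lap
#     print(meter.avg, meter.p(90))       # average throughput and p90 per-lap latency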
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import time
import os
import pickle
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torch.utils.data import DataLoader, DistributedSampler, RandomSampler
from apex import amp
from apex.optimizers import FusedAdam
#from torch.nn.parallel import DistributedDataParallel as DDP
from apex.parallel import DistributedDataParallel as DDP
import numpy as np
import dllogger
from modeling import TemporalFusionTransformer
from configuration import CONFIGS
from data_utils import TFTBinaryDataset, sample_data
from log_helper import setup_logger
from criterions import QuantileLoss
from inference import predict
from utils import PerformanceMeter
import gpu_affinity
from ema import ModelEma
def load_dataset(args, config):
train_split = TFTBinaryDataset(os.path.join(args.data_path, 'train.bin'), config)
train_split = sample_data(train_split, args.sample_data[0])
if args.distributed_world_size > 1:
data_sampler = DistributedSampler(train_split, args.distributed_world_size, args.distributed_rank, seed=args.seed + args.distributed_rank, drop_last=True)
else:
data_sampler = RandomSampler(train_split)
train_loader = DataLoader(train_split, batch_size=args.batch_size, num_workers=4, sampler=data_sampler, pin_memory=True)
valid_split = TFTBinaryDataset(os.path.join(args.data_path, 'valid.bin'), config)
valid_split = sample_data(valid_split, args.sample_data[1])
if args.distributed_world_size > 1:
data_sampler = DistributedSampler(valid_split, args.distributed_world_size, args.distributed_rank, shuffle=False, drop_last=False)
else:
data_sampler = None
valid_loader = DataLoader(valid_split, batch_size=args.batch_size, sampler=data_sampler, num_workers=4, pin_memory=True)
test_split = TFTBinaryDataset(os.path.join(args.data_path, 'test.bin'), config)
if args.distributed_world_size > 1:
data_sampler = DistributedSampler(test_split, args.distributed_world_size, args.distributed_rank, shuffle=False, drop_last=False)
else:
data_sampler = None
test_loader = DataLoader(test_split, batch_size=args.batch_size, sampler=data_sampler, num_workers=4, pin_memory=True)
print_once(f'Train split length: {len(train_split)}')
print_once(f'Valid split length: {len(valid_split)}')
print_once(f'Test split length: {len(test_split)}')
return train_loader, valid_loader, test_loader
def print_once(*args, **kwargs):
if not dist.is_initialized() or dist.get_rank() == 0:
print(*args, **kwargs)
def main(args):
### INIT DISTRIBUTED
args.distributed_world_size = int(os.environ.get('WORLD_SIZE', 1))
args.local_rank = int(os.environ.get('LOCAL_RANK', 0))
if args.distributed_world_size > 1:
dist.init_process_group(backend='nccl', init_method='env://')
print_once(f'Distributed training with {args.distributed_world_size} GPUs')
args.distributed_rank = dist.get_rank()
torch.cuda.set_device(args.local_rank)
torch.cuda.synchronize()
    nproc_per_node = torch.cuda.device_count()
if args.affinity != 'disabled':
affinity = gpu_affinity.set_affinity(
args.local_rank,
nproc_per_node,
args.affinity
)
print(f'{args.local_rank}: thread affinity: {affinity}')
    # Enable cuDNN autotuner
    torch.backends.cudnn.benchmark = True
if args.seed:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
setup_logger(args)
config = CONFIGS[args.dataset]()
if args.overwrite_config:
config.__dict__.update(json.loads(args.overwrite_config))
dllogger.log(step='HPARAMS', data={**vars(args), **vars(config)}, verbosity=1)
model = TemporalFusionTransformer(config).cuda()
if args.ema_decay:
model_ema = ModelEma(model, decay=args.ema_decay)
print_once('Model params: {}'.format(sum(p.numel() for p in model.parameters())))
criterion = QuantileLoss(config).cuda()
optimizer = FusedAdam(model.parameters(), lr=args.lr)
if args.use_amp:
model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale="dynamic")
if args.distributed_world_size > 1:
#model = DDP(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
model = DDP(model)
train_loader, valid_loader, test_loader = load_dataset(args, config)
global_step = 0
perf_meter = PerformanceMeter()
for epoch in range(args.epochs):
start = time.time()
dllogger.log(step=global_step, data={'epoch': epoch}, verbosity=1)
model.train()
for local_step, batch in enumerate(train_loader):
perf_meter.reset_current_lap()
batch = {key: tensor.cuda() if tensor.numel() else None for key, tensor in batch.items()}
predictions = model(batch)
targets = batch['target'][:,config.encoder_length:,:]
p_losses = criterion(predictions, targets)
loss = p_losses.sum()
if args.use_amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
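            # Step the optimizer every iteration when accumulation is disabled (0);
            # otherwise only every args.grad_accumulation iterations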
if not args.grad_accumulation or (global_step+1) % args.grad_accumulation == 0:
if args.clip_grad:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
optimizer.step()
optimizer.zero_grad()
if args.ema_decay:
model_ema.update(model)
if args.distributed_world_size > 1:
dist.all_reduce(p_losses)
p_losses /= args.distributed_world_size
loss = p_losses.sum()
torch.cuda.synchronize()
ips = perf_meter.update(args.batch_size * args.distributed_world_size,
exclude_from_total=local_step in [0, len(train_loader)-1])
log_dict = {'P10':p_losses[0].item(), 'P50':p_losses[1].item(), 'P90':p_losses[2].item(), 'loss': loss.item(), 'items/s':ips}
dllogger.log(step=global_step, data=log_dict, verbosity=1)
global_step += 1
validate(args, config, model_ema if args.ema_decay else model, criterion, valid_loader, global_step)
if validate.early_stop_c >= args.early_stopping:
print_once('Early stopping')
break
### TEST PHASE ###
state_dict = torch.load(os.path.join(args.results, 'checkpoint.pt'), map_location='cpu')
if isinstance(model, DDP):
model.module.load_state_dict(state_dict['model'])
else:
model.load_state_dict(state_dict['model'])
model.cuda().eval()
tgt_scalers = pickle.load(open(os.path.join(args.data_path, 'tgt_scalers.bin'), 'rb'))
cat_encodings = pickle.load(open(os.path.join(args.data_path,'cat_encodings.bin'), 'rb'))
unscaled_predictions, unscaled_targets, _, _ = predict(args, config, model, test_loader, tgt_scalers, cat_encodings)
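    # Normalized quantile risk (q-risk) as in the TFT paper: 2 * quantile loss
    # divided by the mean absolute value of the targets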
losses = QuantileLoss(config)(unscaled_predictions, unscaled_targets)
normalizer = unscaled_targets.abs().mean()
quantiles = 2 * losses / normalizer
if args.distributed_world_size > 1:
quantiles = quantiles.cuda()
dist.all_reduce(quantiles)
quantiles /= args.distributed_world_size
quantiles = {'test_p10': quantiles[0].item(), 'test_p50': quantiles[1].item(), 'test_p90': quantiles[2].item(), 'sum':sum(quantiles).item()}
finish_log = {**quantiles, 'average_ips':perf_meter.avg, 'convergence_step':validate.conv_step}
dllogger.log(step=(), data=finish_log, verbosity=1)
def validate(args, config, model, criterion, dataloader, global_step):
if not hasattr(validate, 'best_valid_loss'):
validate.best_valid_loss = float('inf')
if not hasattr(validate, 'early_stop_c'):
validate.early_stop_c = 0
model.eval()
losses = []
validation_start = time.time()
for batch in dataloader:
with torch.no_grad():
batch = {key: tensor.cuda() if tensor.numel() else None for key, tensor in batch.items()}
predictions = model(batch)
targets = batch['target'][:,config.encoder_length:,:]
p_losses = criterion(predictions, targets)
bs = next(t for t in batch.values() if t is not None).shape[0]
losses.append((p_losses, bs))
validation_end = time.time()
    p_losses = sum([l[0]*l[1] for l in losses])/sum([l[1] for l in losses]) # takes into account that the last batch may not be full
if args.distributed_world_size > 1:
dist.all_reduce(p_losses)
p_losses = p_losses/args.distributed_world_size
ips = len(dataloader.dataset) / (validation_end - validation_start)
log_dict = {'P10':p_losses[0].item(), 'P50':p_losses[1].item(), 'P90':p_losses[2].item(), 'loss': p_losses.sum().item(), 'items/s':ips}
if log_dict['loss'] < validate.best_valid_loss:
validate.best_valid_loss = log_dict['loss']
validate.early_stop_c = 0
validate.conv_step = global_step
if not dist.is_initialized() or dist.get_rank() == 0:
state_dict = model.module.state_dict() if isinstance(model, (DDP, ModelEma)) else model.state_dict()
ckpt = {'args':args, 'config':config, 'model':state_dict}
torch.save(ckpt, os.path.join(args.results, 'checkpoint.pt'))
if args.distributed_world_size > 1:
dist.barrier()
else:
validate.early_stop_c += 1
log_dict = {'val_'+k:v for k,v in log_dict.items()}
dllogger.log(step=global_step, data=log_dict, verbosity=1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, required=True,
help='Path to the dataset')
parser.add_argument('--dataset', type=str, required=True, choices=CONFIGS.keys(),
help='Dataset name')
parser.add_argument('--epochs', type=int, default=25,
help='Default number of training epochs')
parser.add_argument('--sample_data', type=lambda x: int(float(x)), nargs=2, default=[-1, -1],
help="""Subsample the dataset. Specify number of training and valid examples.
Values can be provided in scientific notation. Floats will be truncated.""")
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--use_amp', action='store_true', help='Enable automatic mixed precision')
parser.add_argument('--clip_grad', type=float, default=0.0)
parser.add_argument('--grad_accumulation', type=int, default=0)
parser.add_argument('--early_stopping', type=int, default=1000,
help='Stop training if validation loss does not improve for more than this number of epochs.')
parser.add_argument('--results', type=str, default='/results',
help='Directory in which results are stored')
parser.add_argument('--log_file', type=str, default='dllogger.json',
help='Name of dllogger output file')
parser.add_argument('--overwrite_config', type=str, default='',
help='JSON string used to overload config')
parser.add_argument('--affinity', type=str,
default='socket_unique_interleaved',
choices=['socket', 'single', 'single_unique',
'socket_unique_interleaved',
'socket_unique_continuous',
'disabled'],
help='type of CPU affinity')
parser.add_argument("--ema_decay", type=float, default=0.0, help='Use exponential moving average')
ARGS = parser.parse_args()
main(ARGS)
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/train.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import numpy as np
import pickle
import argparse
import torch
from torch.utils.data import DataLoader
from torch.cuda import amp
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from modeling import TemporalFusionTransformer
from configuration import ElectricityConfig
from data_utils import TFTDataset
from utils import PerformanceMeter
from criterions import QuantileLoss
import dllogger
from log_helper import setup_logger
def _unscale_per_id(config, values, ids, scalers):
values = values.cpu().numpy()
num_horizons = config.example_length - config.encoder_length + 1
flat_values = pd.DataFrame(
values,
columns=[f't{j}' for j in range(num_horizons - values.shape[1], num_horizons)]
)
flat_values['id'] = ids
df_list = []
for idx, group in flat_values.groupby('id'):
scaler = scalers[idx]
group_copy = group.copy()
for col in group_copy.columns:
if not 'id' in col:
_col = np.expand_dims(group_copy[col].values, -1)
_t_col = scaler.inverse_transform(_col)[:,-1]
group_copy[col] = _t_col
df_list.append(group_copy)
flat_values = pd.concat(df_list, axis=0)
flat_values = flat_values[[col for col in flat_values if not 'id' in col]]
flat_tensor = torch.from_numpy(flat_values.values)
return flat_tensor
def _unscale(config, values, scaler):
values = values.cpu().numpy()
num_horizons = config.example_length - config.encoder_length + 1
flat_values = pd.DataFrame(
values,
columns=[f't{j}' for j in range(num_horizons - values.shape[1], num_horizons)]
)
for col in flat_values.columns:
if not 'id' in col:
_col = np.expand_dims(flat_values[col].values, -1)
_t_col = scaler.inverse_transform(_col)[:,-1]
flat_values[col] = _t_col
flat_values = flat_values[[col for col in flat_values if not 'id' in col]]
flat_tensor = torch.from_numpy(flat_values.values)
return flat_tensor
def predict(args, config, model, data_loader, scalers, cat_encodings, extend_targets=False):
model.eval()
predictions = []
targets = []
ids = []
perf_meter = PerformanceMeter()
n_workers = args.distributed_world_size if hasattr(args, 'distributed_world_size') else 1
for step, batch in enumerate(data_loader):
perf_meter.reset_current_lap()
with torch.no_grad():
batch = {key: tensor.cuda() if tensor.numel() else None for key, tensor in batch.items()}
ids.append(batch['id'][:,0,:])
targets.append(batch['target'])
predictions.append(model(batch).float())
perf_meter.update(args.batch_size * n_workers,
exclude_from_total=step in [0, len(data_loader)-1])
targets = torch.cat(targets, dim=0)
if not extend_targets:
targets = targets[:,config.encoder_length:,:]
predictions = torch.cat(predictions, dim=0)
if config.scale_per_id:
ids = torch.cat(ids, dim=0).cpu().numpy()
unscaled_predictions = torch.stack(
[_unscale_per_id(config, predictions[:,:,i], ids, scalers) for i in range(len(config.quantiles))],
dim=-1)
unscaled_targets = _unscale_per_id(config, targets[:,:,0], ids, scalers).unsqueeze(-1)
else:
ids = None
unscaled_predictions = torch.stack(
[_unscale(config, predictions[:,:,i], scalers['']) for i in range(len(config.quantiles))],
dim=-1)
unscaled_targets = _unscale(config, targets[:,:,0], scalers['']).unsqueeze(-1)
return unscaled_predictions, unscaled_targets, ids, perf_meter
def visualize_v2(args, config, model, data_loader, scalers, cat_encodings):
unscaled_predictions, unscaled_targets, ids, _ = predict(args, config, model, data_loader, scalers, cat_encodings, extend_targets=True)
num_horizons = config.example_length - config.encoder_length + 1
pad = unscaled_predictions.new_full((unscaled_targets.shape[0], unscaled_targets.shape[1] - unscaled_predictions.shape[1], unscaled_predictions.shape[2]), fill_value=float('nan'))
pad[:,-1,:] = unscaled_targets[:,-num_horizons,:]
unscaled_predictions = torch.cat((pad, unscaled_predictions), dim=1)
ids = torch.from_numpy(ids.squeeze())
joint_graphs = torch.cat([unscaled_targets, unscaled_predictions], dim=2)
graphs = {i:joint_graphs[ids == i, :, :] for i in set(ids.tolist())}
for key, g in graphs.items():
for i, ex in enumerate(g):
df = pd.DataFrame(ex.numpy(),
index=range(num_horizons - ex.shape[0], num_horizons),
columns=['target'] + [f'P{int(q*100)}' for q in config.quantiles])
fig = df.plot().get_figure()
ax = fig.get_axes()[0]
_values = df.values[config.encoder_length-1:,:]
ax.fill_between(range(num_horizons), _values[:,1], _values[:,-1], alpha=0.2, color='green')
os.makedirs(os.path.join(args.results, 'single_example_vis', str(key)), exist_ok=True)
fig.savefig(os.path.join(args.results, 'single_example_vis', str(key), f'{i}.pdf'))
def inference(args, config, model, data_loader, scalers, cat_encodings):
unscaled_predictions, unscaled_targets, ids, perf_meter = predict(args, config, model, data_loader, scalers, cat_encodings)
if args.joint_visualization or args.save_predictions:
ids = torch.from_numpy(ids.squeeze())
#ids = torch.cat([x['id'][0] for x in data_loader.dataset])
joint_graphs = torch.cat([unscaled_targets, unscaled_predictions], dim=2)
graphs = {i:joint_graphs[ids == i, :, :] for i in set(ids.tolist())}
for key, g in graphs.items(): #timeseries id, joint targets and predictions
_g = {'targets': g[:,:,0]}
_g.update({f'P{int(q*100)}':g[:,:,i+1] for i, q in enumerate(config.quantiles)})
if args.joint_visualization:
summary_writer = SummaryWriter(log_dir=os.path.join(args.results, 'predictions_vis', str(key)))
for q, t in _g.items(): # target and quantiles, timehorizon values
if q == 'targets':
targets = torch.cat([t[:,0], t[-1,1:]]) # WIP
# We want to plot targets on the same graph as predictions. Probably could be written better.
for i, val in enumerate(targets):
summary_writer.add_scalars(str(key), {f'{q}':val}, i)
continue
# Tensor t contains different time horizons which are shifted in phase
# Next lines realign them
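                    # e.g. the prediction made at step i for horizon j refers to time i + j,
                    # so column j is shifted down by j rows and NaN pads the unaligned ends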
y = t.new_full((t.shape[0] + t.shape[1] -1, t.shape[1]), float('nan'))
for i in range(y.shape[1]):
y[i:i+t.shape[0], i] = t[:,i]
for i, vals in enumerate(y): # timestep, timehorizon values value
summary_writer.add_scalars(str(key), {f'{q}_t+{j+1}':v for j,v in enumerate(vals) if v == v}, i)
summary_writer.close()
if args.save_predictions:
for q, t in _g.items():
df = pd.DataFrame(t.tolist())
df.columns = [f't+{i+1}' for i in range(len(df.columns))]
os.makedirs(os.path.join(args.results, 'predictions', str(key)), exist_ok=True)
df.to_csv(os.path.join(args.results, 'predictions', str(key), q+'.csv'))
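    # q-risk as in the TFT paper: 2 * quantile loss normalized by the mean absolute target value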
losses = QuantileLoss(config)(unscaled_predictions, unscaled_targets)
normalizer = unscaled_targets.abs().mean()
q_risk = 2 * losses / normalizer
perf_dict = {
'throughput': perf_meter.avg,
'latency_avg': perf_meter.total_time/len(perf_meter.intervals),
'latency_p90': perf_meter.p(90),
'latency_p95': perf_meter.p(95),
'latency_p99': perf_meter.p(99),
        'total_inference_time': perf_meter.total_time,
}
return q_risk, perf_dict
def main(args):
setup_logger(args)
# Set up model
state_dict = torch.load(args.checkpoint)
config = state_dict['config']
model = TemporalFusionTransformer(config).cuda()
model.load_state_dict(state_dict['model'])
model.eval()
model.cuda()
# Set up dataset
test_split = TFTDataset(args.data, config)
data_loader = DataLoader(test_split, batch_size=args.batch_size, num_workers=4)
scalers = pickle.load(open(args.tgt_scalers, 'rb'))
cat_encodings = pickle.load(open(args.cat_encodings, 'rb'))
if args.visualize:
# TODO: abstract away all forms of visualization.
visualize_v2(args, config, model, data_loader, scalers, cat_encodings)
quantiles, perf_dict = inference(args, config, model, data_loader, scalers, cat_encodings)
quantiles = {'test_p10': quantiles[0].item(), 'test_p50': quantiles[1].item(), 'test_p90': quantiles[2].item(), 'sum':sum(quantiles).item()}
finish_log = {**quantiles, **perf_dict}
dllogger.log(step=(), data=finish_log, verbosity=1)
    print('Test q-risk: P10 {} | P50 {} | P90 {}'.format(quantiles['test_p10'], quantiles['test_p50'], quantiles['test_p90']))
print('Latency:\n\tAverage {:.3f}s\n\tp90 {:.3f}s\n\tp95 {:.3f}s\n\tp99 {:.3f}s'.format(
perf_dict['latency_avg'], perf_dict['latency_p90'], perf_dict['latency_p95'], perf_dict['latency_p99']))
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', type=str,
help='Path to the checkpoint')
parser.add_argument('--data', type=str,
help='Path to the test split of the dataset')
parser.add_argument('--tgt_scalers', type=str,
help='Path to the tgt_scalers.bin file produced by the preprocessing')
parser.add_argument('--cat_encodings', type=str,
help='Path to the cat_encodings.bin file produced by the preprocessing')
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--visualize', action='store_true', help='Visualize predictions - each example on the separate plot')
parser.add_argument('--joint_visualization', action='store_true', help='Visualize predictions - each timeseries on separate plot. Projections will be concatenated.')
parser.add_argument('--save_predictions', action='store_true')
parser.add_argument('--results', type=str, default='/results')
parser.add_argument('--log_file', type=str, default='dllogger.json')
ARGS = parser.parse_args()
main(ARGS)
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/inference.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import itertools
import atexit
import dllogger
from dllogger import Backend, JSONStreamBackend, StdOutBackend
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
class TensorBoardBackend(Backend):
def __init__(self, verbosity, log_dir):
super().__init__(verbosity=verbosity)
self.summary_writer = SummaryWriter(log_dir=os.path.join(log_dir, 'TB_summary'),
flush_secs=120,
max_queue=200
)
self.hp_cache = None
atexit.register(self.summary_writer.close)
@property
def log_level(self):
return self._log_level
def metadata(self, timestamp, elapsedtime, metric, metadata):
pass
def log(self, timestamp, elapsedtime, step, data):
if step == 'HPARAMS':
parameters = {k: v for k, v in data.items() if not isinstance(v, (list, tuple))}
#Unpack list and tuples
for d in [{k+f'_{i}':v for i,v in enumerate(l)} for k,l in data.items() if isinstance(l, (list, tuple))]:
parameters.update(d)
            #Remove custom classes (filter the unpacked parameters rather than the raw data)
            parameters = {k: v for k, v in parameters.items() if isinstance(v, (int, float, str, bool))}
parameters.update({k:'None' for k, v in data.items() if v is None})
self.hp_cache = parameters
if step == ():
if self.hp_cache is None:
print('Warning: Cannot save HParameters. Please log HParameters with step=\'HPARAMS\'', file=sys.stderr)
return
self.summary_writer.add_hparams(self.hp_cache, data)
if not isinstance(step, int):
return
for k, v in data.items():
self.summary_writer.add_scalar(k, v, step)
def flush(self):
pass
def setup_logger(args):
os.makedirs(args.results, exist_ok=True)
log_path = os.path.join(args.results, args.log_file)
if os.path.exists(log_path):
for i in itertools.count():
s_fname = args.log_file.split('.')
            fname = '.'.join(s_fname[:-1]) + f'_{i}.' + s_fname[-1] if len(s_fname) > 1 else args.log_file + f'.{i}'
log_path = os.path.join(args.results, fname)
if not os.path.exists(log_path):
break
def metric_format(metric, metadata, value):
return "{}: {}".format(metric, f'{value:.5f}' if isinstance(value, float) else value)
def step_format(step):
if step == ():
return "Finished |"
elif isinstance(step, int):
return "Step {0: <5} |".format(step)
return "Step {} |".format(step)
if not dist.is_initialized() or not args.distributed_world_size > 1 or args.distributed_rank == 0:
dllogger.init(backends=[JSONStreamBackend(verbosity=1, filename=log_path),
TensorBoardBackend(verbosity=1, log_dir=args.results),
StdOutBackend(verbosity=2,
step_format=step_format,
prefix_format=lambda x: "")#,
#metric_format=metric_format)
])
else:
dllogger.init(backends=[])
dllogger.log(step='PARAMETER', data=vars(args), verbosity=0)
container_setup_info = {**get_framework_env_vars(), **get_system_info()}
dllogger.log(step='ENVIRONMENT', data=container_setup_info, verbosity=0)
dllogger.metadata('loss', {'GOAL': 'MINIMIZE', 'STAGE': 'TRAIN', 'format': ':5f'})
dllogger.metadata('P10', {'GOAL': 'MINIMIZE', 'STAGE': 'TRAIN', 'format': ':5f'})
dllogger.metadata('P50', {'GOAL': 'MINIMIZE', 'STAGE': 'TRAIN', 'format': ':5f'})
dllogger.metadata('P90', {'GOAL': 'MINIMIZE', 'STAGE': 'TRAIN', 'format': ':5f'})
dllogger.metadata('items/s', {'GOAL': 'MAXIMIZE', 'STAGE': 'TRAIN', 'format': ':1f'})
dllogger.metadata('val_loss', {'GOAL': 'MINIMIZE', 'STAGE': 'VAL', 'format':':5f'})
dllogger.metadata('val_P10', {'GOAL': 'MINIMIZE', 'STAGE': 'VAL', 'format': ':5f'})
dllogger.metadata('val_P50', {'GOAL': 'MINIMIZE', 'STAGE': 'VAL', 'format': ':5f'})
dllogger.metadata('val_P90', {'GOAL': 'MINIMIZE', 'STAGE': 'VAL', 'format': ':5f'})
dllogger.metadata('val_items/s', {'GOAL': 'MAXIMIZE', 'STAGE': 'VAL', 'format': ':1f'})
dllogger.metadata('test_P10', {'GOAL': 'MINIMIZE', 'STAGE': 'TEST', 'format': ':5f'})
dllogger.metadata('test_P50', {'GOAL': 'MINIMIZE', 'STAGE': 'TEST', 'format': ':5f'})
dllogger.metadata('test_P90', {'GOAL': 'MINIMIZE', 'STAGE': 'TEST', 'format': ':5f'})
dllogger.metadata('throughput', {'GOAL': 'MAXIMIZE', 'STAGE': 'TEST', 'format': ':1f'})
    dllogger.metadata('latency_p90', {'GOAL': 'MINIMIZE', 'STAGE': 'TEST', 'format': ':5f'})
    dllogger.metadata('latency_p95', {'GOAL': 'MINIMIZE', 'STAGE': 'TEST', 'format': ':5f'})
    dllogger.metadata('latency_p99', {'GOAL': 'MINIMIZE', 'STAGE': 'TEST', 'format': ':5f'})
def get_framework_env_vars():
return {
'NVIDIA_PYTORCH_VERSION': os.environ.get('NVIDIA_PYTORCH_VERSION'),
'PYTORCH_VERSION': os.environ.get('PYTORCH_VERSION'),
'CUBLAS_VERSION': os.environ.get('CUBLAS_VERSION'),
'NCCL_VERSION': os.environ.get('NCCL_VERSION'),
'CUDA_DRIVER_VERSION': os.environ.get('CUDA_DRIVER_VERSION'),
'CUDNN_VERSION': os.environ.get('CUDNN_VERSION'),
'CUDA_VERSION': os.environ.get('CUDA_VERSION'),
'NVIDIA_PIPELINE_ID': os.environ.get('NVIDIA_PIPELINE_ID'),
'NVIDIA_BUILD_ID': os.environ.get('NVIDIA_BUILD_ID'),
'NVIDIA_TF32_OVERRIDE': os.environ.get('NVIDIA_TF32_OVERRIDE'),
}
def get_system_info():
system_info = subprocess.run('nvidia-smi --query-gpu=gpu_name,memory.total,enforced.power.limit --format=csv'.split(), capture_output=True).stdout
system_info = [i.decode('utf-8') for i in system_info.split(b'\n')]
system_info = [x for x in system_info if x]
return {'system_info': system_info}
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/log_helper.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from typing import Dict, Tuple, Optional, List
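# A non-empty TFT_SCRIPTING env var selects the native LayerNorm (presumably for
# TorchScript compatibility); otherwise apex's faster fused implementation is used.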
if os.environ.get("TFT_SCRIPTING", False):
from torch.nn import LayerNorm
else:
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
class MaybeLayerNorm(nn.Module):
def __init__(self, output_size, hidden_size, eps):
super().__init__()
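        # LayerNorm over a single feature is degenerate (the output collapses to the bias),
        # so an output size of 1 gets an identity instead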
if output_size and output_size == 1:
self.ln = nn.Identity()
else:
self.ln = LayerNorm(output_size if output_size else hidden_size, eps=eps)
def forward(self, x):
return self.ln(x)
class GLU(nn.Module):
def __init__(self, hidden_size, output_size):
super().__init__()
self.lin = nn.Linear(hidden_size, output_size * 2)
def forward(self, x: Tensor) -> Tensor:
x = self.lin(x)
x = F.glu(x)
return x
class GRN(nn.Module):
def __init__(self,
input_size,
hidden_size,
output_size=None,
context_hidden_size=None,
dropout=0):
super().__init__()
self.layer_norm = MaybeLayerNorm(output_size, hidden_size, eps=1e-3)
self.lin_a = nn.Linear(input_size, hidden_size)
if context_hidden_size is not None:
self.lin_c = nn.Linear(context_hidden_size, hidden_size, bias=False)
self.lin_i = nn.Linear(hidden_size, hidden_size)
self.glu = GLU(hidden_size, output_size if output_size else hidden_size)
self.dropout = nn.Dropout(dropout)
self.out_proj = nn.Linear(input_size, output_size) if output_size else None
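    # GRN forward pass: dense -> optional context add -> ELU -> dense -> dropout -> GLU gate,
    # followed by a residual connection (projected when sizes differ) and layer norm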
def forward(self, a: Tensor, c: Optional[Tensor] = None):
x = self.lin_a(a)
if c is not None:
x = x + self.lin_c(c).unsqueeze(1)
x = F.elu(x)
x = self.lin_i(x)
x = self.dropout(x)
x = self.glu(x)
y = a if not self.out_proj else self.out_proj(a)
x = x + y
x = self.layer_norm(x)
return x
class TFTEmbedding(nn.Module):
def __init__(self, config):
super().__init__()
self.s_cat_inp_lens = config.static_categorical_inp_lens
self.t_cat_k_inp_lens = config.temporal_known_categorical_inp_lens
self.t_cat_o_inp_lens = config.temporal_observed_categorical_inp_lens
self.s_cont_inp_size = config.static_continuous_inp_size
self.t_cont_k_inp_size = config.temporal_known_continuous_inp_size
self.t_cont_o_inp_size = config.temporal_observed_continuous_inp_size
self.t_tgt_size = config.temporal_target_size
self.hidden_size = config.hidden_size
# There are 7 types of input:
# 1. Static categorical
# 2. Static continuous
# 3. Temporal known a priori categorical
# 4. Temporal known a priori continuous
# 5. Temporal observed categorical
# 6. Temporal observed continuous
# 7. Temporal observed targets (time series obseved so far)
self.s_cat_embed = nn.ModuleList([
nn.Embedding(n, self.hidden_size) for n in self.s_cat_inp_lens]) if self.s_cat_inp_lens else None
self.t_cat_k_embed = nn.ModuleList([
nn.Embedding(n, self.hidden_size) for n in self.t_cat_k_inp_lens]) if self.t_cat_k_inp_lens else None
self.t_cat_o_embed = nn.ModuleList([
nn.Embedding(n, self.hidden_size) for n in self.t_cat_o_inp_lens]) if self.t_cat_o_inp_lens else None
self.s_cont_embedding_vectors = nn.Parameter(torch.Tensor(self.s_cont_inp_size, self.hidden_size)) if self.s_cont_inp_size else None
self.t_cont_k_embedding_vectors = nn.Parameter(torch.Tensor(self.t_cont_k_inp_size, self.hidden_size)) if self.t_cont_k_inp_size else None
self.t_cont_o_embedding_vectors = nn.Parameter(torch.Tensor(self.t_cont_o_inp_size, self.hidden_size)) if self.t_cont_o_inp_size else None
self.t_tgt_embedding_vectors = nn.Parameter(torch.Tensor(self.t_tgt_size, self.hidden_size))
self.s_cont_embedding_bias = nn.Parameter(torch.zeros(self.s_cont_inp_size, self.hidden_size)) if self.s_cont_inp_size else None
self.t_cont_k_embedding_bias = nn.Parameter(torch.zeros(self.t_cont_k_inp_size, self.hidden_size)) if self.t_cont_k_inp_size else None
self.t_cont_o_embedding_bias = nn.Parameter(torch.zeros(self.t_cont_o_inp_size, self.hidden_size)) if self.t_cont_o_inp_size else None
self.t_tgt_embedding_bias = nn.Parameter(torch.zeros(self.t_tgt_size, self.hidden_size))
if self.s_cont_embedding_vectors is not None:
torch.nn.init.xavier_normal_(self.s_cont_embedding_vectors)
if self.t_cont_k_embedding_vectors is not None:
torch.nn.init.xavier_normal_(self.t_cont_k_embedding_vectors)
if self.t_cont_o_embedding_vectors is not None:
torch.nn.init.xavier_normal_(self.t_cont_o_embedding_vectors)
torch.nn.init.xavier_normal_(self.t_tgt_embedding_vectors)
def _apply_embedding(self,
cat: Optional[Tensor],
cont: Optional[Tensor],
cat_emb: Optional[nn.ModuleList],
cont_emb: Tensor,
cont_bias: Tensor,
) -> Tuple[Optional[Tensor], Optional[Tensor]]:
e_cat = torch.stack([embed(cat[...,i]) for i, embed in enumerate(cat_emb)], dim=-2) if cat is not None else None
if cont is not None:
            #the line below is equivalent to the following einsums
#e_cont = torch.einsum('btf,fh->bthf', cont, cont_emb)
#e_cont = torch.einsum('bf,fh->bhf', cont, cont_emb)
e_cont = torch.mul(cont.unsqueeze(-1), cont_emb)
e_cont = e_cont + cont_bias
else:
e_cont = None
if e_cat is not None and e_cont is not None:
return torch.cat([e_cat, e_cont], dim=-2)
elif e_cat is not None:
return e_cat
elif e_cont is not None:
return e_cont
else:
return None
def forward(self, x: Dict[str, Tensor]):
# temporal/static categorical/continuous known/observed input
s_cat_inp = x.get('s_cat', None)
s_cont_inp = x.get('s_cont', None)
t_cat_k_inp = x.get('k_cat', None)
t_cont_k_inp = x.get('k_cont', None)
t_cat_o_inp = x.get('o_cat', None)
t_cont_o_inp = x.get('o_cont', None)
t_tgt_obs = x['target'] # Has to be present
# Static inputs are expected to be equal for all timesteps
# For memory efficiency there is no assert statement
s_cat_inp = s_cat_inp[:,0,:] if s_cat_inp is not None else None
s_cont_inp = s_cont_inp[:,0,:] if s_cont_inp is not None else None
s_inp = self._apply_embedding(s_cat_inp,
s_cont_inp,
self.s_cat_embed,
self.s_cont_embedding_vectors,
self.s_cont_embedding_bias)
t_known_inp = self._apply_embedding(t_cat_k_inp,
t_cont_k_inp,
self.t_cat_k_embed,
self.t_cont_k_embedding_vectors,
self.t_cont_k_embedding_bias)
t_observed_inp = self._apply_embedding(t_cat_o_inp,
t_cont_o_inp,
self.t_cat_o_embed,
self.t_cont_o_embedding_vectors,
self.t_cont_o_embedding_bias)
# Temporal observed targets
# t_observed_tgt = torch.einsum('btf,fh->btfh', t_tgt_obs, self.t_tgt_embedding_vectors)
t_observed_tgt = torch.matmul(t_tgt_obs.unsqueeze(3).unsqueeze(4), self.t_tgt_embedding_vectors.unsqueeze(1)).squeeze(3)
t_observed_tgt = t_observed_tgt + self.t_tgt_embedding_bias
return s_inp, t_known_inp, t_observed_inp, t_observed_tgt
class VariableSelectionNetwork(nn.Module):
def __init__(self, config, num_inputs):
super().__init__()
self.joint_grn = GRN(config.hidden_size*num_inputs, config.hidden_size, output_size=num_inputs, context_hidden_size=config.hidden_size)
self.var_grns = nn.ModuleList([GRN(config.hidden_size, config.hidden_size, dropout=config.dropout) for _ in range(num_inputs)])
def forward(self, x: Tensor, context: Optional[Tensor] = None):
Xi = x.reshape(*x.shape[:-2], -1)
grn_outputs = self.joint_grn(Xi, c=context)
sparse_weights = F.softmax(grn_outputs, dim=-1)
transformed_embed_list = [m(x[...,i,:]) for i, m in enumerate(self.var_grns)]
transformed_embed = torch.stack(transformed_embed_list, dim=-1)
#the line below performs batched matrix vector multiplication
#for temporal features it's bthf,btf->bth
#for static features it's bhf,bf->bh
variable_ctx = torch.matmul(transformed_embed, sparse_weights.unsqueeze(-1)).squeeze(-1)
return variable_ctx, sparse_weights
class StaticCovariateEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.vsn = VariableSelectionNetwork(config, config.num_static_vars)
self.context_grns = nn.ModuleList([GRN(config.hidden_size, config.hidden_size, dropout=config.dropout) for _ in range(4)])
def forward(self, x: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
variable_ctx, sparse_weights = self.vsn(x)
# Context vectors:
# variable selection context
# enrichment context
# state_c context
# state_h context
cs, ce, ch, cc = tuple(m(variable_ctx) for m in self.context_grns)
return cs, ce, ch, cc
class InterpretableMultiHeadAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.n_head = config.n_head
assert config.hidden_size % config.n_head == 0
self.d_head = config.hidden_size // config.n_head
self.qkv_linears = nn.Linear(config.hidden_size, (2 * self.n_head + 1) * self.d_head, bias=False)
self.out_proj = nn.Linear(self.d_head, config.hidden_size, bias=False)
self.attn_dropout = nn.Dropout(config.attn_dropout)
self.out_dropout = nn.Dropout(config.dropout)
self.scale = self.d_head**-0.5
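        # Strictly upper-triangular causal mask: position i receives -inf scores for all j > i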
self.register_buffer("_mask", torch.triu(torch.full((config.example_length, config.example_length), float('-inf')), 1).unsqueeze(0))
def forward(self, x: Tensor, mask_future_timesteps: bool = True) -> Tuple[Tensor, Tensor]:
bs, t, h_size = x.shape
qkv = self.qkv_linears(x)
q, k, v = qkv.split((self.n_head * self.d_head, self.n_head * self.d_head, self.d_head), dim=-1)
q = q.view(bs, t, self.n_head, self.d_head)
k = k.view(bs, t, self.n_head, self.d_head)
v = v.view(bs, t, self.d_head)
# attn_score = torch.einsum('bind,bjnd->bnij', q, k)
attn_score = torch.matmul(q.permute((0, 2, 1, 3)), k.permute((0, 2, 3, 1)))
attn_score.mul_(self.scale)
if mask_future_timesteps:
attn_score = attn_score + self._mask
attn_prob = F.softmax(attn_score, dim=3)
attn_prob = self.attn_dropout(attn_prob)
# attn_vec = torch.einsum('bnij,bjd->bnid', attn_prob, v)
attn_vec = torch.matmul(attn_prob, v.unsqueeze(1))
m_attn_vec = torch.mean(attn_vec, dim=1)
out = self.out_proj(m_attn_vec)
out = self.out_dropout(out)
return out, attn_vec
class TemporalFusionTransformer(nn.Module):
"""
Implementation of https://arxiv.org/abs/1912.09363
"""
def __init__(self, config):
super().__init__()
if hasattr(config, 'model'):
config = config.model
        self.encoder_length = config.encoder_length  # determines how far into the past we use data from
self.embedding = TFTEmbedding(config)
self.static_encoder = StaticCovariateEncoder(config)
self.history_vsn = VariableSelectionNetwork(config, config.num_historic_vars)
self.history_encoder = nn.LSTM(config.hidden_size, config.hidden_size, batch_first=True)
self.future_vsn = VariableSelectionNetwork(config, config.num_future_vars)
self.future_encoder = nn.LSTM(config.hidden_size, config.hidden_size, batch_first=True)
self.input_gate = GLU(config.hidden_size, config.hidden_size)
self.input_gate_ln = LayerNorm(config.hidden_size, eps=1e-3)
self.enrichment_grn = GRN(config.hidden_size,
config.hidden_size,
context_hidden_size=config.hidden_size,
dropout=config.dropout)
self.attention = InterpretableMultiHeadAttention(config)
self.attention_gate = GLU(config.hidden_size, config.hidden_size)
self.attention_ln = LayerNorm(config.hidden_size, eps=1e-3)
self.positionwise_grn = GRN(config.hidden_size,
config.hidden_size,
dropout=config.dropout)
self.decoder_gate = GLU(config.hidden_size, config.hidden_size)
self.decoder_ln = LayerNorm(config.hidden_size, eps=1e-3)
self.quantile_proj = nn.Linear(config.hidden_size, len(config.quantiles))
def forward(self, x: Dict[str, Tensor]) -> Tensor:
s_inp, t_known_inp, t_observed_inp, t_observed_tgt = self.embedding(x)
# Static context
cs, ce, ch, cc = self.static_encoder(s_inp)
ch, cc = ch.unsqueeze(0), cc.unsqueeze(0) #lstm initial states
# Temporal input
_historical_inputs = [t_known_inp[:,:self.encoder_length,:], t_observed_tgt[:,:self.encoder_length,:]]
if t_observed_inp is not None:
_historical_inputs.insert(0,t_observed_inp[:,:self.encoder_length,:])
historical_inputs = torch.cat(_historical_inputs, dim=-2)
future_inputs = t_known_inp[:, self.encoder_length:]
# Encoders
historical_features, _ = self.history_vsn(historical_inputs, cs)
history, state = self.history_encoder(historical_features, (ch, cc))
future_features, _ = self.future_vsn(future_inputs, cs)
future, _ = self.future_encoder(future_features, state)
torch.cuda.synchronize() # this call gives perf boost for unknown reasons
# skip connection
input_embedding = torch.cat([historical_features, future_features], dim=1)
temporal_features = torch.cat([history, future], dim=1)
temporal_features = self.input_gate(temporal_features)
temporal_features = temporal_features + input_embedding
temporal_features = self.input_gate_ln(temporal_features)
# Static enrichment
enriched = self.enrichment_grn(temporal_features, c=ce)
# Temporal self attention
x, _ = self.attention(enriched, mask_future_timesteps=True)
        # Don't compute historical quantiles
x = x[:, self.encoder_length:, :]
temporal_features = temporal_features[:, self.encoder_length:, :]
enriched = enriched[:, self.encoder_length:, :]
x = self.attention_gate(x)
x = x + enriched
x = self.attention_ln(x)
# Position-wise feed-forward
x = self.positionwise_grn(x)
# Final skip connection
x = self.decoder_gate(x)
x = x + temporal_features
x = self.decoder_ln(x)
out = self.quantile_proj(x)
return out
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/modeling.py |
import argparse
import logging
import os
from pathlib import Path
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1"
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file
from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file
DATALOADER_FN_NAME,
BaseLoader,
BaseSaver,
Format,
load_from_file,
)
from .deployment_toolkit.extensions import loaders, savers # noqa: E402 module level import not at top of file
LOGGER = logging.getLogger("export_model")
INPUT_MODEL_TYPES = [Format.TF_ESTIMATOR, Format.TF_KERAS, Format.PYT]
OUTPUT_MODEL_TYPES = [Format.TF_SAVEDMODEL, Format.TS_TRACE, Format.TS_SCRIPT, Format.ONNX]
def _get_args():
parser = argparse.ArgumentParser(
description="Script for exporting models from supported frameworks.", allow_abbrev=False
)
parser.add_argument("--input-path", help="Path to input python module", required=True)
parser.add_argument(
"--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True
)
parser.add_argument("--output-path", help="Path to output model file", required=True)
parser.add_argument(
"--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True
)
parser.add_argument("--dataloader", help="Path to python module containing data loader")
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument(
"--ignore-unknown-parameters",
help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)",
action="store_true",
default=False,
)
args, unparsed_args = parser.parse_known_args()
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
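    # PyTorch -> ONNX conversion is registered under a dedicated combined saver key,
    # presumably because the export path depends on the source framework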
if args.input_type == Format.PYT.value and args.output_type == Format.ONNX.value:
saver_type = f"{Format.PYT.value}--{Format.ONNX.value}"
else:
saver_type = args.output_type
Saver: BaseSaver = savers.get(saver_type)
ArgParserGenerator(Saver).update_argparser(parser)
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
if args.ignore_unknown_parameters:
args, unknown_args = parser.parse_known_args()
LOGGER.warning(f"Got additional args {unknown_args}")
else:
args = parser.parse_args()
return args
def main():
args = _get_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
dataloader_fn = None
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
Loader: BaseLoader = loaders.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
model = loader.load(args.input_path, dataloader_fn=dataloader_fn, output_type=args.output_type)
LOGGER.info("inputs: %s", model.inputs)
LOGGER.info("outputs: %s", model.outputs)
if args.input_type == Format.PYT.value and args.output_type == Format.ONNX.value:
saver_type = f"{Format.PYT.value}--{Format.ONNX.value}"
else:
saver_type = args.output_type
Saver: BaseSaver = savers.get(saver_type)
saver = ArgParserGenerator(Saver).from_args(args)
saver.save(model, args.output_path, dataloader_fn)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/export_model.py |
import os
import pandas as pd
import numpy as np
import pickle
import torch
from criterions import QuantileLoss
from triton.deployment_toolkit.core import BaseMetricsCalculator
def update_argparser(parser):
parser.add_argument("--dataset", type=str, help="Path to dataset to be used", required=True)
parser.add_argument("--checkpoint", type=str, help="Path to checkpoint to be used", required=True)
def _unscale_per_id(config, values, ids, scalers):
num_horizons = config.example_length - config.encoder_length + 1
flat_values = pd.DataFrame(
values,
columns=[f't{j}' for j in range(num_horizons - values.shape[1], num_horizons)]
)
flat_values['id'] = ids
df_list = []
for idx, group in flat_values.groupby('id'):
scaler = scalers[idx]
group_copy = group.copy()
for col in group_copy.columns:
            if 'id' not in col:
_col = np.expand_dims(group_copy[col].values, -1)
_t_col = scaler.inverse_transform(_col)[:,-1]
group_copy[col] = _t_col
df_list.append(group_copy)
flat_values = pd.concat(df_list, axis=0)
    flat_values = flat_values[[col for col in flat_values if 'id' not in col]]
flat_tensor = torch.from_numpy(flat_values.values)
return flat_tensor
def _unscale(config, values, scaler):
num_horizons = config.example_length - config.encoder_length + 1
flat_values = pd.DataFrame(
values,
columns=[f't{j}' for j in range(num_horizons - values.shape[1], num_horizons)]
)
for col in flat_values.columns:
        if 'id' not in col:
_col = np.expand_dims(flat_values[col].values, -1)
_t_col = scaler.inverse_transform(_col)[:,-1]
flat_values[col] = _t_col
    flat_values = flat_values[[col for col in flat_values if 'id' not in col]]
flat_tensor = torch.from_numpy(flat_values.values)
return flat_tensor
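# Shape contract shared by both helpers above (inferred from the call sites in `metrics`):
#   values:  array of shape (N, H) with scaled values, H = number of predicted horizons;
#   scalers: mapping from series id to a scikit-learn-style object exposing
#            inverse_transform (for _unscale, a single such scaler).
# Both return a torch tensor of shape (N, H) holding values in the original data scale.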
class MetricsCalculator(BaseMetricsCalculator):
def __init__(self, dataset, checkpoint):
state_dict = torch.load(os.path.join(checkpoint, "checkpoint.pt"))
self.config = state_dict['config']
self.predictions = []
self.targets = []
self.ids = []
        with open(os.path.join(dataset, 'tgt_scalers.bin'), 'rb') as f:
            self.scalers = pickle.load(f)
@property
def metrics(self):
        targets = np.concatenate(self.targets, axis=0)
        predictions = np.concatenate(self.predictions, axis=0)
ids = np.concatenate(self.ids, axis=0)
if self.config.scale_per_id:
unscaled_predictions = torch.stack(
[_unscale_per_id(self.config, predictions[:,:,i], ids, self.scalers) for i in range(len(self.config.quantiles))],
dim=-1)
unscaled_targets = _unscale_per_id(self.config, targets[:,:,0], ids, self.scalers).unsqueeze(-1)
else:
ids = None
unscaled_predictions = torch.stack(
[_unscale(self.config, predictions[:,:,i], self.scalers['']) for i in range(len(self.config.quantiles))],
dim=-1)
unscaled_targets = _unscale(self.config, targets[:,:,0], self.scalers['']).unsqueeze(-1)
losses = QuantileLoss(self.config)(unscaled_predictions, unscaled_targets)
normalizer = unscaled_targets.abs().mean()
q_risk = 2 * losses / normalizer
return {'test_p10': q_risk[0].cpu().numpy(), 'test_p50': q_risk[1].cpu().numpy(), 'test_p90': q_risk[2].cpu().numpy()}
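    # A sketch of the relation implemented above, assuming the default three quantiles
    # (0.1, 0.5, 0.9) so that q_risk holds one entry per quantile:
    #
    #   q_risk(q) = 2 * QL_q(unscaled_predictions, unscaled_targets) / mean(|unscaled_targets|)
    #
    # where QL_q is the quantile (pinball) loss computed by QuantileLoss.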
def update(
self,
ids,
y_pred,
x,
y_real,
):
        # Accumulate raw batches; metrics are computed lazily in the `metrics` property.
        self.predictions.append(y_pred["target__0"])
        self.targets.append(y_real["target__0"][:, :, 0][:, :, np.newaxis])
        self.ids.append(ids)
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/metrics.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Using the `calculate_metrics.py` script, you can obtain model accuracy/error metrics via a user-defined `MetricsCalculator` class.
Data provided to the `MetricsCalculator` are obtained from dump files
stored in the directory pointed to by the `--dump-dir` argument.
Those files are prepared by the `run_inference_on_fw.py` and `run_inference_on_triton.py` scripts.
Output data is stored in the csv file pointed to by the `--csv` argument.
Example call:
```shell script
python ./triton/calculate_metrics.py \
--dump-dir /results/dump_triton \
--csv /results/accuracy_results.csv \
--metrics metrics.py \
--metric-class-param1 value
```
"""
import argparse
import csv
import logging
import string
from pathlib import Path
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import BaseMetricsCalculator, load_from_file
from .deployment_toolkit.dump import JsonDumpReader
LOGGER = logging.getLogger("calculate_metrics")
TOTAL_COLUMN_NAME = "_total_"
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Run models with given dataloader", allow_abbrev=False)
parser.add_argument("--metrics", help="Path to python module containing metrics calculator", required=True)
parser.add_argument("--csv", help="Path to csv file", required=True)
parser.add_argument("--dump-dir", help="Path to directory with dumped outputs (and labels)", required=True)
args, *_ = parser.parse_known_args()
MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator")
ArgParserGenerator(MetricsCalculator).update_argparser(parser)
args = parser.parse_args()
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
metrics_calculator: BaseMetricsCalculator = ArgParserGenerator(MetricsCalculator).from_args(args)
reader = JsonDumpReader(args.dump_dir)
for ids, x, y_true, y_pred in reader.iterate_over(["ids", "inputs", "labels", "outputs"]):
ids = list(ids["ids"]) if ids is not None else None
metrics_calculator.update(ids=ids, x=x, y_pred=y_pred, y_real=y_true)
metrics = metrics_calculator.metrics
metric_names_with_space = [name for name in metrics if any([c in string.whitespace for c in name])]
if metric_names_with_space:
raise ValueError(f"Metric names shall have no spaces; Incorrect names: {', '.join(metric_names_with_space)}")
csv_path = Path(args.csv)
csv_path.parent.mkdir(parents=True, exist_ok=True)
with csv_path.open("w") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=list(metrics.keys()))
writer.writeheader()
writer.writerow(metrics)
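# The resulting CSV contains one header row of metric names and a single row of values,
# e.g. (metric names come from the user-supplied MetricsCalculator; numbers are illustrative):
#
#   test_p10,test_p50,test_p90
#   0.12,0.08,0.05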
if __name__ == "__main__":
main()
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/calculate_metrics.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To infer the model deployed on Triton, you can use the `run_inference_on_triton.py` script.
It sends requests with data obtained from the pointed data loader and dumps the received data into dump files.
Those files are stored in the directory pointed to by the `--output-dir` argument.
By default, the client communicates with the Triton server asynchronously over the GRPC protocol;
pass `--synchronous` to issue blocking requests instead.
Example call:
```shell script
python ./triton/run_inference_on_triton.py \
--server-url localhost:8001 \
--model-name ResNet50 \
--model-version 1 \
--dump-labels \
--output-dir /results/dump_triton
```
"""
import argparse
import functools
import logging
import queue
import threading
import time
import traceback
from pathlib import Path
from typing import Optional
from tqdm import tqdm
# pytype: disable=import-error
try:
from tritonclient import utils as client_utils # noqa: F401
from tritonclient.grpc import InferenceServerClient, InferInput, InferRequestedOutput
except ImportError:
from tritongrpcclient import InferenceServerClient, InferInput, InferRequestedOutput
# pytype: enable=import-error
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import DATALOADER_FN_NAME, load_from_file
from .deployment_toolkit.dump import JsonDumpWriter
LOGGER = logging.getLogger("run_inference_on_triton")
class SyncGRPCTritonRunner:
DEFAULT_MAX_RESP_WAIT_S = 120
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
dataloader,
verbose=False,
resp_wait_s: Optional[float] = None,
):
self._server_url = server_url
self._model_name = model_name
self._model_version = model_version
self._dataloader = dataloader
self._verbose = verbose
self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s
def __iter__(self):
client = InferenceServerClient(self._server_url, verbose=self._verbose)
error = self._verify_triton_state(client)
if error:
raise RuntimeError(f"Could not communicate to Triton Server: {error}")
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {model_config}")
LOGGER.info(f"Model metadata {model_metadata}")
inputs = {tm.name: tm for tm in model_metadata.inputs}
outputs = {tm.name: tm for tm in model_metadata.outputs}
output_names = list(outputs)
outputs_req = [InferRequestedOutput(name) for name in outputs]
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
infer_input = InferInput(name, data.shape, inputs[name].datatype)
target_np_dtype = client_utils.triton_to_np_dtype(inputs[name].datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
results = client.infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
client_timeout=self._response_wait_t,
)
y_pred = {name: results.as_numpy(name) for name in output_names}
yield ids, x, y_pred, y_real
def _verify_triton_state(self, triton_client):
if not triton_client.is_server_live():
return f"Triton server {self._server_url} is not live"
elif not triton_client.is_server_ready():
return f"Triton server {self._server_url} is not ready"
elif not triton_client.is_model_ready(self._model_name, self._model_version):
return f"Model {self._model_name}:{self._model_version} is not ready"
return None
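# Minimal usage sketch for the synchronous runner (names and URL are illustrative):
#
#   runner = SyncGRPCTritonRunner("localhost:8001", "TFT", "1", dataloader=dataloader_fn())
#   for ids, x, y_pred, y_real in runner:
#       ...  # one dataloader batch per blocking infer() call
#
# Iteration opens the GRPC client, verifies server/model readiness, and then sends one
# request per batch, yielding predictions together with inputs and ground truth.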
class AsyncGRPCTritonRunner:
DEFAULT_MAX_RESP_WAIT_S = 120
DEFAULT_MAX_UNRESP_REQS = 128
DEFAULT_MAX_FINISH_WAIT_S = 900 # 15min
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
dataloader,
verbose=False,
resp_wait_s: Optional[float] = None,
max_unresponded_reqs: Optional[int] = None,
):
self._server_url = server_url
self._model_name = model_name
self._model_version = model_version
self._dataloader = dataloader
self._verbose = verbose
self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s
self._max_unresp_reqs = self.DEFAULT_MAX_UNRESP_REQS if max_unresponded_reqs is None else max_unresponded_reqs
self._results = queue.Queue()
self._processed_all = False
self._errors = []
self._num_waiting_for = 0
self._sync = threading.Condition()
self._req_thread = threading.Thread(target=self.req_loop, daemon=True)
def __iter__(self):
self._req_thread.start()
timeout_s = 0.050 # check flags processed_all and error flags every 50ms
while True:
try:
ids, x, y_pred, y_real = self._results.get(timeout=timeout_s)
yield ids, x, y_pred, y_real
except queue.Empty:
shall_stop = self._processed_all or self._errors
if shall_stop:
break
LOGGER.debug("Waiting for request thread to stop")
self._req_thread.join()
if self._errors:
error_msg = "\n".join(map(str, self._errors))
raise RuntimeError(error_msg)
def _on_result(self, ids, x, y_real, output_names, result, error):
with self._sync:
request_id = str(ids[0])
NOT_MATCHING_REQUEST_ID_MSG = (
"Error during processing result - request_id doesn't match. This shouldn't have happened."
)
if error:
response_id = error.get_response().id
if response_id != request_id:
raise RuntimeError(NOT_MATCHING_REQUEST_ID_MSG)
self._errors.append(error)
else:
response_id = result.get_response().id
if response_id != request_id:
raise RuntimeError(NOT_MATCHING_REQUEST_ID_MSG)
y_pred = {name: result.as_numpy(name) for name in output_names}
self._results.put((ids, x, y_pred, y_real))
self._num_waiting_for -= 1
self._sync.notify_all()
def req_loop(self):
client = InferenceServerClient(self._server_url, verbose=self._verbose)
self._errors = self._verify_triton_state(client)
if self._errors:
return
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {model_config}")
LOGGER.info(f"Model metadata {model_metadata}")
inputs = {tm.name: tm for tm in model_metadata.inputs}
outputs = {tm.name: tm for tm in model_metadata.outputs}
output_names = list(outputs)
self._num_waiting_for = 0
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
infer_input = InferInput(name, data.shape, inputs[name].datatype)
target_np_dtype = client_utils.triton_to_np_dtype(inputs[name].datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
outputs_req = [InferRequestedOutput(name) for name in outputs]
with self._sync:
def _check_can_send():
return self._num_waiting_for < self._max_unresp_reqs
can_send = self._sync.wait_for(_check_can_send, timeout=self._response_wait_t)
if not can_send:
error_msg = f"Runner could not send new requests for {self._response_wait_t}s"
self._errors.append(error_msg)
self._sync.notify_all()
break
request_id = str(ids[0])
callback = functools.partial(AsyncGRPCTritonRunner._on_result, self, ids, x, y_real, output_names)
client.async_infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
callback=callback,
request_id=request_id,
)
self._num_waiting_for += 1
self._sync.notify_all()
# wait till receive all requested data
with self._sync:
def _all_processed():
LOGGER.debug(f"wait for {self._num_waiting_for} unprocessed jobs")
return self._num_waiting_for == 0
self._processed_all = self._sync.wait_for(_all_processed, self.DEFAULT_MAX_FINISH_WAIT_S)
if not self._processed_all:
error_msg = f"Runner {self._response_wait_t}s timeout received while waiting for results from server"
self._errors.append(error_msg)
self._sync.notify_all()
LOGGER.debug("Finished request thread")
def _verify_triton_state(self, triton_client):
errors = []
if not triton_client.is_server_live():
errors.append(f"Triton server {self._server_url} is not live")
elif not triton_client.is_server_ready():
errors.append(f"Triton server {self._server_url} is not ready")
elif not triton_client.is_model_ready(self._model_name, self._model_version):
errors.append(f"Model {self._model_name}:{self._model_version} is not ready")
return errors
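# Back-pressure note: the async runner keeps at most _max_unresp_reqs requests in flight.
# req_loop() blocks on the condition variable until _on_result() lowers the outstanding
# count, and records an error if no slot frees up within _response_wait_t seconds.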
def _parse_args():
parser = argparse.ArgumentParser(description="Infer model on Triton server", allow_abbrev=False)
parser.add_argument(
"--server-url", type=str, default="localhost:8001", help="Inference server URL (default localhost:8001)"
)
parser.add_argument("--model-name", help="The name of the model used for inference.", required=True)
parser.add_argument("--model-version", help="The version of the model used for inference.", required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=True)
parser.add_argument("--output-dir", required=True, help="Path to directory where outputs will be saved")
parser.add_argument(
"--response-wait-time", required=False, help="Maximal time to wait for response", default=120, type=float
)
parser.add_argument(
"--max-unresponded-requests",
required=False,
help="Maximal number of unresponded requests",
default=128,
type=int,
)
parser.add_argument(
"--synchronous", help="Enable synchronous calls to Triton Server", action="store_true", default=False
)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
args = parser.parse_args()
return args
def main():
args = _parse_args()
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
log_level = logging.INFO if not args.verbose else logging.DEBUG
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
try:
if args.synchronous:
runner = SyncGRPCTritonRunner(
args.server_url,
args.model_name,
args.model_version,
dataloader=dataloader_fn(),
verbose=False,
resp_wait_s=args.response_wait_time,
)
else:
runner = AsyncGRPCTritonRunner(
args.server_url,
args.model_name,
args.model_version,
dataloader=dataloader_fn(),
verbose=False,
resp_wait_s=args.response_wait_time,
max_unresponded_reqs=args.max_unresponded_requests,
)
except Exception as e:
message = traceback.format_exc()
LOGGER.error(f"Encountered exception \n{message}")
raise e
with JsonDumpWriter(output_dir=args.output_dir) as writer:
start = time.time()
for ids, x, y_pred, y_real in tqdm(runner, unit="batch", mininterval=10):
data = _verify_and_format_dump(args, ids, x, y_pred, y_real)
writer.write(**data)
stop = time.time()
LOGGER.info(f"\nThe inference took {stop - start:0.3f}s")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
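# Dump layout written per batch (a sketch): {"outputs": {name: array}, "ids": {"ids": [...]}},
# plus "inputs"/"labels" entries when the corresponding --dump-* flags are set. This is the
# same structure that JsonDumpReader iterates over in calculate_metrics.py.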
if __name__ == "__main__":
main()
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/run_inference_on_triton.py |
import os
import numpy as np
import torch
from torch.utils.data import DataLoader
from configuration import ElectricityConfig
from data_utils import TFTDataset
import argparse
from deployment_toolkit.dump import JsonDumpWriter
def _verify_and_format_dump(**x):
data = {}
for k, v in x.items():
temp_data = {}
for i in range(v.shape[1]):
temp_data["INPUT" + str(i)] = v[:,i]
data[k] = temp_data
return data
def main():
args = _parse_args()
state_dict = torch.load(os.path.join(args.checkpoint, "checkpoint.pt"))
config = state_dict['config']
test_split = TFTDataset(os.path.join(args.dataset, "test.csv"), config)
data_loader = DataLoader(test_split, batch_size=args.batch_size, num_workers=2)
    input_names_dict = {
        's_cat': 's_cat__0',
        's_cont': 's_cont__1',
        'k_cat': 'k_cat__2',
        'k_cont': 'k_cont__3',
        'o_cat': 'o_cat__4',
        'o_cont': 'o_cont__5',
        'target': 'target__6',
        'id': 'id__7',
    }
    reshaper = [-1, 1]
for step, batch in enumerate(data_loader):
bs = batch['target'].shape[0]
        x = {
            input_names_dict[key]: tensor.numpy() if tensor.numel() else np.ones([bs]).reshape(reshaper)
            for key, tensor in batch.items()
        }
ids = batch['id'][:,0,:].numpy()
y_real = {'target__0':batch['target'][:,config.encoder_length:,:].numpy()}
break
    import json

    data = {
        "data": [
            {
                k: {"content": v[i].flatten().tolist(), "shape": list(v[i].shape), "dtype": str(v[i].dtype)}
                for k, v in x.items()
            }
            for i in range(args.batch_size)
        ]
    }
    with open(os.path.join(args.input_data_dir, "data.json"), "w") as f:
        json.dump(data, f)
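# A sketch of the resulting data.json (values are illustrative, not from a real run):
#
#   {"data": [{"s_cat__0": {"content": [1.0], "shape": [1, 1], "dtype": "float64"},
#              "target__6": {"content": [...], "shape": [192, 1], "dtype": "float32"},
#              ...}]}
#
# This appears to follow the JSON input-data layout accepted by perf_analyzer's
# --input-data flag, with one entry per sample in the batch.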
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint", required=True)
parser.add_argument("--batch-size", required=False, default=1)
parser.add_argument("--dataset", help="Path to dataset", required=True)
parser.add_argument("--input-data-dir", help="Path to output folder", required=True)
    args = parser.parse_args()
return args
if __name__ == "__main__":
main() | DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/prepare_input_data.py |
import os
import torch
import torch.nn as nn
def update_argparser(parser):
parser.add_argument("--checkpoint", type=str, help="Path to checkpoint to be used", required=True)
parser.add_argument("--precision", type=str, choices=['fp16', 'fp32'], required=True)
class TFTWrapper(nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, s_cat, s_cont, k_cat, k_cont, o_cat, o_cont, target, id):
        # Inputs arrive as dense tensors; a singleton second dimension marks a feature that
        # is absent in this configuration, so it is mapped back to None for the inner model.
        wrapped_input = {}
        wrapped_input['s_cat'] = s_cat if s_cat.shape[1] != 1 else None
wrapped_input['s_cont'] = s_cont if s_cont.shape[1] != 1 else None
wrapped_input['k_cat'] = k_cat if k_cat.shape[1] != 1 else None
wrapped_input['k_cont'] = k_cont if k_cont.shape[1] != 1 else None
wrapped_input['o_cat'] = o_cat if o_cat.shape[1] != 1 else None
wrapped_input['o_cont'] = o_cont if o_cont.shape[1] != 1 else None
wrapped_input['target'] = target
wrapped_input['id'] = id if id.numel() else None
return self.model(wrapped_input)
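# Note: the matching dataloader (triton/dataloader.py) substitutes np.ones placeholders of
# shape (batch_size, 1) for empty feature tensors, which is the convention the
# `shape[1] != 1` checks in forward() rely on to detect absent inputs.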
def get_model(**args):
#get model config
os.environ["TFT_SCRIPTING"] = "True"
from modeling import TemporalFusionTransformer
state_dict = torch.load(os.path.join(args['checkpoint'], "checkpoint.pt"))
config = state_dict['config']
#create model
model = TemporalFusionTransformer(config)
#load model
model.load_state_dict(state_dict['model'])
model.eval()
model.cuda()
model = TFTWrapper(model).cuda()
tensor_names = {
"inputs": ['s_cat__0', 's_cont__1', 'k_cat__2', 'k_cont__3', 'o_cat__4', 'o_cont__5', 'target__6', 'id__7'],
"outputs": ["target__0"]
}
return model, tensor_names | DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/model.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To infer the model on a framework runtime, you can use the `run_inference_on_fw.py` script.
It runs inference locally on data obtained from the pointed data loader and saves the results into dump files.
Those files are stored in the directory pointed to by the `--output-dir` argument.
Example call:
```shell script
python ./triton/run_inference_on_fw.py \
--input-path /models/exported/model.onnx \
--input-type onnx \
--dataloader triton/dataloader.py \
--data-dir /data/imagenet \
--batch-size 32 \
--output-dir /results/dump_local \
--dump-labels
```
"""
import argparse
import logging
import os
from pathlib import Path
from tqdm import tqdm
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0"
from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file
from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file
DATALOADER_FN_NAME,
BaseLoader,
BaseRunner,
load_from_file,
)
from .deployment_toolkit.dump import JsonDumpWriter # noqa: E402 module level import not at top of file
from .deployment_toolkit.extensions import loaders, runners # noqa: E402 module level import not at top of file
LOGGER = logging.getLogger("run_inference_on_fw")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
def _parse_and_validate_args():
supported_inputs = set(runners.supported_extensions) & set(loaders.supported_extensions)
parser = argparse.ArgumentParser(description="Dump local inference output of given model", allow_abbrev=False)
parser.add_argument("--input-path", help="Path to input model", required=True)
parser.add_argument("--input-type", help="Input model type", choices=supported_inputs, required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--output-dir", help="Path to dir where output files will be stored", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
Runner: BaseRunner = runners.get(args.input_type)
ArgParserGenerator(Runner).update_argparser(parser)
args = parser.parse_args()
types_requiring_io_params = []
    if args.input_type in types_requiring_io_params and not all(p for p in [args.inputs, args.outputs]):
parser.error(f"For {args.input_type} input provide --inputs and --outputs parameters")
return args
def main():
args = _parse_and_validate_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
Loader: BaseLoader = loaders.get(args.input_type)
Runner: BaseRunner = runners.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
runner = ArgParserGenerator(Runner).from_args(args)
LOGGER.info(f"Loading {args.input_path}")
model = loader.load(args.input_path)
with runner.init_inference(model=model) as runner_session, JsonDumpWriter(args.output_dir) as writer:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
LOGGER.info("Data loader initialized; Running inference")
for ids, x, y_real in tqdm(dataloader_fn(), unit="batch", mininterval=10):
y_pred = runner_session(x)
data = _verify_and_format_dump(args, ids=ids, x=x, y_pred=y_pred, y_real=y_real)
writer.write(**data)
LOGGER.info("Inference finished")
if __name__ == "__main__":
main()
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/run_inference_on_fw.py |
import os
import numpy as np
import torch
from torch.utils.data import DataLoader
from data_utils import TFTDataset
def update_argparser(parser):
parser.add_argument("--dataset", type=str, help="Path to dataset to be used", required=True)
parser.add_argument("--checkpoint", type=str, help="Path to checkpoint to be used", required=True)
parser.add_argument("--batch-size", type=int, help="Path to dataset to be used", default=64)
def get_dataloader_fn(dataset, checkpoint, batch_size=64):
state_dict = torch.load(os.path.join(checkpoint, "checkpoint.pt"))
config = state_dict['config']
test_split = TFTDataset(os.path.join(dataset, "test.csv"), config)
data_loader = DataLoader(test_split, batch_size=int(batch_size), num_workers=2)
    input_names_dict = {
        's_cat': 's_cat__0',
        's_cont': 's_cont__1',
        'k_cat': 'k_cat__2',
        'k_cont': 'k_cont__3',
        'o_cat': 'o_cat__4',
        'o_cont': 'o_cont__5',
        'target': 'target__6',
        'id': 'id__7',
    }
    reshaper = [-1, 1]
def _get_dataloader():
for step, batch in enumerate(data_loader):
bs = batch['target'].shape[0]
            x = {
                input_names_dict[key]: tensor.numpy() if tensor.numel() else np.ones([bs]).reshape(reshaper)
                for key, tensor in batch.items()
            }
            ids = batch['id'][:, 0, :].numpy()
            y_real = {
                'target__0': np.tile(batch['target'][:, config.encoder_length:, :].numpy(), (1, 1, len(config.quantiles)))
            }
yield (ids, x, y_real)
return _get_dataloader | DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/dataloader.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import logging
import os
import pathlib
import shutil
import sys
from distutils.version import LooseVersion
from enum import Enum
from typing import Any, Dict, List
import yaml
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .deployment_toolkit.core import BatchingMode, EvaluationMode, MeasurementMode, OfflineMode, PerformanceTool
from .deployment_toolkit.model_analyzer import ModelAnalyzer, ModelAnalyzerConfig, ModelAnalyzerMode
from .deployment_toolkit.perf_analyzer import PerfAnalyzer, PerfAnalyzerConfig
from .deployment_toolkit.report import save_results, show_results, sort_results
from .deployment_toolkit.utils import parse_server_url
from .deployment_toolkit.warmup import performance_evaluation_warmup
LOGGER = logging.getLogger("run_performance_on_triton")
if LooseVersion(sys.version) >= LooseVersion("3.8.0"):
from importlib.metadata import version
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
TRITON_MODEL_ANALYZER_VERSION = LooseVersion(version("triton-model-analyzer"))
else:
import pkg_resources
TRITON_CLIENT_VERSION = LooseVersion(pkg_resources.get_distribution("tritonclient").version)
TRITON_MODEL_ANALYZER_VERSION = LooseVersion(pkg_resources.get_distribution("triton-model-analyzer").version)
def _log_dict(title: str, dict_: Dict[str, Any]):
LOGGER.info(title)
for key, value in dict_.items():
LOGGER.info(f"\t{key} = {value}")
def _calculate_average_latency(r):
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields])
return avg_latency
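# A sketch of the aggregation above on a single perf_analyzer CSV row
# (microseconds; made-up numbers):
#
#   row = {"Client Send": "50", "Server Queue": "100", "Server Compute": "800", "Client Recv": "30"}
#   _calculate_average_latency(row)  # -> 980; fields missing from the row default to 0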
def _update_performance_data(results: List, batch_size: int, performance_partial_file: str):
row: Dict = {"Batch": batch_size}
with open(performance_partial_file) as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
avg_latency = _calculate_average_latency(r)
row = {**row, **r, "avg latency": avg_latency}
results.append(row)
def _model_analyzer_evaluation(
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
number_of_triton_instances: int,
number_of_model_instances: int,
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
concurrency_steps: int,
batching_mode: BatchingMode,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
model_repository: str,
result_path: pathlib.Path,
output_shared_memory_size: int = 102400,
verbose: bool = False,
):
_log_dict(
"Selected configuration",
{
"server_url": server_url,
"model_name": model_name,
"input_data": input_data,
"input_shapes": input_shapes,
"batch_sizes": batch_sizes,
"number_of_triton_instances": number_of_triton_instances,
"number_of_model_instances": number_of_model_instances,
"measurement_mode": measurement_mode,
"measurement_interval": measurement_interval,
"measurement_request_count": measurement_request_count,
"concurrency_steps": concurrency_steps,
"batching_mode": batching_mode,
"evaluation_mode": evaluation_mode,
"offline_mode": offline_mode,
"output_shared_memory_size": output_shared_memory_size,
"model_repository": model_repository,
"result_path": result_path,
"verbose": verbose,
},
)
perf_analyzer_config = {
"measurement-interval": measurement_interval,
}
if TRITON_MODEL_ANALYZER_VERSION >= LooseVersion("1.8.0"):
perf_analyzer_config["input-data"] = [input_data]
else:
perf_analyzer_config["input-data"] = input_data
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
perf_analyzer_config["measurement-mode"] = measurement_mode.value
perf_analyzer_config["measurement-request-count"] = measurement_request_count
if evaluation_mode == EvaluationMode.OFFLINE:
perf_analyzer_config["shared-memory"] = offline_mode.value
perf_analyzer_config["output-shared-memory-size"] = output_shared_memory_size
if input_shapes:
if TRITON_MODEL_ANALYZER_VERSION > LooseVersion("1.8.0"):
perf_analyzer_config["shape"] = input_shapes
else:
perf_analyzer_config["shape"] = input_shapes[0]
LOGGER.warning("Model Analyzer <= 1.8.0 support only single shape param for Perf Analyzer.")
if batching_mode == BatchingMode.STATIC:
        concurrency = [number_of_triton_instances]
elif batching_mode == BatchingMode.DYNAMIC:
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances
max_concurrency = min(256, max_total_requests)
step = max(1, max_concurrency // concurrency_steps)
min_concurrency = step
concurrency = {"start": min_concurrency, "stop": max_concurrency, "step": step}
batch_sizes = [max(1, max_total_requests // 256)]
else:
raise ValueError(f"Unsupported batching mode: {batching_mode}")
protocol, host, port = parse_server_url(server_url)
checkpoints = pathlib.Path("./checkpoints")
if checkpoints.is_dir():
shutil.rmtree(checkpoints.as_posix())
checkpoints.mkdir(parents=True, exist_ok=True)
config = {
"model_repository": model_repository,
"triton_launch_mode": "remote",
"run_config_search_disable": True,
"perf_analyzer_flags": perf_analyzer_config,
"perf_analyzer_timeout": 3600, # Workaround for Perf Analyzer timeout - use 1h
"profile_models": [model_name],
"batch_sizes": batch_sizes,
"concurrency": concurrency,
"verbose": verbose,
"checkpoint_directory": checkpoints.as_posix(),
"override_output_model_repository": True,
"client_protocol": protocol,
f"triton_{protocol}_endpoint": f"{host}:{port}",
}
if verbose:
_log_dict("Model Analyzer profiling configuration", config)
with open("config.yaml", "w") as file:
yaml.safe_dump(config, file)
config = ModelAnalyzerConfig()
model_analyzer = ModelAnalyzer(config=config)
model_analyzer.run(mode=ModelAnalyzerMode.PROFILE, verbose=verbose)
result_path.mkdir(parents=True, exist_ok=True)
for file in checkpoints.iterdir():
if not file.is_file() or file.suffix != ".ckpt":
continue
LOGGER.info(f"Moving checkpoint {file.name} to {result_path}")
shutil.move(file, result_path / file.name)
inference_output_fields = [
"batch_size",
"concurrency",
"perf_throughput",
"perf_latency",
"perf_client_send_recv",
"perf_client_response_wait",
"perf_server_queue",
"perf_server_compute_input",
"perf_server_compute_infer",
"perf_server_compute_output",
]
gpu_output_fields = [
"gpu_uuid",
"batch_size",
"concurrency",
"gpu_used_memory",
"gpu_free_memory",
"gpu_utilization",
"gpu_power_usage",
]
filename_model_inference = "metrics-model-inference.csv"
filename_model_gpu = "metrics-model-gpu.csv"
config = {
"analysis_models": model_name,
"checkpoint_directory": result_path.as_posix(),
"export_path": "/tmp",
"inference_output_fields": inference_output_fields,
"gpu_output_fields": gpu_output_fields,
"filename_model_inference": filename_model_inference,
"filename_model_gpu": filename_model_gpu,
"summarize": False,
}
if verbose:
_log_dict("Model Analyzer analysis configuration", config)
with open("config.yaml", "w") as file:
yaml.safe_dump(config, file)
config = ModelAnalyzerConfig()
model_analyzer = ModelAnalyzer(config=config)
model_analyzer.run(mode=ModelAnalyzerMode.ANALYZE, verbose=verbose)
inference_metrics_file = pathlib.Path("/tmp") / "results" / filename_model_inference
gpu_metrics_file = pathlib.Path("/tmp") / "results" / filename_model_gpu
for file in [inference_metrics_file, gpu_metrics_file]:
LOGGER.info(f"Moving metrics {file.name} to {result_path}")
shutil.move(file, result_path / file.name)
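# Worked example of the dynamic-batching sweep set up above (illustrative numbers):
# with batch_sizes=[1, 2, 4], number_of_triton_instances=1, number_of_model_instances=2,
# concurrency_steps=32:
#
#   max_total_requests = 2 * 4 * 1 * 2           # = 16
#   max_concurrency    = min(256, 16)            # = 16
#   step               = max(1, 16 // 32)        # = 1
#   concurrency        = {"start": 1, "stop": 16, "step": 1}
#   batch_sizes        = [max(1, 16 // 256)]     # = [1]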
def _perf_analyzer_evaluation(
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
number_of_triton_instances: int,
number_of_model_instances: int,
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
concurrency_steps: int,
batching_mode: BatchingMode,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
result_path: pathlib.Path,
output_shared_memory_size: int = 102400,
verbose: bool = False,
):
protocol, host, port = parse_server_url(server_url)
if batching_mode == BatchingMode.STATIC:
        max_concurrency = 1
        min_concurrency = 1
        step = 1
elif batching_mode == BatchingMode.DYNAMIC:
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances
max_concurrency = min(256, max_total_requests)
step = max(1, max_concurrency // concurrency_steps)
min_concurrency = step
batch_sizes = [max(1, max_total_requests // 256)]
else:
raise ValueError(f"Unsupported batching mode: {batching_mode}")
_log_dict(
"Selected configuration",
{
"server_url": server_url,
"model_name": model_name,
"input_data": input_data,
"input_shapes": input_shapes,
"batch_sizes": batch_sizes,
"number_of_triton_instances": number_of_triton_instances,
"number_of_model_instances": number_of_model_instances,
"measurement_mode": measurement_mode,
"measurement_interval": measurement_interval,
"measurement_request_count": measurement_request_count,
"concurrency_steps": concurrency_steps,
"batching_mode": batching_mode,
"evaluation_mode": evaluation_mode,
"offline_mode": offline_mode,
"output_shared_memory_size": output_shared_memory_size,
"result_path": result_path,
"verbose": verbose,
},
)
results: List[Dict] = list()
for batch_size in batch_sizes:
for concurrency in range(min_concurrency, max_concurrency + step, step):
performance_partial_file = f"triton_performance_{evaluation_mode.value.lower()}_{batching_mode.value.lower()}_partial_{batch_size}_{concurrency}.csv"
params = {
"model-name": model_name,
"model-version": 1,
"batch-size": batch_size,
"url": f"{host}:{port}",
"protocol": protocol,
"input-data": input_data,
"measurement-interval": measurement_interval,
"concurrency-range": f"{concurrency}:{concurrency}:1",
"latency-report-file": performance_partial_file,
}
if verbose:
params["extra-verbose"] = True
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
params["measurement-mode"] = measurement_mode.value
params["measurement-request-count"] = measurement_request_count
if evaluation_mode == EvaluationMode.OFFLINE:
params["shared-memory"] = offline_mode.value
params["output-shared-memory-size"] = output_shared_memory_size
if verbose:
_log_dict(f"Perf Analyzer config for batch_size: {batch_size} and concurrency: {concurrency}", params)
config = PerfAnalyzerConfig()
for param, value in params.items():
config[param] = value
for shape in input_shapes:
config["shape"] = shape
perf_analyzer = PerfAnalyzer(config=config)
perf_analyzer.run()
_update_performance_data(results, batch_size, performance_partial_file)
os.remove(performance_partial_file)
results = sort_results(results=results)
save_results(filename=result_path.as_posix(), data=results)
show_results(results=results)
def _run_performance_analysis(
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
number_of_triton_instances: int,
number_of_model_instances: int,
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
concurrency_steps: int,
batching_mode: BatchingMode,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
output_shared_memory_size: int,
performance_tool: PerformanceTool,
model_repository: str,
result_path: pathlib.Path,
warmup: bool,
verbose: bool,
):
log_level = logging.INFO if not verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
if performance_tool == PerformanceTool.MODEL_ANALYZER:
if result_path.suffix:
raise ValueError(
"Results path for Model Analyzer is invalid. Please, provide the directory name. Example: results"
)
elif performance_tool == PerformanceTool.PERF_ANALYZER:
if result_path.suffix != ".csv":
raise ValueError(
"Results path for Perf Analyzer is invalid. Please, provide the CSV file name. Example: results.csv"
)
else:
raise ValueError(f"Unsupported performance tool {performance_tool}")
if warmup:
LOGGER.info("Running warmup before the main test")
performance_evaluation_warmup(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
number_of_triton_instances=number_of_triton_instances,
number_of_model_instances=number_of_model_instances,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
batching_mode=batching_mode,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
)
if performance_tool == PerformanceTool.MODEL_ANALYZER:
LOGGER.info("Using Model Analyzer for performance evaluation")
_model_analyzer_evaluation(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
number_of_triton_instances=number_of_triton_instances,
number_of_model_instances=number_of_model_instances,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
concurrency_steps=concurrency_steps,
batching_mode=batching_mode,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
model_repository=model_repository,
result_path=result_path,
verbose=verbose,
)
elif performance_tool == PerformanceTool.PERF_ANALYZER:
LOGGER.info("Using Perf Analyzer for performance evaluation")
_perf_analyzer_evaluation(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
number_of_triton_instances=number_of_triton_instances,
number_of_model_instances=number_of_model_instances,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
concurrency_steps=concurrency_steps,
batching_mode=batching_mode,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
result_path=result_path,
verbose=verbose,
)
else:
raise ValueError(f"Unsupported performance tool {performance_tool}")
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--server-url",
type=str,
required=False,
default="http://127.0.0.1:8000",
help="Url to Triton server",
)
parser.add_argument(
"--model-name",
type=str,
required=True,
help="Name of the model to test",
)
parser.add_argument(
"--input-data",
type=str,
required=False,
default="random",
help="Input data to perform profiling.",
)
parser.add_argument(
"--input-shapes",
action="append",
required=False,
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument(
"--batch-sizes",
type=str,
required=True,
help="List of batch sizes to tests. Comma separated.",
)
parser.add_argument(
"--number-of-triton-instances",
type=int,
default=1,
help="Number of Triton Server instances",
)
parser.add_argument(
"--number-of-model-instances",
type=int,
default=1,
help="Number of models instances on Triton Server",
)
parser.add_argument(
"--measurement-mode",
choices=[item.value for item in MeasurementMode],
default=MeasurementMode.COUNT_WINDOWS.value,
type=str,
help="Select measurement mode "
"'time_windows' stabilize performance on measurement window. "
"'count_windows' stabilize performance on number of samples.",
)
parser.add_argument(
"--measurement-interval",
required=False,
help="Time window perf_analyzer will wait to stabilize the measurement",
default=5000,
type=int,
)
parser.add_argument(
"--measurement-request-count",
required=False,
help="Number of samples on which perf_analyzer will stabilize the measurement",
default=50,
type=int,
)
parser.add_argument(
"--concurrency-steps",
help="Define number of concurrency steps used for dynamic batching tests",
default=32,
type=int,
)
parser.add_argument(
"--batching-mode",
choices=[item.value for item in BatchingMode],
default=BatchingMode.STATIC.value,
type=str,
help="Select batching mode "
"'static' run static batching scenario. "
"'dynamic' run dynamic batching scenario.",
)
parser.add_argument(
"--evaluation-mode",
choices=[item.value for item in EvaluationMode],
default=EvaluationMode.OFFLINE.value,
type=str,
help="Select evaluation mode "
"'offline' run offline analysis and use GPU memory to pass tensors. "
"'online' run online analysis and use HTTP protocol.",
)
parser.add_argument(
"--offline-mode",
choices=[item.value for item in OfflineMode],
default=OfflineMode.SYSTEM.value,
type=str,
help="Select offline mode "
"'system' pass tensors through CPU RAM memory. "
"'cuda' pass tensors through GPU RAM memory.",
)
parser.add_argument(
"--output-shared-memory-size",
default=100240,
type=int,
help="Size of memory buffer allocated for output with dynamic shapes in bytes. "
"Has to be equal to maximal size of output tensor.",
)
parser.add_argument(
"--performance-tool",
choices=[item.value for item in PerformanceTool],
default=PerformanceTool.MODEL_ANALYZER.value,
type=str,
help="Select performance tool for measurement mode "
"'model_analyzer' use Model Analyzer "
"'perf_analyzer' use Perf Analyzer",
)
parser.add_argument(
"--model-repository",
default=None,
type=str,
help="Path to model repository. Valid when using Model Analyzer",
)
parser.add_argument("--result-path", type=pathlib.Path, required=True, help="Path where results files is stored.")
parser.add_argument(
"--warmup", help="Enable model warmup before performance test", action="store_true", default=False
)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
args = parser.parse_args()
    batch_sizes = [int(batch_size) for batch_size in args.batch_sizes.split(",")]
_run_performance_analysis(
server_url=args.server_url,
model_name=args.model_name,
input_data=args.input_data,
input_shapes=args.input_shapes or [],
batch_sizes=batch_sizes,
number_of_triton_instances=args.number_of_triton_instances,
number_of_model_instances=args.number_of_model_instances,
measurement_mode=MeasurementMode(args.measurement_mode),
measurement_interval=args.measurement_interval,
measurement_request_count=args.measurement_request_count,
concurrency_steps=args.concurrency_steps,
batching_mode=BatchingMode(args.batching_mode),
evaluation_mode=EvaluationMode(args.evaluation_mode),
offline_mode=OfflineMode(args.offline_mode),
output_shared_memory_size=args.output_shared_memory_size,
performance_tool=PerformanceTool(args.performance_tool),
model_repository=args.model_repository,
result_path=args.result_path,
warmup=args.warmup,
verbose=args.verbose,
)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/run_performance_on_triton.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import importlib
import logging
import os
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
LOGGER = logging.getLogger(__name__)
DATALOADER_FN_NAME = "get_dataloader_fn"
GET_MODEL_FN_NAME = "get_model"
GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn"
GET_ARGPARSER_FN_NAME = "update_argparser"
class TensorSpec(NamedTuple):
name: str
dtype: str
shape: Tuple
class Parameter(Enum):
def __lt__(self, other: "Parameter") -> bool:
return self.value < other.value
def __str__(self):
return self.value
class Accelerator(Parameter):
NONE = "none"
AMP = "amp"
TRT = "trt"
CUDA = NONE # backward compatibility
class Precision(Parameter):
INT8 = "int8"
FP16 = "fp16"
FP32 = "fp32"
TF32 = "tf32" # Deprecated
class Format(Parameter):
TF_GRAPHDEF = "tf-graphdef"
TF_SAVEDMODEL = "tf-savedmodel"
TF_TRT = "tf-trt"
TF_ESTIMATOR = "tf-estimator"
TF_KERAS = "tf-keras"
ONNX = "onnx"
TRT = "trt"
TS_SCRIPT = "ts-script"
TS_TRACE = "ts-trace"
PYT = "pyt"
FASTERTRANSFORMER = "fastertransformer"
class Model(NamedTuple):
handle: object
# TODO: precision should be removed
precision: Optional[Precision]
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def load_from_file(file_path, label, target):
spec = importlib.util.spec_from_file_location(name=label, location=file_path)
my_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
return getattr(my_module, target, None)
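# Usage sketch (arguments are illustrative): fetch a factory function from a user module.
#
#   get_dataloader_fn = load_from_file("triton/dataloader.py", label="dataloader",
#                                      target=DATALOADER_FN_NAME)
#
# Returns None when the module does not define the requested attribute.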
class BaseLoader(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
"""
Loads and process model from file based on given set of args
"""
pass
class BaseSaver(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
"""
Save model to file
"""
pass
class BaseRunner(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def init_inference(self, model: Model):
raise NotImplementedError
class BaseRunnerSession(abc.ABC):
def __init__(self, model: Model):
self._model = model
@abc.abstractmethod
def __enter__(self):
raise NotImplementedError()
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError()
@abc.abstractmethod
def __call__(self, x: Dict[str, object]):
raise NotImplementedError()
def _set_env_variables(self) -> Dict[str, object]:
"""this method not remove values; fix it if needed"""
to_set = {}
old_values = {k: os.environ.pop(k, None) for k in to_set}
os.environ.update(to_set)
return old_values
def _recover_env_variables(self, old_envs: Dict[str, object]):
for name, value in old_envs.items():
if value is None:
del os.environ[name]
else:
os.environ[name] = str(value)
class BaseConverter(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def convert(self, model: Model, dataloader_fn) -> Model:
raise NotImplementedError()
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
return requested_model_precision
class BaseMetricsCalculator(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
"""
Calculates error/accuracy metrics
Args:
ids: List of ids identifying each sample in the batch
y_pred: model output as dict where key is output name and value is output value
x: model input as dict where key is input name and value is input value
y_real: input ground truth as dict where key is output name and value is output value
Returns:
dictionary where key is metric name and value is its value
"""
pass
@abc.abstractmethod
def update(
self,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
):
pass
@property
@abc.abstractmethod
def metrics(self) -> Dict[str, Any]:
pass
class ShapeSpec(NamedTuple):
min: Tuple
opt: Tuple
max: Tuple
class MeasurementMode(Enum):
COUNT_WINDOWS = "count_windows"
TIME_WINDOWS = "time_windows"
class PerformanceTool(Enum):
"""
Available performance evaluation tools
"""
MODEL_ANALYZER = "model_analyzer"
PERF_ANALYZER = "perf_analyzer"
class BatchingMode(Enum):
"""
Available batching modes
"""
STATIC = "static"
DYNAMIC = "dynamic"
class EvaluationMode(Enum):
"""
Available evaluation modes
"""
OFFLINE = "offline"
ONLINE = "online"
class OfflineMode(Enum):
SYSTEM = "system"
CUDA = "cuda"
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/core.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import json
import pickle
import threading
from pathlib import Path
from typing import Dict, Iterator, List, Union
import numpy as np
MB2B = 2 ** 20
B2MB = 1 / MB2B
FLUSH_THRESHOLD_B = 256 * MB2B
def _validate_batch(name: str, value: Union[list, np.ndarray]):
if not isinstance(value, (list, np.ndarray)):
raise ValueError(f"Values shall be lists or np.ndarrays; current type {type(value)}")
def _validate_prefix_data(prefix_data: Dict[str, List[np.ndarray]]):
batch_sizes_per_io_name = {name: [len(batch) for batch in batches] for name, batches in prefix_data.items()}
names = list(batch_sizes_per_io_name)
for io_name in names:
for batch_idx, batch_size in enumerate(batch_sizes_per_io_name[io_name]):
if not all([batch_sizes_per_io_name[other_name][batch_idx] == batch_size for other_name in names]):
non_equal_batch_sizes = {
other_name: batch_sizes_per_io_name[other_name][batch_idx] for other_name in names
}
non_equal_batch_sizes_str = ", ".join(
[f"{name}={batch_size}" for name, batch_size in non_equal_batch_sizes.items()]
)
raise ValueError(
"All inputs/outputs should have same number of batches with equal batch_size. "
f"At batch_idx={batch_idx} there are batch_sizes: {non_equal_batch_sizes_str}"
)
# ensure each io has the same number of batches with equal batch sizes
def _get_nitems_and_batches(prefix_data: Dict[str, List[np.ndarray]]):
nitems = 0
nbatches = 0
if prefix_data:
nitems_per_io_name = {name: sum(len(batch) for batch in batches) for name, batches in prefix_data.items()}
nbatches_per_io_name = {name: len(batches) for name, batches in prefix_data.items()}
nitems = list(nitems_per_io_name.values())[0]
nbatches = list(nbatches_per_io_name.values())[0]
return nitems, nbatches
class BaseDumpWriter(abc.ABC):
FILE_SUFFIX = ".abstract"
def __init__(self, output_dir: Union[str, Path]):
self._output_dir = Path(output_dir)
# outer dict key is prefix (i.e. input/output/labels/...), inner dict key is input/output name
# list is list of batches
self._items_cache: Dict[str, Dict[str, List[np.ndarray]]] = {}
# key is prefix
self._items_counters: Dict[str, int] = {}
self._cache_lock = threading.RLock()
self._flush_threshold_b = FLUSH_THRESHOLD_B
@property
def cache_size(self):
def _get_bytes_size(name, batch):
_validate_batch(name, batch)
if not isinstance(batch, np.ndarray):
                batch = np.array(batch)
return batch.nbytes
with self._cache_lock:
return {
prefix: sum(_get_bytes_size(name, batch) for name, batches in data.items() for batch in batches)
for prefix, data in self._items_cache.items()
}
def _append_to_cache(self, prefix, prefix_data):
if prefix_data is None:
return
if not isinstance(prefix_data, dict):
raise ValueError(f"{prefix} data to store shall be dict")
with self._cache_lock:
cached_prefix_data = self._items_cache.setdefault(prefix, {})
for name, batch in prefix_data.items():
_validate_batch(name, batch)
if not isinstance(batch, np.ndarray):
batch = np.array(batch)
cached_batches = cached_prefix_data.setdefault(name, [])
cached_batches += [batch]
def write(self, **kwargs):
with self._cache_lock:
for prefix, prefix_data in kwargs.items():
self._append_to_cache(prefix, prefix_data)
biggest_prefix_data_size = max(self.cache_size.values())
if biggest_prefix_data_size > self._flush_threshold_b:
self.flush()
def flush(self):
with self._cache_lock:
for prefix, prefix_data in self._items_cache.items():
_validate_prefix_data(prefix_data)
output_path = self._output_dir / self._get_filename(prefix)
self._dump(prefix_data, output_path)
nitems, nbatches = _get_nitems_and_batches(prefix_data)
self._items_counters[prefix] += nitems
self._items_cache = {}
def _get_filename(self, prefix):
idx = self._items_counters.setdefault(prefix, 0)
return f"{prefix}-{idx:012d}{self.FILE_SUFFIX}"
@abc.abstractmethod
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
pass
def __enter__(self):
if self._output_dir.exists() and len(list(self._output_dir.iterdir())):
raise ValueError(f"{self._output_dir.as_posix()} is not empty")
self._output_dir.mkdir(parents=True, exist_ok=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush()
class PickleDumpWriter(BaseDumpWriter):
FILE_SUFFIX = ".pkl"
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("wb") as pickle_file:
pickle.dump(prefix_data, pickle_file)
class JsonDumpWriter(BaseDumpWriter):
FILE_SUFFIX = ".json"
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
repacked_prefix_data = self._format_data(prefix_data)
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("w") as json_file:
json.dump(repacked_prefix_data, json_file)
def _format_data(self, prefix_data: Dict[str, List[np.ndarray]]) -> Dict:
def _format_batch_for_perf_analyzer_json_format(batch: np.ndarray):
return {
"content": batch.flatten().tolist(),
"shape": list(batch.shape),
"dtype": str(batch.dtype),
}
_, nbatches = _get_nitems_and_batches(prefix_data)
batches = [{} for _ in range(nbatches)]
for io_name, batches_per_io in prefix_data.items():
for batch_idx, batch in enumerate(batches_per_io):
batches[batch_idx][io_name] = _format_batch_for_perf_analyzer_json_format(batch)
return {"data": batches}
class BaseDumpReader(abc.ABC):
FILE_SUFFIX = ".abstract"
def __init__(self, dump_dir: Union[Path, str]):
self._dump_dir = Path(dump_dir)
def get(self, prefix: str) -> Iterator[Dict[str, np.ndarray]]:
dump_files_paths = sorted(self._dump_dir.glob(f"{prefix}*{self.FILE_SUFFIX}"))
for dump_file_path in dump_files_paths:
prefix_data = self._load_file(dump_file_path)
nitems, nbatches = _get_nitems_and_batches(prefix_data)
for batch_idx in range(nbatches):
yield {io_name: prefix_data[io_name][batch_idx] for io_name in prefix_data}
@abc.abstractmethod
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
pass
def iterate_over(self, prefix_list: List[str]) -> Iterator:
iterators = [self.get(prefix) for prefix in prefix_list]
empty_iterators = [False] * len(iterators)
while not all(empty_iterators):
values = [None] * len(iterators)
for idx, iterator in enumerate(iterators):
if empty_iterators[idx]:
continue
try:
values[idx] = next(iterator)
except StopIteration:
empty_iterators[idx] = True
if all(empty_iterators):
break
if not all(empty_iterators):
yield values
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class PickleDumpReader(BaseDumpReader):
FILE_SUFFIX = ".pkl"
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
with dump_file_path.open("rb") as pickle_file:
return pickle.load(pickle_file)
class JsonDumpReader(BaseDumpReader):
FILE_SUFFIX = ".json"
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
with dump_file_path.open("rb") as json_file:
data = json.load(json_file)
return self._repack_data(data)
def _repack_data(self, data: Dict) -> Dict[str, List[np.ndarray]]:
result: Dict[str, List[np.ndarray]] = {}
batches = data["data"]
for batch in batches:
for io_name, batch_as_dict in batch.items():
io_batches = result.setdefault(io_name, [])
flat_array = batch_as_dict["content"]
shape = batch_as_dict["shape"]
dtype = batch_as_dict["dtype"]
batch_as_array = np.array(flat_array).reshape(shape).astype(dtype)
io_batches.append(batch_as_array)
return result
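if __name__ == "__main__":
    # A minimal round-trip sketch (the prefix and tensor names are illustrative
    # assumptions): dump two batches of one output and read them back batch by batch.
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        with JsonDumpWriter(tmp_dir) as writer:
            writer.write(outputs={"scores": np.zeros((4, 2), dtype=np.float32)})
            writer.write(outputs={"scores": np.ones((4, 2), dtype=np.float32)})
        with JsonDumpReader(tmp_dir) as reader:
            for batch in reader.get("outputs"):
                print({name: array.shape for name, array in batch.items()})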
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/dump.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import os
import re
from pathlib import Path
from typing import List
LOGGER = logging.getLogger(__name__)
class ExtensionManager:
def __init__(self, name: str):
self._name = name
self._registry = {}
def register_extension(self, extension: str, clazz):
already_registered_class = self._registry.get(extension, None)
if already_registered_class and already_registered_class.__module__ != clazz.__module__:
raise RuntimeError(
f"Conflicting extension {self._name}/{extension}; "
f"{already_registered_class.__module__}.{already_registered_class.__name} "
f"and "
f"{clazz.__module__}.{clazz.__name__}"
)
elif already_registered_class is None:
clazz_full_name = f"{clazz.__module__}.{clazz.__name__}" if clazz is not None else "None"
LOGGER.debug(f"Registering extension {self._name}/{extension}: {clazz_full_name}")
self._registry[extension] = clazz
def get(self, extension):
if extension not in self._registry:
raise RuntimeError(f"Missing extension {self._name}/{extension}")
return self._registry[extension]
@property
def supported_extensions(self):
return list(self._registry)
@staticmethod
def scan_for_extensions(extension_dirs: List[Path]):
register_pattern = r".*\.register_extension\(.*"
for extension_dir in extension_dirs:
for python_path in extension_dir.rglob("*.py"):
if not python_path.is_file():
continue
payload = python_path.read_text()
if re.findall(register_pattern, payload):
import_path = python_path.relative_to(toolkit_root_dir.parent)
package = import_path.parent.as_posix().replace(os.sep, ".")
package_with_module = f"{package}.{import_path.stem}"
spec = importlib.util.spec_from_file_location(name=package_with_module, location=python_path)
my_module = importlib.util.module_from_spec(spec)
my_module.__package__ = package
try:
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
except ModuleNotFoundError as e:
LOGGER.error(
f"Could not load extensions from {import_path} due to missing python packages; {e}"
)
runners = ExtensionManager("runners")
loaders = ExtensionManager("loaders")
savers = ExtensionManager("savers")
converters = ExtensionManager("converters")
toolkit_root_dir = (Path(__file__).parent / "..").resolve()
ExtensionManager.scan_for_extensions([toolkit_root_dir])
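# A hedged illustration (the file and names below are assumptions, not toolkit
# files): any module under the toolkit tree whose source textually contains a
# `*.register_extension(...)` call is imported by `scan_for_extensions`, which
# triggers its registration side effects, e.g.:
#
#   # my_runner.py
#   from .extensions import runners
#
#   class MyRunner:
#       ...
#
#   runners.register_extension("my-format", MyRunner)
#
# Afterwards `runners.get("my-format")` returns MyRunner and
# `runners.supported_extensions` includes "my-format".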
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/extensions.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
from distutils.version import LooseVersion
from importlib.metadata import version
from typing import List
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import BatchingMode, EvaluationMode, MeasurementMode, OfflineMode
from .perf_analyzer import PerfAnalyzer, PerfAnalyzerConfig
from .utils import parse_server_url
LOGGER = logging.getLogger("warmup")
def performance_evaluation_warmup(
server_url: str,
model_name: str,
batch_sizes: List[int],
number_of_triton_instances: int,
number_of_model_instances: int,
input_data: str,
input_shapes: List[str],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
batching_mode: BatchingMode,
offline_mode: OfflineMode,
evaluation_mode: EvaluationMode,
output_shared_memory_size: int,
):
protocol, host, port = parse_server_url(server_url)
measurement_interval = 2 * measurement_interval
measurement_request_count = 2 * measurement_request_count
if batching_mode == BatchingMode.STATIC:
if len(batch_sizes) == 1:
batch_sizes = {batch_sizes[0]}
else:
batch_sizes = sorted({1, batch_sizes[-1]})
max_concurrency = 1
min_concurrency = 1
step = 1
elif batching_mode == BatchingMode.DYNAMIC:
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances
max_concurrency = min(256, max_total_requests)
step = max(1, max_concurrency // 2)
min_concurrency = step
batch_sizes = [max(1, max_total_requests // 256)]
else:
raise ValueError(f"Unsupported batching mode: {batching_mode}")
for batch_size in batch_sizes:
for concurrency in range(min_concurrency, max_concurrency + step, step):
params = {
"model-name": model_name,
"model-version": 1,
"batch-size": batch_size,
"url": f"{host}:{port}",
"protocol": protocol,
"input-data": input_data,
"measurement-interval": measurement_interval,
"concurrency-range": f"{concurrency}:{concurrency}:1",
}
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
params["measurement-mode"] = measurement_mode.value
params["measurement-request-count"] = measurement_request_count
if evaluation_mode == EvaluationMode.OFFLINE:
params["shared-memory"] = offline_mode.value
params["output-shared-memory-size"] = output_shared_memory_size
config = PerfAnalyzerConfig()
for param, value in params.items():
config[param] = value
for shape in input_shapes:
config["shape"] = shape
perf_analyzer = PerfAnalyzer(config=config)
perf_analyzer.run()
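if __name__ == "__main__":
    # A minimal invocation sketch; the server URL, model name and numeric values
    # below are assumptions for illustration and must match a running Triton
    # deployment. Assumes execution within the package (relative imports above).
    performance_evaluation_warmup(
        server_url="grpc://127.0.0.1:8001",
        model_name="tft",
        batch_sizes=[1, 8],
        number_of_triton_instances=1,
        number_of_model_instances=1,
        input_data="random",
        input_shapes=[],
        measurement_mode=MeasurementMode.COUNT_WINDOWS,
        measurement_interval=5000,
        measurement_request_count=50,
        batching_mode=BatchingMode.STATIC,
        offline_mode=OfflineMode.SYSTEM,
        evaluation_mode=EvaluationMode.ONLINE,
        output_shared_memory_size=102400,
    )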
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/warmup.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Tuple
LOGGER = logging.getLogger(__name__)
def parse_server_url(server_url: str) -> Tuple[str, str, int]:
DEFAULT_PORTS = {"http": 8000, "grpc": 8001}
# extract protocol
server_url_items = server_url.split("://")
if len(server_url_items) != 2:
raise ValueError("Prefix server_url with protocol ex.: grpc://127.0.0.1:8001")
requested_protocol, server_url = server_url_items
requested_protocol = requested_protocol.lower()
if requested_protocol not in DEFAULT_PORTS:
raise ValueError(f"Unsupported protocol: {requested_protocol}")
# extract host and port
default_port = DEFAULT_PORTS[requested_protocol]
server_url_items = server_url.split(":")
if len(server_url_items) == 1:
host, port = server_url, default_port
elif len(server_url_items) == 2:
host, port = server_url_items
port = int(port)
if port != default_port:
LOGGER.warning(
f"Current server URL is {server_url} while default {requested_protocol} port is {default_port}"
)
else:
raise ValueError(f"Could not parse {server_url}. Example of correct server URL: grpc://127.0.0.1:8001")
return requested_protocol, host, port
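if __name__ == "__main__":
    # Quick illustration of the accepted URL forms (addresses are examples only):
    print(parse_server_url("grpc://127.0.0.1:8001"))  # ('grpc', '127.0.0.1', 8001)
    print(parse_server_url("http://localhost"))       # ('http', 'localhost', 8000)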
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import logging
from typing import Callable, Dict, Optional, Union
from model_navigator.utils.cli import is_dict_generic, is_list_generic, is_optional_generic
from .core import GET_ARGPARSER_FN_NAME, load_from_file
LOGGER = logging.getLogger(__name__)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def filter_fn_args(args: Union[dict, argparse.Namespace], fn: Callable) -> dict:
signature = inspect.signature(fn)
parameters_names = list(signature.parameters)
if isinstance(args, argparse.Namespace):
args = vars(args)
args = {k: v for k, v in args.items() if k in parameters_names}
return args
def add_args_for_fn_signature(parser, fn) -> argparse.ArgumentParser:
parser.conflict_handler = "resolve"
signature = inspect.signature(fn)
for parameter in signature.parameters.values():
if parameter.name in ["self", "args", "kwargs"]:
continue
argument_kwargs = {}
if parameter.annotation != inspect.Parameter.empty:
is_optional = is_optional_generic(parameter.annotation)
if is_optional:
annotation = parameter.annotation.__args__[0] # Optional[cls] will be changed into Union[cls, None]
else:
annotation = parameter.annotation
is_list = is_list_generic(annotation)
is_dict = is_dict_generic(annotation)
if parameter.annotation == bool:
argument_kwargs["type"] = str2bool
argument_kwargs["choices"] = [0, 1]
elif is_list:
argument_kwargs["type"] = annotation.__args__[0] # List[cls] -> cls
elif is_dict:
raise RuntimeError(
f"Could not prepare argument parser for {parameter.name}: {parameter.annotation} in {fn}"
)
else:
argument_kwargs["type"] = annotation
if parameter.default != inspect.Parameter.empty:
if parameter.annotation == bool:
argument_kwargs["default"] = str2bool(parameter.default)
else:
argument_kwargs["default"] = parameter.default
else:
argument_kwargs["required"] = True
name = parameter.name.replace("_", "-")
LOGGER.debug(f"Adding argument {name} with {argument_kwargs}")
parser.add_argument(f"--{name}", **argument_kwargs)
return parser
class ArgParserGenerator:
def __init__(self, cls_or_fn, module_path: Optional[str] = None):
self._cls_or_fn = cls_or_fn
init_method_name = "__init__"
self._handle = cls_or_fn if inspect.isfunction(cls_or_fn) else getattr(cls_or_fn, init_method_name, None)
input_is_python_file = module_path and module_path.endswith(".py")
self._input_path = module_path if input_is_python_file else None
self._required_fn_name_for_signature_parsing = getattr(
cls_or_fn, "required_fn_name_for_signature_parsing", None
)
def update_argparser(self, parser):
name = self._handle.__name__
group_parser = parser.add_argument_group(name)
add_args_for_fn_signature(group_parser, fn=self._handle)
self._update_argparser(group_parser)
def get_args(self, args: argparse.Namespace):
filtered_args = filter_fn_args(args, fn=self._handle)
tmp_parser = argparse.ArgumentParser(allow_abbrev=False)
self._update_argparser(tmp_parser)
custom_names = [
p.dest.replace("-", "_") for p in tmp_parser._actions if not isinstance(p, argparse._HelpAction)
]
custom_params = {n: getattr(args, n) for n in custom_names}
filtered_args = {**filtered_args, **custom_params}
return filtered_args
def from_args(self, args: Union[argparse.Namespace, Dict]):
args = self.get_args(args)
LOGGER.info(f"Initializing {self._cls_or_fn.__name__}({args})")
return self._cls_or_fn(**args)
def _update_argparser(self, parser):
label = "argparser_update"
if self._input_path:
update_argparser_handle = load_from_file(self._input_path, label=label, target=GET_ARGPARSER_FN_NAME)
if update_argparser_handle:
update_argparser_handle(parser)
elif self._required_fn_name_for_signature_parsing:
fn_handle = load_from_file(
self._input_path, label=label, target=self._required_fn_name_for_signature_parsing
)
if fn_handle:
add_args_for_fn_signature(parser, fn_handle)
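if __name__ == "__main__":
    # A minimal sketch; `_example_fn` is a stand-in, not a toolkit API. It shows
    # how ArgParserGenerator derives CLI flags from a callable's signature and
    # then calls the callable with the parsed values. Assumes execution within
    # the package so the relative import above resolves.
    def _example_fn(batch_size: int = 32, use_fp16: bool = False):
        return {"batch_size": batch_size, "use_fp16": use_fp16}

    example_parser = argparse.ArgumentParser()
    generator = ArgParserGenerator(_example_fn)
    generator.update_argparser(example_parser)
    parsed = example_parser.parse_args(["--batch-size", "8", "--use-fp16", "1"])
    print(generator.from_args(parsed))  # {'batch_size': 8, 'use_fp16': True}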
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/args.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import re
from typing import Dict, List
from natsort import natsorted
from tabulate import tabulate
def sort_results(results: List):
results = natsorted(results, key=lambda item: [item[key] for key in item.keys()])
return results
def save_results(filename: str, data: List, formatted: bool = False):
data = format_data(data=data) if formatted else data
with open(filename, "a") as csvfile:
fieldnames = data[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
def format_data(data: List[Dict]) -> List[Dict]:
formatted_data = list()
for item in data:
formatted_item = format_keys(data=item)
formatted_data.append(formatted_item)
return formatted_data
def format_keys(data: Dict) -> Dict:
keys = {format_key(key=key): value for key, value in data.items()}
return keys
def format_key(key: str) -> str:
key = " ".join([k.capitalize() for k in re.split("_| ", key)])
return key
def show_results(results: List[Dict]):
headers = list(results[0].keys())
summary = map(lambda x: list(map(lambda item: item[1], x.items())), results)
print(tabulate(summary, headers=headers))
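if __name__ == "__main__":
    # An illustration with made-up rows: sorts naturally, prettifies the column
    # names ("batch_size" -> "Batch Size") and prints a table.
    example_rows = [
        {"batch_size": 8, "latency_ms": 12.5},
        {"batch_size": 1, "latency_ms": 3.1},
    ]
    show_results(format_data(sort_results(example_rows)))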
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/report.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
# pytype: disable=import-error
import onnx
import onnx.shape_inference
import onnxruntime
from google.protobuf import text_format
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
from .utils import infer_precision
# pytype: enable=import-error
LOGGER = logging.getLogger(__name__)
def _value_info2tensor_spec(value_info: onnx.ValueInfoProto):
onnx_data_type_map = {"float": "float32", "double": "float64"}
elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower()
dtype = onnx_data_type_map.get(elem_type_name, elem_type_name)
def _get_dim(dim):
which = dim.WhichOneof("value")
if which is not None: # which is None when dim is None
dim = getattr(dim, which)
return None if isinstance(dim, (str, bytes)) else dim
shape = value_info.type.tensor_type.shape
shape = tuple(_get_dim(d) for d in shape.dim)
return TensorSpec(value_info.name, dtype=dtype, shape=shape)
def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]:
import networkx as nx
# build directed graph
nx_graph = nx.DiGraph()
def _get_dtype(vi):
t = vi.type
if hasattr(t, "tensor_type"):
type_id = t.tensor_type.elem_type
else:
raise NotImplementedError("Not implemented yet")
return TENSOR_TYPE_TO_NP_TYPE[type_id]
node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info}
node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output}
node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input}
for node in onnx_graph.node:
node_dtype = node_output2type.get("+".join(node.output), None)
nx_graph.add_node(
node.name,
op=node.op_type,
attr={a.name: a for a in node.attribute},
dtype=node_dtype,
)
for input_name in node.input:
prev_node = node_outputs2node.get(input_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, node.name)
for input_node in onnx_graph.input:
input_name = input_node.name
nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node))
next_node = node_inputs2node.get(input_name, None)
if next_node:
nx_graph.add_edge(input_name, next_node.name)
for output in onnx_graph.output:
output_name = output.name
nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output))
prev_node = node_outputs2node.get(output_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, output_name)
else:
LOGGER.warning(f"Could not find previous node for {output_name}")
input_names = [n.name for n in onnx_graph.input]
output_names = [n.name for n in onnx_graph.output]
most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None))
if most_common_dtype is not None:
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype]
else:
precision = None
return precision
class OnnxLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
model = onnx.load(model_path)
onnx.checker.check_model(model)
onnx.helper.strip_doc_string(model)
model = onnx.shape_inference.infer_shapes(model)
# TODO: probably modification of onnx model ios causes error on optimize
# from onnx.utils import polish_model
# model = polish_model(model) # run checker, docs strip, optimizer and shape inference
inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input}
outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output}
precision = _infer_graph_precision(model.graph)
return Model(model, precision, inputs, outputs)
class OnnxSaver(BaseSaver):
def __init__(self, as_text: bool = False):
self._as_text = as_text
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
onnx_model: onnx.ModelProto = model.handle
if self._as_text:
with model_path.open("w") as f:
f.write(text_format.MessageToString(onnx_model))
else:
with model_path.open("wb") as f:
f.write(onnx_model.SerializeToString())
"""
ExecutionProviders on onnxruntime 1.4.0
['TensorrtExecutionProvider',
'CUDAExecutionProvider',
'MIGraphXExecutionProvider',
'NGRAPHExecutionProvider',
'OpenVINOExecutionProvider',
'DnnlExecutionProvider',
'NupharExecutionProvider',
'VitisAIExecutionProvider',
'ArmNNExecutionProvider',
'ACLExecutionProvider',
'CPUExecutionProvider']
"""
def _check_providers(providers):
providers = providers or []
if not isinstance(providers, (list, tuple)):
providers = [providers]
available_providers = onnxruntime.get_available_providers()
unavailable = set(providers) - set(available_providers)
if unavailable:
raise RuntimeError(f"Unavailable providers {unavailable}")
return providers
class OnnxRunner(BaseRunner):
def __init__(self, verbose_runtime_logs: bool = False):
self._providers = None
self._verbose_runtime_logs = verbose_runtime_logs
def init_inference(self, model: Model):
assert isinstance(model.handle, onnx.ModelProto)
return OnnxRunnerSession(
model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs
)
class OnnxRunnerSession(BaseRunnerSession):
def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False):
super().__init__(model)
self._input_names = None
self._output_names = None
self._session = None
self._providers = providers
self._verbose_runtime_logs = verbose_runtime_logs
self._old_env_values = {}
def __enter__(self):
self._old_env_values = self._set_env_variables()
sess_options = onnxruntime.SessionOptions() # default session options
if self._verbose_runtime_logs:
sess_options.log_severity_level = 0
sess_options.log_verbosity_level = 1
LOGGER.info(
f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}"
)
self._input_names = list(self._model.inputs)
self._output_names = list(self._model.outputs)
model_payload = self._model.handle.SerializeToString()
self._session = onnxruntime.InferenceSession(
model_payload, providers=self._providers, sess_options=sess_options
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._input_names = None
self._output_names = None
self._session = None
self._recover_env_variables(self._old_env_values)
def __call__(self, x: Dict[str, object]):
feed_dict = {k: x[k] for k in self._input_names}
y_pred = self._session.run(self._output_names, feed_dict)
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
loaders.register_extension(Format.ONNX.value, OnnxLoader)
runners.register_extension(Format.ONNX.value, OnnxRunner)
savers.register_extension(Format.ONNX.value, OnnxSaver)
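if __name__ == "__main__":
    # A hedged end-to-end sketch; "model.onnx" is an assumed path and the
    # zero-filled feed is only a placeholder that must match the model's input
    # signature. Assumes execution within the package (relative imports above).
    loaded_model = OnnxLoader().load("model.onnx")
    with OnnxRunner().init_inference(loaded_model) as session:
        feed = {
            name: np.zeros([dim or 1 for dim in spec.shape], dtype=spec.dtype)
            for name, spec in loaded_model.inputs.items()
        }
        print({name: value.shape for name, value in session(feed).items()})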
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/bermuda/onnx.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/bermuda/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from typing import Callable, Dict, List, Optional
import networkx as nx
from ..core import ShapeSpec
def infer_precision(
nx_graph: nx.Graph,
input_names: List[str],
output_names: List[str],
get_node_dtype_fn: Callable,
):
node_dtypes = [nx_graph.nodes[node_name].get("dtype", None) for node_name in nx_graph.nodes]
node_dtypes = [dt for dt in node_dtypes if dt is None or dt.kind not in ["i", "b"]]
dtypes_counter = Counter(node_dtypes)
return dtypes_counter.most_common()[0][0]
def get_shapes_with_dynamic_axes(dataloader, batch_size_dim: Optional[int] = None):
def _set_dynamic_shapes(t, shapes):
for k, v in t.items():
shape = list(v.shape)
for dim, s in enumerate(shape):
if shapes[k][dim] != -1 and shapes[k][dim] != s:
shapes[k][dim] = -1
def _mark_batch_axis(shape, batch_axis: int):
shape = list(shape)
shape[batch_axis] = -1
return tuple(shape)
## get all shapes from input and output tensors
input_shapes = {}
output_shapes = {}
for batch in dataloader:
_, x, y = batch
for k, v in x.items():
input_shapes[k] = list(v.shape)
for k, v in y.items():
output_shapes[k] = list(v.shape)
break
# based on max <max_num_iters> iterations, check which
# dimensions differ to determine dynamic_axes
max_num_iters = 100
for idx, batch in enumerate(dataloader):
if idx >= max_num_iters:
break
_, x, y = batch
_set_dynamic_shapes(x, input_shapes)
_set_dynamic_shapes(y, output_shapes)
if batch_size_dim is not None:
input_shapes = {name: _mark_batch_axis(shape, batch_size_dim) for name, shape in input_shapes.items()}
output_shapes = {name: _mark_batch_axis(shape, batch_size_dim) for name, shape in output_shapes.items()}
return input_shapes, output_shapes
def get_dynamic_axes(dataloader, batch_size_dim: Optional[int] = None):
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim=batch_size_dim)
all_shapes = {**input_shapes, **output_shapes}
dynamic_axes = {}
for k, shape in all_shapes.items():
for idx, s in enumerate(shape):
if s == -1:
                dynamic_axes.setdefault(k, {})[idx] = k + "_" + str(idx)  # keep all dynamic dims of a tensor
for k in all_shapes:
if k in dynamic_axes:
dynamic_axes[k].update({batch_size_dim: "batch_size_" + str(batch_size_dim)})
else:
dynamic_axes[k] = {batch_size_dim: "batch_size_" + str(batch_size_dim)}
return dynamic_axes
def get_input_shapes(dataloader, max_batch_size=1) -> Dict[str, ShapeSpec]:
def init_counters_and_shapes(x, counters, min_shapes, max_shapes):
for k, v in x.items():
counters[k] = Counter()
min_shapes[k] = [float("inf")] * v.ndim
max_shapes[k] = [float("-inf")] * v.ndim
counters = {}
min_shapes: Dict[str, tuple] = {}
max_shapes: Dict[str, tuple] = {}
for idx, batch in enumerate(dataloader):
ids, x, y = batch
if idx == 0:
init_counters_and_shapes(x, counters, min_shapes, max_shapes)
for k, v in x.items():
shape = v.shape
counters[k][shape] += 1
min_shapes[k] = tuple(min(a, b) for a, b in zip(min_shapes[k], shape))
max_shapes[k] = tuple(max(a, b) for a, b in zip(max_shapes[k], shape))
opt_shapes: Dict[str, tuple] = {}
for k, v in counters.items():
opt_shapes[k] = v.most_common(1)[0][0]
shapes = {}
for k in opt_shapes.keys(): # same keys in min_shapes and max_shapes
shapes[k] = ShapeSpec(
min=(1,) + min_shapes[k][1:],
max=(max_batch_size,) + max_shapes[k][1:],
opt=(max_batch_size,) + opt_shapes[k][1:],
)
return shapes
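if __name__ == "__main__":
    # A tiny synthetic example (names and shapes are assumptions): a dataloader
    # here is any iterable of (ids, x, y) triples of dicts of numpy arrays.
    import numpy as np

    def _toy_dataloader():
        for batch_size in [2, 4, 4]:
            ids = list(range(batch_size))
            x = {"input__0": np.zeros((batch_size, 16), dtype=np.float32)}
            y = {"output__0": np.zeros((batch_size, 1), dtype=np.float32)}
            yield ids, x, y

    print(get_input_shapes(_toy_dataloader(), max_batch_size=8))
    print(get_dynamic_axes(_toy_dataloader(), batch_size_dim=0))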
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/bermuda/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from pathlib import Path
from typing import Dict, NamedTuple, Optional, Union
import numpy as np
# pytype: disable=import-error
try:
import pycuda.autoinit
import pycuda.driver as cuda
except Exception as e:
logging.getLogger(__name__).warning(f"Problems with importing pycuda package; {e}")
# pytype: enable=import-error
import tensorrt as trt # pytype: disable=import-error
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, Format, Model, TensorSpec
from ..extensions import loaders, runners
LOGGER = logging.getLogger(__name__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
# documentation:
# https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/index.html
# https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#python_samples_section
_NP_DTYPE2TRT_DTYPE = {
np.dtype("float32"): trt.DataType.FLOAT,
np.dtype("float16"): trt.DataType.HALF,
np.dtype("int8"): trt.DataType.INT8,
np.dtype("int32"): trt.DataType.INT32,
np.dtype("bool"): trt.DataType.BOOL,
}
class TensorRTLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
model_path = Path(model_path)
LOGGER.debug(f"Loading TensorRT engine from {model_path}")
engine = self._load_engine(model_path)
if engine is None:
LOGGER.debug("Unable to load engine without plugins. Loading plugins.")
trt.init_libnvinfer_plugins(logger=TRT_LOGGER, namespace="")
LOGGER.debug(f"Loading TensorRT engine with plugins from {model_path}")
engine = self._load_engine(model_path)
if engine is None:
raise RuntimeError(f"Could not load ICudaEngine from {model_path}")
inputs = {}
outputs = {}
for binding_idx in range(engine.num_bindings):
name = engine.get_binding_name(binding_idx)
is_input = engine.binding_is_input(binding_idx)
dtype = np.dtype(trt.nptype(engine.get_binding_dtype(binding_idx))).name
shape = engine.get_binding_shape(binding_idx)
if is_input:
inputs[name] = TensorSpec(name, dtype, shape)
else:
outputs[name] = TensorSpec(name, dtype, shape)
return Model(engine, None, inputs, outputs)
def _load_engine(self, model_path: Path):
with model_path.open("rb") as fh, trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(fh.read())
return engine
class TRTBuffers(NamedTuple):
x_host: Optional[Dict[str, object]]
x_dev: Dict[str, object]
y_pred_host: Dict[str, object]
y_pred_dev: Dict[str, object]
class TensorRTRunner(BaseRunner):
def __init__(self):
pass
def init_inference(self, model: Model):
return TensorRTRunnerSession(model=model)
class TensorRTRunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, trt.ICudaEngine)
self._model = model
self._has_dynamic_shapes = None
self._context = None
self._engine: trt.ICudaEngine = self._model.handle
self._cuda_context = pycuda.autoinit.context
self._input_names = None
self._output_names = None
self._buffers = None
def __enter__(self):
self._context = self._engine.create_execution_context()
self._context.__enter__()
self._input_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if self._engine.binding_is_input(idx)
]
self._output_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if not self._engine.binding_is_input(idx)
]
# all_binding_shapes_specified is True for models without dynamic shapes
# so initially this variable is False for models with dynamic shapes
self._has_dynamic_shapes = not self._context.all_binding_shapes_specified
return self
def __exit__(self, exc_type, exc_value, traceback):
self._context.__exit__(exc_type, exc_value, traceback)
self._input_names = None
self._output_names = None
        # TODO: are CUDA buffers deallocated automatically?
self._buffers = None
def __call__(self, x):
buffers = self._prepare_buffers_if_needed(x)
bindings = self._update_bindings(buffers)
for name in self._input_names:
cuda.memcpy_htod(buffers.x_dev[name], buffers.x_host[name])
self._cuda_context.push()
self._context.execute_v2(bindings=bindings)
self._cuda_context.pop()
for name in self._output_names:
cuda.memcpy_dtoh(buffers.y_pred_host[name], buffers.y_pred_dev[name])
return buffers.y_pred_host
def _update_bindings(self, buffers: TRTBuffers):
bindings = [None] * self._engine.num_bindings
for name in buffers.y_pred_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.y_pred_dev[name]
for name in buffers.x_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.x_dev[name]
return bindings
def _set_dynamic_input_shapes(self, x_host):
def _is_shape_dynamic(input_shape):
return any([dim is None or dim == -1 for dim in input_shape])
for name in self._input_names:
bindings_idx = self._engine[name]
data_shape = x_host[name].shape # pytype: disable=attribute-error
if self._engine.is_shape_binding(bindings_idx):
input_shape = self._context.get_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_shape_input(bindings_idx, data_shape)
else:
input_shape = self._engine.get_binding_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_binding_shape(bindings_idx, data_shape)
assert self._context.all_binding_shapes_specified and self._context.all_shape_inputs_specified
def _prepare_buffers_if_needed(self, x_host: Dict[str, object]):
# pytype: disable=attribute-error
new_batch_size = list(x_host.values())[0].shape[0]
current_batch_size = list(self._buffers.y_pred_host.values())[0].shape[0] if self._buffers else 0
# pytype: enable=attribute-error
if self._has_dynamic_shapes or new_batch_size != current_batch_size:
            # TODO: are CUDA buffers deallocated automatically?
self._set_dynamic_input_shapes(x_host)
y_pred_host = {}
for name in self._output_names:
shape = self._context.get_binding_shape(self._engine[name])
binding_idx: int = self._engine[name]
dtype_from_trt_binding = np.dtype(trt.nptype(self._engine.get_binding_dtype(binding_idx)))
dtype_from_model_spec = np.dtype(self._model.outputs[name].dtype)
assert dtype_from_model_spec == dtype_from_trt_binding
y_pred_host[name] = np.zeros(shape, dtype=dtype_from_model_spec)
y_pred_dev = {name: cuda.mem_alloc(data.nbytes) for name, data in y_pred_host.items()}
# cast host input into binding dtype
def _cast_input(name, data):
binding_idx: int = self._engine[name]
np_dtype = trt.nptype(self._engine.get_binding_dtype(binding_idx))
return data.astype(np_dtype)
x_host = {name: _cast_input(name, host_input) for name, host_input in x_host.items()}
x_dev = {
name: cuda.mem_alloc(host_input.nbytes)
for name, host_input in x_host.items()
if name in self._input_names # pytype: disable=attribute-error
}
self._buffers = TRTBuffers(None, x_dev, y_pred_host, y_pred_dev)
return self._buffers._replace(x_host=x_host)
if "pycuda.driver" in sys.modules:
loaders.register_extension(Format.TRT.value, TensorRTLoader)
runners.register_extension(Format.TRT.value, TensorRTRunner)
else:
LOGGER.warning("Do not register TensorRT extension due problems with importing pycuda.driver package.")
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/bermuda/tensorrt.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import typing
from collections import Counter
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
import torch # pytype: disable=import-error
import yaml
from model_navigator.model import ModelSignatureConfig
from model_navigator.tensor import TensorSpec
from model_navigator.utils.config import YamlConfigFile
from ..core import (
GET_MODEL_FN_NAME,
BaseLoader,
BaseRunner,
BaseRunnerSession,
BaseSaver,
Format,
Model,
Precision,
load_from_file,
)
from ..extensions import loaders, runners, savers
from .utils import get_dynamic_axes, get_shapes_with_dynamic_axes
LOGGER = logging.getLogger(__name__)
def get_sample_input(dataloader, device):
for batch in dataloader:
_, x, _ = batch
break
if isinstance(x, dict):
sample_input = list(x.values())
elif isinstance(x, list):
sample_input = x
else:
raise TypeError("The first element (x) of batch returned by dataloader must be a list or a dict")
for idx, s in enumerate(sample_input):
sample_input[idx] = torch.from_numpy(s).to(device)
return tuple(sample_input)
def get_model_device(torch_model):
if next(torch_model.parameters()).is_cuda:
return "cuda"
else:
return "cpu"
def infer_model_precision(model):
counter = Counter()
for param in model.parameters():
counter[param.dtype] += 1
if counter[torch.float16] > 0:
return Precision.FP16
else:
return Precision.FP32
def _get_tensor_dtypes(dataloader, precision):
def _get_dtypes(t):
def _get_dtype(v):
dtype = str(v.dtype)
if dtype == "float64":
dtype = "float32"
if precision == Precision.FP16 and dtype == "float32":
dtype = "float16"
return np.dtype(dtype)
return {k: _get_dtype(v) for k, v in t.items()}
batch = next(dataloader)
_, x, y = batch
input_dtypes = _get_dtypes(x)
output_dtypes = _get_dtypes(y)
return input_dtypes, output_dtypes
### TODO assumption: the floating-point input type has the same precision as the model
def _get_model_signature(
inputs_names: typing.List[str],
outputs_names: typing.List[str],
precision,
dataloader_fn,
batch_size_dim: typing.Optional[int] = None,
):
dataloader = dataloader_fn()
input_dtypes, output_dtypes = _get_tensor_dtypes(dataloader, precision)
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim=batch_size_dim)
inputs = {
name: TensorSpec(name=name, dtype=input_dtypes[name], shape=tuple(input_shapes[name])) for name in inputs_names
}
outputs = {
name: TensorSpec(name=name, dtype=output_dtypes[name], shape=tuple(output_shapes[name]))
for name in outputs_names
}
return ModelSignatureConfig(inputs, outputs)
class PyTorchModelLoader(BaseLoader):
required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME
def __init__(self, **kwargs):
self._model_args = kwargs
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME)
model, io_names_dict = get_model(**self._model_args)
dataloader_fn = kwargs.get("dataloader_fn", None)
output_type = kwargs.get("output_type", None)
precision = infer_model_precision(model)
        batch_axis = getattr(model, "bermuda_batch_axis", 0)  # by default models support batching; batch_axis=0
model_signature = _get_model_signature(
inputs_names=io_names_dict["inputs"],
outputs_names=io_names_dict["outputs"],
precision=precision,
dataloader_fn=dataloader_fn,
batch_size_dim=batch_axis,
)
model = Model(handle=model, precision=precision, inputs=model_signature.inputs, outputs=model_signature.outputs)
if output_type == Format.TS_TRACE.value:
return self._trace(model, dataloader_fn)
elif output_type == Format.TS_SCRIPT.value:
return self._script(model)
elif output_type == Format.ONNX.value:
return model
else:
raise ValueError(f"Not supported PyTorch format: {output_type}")
def _trace(self, model: Model, dataloader_fn) -> Model:
device = get_model_device(model.handle)
dummy_input = get_sample_input(dataloader_fn(), device)
traced_model = torch.jit.trace_module(model.handle, {"forward": dummy_input})
return Model(traced_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs)
def _script(self, model: Model) -> Model:
scripted_model = torch.jit.script(model.handle)
return Model(scripted_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs)
class TorchScriptLoader(BaseLoader):
def __init__(self, tensor_names_path: str = None, **kwargs):
self._model_args = kwargs
self._io_spec = None
if tensor_names_path is not None:
with Path(tensor_names_path).open("r") as fh:
tensor_infos = yaml.load(fh, Loader=yaml.SafeLoader)
self._io_spec = ModelSignatureConfig(tensor_infos["inputs"], tensor_infos["outputs"])
def load(self, model_path: Union[str, Path], **_) -> Model:
if not isinstance(model_path, Path):
model_path = Path(model_path)
model = torch.jit.load(model_path.as_posix())
precision = infer_model_precision(model)
io_spec = self._io_spec
if not io_spec:
yaml_path = model_path.parent / f"{model_path.name}.yaml"
if not yaml_path.is_file():
raise ValueError(
f"If `--tensor-names-path is not provided, "
f"TorchScript model loader expects file {yaml_path} with tensor information."
)
with yaml_path.open("r") as fh:
tensor_info = yaml.load(fh, Loader=yaml.SafeLoader)
io_spec = ModelSignatureConfig(tensor_info["inputs"], tensor_info["outputs"])
return Model(handle=model, precision=precision, inputs=io_spec.inputs, outputs=io_spec.outputs)
class PYT2ONNXSaver(BaseSaver):
def __init__(self, onnx_opset: int = None):
self._onnx_opset = onnx_opset
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance(
model.handle, torch.nn.Module
), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Converter aborted."
dynamic_axes = get_dynamic_axes(dataloader_fn(), batch_size_dim=0)
device = get_model_device(model.handle)
dummy_input = get_sample_input(dataloader_fn(), device)
with torch.no_grad():
torch.onnx.export(
model.handle,
dummy_input,
model_path,
do_constant_folding=True,
input_names=list(model.inputs),
output_names=list(model.outputs),
dynamic_axes=dynamic_axes,
opset_version=self._onnx_opset,
)
class TorchScriptSaver(BaseSaver):
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
if not isinstance(model_path, Path):
model_path = Path(model_path)
if isinstance(model.handle, torch.jit.ScriptModule):
torch.jit.save(model.handle, model_path.as_posix())
else:
raise RuntimeError("The model must be of type 'torch.jit.ScriptModule'. Saving aborted.")
signature_config = ModelSignatureConfig(inputs=model.inputs, outputs=model.outputs)
annotation_path = model_path.parent / f"{model_path.name}.yaml"
with YamlConfigFile(annotation_path) as config_file:
config_file.save_config(signature_config)
class PyTorchRunner(BaseRunner):
def __init__(self):
pass
def init_inference(self, model: Model):
return PyTorchRunnerSession(model=model)
class PyTorchRunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance(
model.handle, torch.nn.Module
), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Runner aborted."
self._model = model
self._output_names = None
def __enter__(self):
self._output_names = list(self._model.outputs)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._output_names = None
self._model = None
def __call__(self, x: Dict[str, object]):
with torch.no_grad():
feed_list = [torch.from_numpy(v).cuda() for k, v in x.items()]
y_pred = self._model.handle(*feed_list)
if isinstance(y_pred, torch.Tensor):
y_pred = (y_pred,)
y_pred = [t.cpu().numpy() for t in y_pred]
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
loaders.register_extension(Format.PYT.value, PyTorchModelLoader)
loaders.register_extension(Format.TS_TRACE.value, TorchScriptLoader)
loaders.register_extension(Format.TS_SCRIPT.value, TorchScriptLoader)
savers.register_extension(Format.TS_SCRIPT.value, TorchScriptSaver)
savers.register_extension(Format.TS_TRACE.value, TorchScriptSaver)
savers.register_extension(f"{Format.PYT.value}--{Format.ONNX.value}", PYT2ONNXSaver)
runners.register_extension(Format.PYT.value, PyTorchRunner)
runners.register_extension(Format.TS_SCRIPT.value, PyTorchRunner)
runners.register_extension(Format.TS_TRACE.value, PyTorchRunner)
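if __name__ == "__main__":
    # A self-contained hedged sketch (the module and tensor names are
    # illustrative assumptions): script a tiny parameterless module, wrap it in
    # the toolkit's Model tuple and run one batch. Assumes a CUDA device, since
    # PyTorchRunnerSession moves inputs to .cuda().
    class _Toy(torch.nn.Module):
        def forward(self, x):
            return x * 2

    scripted = torch.jit.script(_Toy())
    in_spec = TensorSpec(name="input__0", dtype=np.dtype("float32"), shape=(-1, 4))
    out_spec = TensorSpec(name="output__0", dtype=np.dtype("float32"), shape=(-1, 4))
    toy_model = Model(
        handle=scripted,
        precision=Precision.FP32,
        inputs={"input__0": in_spec},
        outputs={"output__0": out_spec},
    )
    with PyTorchRunner().init_inference(toy_model) as session:
        print(session({"input__0": np.ones((2, 4), dtype=np.float32)})["output__0"])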
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/bermuda/pyt.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .model_analyzer import ModelAnalyzer, ModelAnalyzerMode, ModelAnalyzerReportMode # noqa: F401
from .model_analyzer_config import ModelAnalyzerConfig # noqa: F401
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/model_analyzer/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .exceptions import ModelAnalyzerException
class ModelAnalyzerConfig:
"""
A config class to set arguments to the Model Analyzer.
An argument set to None will use the default.
"""
model_analyzer_args = [
"config-file",
]
input_to_options = [
"config-file",
]
def __init__(self):
# Args will be a dict with the string representation as key
self._args = {k: None for k in self.model_analyzer_args}
self._options = {
"-f": "config.yaml",
}
self._input_to_options = {
"config-file": "-f",
}
def to_cli_string(self):
"""
        Utility function to convert a config into a
        string of CLI arguments for the model analyzer.
Returns
-------
str
the command consisting of all set arguments to
the model analyzer.
            e.g. '-f config.yaml'
"""
        # single-dashed options first, then main args
args = [f"{k} {v}" for k, v in self._options.items() if v]
args += [f"--{k}={v}" for k, v in self._args.items() if v]
return " ".join(args)
@classmethod
def allowed_keys(cls):
"""
Returns
-------
list of str
The keys that are allowed to be
passed into model_analyzer
"""
return list(cls.model_analyzer_args) + list(cls.input_to_options)
def __getitem__(self, key):
"""
Gets an arguments value in config
Parameters
----------
key : str
The name of the argument to the model analyzer
Returns
-------
The value that the argument is set to in this config
"""
if key in self._args:
return self._args[key]
elif key in self._input_to_options:
return self._options[self._input_to_options[key]]
else:
raise ModelAnalyzerException(f"'{key}' Key not found in config")
def __setitem__(self, key, value):
"""
Sets an arguments value in config
after checking if defined/supported.
Parameters
----------
key : str
The name of the argument to the model analyzer
value : (any)
The value to which the argument is being set
Raises
------
        ModelAnalyzerException
If key is unsupported or undefined in the
config class
"""
if key in self._args:
self._args[key] = value
elif key in self._input_to_options:
self._options[self._input_to_options[key]] = value
else:
raise ModelAnalyzerException(f"The argument '{key}' to the Model Analyzer is not supported.")
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/model_analyzer/model_analyzer_config.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ModelAnalyzerException(Exception):
def __init__(self, message: str):
self._message = message
def __str__(self):
"""
Get the exception string representation.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
@property
def message(self):
"""
Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/model_analyzer/exceptions.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import subprocess
from subprocess import CalledProcessError
from .exceptions import ModelAnalyzerException
SERVER_OUTPUT_TIMEOUT_SECS = 5
LOGGER = logging.getLogger(__name__)
class ModelAnalyzerMode:
PROFILE = "profile"
ANALYZE = "analyze"
REPORT = "report"
class ModelAnalyzerReportMode:
OFFLINE = "offline"
ONLINE = "online"
class ModelAnalyzer:
"""
    Concrete implementation of the Model Analyzer interface that runs
    the analyzer locally as a subprocess.
"""
_analyzer_path = "model-analyzer"
def __init__(self, config):
"""
Parameters
----------
        config : ModelAnalyzerConfig
            the config object containing arguments for this analyzer instance
"""
self._analyzer_process = None
self._analyzer_config = config
self._log = None
def run(self, mode: str, verbose: bool = False, quiet: bool = False, report_mode: str = None):
"""
Starts the model analyzer locally
"""
if self._analyzer_path:
cmd = [self._analyzer_path]
if verbose:
cmd += ["--verbose"]
if quiet:
cmd += ["--quiet"]
if report_mode:
cmd += ["-m"]
cmd += [report_mode]
cmd += [mode]
cmd += self._analyzer_config.to_cli_string().split()
LOGGER.debug(f"Model Analyze command: {cmd}")
try:
subprocess.run(cmd, check=True, start_new_session=True)
except CalledProcessError as e:
raise ModelAnalyzerException(
f"Running {self._analyzer_path} with {e.cmd} failed with"
f" exit status {e.returncode} : {e.output}"
)
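# Illustrative wiring (a sketch; the stub config below stands in for a real
# ModelAnalyzerConfig so the example is self-contained, and running it
# requires the `model-analyzer` binary to be installed):
if __name__ == "__main__":
    class _StubConfig:
        def to_cli_string(self):
            return "-f config.yaml"  # hypothetical config file
    analyzer = ModelAnalyzer(config=_StubConfig())
    analyzer.run(mode=ModelAnalyzerMode.PROFILE, verbose=True)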
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/model_analyzer/model_analyzer.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .perf_analyzer import PerfAnalyzer # noqa: F401
from .perf_config import PerfAnalyzerConfig # noqa: F401
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/perf_analyzer/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict
from .exceptions import PerfAnalyzerException
class PerfAnalyzerConfig:
"""
A config class to set arguments to the perf_analyzer.
An argument set to None will use the perf_analyzer's default.
"""
perf_analyzer_args = [
"async",
"sync",
"measurement-interval",
"measurement-mode",
"measurement-request-count",
"concurrency-range",
"request-rate-range",
"request-distribution",
"request-intervals",
"binary-search",
"num-of-sequence",
"latency-threshold",
"max-threads",
"stability-percentage",
"max-trials",
"percentile",
"input-data",
"shared-memory",
"output-shared-memory-size",
"sequence-length",
"string-length",
"string-data",
]
perf_analyzer_multiple_args = [
"shape",
]
input_to_options = [
"model-name",
"model-version",
"batch-size",
"url",
"protocol",
"latency-report-file",
"streaming",
]
input_to_verbose = ["verbose", "extra-verbose"]
def __init__(self):
"""
Construct a PerfAnalyzerConfig
"""
self._args = {k: None for k in self.perf_analyzer_args}
self._multiple_args = {k: [] for k in self.perf_analyzer_multiple_args}
self._options = {
"-m": None,
"-x": None,
"-b": None,
"-u": None,
"-i": None,
"-f": None,
"-H": None,
"-c": None,
"-t": None,
}
self._verbose = {"-v": None, "-v -v": None}
self._input_to_options = {
"model-name": "-m",
"model-version": "-x",
"batch-size": "-b",
"url": "-u",
"protocol": "-i",
"latency-report-file": "-f",
"streaming": "-H",
"concurrency": "-c",
"threads": "-t",
}
self._input_to_verbose = {"verbose": "-v", "extra-verbose": "-v -v"}
@classmethod
def allowed_keys(cls):
"""
Returns
-------
list of str
The keys that are allowed to be
passed into perf_analyzer
"""
return (
list(cls.perf_analyzer_args)
+ list(cls.perf_analyzer_multiple_args)
+ list(cls.input_to_options)
+ list(cls.input_to_verbose)
)
def update_config(self, params=None):
"""
Allows setting values from a
params dict
Parameters
----------
params: dict
keys are allowed args to perf_analyzer
"""
if params:
for key in params:
self[key] = params[key]
def to_cli_string(self):
"""
Utility function to convert a config into a
string of arguments to the perf_analyzer with CLI.
Returns
-------
str
cli command string consisting of all arguments
to the perf_analyzer set in the config, without
the executable name.
"""
# single dashed options, then verbose flags, then main args
args = [f"{k} {v}" for k, v in self._options.items() if v]
args += [k for k, v in self._verbose.items() if v]
args += [f"--{k}={v}" for k, v in self._args.items() if v]
for k, v in self._multiple_args.items():
for item in v:
args.append(f"--{k}={item}")
return " ".join(args)
def __getitem__(self, key: str):
"""
Gets an arguments value in config
Parameters
----------
key : str
The name of the argument to the perf_analyzer
Returns
-------
The value that the argument is set to in this config
Raises
------
        PerfAnalyzerException
If argument not found in the config
"""
if key in self._args:
return self._args[key]
elif key in self._multiple_args:
return self._multiple_args[key]
elif key in self._input_to_options:
return self._options[self._input_to_options[key]]
elif key in self._input_to_verbose:
return self._verbose[self._input_to_verbose[key]]
else:
raise PerfAnalyzerException(f"'{key}' Key not found in config")
def __setitem__(self, key: str, value: Any):
"""
Sets an arguments value in config
after checking if defined/supported.
Parameters
----------
key : str
The name of the argument to the perf_analyzer
value : (any)
The value to which the argument is being set
Raises
------
        PerfAnalyzerException
If key is unsupported or undefined in the
config class
"""
if key in self._args:
self._args[key] = value
elif key in self._multiple_args:
self._multiple_args[key].append(value)
elif key in self._input_to_options:
self._options[self._input_to_options[key]] = value
elif key in self._input_to_verbose:
self._verbose[self._input_to_verbose[key]] = value
else:
raise PerfAnalyzerException(
f"The argument '{key}' to the perf_analyzer " "is not supported by the model analyzer."
)
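# Illustrative usage (a minimal sketch; the model name and input shape are
# assumptions). Keys listed in `perf_analyzer_multiple_args` append a new
# value on every assignment instead of overwriting:
if __name__ == "__main__":
    config = PerfAnalyzerConfig()
    config.update_config({"model-name": "my_model", "batch-size": 8, "verbose": True})
    config["shape"] = "input__0:3,224,224"  # hypothetical input shape
    print(config.to_cli_string())  # e.g. "-m my_model -b 8 -v --shape=input__0:3,224,224"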
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/perf_analyzer/perf_config.py |
class PerfAnalyzerException(Exception):
def __init__(self, message: str):
self._message = message
def __str__(self):
"""
Get the exception string representation.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
@property
def message(self):
"""
Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/perf_analyzer/exceptions.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
from subprocess import PIPE, CalledProcessError, Popen
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .exceptions import PerfAnalyzerException
MAX_INTERVAL_CHANGES = 10
COUNT_INTERVAL_DELTA = 50
TIME_INTERVAL_DELTA = 2000
LOGGER = logging.getLogger(__name__)
class PerfAnalyzer:
"""
This class provides an interface for running workloads
with perf_analyzer.
"""
def __init__(self, config):
"""
Parameters
----------
config : PerfAnalyzerConfig
keys are names of arguments to perf_analyzer,
values are their values.
"""
self.bin_path = "perf_analyzer"
self._config = config
self._output = str()
def run(self):
"""
Runs the perf analyzer with the
initialized configuration
Returns
-------
List of Records
List of the metrics obtained from this
run of perf_analyzer
Raises
------
PerfAnalyzerException
If subprocess throws CalledProcessError
"""
for _ in range(MAX_INTERVAL_CHANGES):
command = [self.bin_path]
command += self._config.to_cli_string().replace("=", " ").split()
LOGGER.debug(f"Perf Analyze command: {command}")
try:
process = Popen(command, start_new_session=True, stdout=PIPE, encoding="utf-8")
streamed_output = ""
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
streamed_output += output
print(output.rstrip())
self._output += streamed_output
result = process.poll()
if result != 0:
raise CalledProcessError(returncode=result, cmd=command, output=streamed_output)
return
except CalledProcessError as e:
                if self._failed_with_measurement_interval(e.output):
if self._config["measurement-mode"] is None or self._config["measurement-mode"] == "count_windows":
self._increase_request_count()
else:
self._increase_time_interval()
else:
raise PerfAnalyzerException(
f"Running perf_analyzer with {e.cmd} failed with" f" exit status {e.returncode} : {e.output}"
)
raise PerfAnalyzerException(f"Ran perf_analyzer {MAX_INTERVAL_CHANGES} times, but no valid requests recorded.")
def output(self):
"""
Returns
-------
The stdout output of the
last perf_analyzer run
"""
if self._output:
return self._output
raise PerfAnalyzerException("Attempted to get perf_analyzer output" "without calling run first.")
    def _failed_with_measurement_interval(self, output: str):
        # str.find returns -1 when the pattern is absent, so each result must
        # be compared against -1 before combining; `find(a) or find(b)` would
        # short-circuit on the truthy -1 and miss the second pattern.
        return (
            output.find("Failed to obtain stable measurement") != -1
            or output.find("Please use a larger time window") != -1
        )
def _increase_request_count(self):
self._config["measurement-request-count"] += COUNT_INTERVAL_DELTA
LOGGER.debug(
"perf_analyzer's measurement request count is too small, "
f"increased to {self._config['measurement-request-count']}."
)
def _increase_time_interval(self):
self._config["measurement-interval"] += TIME_INTERVAL_DELTA
LOGGER.debug(
"perf_analyzer's measurement window is too small, "
f"increased to {self._config['measurement-interval']} ms."
)
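# Illustrative wiring (a sketch; assumes the `perf_analyzer` binary is
# installed, a Triton endpoint is reachable at the hypothetical URL below,
# and the module is executed as part of its package so the relative import
# resolves, e.g. with `python -m`):
if __name__ == "__main__":
    from .perf_config import PerfAnalyzerConfig
    config = PerfAnalyzerConfig()
    config.update_config({"model-name": "my_model", "url": "localhost:8001", "protocol": "grpc"})
    analyzer = PerfAnalyzer(config=config)
    analyzer.run()
    print(analyzer.output())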
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/perf_analyzer/perf_analyzer.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Any, Dict, List, Union
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .task import DataObject
class Configuration(DataObject):
"""
    Configuration object - handles data for a single experiment
"""
def __init__(
self,
precision: str,
format: str,
batch_size: Union[str, List],
accelerator: str,
triton_gpu_engine_count: int,
triton_max_queue_delay: int,
capture_cuda_graph: int,
checkpoint_variant: str,
triton_preferred_batch_sizes: Union[str, List],
**kwargs: Any,
):
"""
Args:
precision: Target model precision
format: Target conversion format
batch_size: Batch sizes to evaluate
accelerator: Triton Backend Accelerator
triton_gpu_engine_count: Number of model instances
triton_max_queue_delay: Maximal queue delay
capture_cuda_graph: Triton Capture CUDA Graph optimization for tensorrt
checkpoint_variant: Checkpoint used for configuration
triton_preferred_batch_sizes: Preferred batch sizes
**kwargs: Additional model arguments
"""
        if isinstance(batch_size, str):
            batch_size = [int(item) for item in batch_size.split(",")]
        if isinstance(triton_preferred_batch_sizes, str):
            triton_preferred_batch_sizes = [int(item) for item in triton_preferred_batch_sizes.split(" ")]
self.precision = precision
self.format = format
self.batch_size = sorted(batch_size)
self.accelerator = accelerator
self.triton_gpu_engine_count = triton_gpu_engine_count
self.triton_max_queue_delay = triton_max_queue_delay
self.capture_cuda_graph = capture_cuda_graph
self.max_batch_size = max(self.batch_size)
self.checkpoint_variant = checkpoint_variant
self.triton_preferred_batch_sizes = " ".join(map(lambda i: str(i), sorted(triton_preferred_batch_sizes)))
for key, value in kwargs.items():
self.__setattr__(key, value)
@property
def parameters(self) -> Dict:
"""
Return values stored in configuration
Returns:
Dictionary with configuration parameters
"""
return self.__dict__
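# Illustrative usage (a minimal sketch with made-up parameter values;
# comma-separated batch sizes are parsed, sorted, and used to derive
# `max_batch_size`):
if __name__ == "__main__":
    configuration = Configuration(
        precision="fp16",
        format="onnx",
        batch_size="1,8,32",
        accelerator="trt",
        triton_gpu_engine_count=1,
        triton_max_queue_delay=100,
        capture_cuda_graph=0,
        checkpoint_variant="base",
        triton_preferred_batch_sizes="8 16",
    )
    print(configuration.parameters)  # includes max_batch_size=32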
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/configuration.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from datetime import datetime
from typing import Dict, List
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .configuration import Configuration
from .downloader import download
from .experiment import Experiment, Stage
from .logger import LOGGER
from .maintainer import Maintainer
from .pipeline import Pipeline
from .stages import ResultsType, TritonPerformanceOfflineStage, TritonPerformanceOnlineStage
from .task import Checkpoint, Dataset, SystemInfo, Task
from .triton import Triton
from .utils import clean_directory
class Preparer(abc.ABC):
"""
Runner preparer object.
"""
@abc.abstractmethod
def exec(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
maintainer: Maintainer,
triton: Triton,
logs_dir: pathlib.Path,
):
pass
class ExperimentPreparer(Preparer):
"""
Experiment runner preparer object.
"""
def exec(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
maintainer: Maintainer,
triton: Triton,
logs_dir: pathlib.Path,
):
LOGGER.info("Preparing Triton container image")
triton_container_image = self._prepare_triton_container_image(config, maintainer, triton)
LOGGER.info("Initialize task")
task = self._initialize_task(
workspace=workspace,
config=config,
pipeline=pipeline,
triton_container_image=triton_container_image,
logs_dir=logs_dir,
)
LOGGER.info("Preparing directories")
self._create_dirs(workspace, task)
LOGGER.info("Clean previous run artifacts directories")
self._clean_previous_run_artifacts(workspace, task)
LOGGER.info("Downloading checkpoints")
self._download_checkpoints(task)
return task
def _create_dirs(self, workspace: pathlib.Path, task: Task) -> None:
"""
Create directories used to store artifacts and final results
Returns:
None
"""
for directory in [task.results_dir, task.logs_dir, task.checkpoints_dir]:
directory_path = workspace / directory
directory_path.mkdir(parents=True, exist_ok=True)
LOGGER.info(f"Directory {directory} created.")
def _clean_previous_run_artifacts(self, workspace: pathlib.Path, task: Task) -> None:
"""
Clean logs from previous run
Returns:
None
"""
for directory in [
task.logs_dir,
task.results_dir,
]:
directory_path = workspace / directory
clean_directory(directory_path)
LOGGER.info(f"Location {directory} cleaned.")
def _prepare_triton_container_image(self, config: Config, maintainer: Maintainer, triton: Triton) -> str:
"""
Prepare Triton Container Image based on provided configuration
Returns:
Name of container image to use in process
"""
if not config.triton_dockerfile:
image_name = triton.container_image(config.container_version)
LOGGER.info(f"Using official Triton container image: {image_name}.")
return image_name
if config.triton_container_image:
LOGGER.info(f"Using provided Triton Container Image: {config.triton_container_image}")
return config.triton_container_image
normalized_model_name = config.model_name.lower().replace("_", "-")
image_name = f"tritonserver-{normalized_model_name}:latest"
LOGGER.info(f"Building Triton Container Image: {image_name}")
maintainer.build_image(
image_name=image_name,
image_file_path=pathlib.Path(config.triton_dockerfile),
build_args={"FROM_IMAGE": triton.container_image(container_version=config.container_version)},
)
return image_name
def _download_checkpoints(self, task: Task) -> None:
"""
Download checkpoints
"""
for variant, checkpoint in task.checkpoints.items():
checkpoint_url = checkpoint.url
download_path = checkpoint.path
if download_path.is_dir():
LOGGER.info(f"Checkpoint {download_path.name} already downloaded.")
continue
if not checkpoint_url:
LOGGER.warning(
f"Checkpoint {variant} url is not provided."
"\nIf you want to use that checkpoint please train the model locally"
f"\nand copy to {download_path} directory"
)
continue
download(checkpoint_url, download_path)
def _initialize_task(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
triton_container_image: str,
logs_dir: pathlib.Path,
) -> Task:
"""
Initialize task object
Args:
workspace: Path to workspace where artifacts are stored
config: Config object
pipeline: Pipeline object
            triton_container_image: Triton Inference Server container image used for tests
            logs_dir: Directory where runner logs are stored
Returns:
Task object
"""
datasets = {}
for dataset in config.datasets:
datasets[dataset.name] = Dataset(name=dataset.name)
checkpoints = {}
for checkpoint in config.checkpoints:
download_path = workspace / Task.checkpoints_dir / checkpoint.name
checkpoints[checkpoint.name] = Checkpoint(name=checkpoint.name, url=checkpoint.url, path=download_path)
results_types = self._task_results_types(pipeline=pipeline)
stages = dict()
for stage in pipeline.stages():
stages[stage.label] = {"result_path": stage.result_path, "result_type": stage.result_type}
experiments = list()
for idx, configuration in enumerate(config.configurations, start=1):
experiment = self._prepare_experiment(
idx=idx,
configuration=configuration,
results_types=results_types,
stages=stages,
)
experiments.append(experiment)
system_info = SystemInfo.from_host()
task = Task(
model_name=config.model_name,
framework=config.framework,
checkpoints=checkpoints,
datasets=datasets,
datasets_dir=config.datasets_dir,
experiments=experiments,
container_version=config.container_version,
system_info=system_info,
triton_container_image=triton_container_image,
triton_custom_operations=config.triton_custom_operations,
triton_load_model_method=config.triton_load_model_method,
started_at=int(datetime.utcnow().timestamp()),
logs_dir=logs_dir,
)
return task
def _task_results_types(self, pipeline: Pipeline) -> List[str]:
"""
Types of results generated as part of task
Returns:
List of result types
"""
results = list()
for stage in pipeline.stages():
if TritonPerformanceOfflineStage.label == stage.label:
results.append(ResultsType.TRITON_PERFORMANCE_OFFLINE)
continue
if TritonPerformanceOnlineStage.label == stage.label:
results.append(ResultsType.TRITON_PERFORMANCE_ONLINE)
continue
return results
def _prepare_experiment(
self,
idx: int,
configuration: Configuration,
results_types: List[str],
stages: Dict,
) -> Experiment:
"""
Prepare experiments data
Args:
idx: Experiment index
configuration: Configuration object
results_types: Results types stored in experiment
stages: Stages executed as part of experiment
Returns:
Experiment object
"""
parameters = {key.lower(): value for key, value in configuration.parameters.items()}
results_mapped = dict()
for result_type in results_types:
results_mapped[result_type] = result_type
stages_mapped = dict()
for name, stage_data in stages.items():
stages_mapped[name] = Stage(name=name, **stage_data)
experiment = Experiment(
experiment_id=idx,
parameters=parameters,
stages=stages_mapped,
results=results_mapped,
)
return experiment
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/preparer.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import platform
import subprocess
from datetime import datetime
from typing import Dict, List, Optional, Union
import cpuinfo
import psutil
import yaml
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import CustomDumper, DataObject
from .experiment import Experiment
from .triton import Triton
class GPU(DataObject):
"""
GPU information data object
"""
name: str
driver_version: str
cuda_version: str
memory: str
tdp: str
def __init__(self, name: str, driver_version: str, cuda_version: str, memory: str, tdp: str):
"""
Args:
name: name of GPU
driver_version: version of driver
cuda_version: version of CUDA
memory: size of memory available on GPU [MB]
tdp: Max TDP of GPU unit
"""
self.name = name
self.driver_version = driver_version
self.cuda_version = cuda_version
self.memory = memory
self.tdp = tdp
@staticmethod
def from_dict(data: Dict):
"""
Create GPU object from dictionary
Args:
data: dictionary with GPU data
Returns:
GPU object
"""
return GPU(
name=data["name"],
driver_version=data["driver_version"],
cuda_version=data["cuda_version"],
memory=data["memory"],
tdp=data["tdp"],
)
@staticmethod
def from_host():
"""
Create GPU object from host data
Returns:
GPU object
"""
data = subprocess.check_output(
["nvidia-smi", "--query-gpu=name,driver_version,memory.total,power.max_limit", "--format=csv"]
).decode()
lines = data.split(sep="\n")
device_details = lines[1].split(",")
name = device_details[0].strip()
driver_version = device_details[1].strip()
memory = device_details[2].strip()
tdp = device_details[3].strip()
cuda_version = None
data = subprocess.check_output(["nvidia-smi", "--query"]).decode()
lines = data.split(sep="\n")
for line in lines:
if line.startswith("CUDA Version"):
cuda_version = line.split(":")[1].strip()
break
return GPU(
name=name,
driver_version=driver_version,
cuda_version=cuda_version,
memory=memory,
tdp=tdp,
)
class CPU(DataObject):
"""
CPU details
"""
name: str
physical_cores: int
logical_cores: int
min_frequency: float
max_frequency: float
def __init__(self, name: str, physical_cores: int, logical_cores: int, min_frequency: float, max_frequency: float):
"""
Args:
name: name of CPU unit
physical_cores: number of physical cores available on CPU
logical_cores: number of logical cores available on CPU
min_frequency: minimal clock frequency
max_frequency: maximal clock frequency
"""
self.name = name
self.physical_cores = physical_cores
self.logical_cores = logical_cores
self.min_frequency = min_frequency
self.max_frequency = max_frequency
@staticmethod
def from_host():
"""
Create CPU object from host data
Returns:
CPU object
"""
return CPU(
name=cpuinfo.get_cpu_info()["brand_raw"],
physical_cores=psutil.cpu_count(logical=False),
logical_cores=psutil.cpu_count(logical=True),
min_frequency=psutil.cpu_freq().min,
max_frequency=psutil.cpu_freq().max,
)
class Memory(DataObject):
"""
Memory data object
"""
size: float
def __init__(self, size: float):
"""
Args:
            size: total RAM size in bytes (as reported by psutil.virtual_memory)
"""
self.size = size
@staticmethod
def from_host():
"""
Create Memory object from host data
Returns:
Memory object
"""
svm = psutil.virtual_memory()
return Memory(size=svm.total)
class SystemInfo(DataObject):
"""
System Information data object
"""
system: str
cpu: CPU
memory: Memory
gpu: GPU
def __init__(self, system: str, cpu: CPU, memory: Memory, gpu: GPU):
"""
Args:
system: name of operating system
cpu: CPU info
memory: Memory info
gpu: GPU info
"""
self.system = system
self.cpu = cpu
self.memory = memory
self.gpu = gpu
@staticmethod
def from_host():
"""
Create SystemInfo object from host data
Returns:
SystemInfo object
"""
system = platform.platform()
gpu = GPU.from_host()
memory = Memory.from_host()
cpu = CPU.from_host()
return SystemInfo(system=system, cpu=cpu, gpu=gpu, memory=memory)
class Checkpoint(DataObject):
"""
Checkpoint data object
"""
def __init__(self, name: str, url: str, path: Union[str, pathlib.Path]):
"""
Args:
            name: Name of checkpoint
            url: URL from which the checkpoint can be downloaded
            path: Location of checkpoint on local hardware
"""
self.name = name
self.url = url
self.path = pathlib.Path(path)
class Dataset(DataObject):
"""
Dataset data object
"""
def __init__(self, name: str):
"""
Args:
name: Name of dataset
"""
self.name = name
class Task(DataObject):
"""
Task data object to store build information
"""
model_name: str
framework: str
started_at: int
ended_at: Optional[int]
container_version: str
checkpoints: Dict[str, Checkpoint]
datasets: Dict[str, Dataset]
datasets_dir: Optional[Union[str, pathlib.Path]]
experiments: List[Experiment]
system_info: SystemInfo
triton_container_image: Optional[str]
triton_custom_operations: Optional[str]
filename: str = "task.yaml"
results_dir: str = "results"
checkpoints_dir: str = "checkpoints"
def __init__(
self,
model_name: str,
framework: str,
container_version: str,
checkpoints: Dict,
datasets: Dict,
experiments: List,
system_info: SystemInfo,
started_at: int,
logs_dir: pathlib.Path = pathlib.Path("/var/logs"),
datasets_dir: Optional[Union[str, pathlib.Path]] = None,
ended_at: Optional[int] = None,
triton_container_image: Optional[str] = None,
triton_custom_operations: Optional[str] = None,
triton_load_model_method: str = Triton.LOAD_MODE.EXPLICIT,
):
"""
Args:
model_name: Name of model
framework: Model framework
container_version: Container version used in task
checkpoints: List of checkpoints
datasets: List of datasets
            datasets_dir: Directory where datasets are stored
experiments: List of experiments run as part of task
system_info: information about node on which experiment was executed
started_at: Time when task has started
ended_at: Time when task has ended
triton_container_image: Custom Triton Container Image used for task
triton_custom_operations: Custom operations library path
triton_load_model_method: Method how models are loaded on Triton
"""
self.started_at = started_at
self.ended_at = ended_at
self.model_name = model_name
self.framework = framework
self.container_version = container_version
self.checkpoints = checkpoints
self.datasets = datasets
        self.datasets_dir = pathlib.Path(datasets_dir) if datasets_dir is not None else None
self.experiments = experiments
self.system_info = system_info
self.triton_container_image = triton_container_image
self.triton_custom_operations = triton_custom_operations
self.triton_load_model_method = triton_load_model_method
self.logs_dir = logs_dir
def start(self) -> None:
"""
Update stage execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update stage execution info at end
Returns:
None
"""
self.ended_at = int(datetime.utcnow().timestamp())
def to_file(self, file_path: Union[pathlib.Path, str]):
"""
Store task data to YAML file
Args:
file_path: path to file where task data has to be saved
Returns:
None
"""
task_data = self.to_dict()
with open(file_path, "w") as f:
yaml.dump(task_data, f, Dumper=CustomDumper, width=240, sort_keys=False)
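# Illustrative usage (a sketch; `SystemInfo.from_host` shells out to
# `nvidia-smi`, so it only works on a host with an NVIDIA driver installed):
if __name__ == "__main__":
    system_info = SystemInfo.from_host()
    print(yaml.dump(system_info.to_dict(), Dumper=CustomDumper, sort_keys=False))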
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/task.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
import signal
import sys
from typing import List, Type
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .exceptions import RunnerException
from .executor import Executor
from .finalizer import Finalizer
from .logger import LOGGER, log_format
from .maintainer import Maintainer
from .pipeline import Pipeline
from .preparer import Preparer
from .triton import Triton
class Runner:
"""
    Runner class. Main entry point for performing the task and its experiments.
"""
WORKSPACE = pathlib.Path.cwd()
EXECUTOR_WORKSPACE = WORKSPACE / "runner_workspace"
def __init__(
self,
pipeline: Pipeline,
config: Config,
executor_cls: Type[Executor],
maintainer_cls: Type[Maintainer],
preparer_cls: Type[Preparer],
finalizer_cls: Type[Finalizer],
devices: List[str] = None,
log_level: int = logging.INFO,
):
        self._pipeline = pipeline
        self._config = config
self._preparer = preparer_cls()
self._finalizer = finalizer_cls()
self._devices = devices or ["0"]
self._log_level = log_level
self._logs_dir = self.EXECUTOR_WORKSPACE / "logs"
self._log_file_path = self._logs_dir / "runner.log"
self._maintainer = maintainer_cls()
self._executor = executor_cls(
workspace=self.EXECUTOR_WORKSPACE,
maintainer=self._maintainer,
pipeline=pipeline,
devices=devices,
)
signal.signal(signal.SIGINT, self._catch)
self._logs_dir.mkdir(parents=True, exist_ok=True)
def start(self) -> None:
"""
Start runner
Returns:
None
"""
self._setup_logger()
task = self._preparer.exec(
workspace=self.EXECUTOR_WORKSPACE,
config=self._config,
pipeline=self._pipeline,
logs_dir=self._logs_dir,
maintainer=self._maintainer,
triton=Triton(),
)
results = []
try:
for result in self._executor.start(task):
results.append(result)
except RunnerException as e:
LOGGER.error(f"Error running task: {str(e)}")
finally:
self._executor.stop()
self._finalizer.exec(workspace=self.EXECUTOR_WORKSPACE, task=task, results=results)
def _catch(self, signum, frame):
"""
        SIGINT handler. Stops the executor and exits on interrupt.
Args:
signum: signal id
frame: signal frame
"""
self._executor.stop()
sys.exit(0)
def _setup_logger(self) -> None:
"""
Add file handle for logger
Returns:
None
"""
file = logging.FileHandler(self._log_file_path)
formatter = logging.Formatter(log_format)
file.setFormatter(formatter)
LOGGER.addHandler(file)
LOGGER.setLevel(level=self._log_level)
LOGGER.initialize(file_path=self._log_file_path)
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/runner.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Framework, Paths
class Triton:
"""
Triton Inference Server helper class
"""
image = "nvcr.io/nvidia/tritonserver"
tag = "py3"
class LOAD_MODE:
"""
Loading mode available in Triton
"""
POLL = "poll"
EXPLICIT = "explicit"
@staticmethod
def container_image(container_version: str):
"""
Container image based on version
Args:
container_version: Version of container to be used
Returns:
Image name with tag
"""
return f"{Triton.image}:{container_version}-{Triton.tag}"
@staticmethod
def command(
framework: str,
repository_path: str,
strict_mode: bool = False,
poll_model: bool = False,
metrics: bool = False,
verbose: bool = False,
):
"""
Command to run Triton Inference Server inside container
Args:
framework: Framework used for model
repository_path: Path to model repository
strict_mode: Flag to use strict model config
            poll_model: Use polling model control mode to reload models automatically
metrics: Enable GPU metrics (disable for MIG)
verbose: Use verbose mode logging
        Returns:
            Command string used to launch Triton Inference Server
"""
triton_command = f"tritonserver --model-store={repository_path}"
if poll_model:
triton_command += " --model-control-mode=poll --repository-poll-secs 5"
else:
triton_command += " --model-control-mode=explicit"
if not strict_mode:
triton_command += " --strict-model-config=false"
if not metrics:
triton_command += " --allow-metrics=false --allow-gpu-metrics=false"
if verbose:
triton_command += " --log-verbose 1"
if framework in (Framework.TensorFlow1, Framework.TensorFlow2):
version = 1 if framework == Framework.TensorFlow1 else 2
triton_command += f" --backend-config=tensorflow,version={version}"
return triton_command
@staticmethod
def library_path(framework: str):
"""
Obtain custom library path for framework
Args:
framework: Framework used for model
Returns:
Path to additional libraries needed by framework
"""
paths = {
Framework.PyTorch.name: "/opt/tritonserver/backends/pytorch",
Framework.TensorFlow1.name: "/opt/tritonserver/backends/tensorflow1",
Framework.TensorFlow2.name: "/opt/tritonserver/backends/tensorflow2",
}
return paths[framework]
@staticmethod
def custom_library_path_remote() -> str:
"""
Path to custom library mounted in Triton container
Returns:
Path to shared library with custom operations
"""
return f"{Paths.LIBRARIES_PATH}/libcustomops.so"
@staticmethod
def custom_library_path_local(libs_dir: pathlib.Path) -> pathlib.Path:
"""
Path to custom library in local path
Args:
libs_dir: path to libraries directory
Returns:
Path to shared library with custom operations
"""
return libs_dir / "libcustomops.so"
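# Illustrative usage (a minimal sketch; the container version is an
# assumption):
if __name__ == "__main__":
    print(Triton.container_image("21.07"))  # nvcr.io/nvidia/tritonserver:21.07-py3
    print(
        Triton.command(
            framework=Framework.PyTorch,
            repository_path=Paths.MODEL_REPOSITORY_PATH,
            verbose=True,
        )
    )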
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/triton.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Dict, List, Optional, Union
import yaml
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .configuration import Configuration
from .core import DataObject
from .triton import Triton
class Checkpoint(DataObject):
"""
Checkpoint data placeholder
"""
name: str
url: str
def __init__(self, name: str, url: str):
self.name = name
self.url = url
class Dataset(DataObject):
"""
Dataset data placeholder
"""
name: str
def __init__(self, name: str):
self.name = name
class Config(DataObject):
"""
Configuration object for runner experiments
"""
def __init__(
self,
model_name: str,
framework: str,
container_version: str,
configurations: List[Configuration],
datasets_dir: str = "datasets",
datasets: List[Dataset] = None,
checkpoints: List[Checkpoint] = None,
triton_dockerfile: Optional[str] = None,
triton_container_image: Optional[str] = None,
triton_custom_operations: Optional[str] = None,
triton_load_model_method: Optional[str] = Triton.LOAD_MODE.EXPLICIT,
):
"""
Args:
model_name: Name of model
framework: Framework used to create model
container_version: Version of Triton Inference Server container used for evaluation
configurations: List of experiments configurations
datasets_dir: Directory where datasets are stored
datasets: Datasets used for conversion/export
checkpoints: Checkpoints with trained model
triton_load_model_method: Triton Inference Server model loading mode
triton_dockerfile: Dockerfile for Triton to build custom image
            triton_container_image: Custom image used for Triton Server; leave empty to use the default image or one built from the Dockerfile
triton_custom_operations: Path where custom operation library is stored
"""
self.model_name = model_name
self.framework = framework
self.container_version = container_version
self.configurations = configurations
self.datasets_dir = datasets_dir
self.datasets = datasets
self.checkpoints = checkpoints
self.triton_load_model_method = triton_load_model_method
self.triton_dockerfile = triton_dockerfile
self.triton_container_image = triton_container_image
self.triton_custom_operations = triton_custom_operations
def to_file(self, file_path: Union[pathlib.Path, str]) -> None:
"""
Save config data to file
Args:
            file_path: path to the file where config data should be stored
Returns:
None
"""
data = self.to_dict()
with open(file_path, "w") as f:
yaml.safe_dump(data, f)
@staticmethod
def from_dict(config_data: Dict):
"""
Create configuration object from data stored in dictionary
Args:
config_data: dictionary with config data
Returns:
Config object
"""
configurations = []
for configuration_data in config_data["configurations"]:
configuration = Configuration(**configuration_data)
configurations.append(configuration)
checkpoints = []
for checkpoint_data in config_data.get("checkpoints", []):
checkpoint = Checkpoint(
name=checkpoint_data["name"],
url=checkpoint_data["url"],
)
checkpoints.append(checkpoint)
datasets = []
for dataset_data in config_data.get("datasets", []):
dataset = Dataset(name=dataset_data["name"])
datasets.append(dataset)
return Config(
model_name=config_data["model_name"],
framework=config_data["framework"],
container_version=config_data["container_version"],
configurations=configurations,
checkpoints=checkpoints,
datasets=datasets,
datasets_dir=config_data.get("datasets_dir"),
triton_load_model_method=config_data["triton_load_model_method"],
triton_dockerfile=config_data.get("triton_dockerfile"),
triton_container_image=config_data.get("triton_container_image"),
triton_custom_operations=config_data.get("triton_custom_operations"),
)
@staticmethod
def from_file(file_path: Union[pathlib.Path, str]):
"""
Load experiment data from file
Args:
file_path: path to file where experiment data is stored
Returns:
Experiment object
"""
with open(file_path, "r") as f:
config_data = yaml.safe_load(f)
return Config.from_dict(config_data)
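# Illustrative round trip (a sketch; the model name and container version are
# made up, and the configuration list is left empty for brevity):
if __name__ == "__main__":
    config = Config.from_dict(
        {
            "model_name": "MyModel",
            "framework": "PyTorch",
            "container_version": "21.07",
            "triton_load_model_method": Triton.LOAD_MODE.EXPLICIT,
            "configurations": [],
        }
    )
    config.to_file("example_config.yaml")  # hypothetical output file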
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/config.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import shutil
import urllib.request
from typing import Any, Callable
from zipfile import ZipFile
from retrying import retry
from tqdm.auto import tqdm
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .logger import LOGGER
from .exceptions import RunnerException
def unzip(checkpoint_path: pathlib.Path, archive_path: pathlib.Path) -> None:
"""
    Unzip archive to provided path
Args:
checkpoint_path: Path where archive has to be unpacked
        archive_path: Path to the archive file
Returns:
None
"""
LOGGER.info(f"Creating directory for checkpoint: {checkpoint_path.name}")
checkpoint_path.mkdir(parents=True, exist_ok=True)
LOGGER.info(f"Unpacking checkpoint files {checkpoint_path}")
with ZipFile(archive_path, "r") as zf:
zf.extractall(path=checkpoint_path)
LOGGER.info("done")
LOGGER.info(f"Removing zip file: {archive_path}")
archive_path.unlink()
LOGGER.info("done")
def download_progress(t: Any) -> Callable:
"""
Progress bar
Args:
        t: tqdm progress bar used to report download progress
Returns:
Callable
"""
last_b = [0]
def update_to(b: int = 1, bsize: int = 1, tsize: int = None):
if tsize not in (None, -1):
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return update_to
@retry(stop_max_attempt_number=3)
def download(checkpoint_url: str, checkpoint_path: pathlib.Path) -> None:
"""
Download checkpoint from given url to provided path
Args:
checkpoint_url: Url from which checkpoint has to be downloaded
checkpoint_path: Path where checkpoint has to be stored
Returns:
None
"""
LOGGER.info(f"Downloading checkpoint from {checkpoint_url}")
with tqdm(unit="B") as t:
reporthook = download_progress(t)
result = urllib.request.urlretrieve(checkpoint_url, reporthook=reporthook)
filename = result[0]
LOGGER.info(f"Checkpoint saved in {filename}")
file_path = pathlib.Path(filename)
if not file_path.is_file() and not file_path.is_dir():
raise RunnerException(f"Checkpoint {filename} does not exist")
LOGGER.info(f"Moving checkpoint to {checkpoint_path.parent}")
shutil.move(file_path, checkpoint_path.parent / file_path.name)
LOGGER.info("done")
archive_path = checkpoint_path.parent / file_path.name
unzip(checkpoint_path, archive_path)
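# Illustrative usage (a sketch; the URL below is hypothetical. `download`
# expects a zip archive, which it unpacks into the checkpoint directory):
if __name__ == "__main__":
    download(
        checkpoint_url="https://example.com/checkpoints/model.zip",
        checkpoint_path=pathlib.Path("checkpoints/model"),
    )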
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/downloader.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
import shutil
from typing import Dict, List
import yaml
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .experiment import ExperimentResult
from .logger import LOGGER
from .stages import ResultsType
from .summary import load_results, save_summary
from .task import Task
class Finalizer(abc.ABC):
@abc.abstractmethod
def exec(self, workspace: pathlib.Path, task: Task, results: List[ExperimentResult]):
pass
class ExperimentFinalizer(Finalizer):
"""
Public runner finalizer object.
"""
def exec(self, workspace: pathlib.Path, task: Task, results: List[ExperimentResult]):
results_path = workspace / task.results_dir
self._generate_summary(results_path, results)
self._finalize_task(results_path, task)
def _finalize_task(self, results_path: pathlib.Path, task: Task) -> None:
"""
Finalize task information
        Args:
            results_path: Path where task results are stored
            task: Task object
Returns:
None
"""
task.end()
file_path = results_path / task.filename
LOGGER.debug(f"Saving task details to file {file_path}")
task.to_file(file_path)
LOGGER.debug("Done")
LOGGER.info(f"Task details and results stored in {results_path}")
def _generate_summary(self, results_path: pathlib.Path, experiment_results: List[ExperimentResult]):
"""
Generate summary for results collected in all experiments
Args:
results_path: Path where results should be stored
experiment_results: Results collected from experiments
Returns:
"""
performance_offline_results = list()
performance_online_results = list()
results_mapping = {
ResultsType.TRITON_PERFORMANCE_OFFLINE: performance_offline_results,
ResultsType.TRITON_PERFORMANCE_ONLINE: performance_online_results,
}
self._collect_summary_results(experiment_results, results_mapping)
self._prepare_final_results(results_path, results_mapping)
def _collect_summary_results(self, experiment_results: List[ExperimentResult], results_mapping: Dict):
for experiment_result in experiment_results:
experiment = experiment_result.experiment
for result_type, result_path in experiment_result.results.items():
if not result_path.is_file() and not result_path.is_dir():
raise FileNotFoundError(f"Expected file {result_path} not found")
LOGGER.debug(f"Found {result_type} in {result_path} file.")
if result_type not in results_mapping:
LOGGER.debug(f"Results {result_type} for {experiment.experiment_id} are ignored in final summary.")
                    # skip result types that are not part of the final summary instead of aborting collection
                    continue
LOGGER.debug(f"Collecting {result_type} results from {result_path} for summary")
result = load_results(
results_path=result_path,
parameters=experiment.parameters,
result_type=result_type,
)
results_mapping[result_type].extend(result)
LOGGER.debug(f"Done.")
def _prepare_final_results(self, results_path: pathlib.Path, results_mapping: Dict) -> None:
"""
Prepare summary files for offline and online performance
Args:
results_path: Path where results should be stored
results_mapping: Mapping with results type and collected results for given stage
Returns:
None
"""
for results_type, results in results_mapping.items():
save_summary(
result_type=results_type,
results=results,
summary_dir=results_path,
)
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/finalizer.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Command
from .exceptions import RunnerException
from .stages import Stage
class CommandsExporter:
"""
Command exported to BASH scripts
"""
def __init__(self, scripts_dir: pathlib.Path):
"""
Args:
scripts_dir: Paths where scripts should be stored
"""
self._scripts_dir = scripts_dir
def export(self, stage: Stage) -> Command:
"""
Export stage commands to script and return new command to execute
Args:
stage: Stage object with commands
Returns:
Command object with script execution command
"""
filename = self._get_filename(stage.label)
file_path = self._scripts_dir / filename
with open(file_path, "w+") as stagefile:
stagefile.write("set -x\n")
stagefile.write("set -e\n")
stagefile.write("export PYTHONUNBUFFERED=1\n")
stagefile.write("export PYTHONPATH=`pwd`\n")
for command in stage.commands:
stagefile.write(str(command))
result = os.system(f'ex +"set syn=sh" +"norm gg=G" -cwq {file_path}')
if result != 0:
raise RunnerException(f"Failed running {filename} script formatting. Exit code {result}")
command = Command(f"bash -xe {file_path.as_posix()}")
return command
def _get_filename(self, label: str):
"""
Generate filename for script based on label
Args:
label: String with stage label
Returns:
String with script filename
"""
filename = label.replace(" ", "_").lower()
filename = f"{filename}.sh"
return filename
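# Illustrative usage (a sketch; the stage below is made up, and `export`
# shells out to the `ex` editor for formatting, so it must be installed):
if __name__ == "__main__":
    scripts_dir = pathlib.Path("scripts")
    scripts_dir.mkdir(parents=True, exist_ok=True)
    exporter = CommandsExporter(scripts_dir=scripts_dir)
    stage = Stage(commands=["echo 'hello from stage'"])
    stage.label = "Example Stage"  # concrete Stage subclasses define this as a class attribute
    print(exporter.export(stage))  # -> "bash -xe scripts/example_stage.sh"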
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/exporter.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from enum import Enum
from typing import Any, Dict, List
import yaml
class CustomDumper(yaml.Dumper):
"""
    Custom YAML dumper to avoid creating aliases
"""
def ignore_aliases(self, data: Dict) -> bool:
return True
class Paths:
"""
Paths mapping inside Triton Container
"""
MODEL_REPOSITORY_PATH = "/mnt/triton-models"
LIBRARIES_PATH = "/mnt/libs"
class Framework(Enum):
"""
Supported frameworks
"""
TensorFlow1 = "TensorFlow1"
TensorFlow2 = "TensorFlow2"
PyTorch = "PyTorch"
class Command:
"""Represents wrapper of raw string command"""
def __init__(self, data: str):
"""
Store command data
Args:
data: string with bash commands to execute
"""
self._data = data
def __str__(self) -> str:
"""
String object representation
Returns:
String
"""
return self._data
class DataObject(object):
"""
Data object representation handling recursive transformation from object to dict
"""
READ_ONLY = set()
def to_dict(self) -> Dict:
"""
Represent object as dictionary
Returns:
Dict
"""
data = dict()
filtered_data = {key: value for key, value in self.__dict__.items() if key not in self.READ_ONLY}
for key, value in filtered_data.items():
data[key] = self._convert_value(value)
return data
def _convert_value(self, value: Any) -> Any:
"""
Convert value based on its type
Args:
value: variable to convert
Returns:
Converted object
"""
if isinstance(value, DataObject):
value = value.to_dict()
elif isinstance(value, dict):
value = self._from_dict(value)
elif isinstance(value, list):
value = self._from_list(value)
elif isinstance(value, Enum):
value = value.value
elif isinstance(value, pathlib.Path):
value = value.as_posix()
return value
def _from_dict(self, values: Dict) -> Any:
"""
Convert dictionary values
Args:
values: dictionary with values
Returns:
Any
"""
data = dict()
for key, value in values.items():
data[key] = self._convert_value(value)
return data
def _from_list(self, values: List) -> Any:
"""
Convert list of values
Args:
values: list with values
Returns:
Any
"""
items = list()
for value in values:
item = self._convert_value(value)
items.append(item)
return items
AVAILABLE_FRAMEWORKS = [f.value for f in Framework]
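# Illustrative usage (a minimal sketch showing the recursive conversion that
# DataObject.to_dict performs on enums, paths, lists, and nested objects):
if __name__ == "__main__":
    class _Inner(DataObject):
        def __init__(self):
            self.framework = Framework.PyTorch

    class _Outer(DataObject):
        def __init__(self):
            self.path = pathlib.Path("/models/example")
            self.inner = _Inner()
            self.batch_sizes = [1, 2, 4]

    print(_Outer().to_dict())
    # -> {'path': '/models/example', 'inner': {'framework': 'PyTorch'}, 'batch_sizes': [1, 2, 4]}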
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/core.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
import coloredlogs
class Logger(logging.Logger):
def __init__(self, name, level=logging.NOTSET):
super().__init__(name, level=level)
self._file_path = None
def initialize(self, file_path: pathlib.Path):
self._file_path = file_path
def write(self, log: str):
if not self._file_path:
return
with open(self._file_path, "+a") as file:
file.write(log)
LOGGER = Logger("runner")
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(format=log_format)
coloredlogs.install(
level=logging.INFO,
fmt=log_format,
logger=LOGGER,
field_styles={
"asctime": {"color": "green"},
"hostname": {"color": "magenta"},
"levelname": {"bold": True, "color": "blue"},
"name": {"color": "blue"},
"programname": {"color": "cyan"},
"username": {"color": "yellow"},
},
reconfigure=True,
)
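# Illustrative usage (a sketch; the log file path is an assumption).
# `write` mirrors raw text, e.g. captured subprocess output, into the file:
if __name__ == "__main__":
    LOGGER.initialize(file_path=pathlib.Path("runner.log"))
    LOGGER.info("Runner logger configured")
    LOGGER.write("raw line captured from a subprocess\n")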
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/logger.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import List, Type
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .executor import Executor
from .finalizer import Finalizer
from .maintainer import Maintainer
from .pipeline import Pipeline
from .preparer import Preparer
from .runner import Runner
class RunnerProxy:
"""
Runner proxy to configure original runner
"""
maintainer_cls: Type[Maintainer] = None
executor_cls: Type[Executor] = None
preparer_cls: Type[Preparer] = None
finalizer_cls: Type[Finalizer] = None
def __init__(self, config: Config, pipeline: Pipeline, devices: List[str]):
"""
RunnerProxy constructor
Args:
config: Config object
pipeline: Pipeline to evaluate
devices: List of devices to use for tests
"""
self._runner = Runner(
config=config,
pipeline=pipeline,
devices=devices,
maintainer_cls=self.maintainer_cls,
executor_cls=self.executor_cls,
preparer_cls=self.preparer_cls,
finalizer_cls=self.finalizer_cls,
)
def start(self) -> None:
"""
Runner interface
"""
self._runner.start()
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/runner_proxy.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import List, Optional, Tuple, Union
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Command
class ResultsType:
"""
Results types generated by runner
"""
TRITON_PERFORMANCE_OFFLINE = "triton_performance_offline"
TRITON_PERFORMANCE_ONLINE = "triton_performance_online"
class Stage:
"""
Stage definition
"""
label: str
commands: List[Command]
result_path: Optional[str]
result_type: Optional[str]
def __init__(
self,
commands: Union[Tuple[str, ...], List[str]],
result_path: Optional[str] = None,
result_type: Optional[str] = None,
):
"""
Args:
commands: List or Tuple of commands provided as raw string
result_path: Path to results file generated by stage
result_type: Type of results generated by stage
"""
        if not isinstance(commands, (tuple, list)):
            raise ValueError("Incorrect type of commands. Please provide commands as a tuple or list of strings.")
self.commands = list(map(lambda command: Command(data=command), commands))
self.result_path = result_path
self.result_type = result_type
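# Hedged sketch of constructing a Stage directly; the shell command and result
# metadata below are placeholders, not real pipeline commands.
def _example_stage():
    stage = Stage(
        commands=("echo 'running stage'",),
        result_path="${SHARED_DIR}/results.csv",
        result_type="example_results",
    )
    return len(stage.commands)  # 1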
class ExportStage(Stage):
label = "Export Model"
class ConversionStage(Stage):
label = "Convert Model"
class DeployStage(Stage):
label = "Deploy Model"
class CorrectnessStage(Stage):
label = "Model Correctness Tests"
class TritonPreparePerformanceProfilingDataStage(Stage):
label = "Prepare Triton Profiling Data"
class TritonPerformanceOfflineStage(Stage):
label = "Triton Performance Offline Tests"
class TritonPerformanceOnlineStage(Stage):
label = "Triton Performance Online Tests"
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/stages.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import pathlib
from datetime import datetime
from typing import Any, Dict, Optional
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import DataObject
class ExperimentStatus(object):
"""
Experiment status flags object
"""
SUCCEED = "Succeed"
FAILED = "Failed"
class StageStatus:
"""
Stages status flags object
"""
SUCCEED = "Succeed"
FAILED = "Failed"
class Stage(DataObject):
"""
Stage data object
"""
name: str
status: str
started_at: Optional[int]
ended_at: Optional[int]
result_path: Optional[str]
result_type: Optional[str]
def __init__(
self,
name: str,
result_path: Optional[str],
result_type: Optional[str],
status: str = StageStatus.FAILED,
started_at: Optional[int] = None,
ended_at: Optional[int] = None,
):
"""
Args:
name: name of stage
result_path: path where results file is stored
result_type: type of results
status: success/fail status
            started_at: time when the stage started
            ended_at: time when the stage ended
"""
self.name = name
self.status = status
self.started_at = started_at
self.ended_at = ended_at
self.result_path = result_path
self.result_type = result_type
def start(self) -> None:
"""
Update stage execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update stage execution info at end
Returns:
None
"""
self.status = StageStatus.SUCCEED
self.ended_at = int(datetime.utcnow().timestamp())
class Experiment(DataObject):
"""
Experiment data object
"""
experiment_id: int
parameters: Dict
stages: Dict[str, Stage]
results: Dict[str, str]
status: str
started_at: Optional[int]
ended_at: Optional[int]
def __init__(
self,
experiment_id: int,
parameters: Dict,
stages: Dict[str, Stage],
results: Dict[str, str],
started_at: Optional[int] = None,
ended_at: Optional[int] = None,
status: str = ExperimentStatus.FAILED,
):
"""
Args:
experiment_id: experiment identifier
parameters: dictionary with experiment configuration
stages: dictionary with stages run in experiment
            results: mapping between result types and the locations where they are stored
            started_at: time when the experiment started
            ended_at: time when the experiment ended
status: experiment success/fail information
"""
self.experiment_id = experiment_id
self.started_at = started_at
self.ended_at = ended_at
self.parameters = parameters
self.stages = stages
self.status = status
self.results = results
self.results_dir = f"experiment_{experiment_id}"
def start(self) -> None:
"""
Update experiment execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update experiment execution info at end
Returns:
None
"""
self.status = ExperimentStatus.SUCCEED
self.ended_at = int(datetime.utcnow().timestamp())
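# Hedged lifecycle sketch: a minimal Experiment with a single stage. The names
# and parameter values here are illustrative, not taken from a real task file.
def _example_experiment_lifecycle():
    stage = Stage(name="Export Model", result_path=None, result_type=None)
    experiment = Experiment(
        experiment_id=1,
        parameters={"precision": "fp16"},
        stages={stage.name: stage},
        results={},
    )
    experiment.start()
    stage.start()
    stage.end()
    experiment.end()
    return experiment.status  # ExperimentStatus.SUCCEED after a clean run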
@dataclasses.dataclass
class Status:
state: ExperimentStatus
message: str
@dataclasses.dataclass
class ExperimentResult:
"""
Experiment result object
"""
status: Status
experiment: Experiment
results: Dict[str, pathlib.Path]
payload: Dict[str, Any] = dataclasses.field(default_factory=dict)
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/experiment.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import pathlib
from typing import Dict, List, Union
import yaml
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..deployment_toolkit.report import save_results, sort_results
from .logger import LOGGER
def save_summary(result_type: str, results: List, summary_dir: pathlib.Path) -> None:
"""
Create file with summary for results of given type
Args:
result_type: Type of results to dump
results: Results data
summary_dir: Path where results should be stored
Returns:
None
"""
if len(results) == 0:
LOGGER.warning(f"No {result_type} results found.")
return
results = sort_results(results=results)
kind_file = summary_dir / f"{result_type}_summary.csv"
save_results(filename=kind_file.as_posix(), data=results, formatted=True)
LOGGER.info(f"Summary for {result_type} stored in {kind_file}")
def load_results(*, results_path: Union[pathlib.Path, str], result_type: str, parameters: Dict) -> List:
"""
    Load results of a given type generated by an experiment stage
Args:
results_path: Path to file or directory from which data should be read
result_type: type of results
parameters: Parameters used in experiment which generated results
Returns:
List of result rows
"""
LOGGER.debug(f"Loading {result_type} from {results_path} for summary")
results_path = pathlib.Path(results_path)
if results_path.is_file():
files = [results_path]
elif results_path.is_dir():
files = list(results_path.iterdir())
else:
LOGGER.debug(f"Unable to load file: {results_path}. Generating empty rows.")
data = [{}]
return data
if any([file.name.endswith(".ckpt") for file in files]):
model_analyzer_metrics = results_path / "metrics-model-inference.csv"
files = [model_analyzer_metrics]
else:
files = [file for file in files if file.name.endswith(".csv")]
results = list()
parameters_cpy = {key: value for key, value in parameters.items() if key != "batch"}
for file in files:
if file.suffix == ".csv":
data = _generate_data_from_csv(file=file)
elif file.suffix == ".json":
data = _generate_data_from_json(file=file)
elif file.suffix == ".yaml":
data = _generate_data_from_yaml(file=file)
else:
raise ValueError(f"Unsupported file extension: {file.suffix}")
for item in data:
result = {**parameters_cpy, **item}
results.append(result)
LOGGER.debug(f"Loading done. Collected {len(results)} results.")
return results
def _normalize_key(*, key: str) -> str:
"""
Normalize key
Args:
key: Key to normalize
Returns:
Normalized string
"""
key = "_".join(key.split(sep=" "))
key = key.lower()
return key
def _normalize_keys(*, data: Dict) -> Dict:
"""
Normalize keys in dictionary
Args:
data: Dictionary to normalize
Returns:
Normalized dictionary
"""
keys = {_normalize_key(key=key): value for key, value in data.items()}
return keys
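# Hedged example of the normalization helpers above: spaces become underscores
# and keys are lower-cased, so CSV headers such as "Batch Size" line up with
# the snake_case parameter names used in the summary rows.
def _example_normalization():
    row = {"Batch Size": 8, "Inferences/Second": 1200.0}
    return _normalize_keys(data=row)  # {'batch_size': 8, 'inferences/second': 1200.0}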
def _generate_data_from_csv(*, file: Union[pathlib.Path, str]) -> List[Dict]:
"""
Generate result rows from CSV file
Args:
file: CSV file path
Returns:
List of rows
"""
LOGGER.debug(f"Reading data from {file}")
filtered_rows: List[Dict] = []
with open(file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
r = _normalize_keys(data=r)
filtered_row = {k: v for k, v in r.items()}
filtered_rows.append(filtered_row)
LOGGER.debug("done")
return filtered_rows
def _generate_data_from_json(file: pathlib.Path) -> List[Dict]:
LOGGER.info(f"Reading data from {file}")
filtered_rows: List[Dict] = list()
with open(file, "r") as json_file:
file_data = json.load(json_file)
if not isinstance(file_data, list):
file_data = [file_data]
for r in file_data:
r = _normalize_keys(data=r)
filtered_row = {k: v for k, v in r.items()}
filtered_rows.append(filtered_row)
LOGGER.info("done")
return filtered_rows
def _generate_data_from_yaml(file: pathlib.Path) -> List[Dict]:
LOGGER.info(f"Reading data from {file}")
filtered_rows: List[Dict] = list()
with open(file, "r") as yaml_file:
file_data = yaml.safe_load(yaml_file)
if not isinstance(file_data, list):
file_data = [file_data]
for r in file_data:
r = _normalize_keys(data=r)
filtered_row = {k: v for k, v in r.items()}
filtered_rows.append(filtered_row)
LOGGER.info("done")
return filtered_rows
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/summary.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .pipeline import Pipeline
pipeline = Pipeline()
pipeline.model_export(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "ts-trace" || "${EXPORT_FORMAT}" == "ts-script" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
\
--checkpoint ${CHECKPOINT_DIR}/ \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--dataset ${DATASETS_DIR}/${DATASET} \
--batch-size 1
""",
)
)
pipeline.model_conversion(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "ts-trace" || "${EXPORT_FORMAT}" == "ts-script" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size ${MAX_BATCH_SIZE} \
--container-version 21.08 \
--max-workspace-size 10000000000 \
--atol target__0=100 \
--rtol target__0=100
""",
)
)
pipeline.model_deploy(
commands=(
r"""
if [[ "${FORMAT}" == "ts-trace" || "${FORMAT}" == "ts-script" ]]; then
export CONFIG_FORMAT="torchscript"
else
export CONFIG_FORMAT="${FORMAT}"
fi
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${CONFIG_FORMAT} \
--model-control-mode ${TRITON_LOAD_MODEL_METHOD} \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--tensorrt-max-workspace-size 10000000000 \
--max-batch-size ${MAX_BATCH_SIZE} \
--batching dynamic \
--preferred-batch-sizes ${TRITON_PREFERRED_BATCH_SIZES} \
--max-queue-delay-us ${TRITON_MAX_QUEUE_DELAY} \
--engine-count-per-device ${DEVICE}=${TRITON_GPU_ENGINE_COUNT}
""",
)
)
pipeline.triton_prepare_performance_profiling_data(
commands=(
r"""
mkdir -p ${SHARED_DIR}/input_data
""",
r"""
python triton/prepare_input_data.py \
--input-data-dir ${SHARED_DIR}/input_data/ \
--dataset ${DATASETS_DIR}/${DATASET} \
        --checkpoint ${CHECKPOINT_DIR}/
""",
)
)
pipeline.triton_performance_offline_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data ${SHARED_DIR}/input_data/data.json \
--batch-sizes ${BATCH_SIZE} \
--number-of-triton-instances ${TRITON_INSTANCES} \
--batching-mode static \
--evaluation-mode offline \
--measurement-request-count ${REQUEST_COUNT} \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_offline.csv",
)
pipeline.triton_performance_online_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data ${SHARED_DIR}/input_data/data.json \
--batch-sizes ${BATCH_SIZE} \
--number-of-triton-instances ${TRITON_INSTANCES} \
--number-of-model-instances ${TRITON_GPU_ENGINE_COUNT} \
--batching-mode dynamic \
--evaluation-mode online \
--measurement-request-count 500 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_online.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_online.csv",
) | DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/pipeline_impl.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import shutil
import subprocess
from enum import Enum
from typing import Any
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Command
from .exceptions import RunnerException
from .logger import LOGGER
def format_env_key(s: str):
"""
    Format an environment variable key
Args:
s: String to format
Returns:
Upper cased string
"""
return s.upper()
def format_env_value(value: Any) -> str:
"""
Format environment variable value
Args:
value: value to be formatted
Returns:
Formatted value as a string
"""
value = value if not isinstance(value, Enum) else value.value
value = value if type(value) not in [list, tuple] else ",".join(map(str, value))
value = str(value)
return value
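# Hedged example: Enum members collapse to their value and sequences are
# comma-joined, so arbitrary parameter values can be exported as environment
# variables. The _Color enum below is purely illustrative.
def _example_format_env():
    class _Color(Enum):
        RED = "red"
    assert format_env_value(_Color.RED) == "red"
    assert format_env_value([1, 2, 3]) == "1,2,3"
    assert format_env_key("precision") == "PRECISION"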
def get_result_path(result_path: str) -> str:
"""
    Resolve a result path, expanding environment variable references (e.g. ${SHARED_DIR}) embedded in it
Args:
result_path: Path to result file
Returns:
str
"""
for env_var, val in os.environ.items():
result_path = result_path.replace(f"${{{env_var}}}", val)
if result_path.startswith("/"):
return result_path
if result_path.startswith("./"):
result_path = result_path[2:]
return result_path
def clean_directory(directory: pathlib.Path) -> None:
"""
Remove all files and directories from directory
Args:
directory: Path to directory which should be cleaned
Returns:
None
"""
LOGGER.debug(f"Cleaning {directory.as_posix()}")
if not directory.is_dir():
LOGGER.warning(f"{directory.name} is not a directory.")
return
for item in os.listdir(directory):
item_path = directory / item
if item_path.is_dir():
LOGGER.debug(f"Remove dir {item_path.as_posix()}")
shutil.rmtree(item_path.as_posix())
elif item_path.is_file():
LOGGER.debug(f"Remove file: {item_path.as_posix()}")
item_path.unlink()
else:
LOGGER.warning(f"Cannot remove item {item_path.name}. Not a file or directory.")
def exec_command(command: Command) -> None:
"""
Execute command
Args:
command: Command to run
"""
try:
process = subprocess.Popen(
[str(command)],
shell=True,
start_new_session=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding="utf-8",
)
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
print(output.rstrip())
LOGGER.write(output)
result = process.poll()
if result != 0:
raise RunnerException(f"Command {command} failed with exit status: {result}")
except subprocess.CalledProcessError as e:
raise RunnerException(f"Running command {e.cmd} failed with exit status {e.returncode} : {e.output}")
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Dict, Tuple
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .stages import (
ConversionStage,
DeployStage,
ExportStage,
ResultsType,
TritonPerformanceOfflineStage,
TritonPerformanceOnlineStage,
TritonPreparePerformanceProfilingDataStage,
)
class Pipeline:
"""
    Definition of stages that have to be executed before and during experiments
"""
# Stages to execute as part of single experiment
_experiment_stages = [
ExportStage.label,
ConversionStage.label,
DeployStage.label,
TritonPreparePerformanceProfilingDataStage.label,
TritonPerformanceOfflineStage.label,
TritonPerformanceOnlineStage.label,
]
def __init__(self):
"""
Initialize pipeline
"""
self._stages: Dict = dict()
def model_export(self, commands: Tuple[str, ...]) -> None:
"""
Model export stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = ExportStage(commands=commands)
self._stages[stage.label] = stage
def model_conversion(self, commands: Tuple[str, ...]) -> None:
"""
Model conversion stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = ConversionStage(commands=commands)
self._stages[stage.label] = stage
def model_deploy(self, commands: Tuple[str, ...]) -> None:
"""
Model deployment stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = DeployStage(commands=commands)
self._stages[stage.label] = stage
def triton_prepare_performance_profiling_data(self, commands: Tuple[str, ...]) -> None:
"""
Model profiling data creation stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = TritonPreparePerformanceProfilingDataStage(commands=commands)
self._stages[stage.label] = stage
def triton_performance_offline_tests(self, commands: Tuple[str, ...], result_path: str) -> None:
"""
Model performance offline test stage
Args:
commands: Commands to be executed as part of stage
result_path: Path where results file is stored
Returns:
None
"""
stage = TritonPerformanceOfflineStage(
commands=commands,
result_path=result_path,
result_type=ResultsType.TRITON_PERFORMANCE_OFFLINE,
)
self._stages[stage.label] = stage
def triton_performance_online_tests(self, commands: Tuple[str, ...], result_path: str) -> None:
"""
Model performance online test stage
Args:
commands: Commands to be executed as part of stage
result_path: Path where results file is stored
Returns:
None
"""
stage = TritonPerformanceOnlineStage(
commands=commands,
result_path=result_path,
result_type=ResultsType.TRITON_PERFORMANCE_ONLINE,
)
self._stages[stage.label] = stage
def stages(self):
"""
Generate stages which should be run per experiment
Returns:
Generator with stages object
"""
for stage_name in self._experiment_stages:
stage = self._stages.get(stage_name)
if not stage:
continue
yield stage
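# Hedged sketch of assembling a pipeline; the echo commands stand in for the
# real export/conversion scripts wired up in pipeline_impl.py.
def _example_pipeline():
    pipeline = Pipeline()
    pipeline.model_export(commands=("echo export",))
    pipeline.model_conversion(commands=("echo convert",))
    # stages() yields only the registered stages, in the fixed order defined
    # by _experiment_stages.
    return [stage.label for stage in pipeline.stages()]  # ['Export Model', 'Convert Model']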
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/pipeline.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RunnerException(Exception):
"""
Runner Exception
"""
def __init__(self, message: str):
self._message = message
def __str__(self):
return self._message
@property
def message(self):
"""Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/exceptions.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pathlib
from typing import List
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .executor import Executor
from .finalizer import ExperimentFinalizer
from .maintainer import DockerMaintainer
from .preparer import ExperimentPreparer
from .runner_proxy import RunnerProxy
from .pipeline_impl import pipeline
class ExperimentRunner(RunnerProxy):
"""
Experiment Runner proxy for runner wrapper
"""
maintainer_cls = DockerMaintainer
executor_cls = Executor
preparer_cls = ExperimentPreparer
finalizer_cls = ExperimentFinalizer
def execute(config_path: str, devices: List[str]):
if len(devices) == 0:
devices = ["0"]
config = Config.from_file(config_path)
runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices)
runner.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.")
parser.add_argument(
"--devices", type=str, nargs="*", required=False, help="Path to configuration file with details."
)
args = parser.parse_args()
config_path = args.config_path
devices = args.devices
execute(config_path, devices) | DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/__main__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pathlib
import shutil
import traceback
from typing import Dict, List, Optional
from colorama import Fore
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..deployment_toolkit.core import Accelerator, Precision
from .core import Paths
from .exceptions import RunnerException
from .experiment import ExperimentResult, ExperimentStatus, Status
from .exporter import CommandsExporter
from .logger import LOGGER
from .maintainer import Container, Maintainer
from .pipeline import Pipeline
from .stages import Stage
from .task import Experiment, Task
from .triton import Triton
from .utils import clean_directory, exec_command, format_env_key, format_env_value, get_result_path
class Executor:
"""
Experiments executor
"""
def __init__(
self,
workspace: pathlib.Path,
maintainer: Maintainer,
pipeline: Pipeline,
devices: List[str] = None,
):
"""
Initialize experiments executor
Args:
workspace: Path to workspace to store artifacts
maintainer: maintainer for running commands
pipeline: pipeline definition
devices: List of devices on which Triton Inference Server will be executed
"""
self._maintainer = maintainer
self._pipeline = pipeline
self._devices = devices or ["0"]
self._workspace = workspace
self._executor_workspace = workspace / "executor"
self._shared_dir = self._executor_workspace / "shared"
self._triton_models_repository_dir = self._executor_workspace / "triton_models"
self._scripts_dir = self._executor_workspace / "scripts"
self._libraries_dir = self._executor_workspace / "libs"
self._exporter = CommandsExporter(self._scripts_dir)
self._triton_container: Optional[Container] = None
def start(self, task: Task):
"""
Process the task and execute experiments.
"""
self._create_dirs()
total_experiment = len(task.experiments)
LOGGER.info(f"Total experiments to verify: {total_experiment}")
for idx, experiment in enumerate(task.experiments, start=1):
LOGGER.info(
f"{Fore.CYAN}================ Experiment: {idx}/{total_experiment} Started ================{Fore.RESET}"
)
results = {}
environment = self._prepare_environment(task, experiment.parameters)
LOGGER.info(f"Experiment details")
LOGGER.info(json.dumps(environment, indent=4))
self._clean_experiment_artifacts(idx, total_experiment)
self._create_experiment_results_dir(task, experiment)
experiment.start()
LOGGER.info("Running Triton Servers:")
log_file = self._workspace / task.logs_dir / f"triton-server-experiment-{idx}.log"
self._triton_container = self._triton_server_container(
triton_container_image=task.triton_container_image,
framework=task.framework,
accelerator=experiment.parameters["accelerator"],
precision=experiment.parameters["precision"],
custom_library=bool(task.triton_custom_operations is not None),
load_model_method=task.triton_load_model_method,
log_file=log_file,
)
try:
self._triton_container.start()
for stage in self._pipeline.stages():
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total_experiment}] ================ Stage {stage.label} Started ================{Fore.RESET}"
)
experiment_stage = experiment.stages[stage.label]
experiment_stage.start()
is_ok = self._run_stage(stage=stage)
if not is_ok:
LOGGER.error(f"Stage {stage.label} failed.")
break
self._save_results(task, experiment, stage.label, results)
experiment_stage.end()
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total_experiment}] ================ Stage {stage.label} Finished ================{Fore.RESET}"
)
except Exception:
message = traceback.format_exc()
LOGGER.error(f"Error running experiment: {message}")
                yield ExperimentResult(
                    status=Status(state=ExperimentStatus.FAILED, message=message),
                    experiment=experiment,
                    results=results,
                )
                # prevent falling through to the success yield below; the
                # finally block still stops the Triton container first
                continue
finally:
self._triton_container.stop()
experiment.end()
LOGGER.info(
f"{Fore.CYAN}================ Experiment: {idx}/{total_experiment} Finished ================{Fore.RESET}"
)
yield ExperimentResult(
status=Status(state=ExperimentStatus.SUCCEED, message="Experiment Succeed"),
experiment=experiment,
results=results,
)
def stop(self) -> None:
"""
Stop executor
Returns:
None
"""
if self._triton_container:
self._triton_container.stop()
def _prepare_environment(self, task: Task, parameters: Dict) -> Dict:
"""
Prepare environment data and export it
Args:
parameters: Key and values which should be exported to environment
Returns:
Dictionary with environment data
"""
environment = {
"MODEL_NAME": task.model_name,
"FRAMEWORK": task.framework,
"SHARED_DIR": self._shared_dir.as_posix(),
"MODEL_REPOSITORY_PATH": self._triton_models_repository_dir.as_posix(),
"TRITON_SERVER_URL": "localhost",
"TRITON_INSTANCES": "1",
"TRITON_LOAD_MODEL_METHOD": task.triton_load_model_method,
}
checkpoint_variant = parameters.get("checkpoint_variant")
if checkpoint_variant:
del parameters["checkpoint_variant"]
environment["CHECKPOINT_DIR"] = task.checkpoints[checkpoint_variant].path.as_posix()
if task.datasets_dir:
environment["DATASETS_DIR"] = task.datasets_dir.as_posix()
for key, value in parameters.items():
key = format_env_key(key)
value = format_env_value(value)
environment[key] = value
for key, value in environment.items():
os.environ[key] = value
return environment
def _triton_server_container(
self,
triton_container_image: str,
framework: str,
load_model_method: str,
accelerator: str,
precision: str,
log_file: pathlib.Path,
custom_library: bool,
) -> Container:
"""
Create Triton Inference Server container for experiment
Args:
triton_container_image: Triton Inference Server container image
framework: Framework used to run model
accelerator: Accelerator used for experiment
precision: Precision used for experiment
load_model_method: Configure how Triton will load model
log_file: File where Triton logs are stored
Returns:
Container object
"""
volumes = {
self._triton_models_repository_dir: {"bind": Paths.MODEL_REPOSITORY_PATH, "mode": "rw"},
self._libraries_dir: {"bind": Paths.LIBRARIES_PATH, "mode": "rw"},
}
environment = {
"MODEL_REPOSITORY_PATH": Paths.MODEL_REPOSITORY_PATH,
"LIBRARIES_PATH": Paths.LIBRARIES_PATH,
"TRITON_LOAD_MODEL_METHOD": load_model_method,
}
if custom_library:
library_path = Triton.library_path(framework=framework)
environment["LD_LIBRARY_PATH"] = f"{library_path}:${{LD_LIBRARY_PATH}}"
environment["LD_PRELOAD"] = Triton.custom_library_path_remote()
if accelerator == Accelerator.TRT.value and precision == Precision.FP16.value:
environment["ORT_TENSORRT_FP16_ENABLE"] = 1
strict_mode = False
command = Triton.command(
framework=framework,
repository_path=Paths.MODEL_REPOSITORY_PATH,
strict_mode=strict_mode,
)
command = f' bash -c "{command}"'
container = self._maintainer.triton_container(
command=command,
image=triton_container_image,
devices=self._devices,
volumes=volumes,
environment=environment,
log_file=log_file,
)
return container
def _save_results(self, task: Task, experiment: Experiment, stage_name: str, results: Dict) -> None:
"""
Update results for stage
Args:
task: Task object
experiment: Experiment for which stage has to be updated
stage_name: Name of stage
results: Results path mapping
Returns:
None
"""
stage = experiment.stages[stage_name]
if not stage.result_path:
LOGGER.debug(f"No results file to copy for {stage.name}")
return
if not stage.result_type:
LOGGER.debug(f"No results type provided for {stage.name}")
return
os.environ["SHARED_DIR"] = self._shared_dir.as_posix()
result_path = get_result_path(result_path=stage.result_path)
result_path = pathlib.Path(result_path)
if not result_path.is_file() and not result_path.is_dir():
raise RunnerException(f"Results file {result_path} not found.")
experiment_dir = self._workspace / task.results_dir / experiment.results_dir
LOGGER.info(f"Saving {stage.result_type} to {experiment_dir}")
if result_path.is_dir():
dst_path = experiment_dir / stage.result_type
shutil.copytree(result_path, dst_path)
elif result_path.is_file():
suffix = result_path.suffix
dst_path = experiment_dir / f"{stage.result_type}{suffix}"
shutil.copy(result_path, dst_path)
else:
raise RunnerException(f"Result not found {result_path}")
LOGGER.info("Done")
results[stage.result_type] = dst_path
def _create_dirs(self) -> None:
"""
Create directories used to store artifacts and final results
Returns:
None
"""
LOGGER.info(f"{Fore.GREEN}================ Creating Artifacts Directories Started ================{Fore.RESET}")
if self._executor_workspace.is_dir():
LOGGER.info(f"Removing previous executor workspace: {self._executor_workspace}")
shutil.rmtree(self._executor_workspace)
for directory in [
self._libraries_dir,
self._shared_dir,
self._scripts_dir,
self._triton_models_repository_dir,
]:
directory.mkdir(parents=True, exist_ok=True)
LOGGER.info(f"Directory {directory.name} created.")
LOGGER.info(
f"{Fore.GREEN}================ Creating Artifacts Directories Finished ================{Fore.RESET}"
)
def _clean_experiment_artifacts(self, idx: int, total: int) -> None:
"""
Clean artifacts stored between experiments
Returns:
None
"""
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total}] ================ Cleanup Experiment Data Started ================{Fore.RESET}"
)
for directory in [
self._shared_dir,
self._scripts_dir,
self._triton_models_repository_dir,
]:
clean_directory(directory)
LOGGER.info(f"Location {directory} cleaned.")
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total}] ================ Cleanup Experiment Data Finished ================{Fore.RESET}"
)
def _create_experiment_results_dir(self, task: Task, experiment: Experiment):
"""
Create result directory for experiment
        Returns:
            None
"""
experiment_dir = self._workspace / task.results_dir / experiment.results_dir
experiment_dir.mkdir(parents=True, exist_ok=True)
def _prepare_triton_custom_operations(self, task: Task) -> None:
"""
Prepare Triton Server custom operations library
Returns:
None
"""
if task.triton_custom_operations:
target_library_path = Triton.custom_library_path_local(self._libraries_dir)
target_library_path_dir = target_library_path.parent
target_library_path_dir.mkdir(parents=True, exist_ok=True)
shutil.copy(task.triton_custom_operations, target_library_path)
def _run_stage(self, stage: Stage) -> bool:
"""
Run single stage commands
Args:
stage: Stage object with defined commands
Returns:
True on success, False otherwise
"""
try:
command = self._exporter.export(stage=stage)
exec_command(command)
except RunnerException:
return False
return True
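# Hedged sketch of how the runner wires an Executor together; the workspace
# path is hypothetical, and in practice the task/maintainer/pipeline objects
# are built by Runner from a parsed configuration file.
def _example_executor_setup(maintainer: Maintainer, pipeline: Pipeline, task: Task):
    executor = Executor(
        workspace=pathlib.Path("./runner_workspace"),
        maintainer=maintainer,
        pipeline=pipeline,
        devices=["0"],
    )
    # start() is a generator: it yields one ExperimentResult per experiment.
    for result in executor.start(task):
        LOGGER.info(f"Experiment {result.experiment.experiment_id}: {result.status.state}")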
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/executor.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .docker.maintainer import DockerMaintainer
class MaintainerFactory:
@staticmethod
def create_docker_maintainer():
return DockerMaintainer()
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer/maintainer_factory.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .container import Container
from .docker.maintainer import DockerMaintainer
from .maintainer import Maintainer
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import Any
class Container(abc.ABC):
def __init__(self, name: str):
self.name = name
self._container = None
@abc.abstractmethod
def start(self):
"""
Start container
"""
pass
@abc.abstractmethod
def stop(self):
"""
Stop container
"""
@abc.abstractmethod
def run(self, command: str) -> Any:
"""
Run command inside container
Args:
command: command to execute
Returns:
Any
"""
pass
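# Hedged sketch of a minimal concrete Container; _DummyContainer is purely
# illustrative and not part of the toolkit.
class _DummyContainer(Container):
    def start(self):
        self._container = object()
    def stop(self):
        self._container = None
    def run(self, command: str) -> Any:
        return f"would run: {command}"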
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer/container.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from typing import Any, Dict, List, Optional, Union
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .container import Container
class Maintainer(abc.ABC):
@abc.abstractmethod
def triton_container(
self, command: str, image: str, devices: List, volumes: Dict, environment: Dict, log_file: Union[pathlib.Path, str]
) -> Container:
"""
Return triton container
Args:
command: Triton Server command that has to be executed
image: Container image
            devices: List of device ids which have to be available in the container
            volumes: Volumes mapping
            environment: Environment variables set in the container
            log_file: File path where server logs have to be saved
Returns:
Container object
"""
pass
@abc.abstractmethod
def build_image(
self,
*,
image_file_path: pathlib.Path,
image_name: str,
workdir_path: Optional[pathlib.Path] = None,
build_args: Optional[Dict[str, Any]] = None,
) -> None:
pass
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer/maintainer.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ContainerNotStarted(Exception):
pass
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer/exceptions.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer/docker/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
import docker
from docker.models.containers import ExecResult
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..container import Container
class DockerContainer(Container):
def __init__(self, name: str):
super().__init__(name)
self._container = None
self._docker_client = docker.from_env()
self._docker_api_client = docker.APIClient()
@abc.abstractmethod
def start(self):
"""
Start container
"""
pass
@abc.abstractmethod
def stop(self):
"""
Stop container
"""
@abc.abstractmethod
def run(self, command: str) -> ExecResult:
"""
Run command inside container
Args:
command: command to execute
Returns:
ExecResult
"""
pass
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer/docker/container.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Any, Dict, List, Optional, Union
import docker
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...logger import LOGGER
from ..maintainer import Maintainer
from .container import DockerContainer
from .containers import TritonServerContainer
class DockerMaintainer(Maintainer):
def triton_container(
self, command: str, image: str, devices: List, volumes: Dict, environment: Dict, log_file: Union[pathlib.Path, str]
) -> DockerContainer:
"""
Return triton container
Args:
command: Triton Server command that has to be executed
image: Container image
            devices: List of device ids which have to be available in the container
            volumes: Volumes mapping
            environment: Environment variables set in the container
            log_file: File path where server logs have to be saved
Returns:
DockerContainer object
"""
return TritonServerContainer(
name="triton-server",
command=command,
image=image,
devices=devices,
volumes=volumes,
environment=environment,
log_file=log_file,
)
def build_image(
self,
*,
image_file_path: pathlib.Path,
image_name: str,
workdir_path: Optional[pathlib.Path] = None,
build_args: Optional[Dict[str, Any]] = None,
) -> None:
workdir_path = workdir_path or image_file_path.parent
build_args = build_args or {}
LOGGER.info(f"Building {image_name} docker image.")
LOGGER.debug(f" Using workdir: {workdir_path}")
LOGGER.debug(f" Dockerfile: {image_file_path}")
LOGGER.debug(f" Build args: {build_args}")
build_logs = list()
try:
docker_client = docker.from_env()
_, build_logs = docker_client.images.build(
path=workdir_path.resolve().as_posix(),
dockerfile=image_file_path.resolve().as_posix(),
tag=image_name,
buildargs=build_args,
network_mode="host",
rm=True,
)
except docker.errors.BuildError as e:
build_logs = e.build_log
raise e
finally:
for chunk in build_logs:
log = chunk.get("stream")
if log:
LOGGER.debug(log.rstrip())
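# Hedged usage sketch; the Dockerfile path, tag, and build argument below are
# hypothetical values, not ones shipped with the repository.
def _example_build(maintainer: DockerMaintainer):
    maintainer.build_image(
        image_file_path=pathlib.Path("docker/Dockerfile.triton"),
        image_name="runner-triton:latest",
        build_args={"FROM_IMAGE": "nvcr.io/nvidia/tritonserver:21.08-py3"},
    )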
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer/docker/maintainer.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .triton_server_container import TritonServerContainer
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer/docker/containers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pathlib
from threading import Thread
from typing import Dict, Generator, Union
from docker.models.containers import ExecResult
from docker.types import DeviceRequest, Ulimit
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ....logger import LOGGER
from ...exceptions import ContainerNotStarted
from ..container import DockerContainer
class TritonServerContainer(DockerContainer):
def __init__(
self,
name: str,
command: str,
image: str,
volumes: Dict,
devices: Union[list, int],
environment: Dict,
log_file: Union[pathlib.Path, str],
network: str = "host",
shm_size: str = "1G",
):
"""
Initialize Triton Server Container
Args:
name: Container name
command: Triton Server command to exec on container start
image: Docker Image
volumes: Volumes to mount inside container
devices: Devices which has to be visible in container
environment: Environment variables
log_file: Path where logs should be saved
network: Network mode
shm_size: Shared memory size
"""
super().__init__(name)
self._image = image
self._command = command
self._volumes = volumes
self._devices = devices
self._environment = environment
self._network = network
self._shm_size = shm_size
self._triton_exec = None
self._logging_thread = None
self._log_file_path = pathlib.Path(log_file)
def start(self) -> None:
"""
Start Triton Server Container
"""
devices = [
DeviceRequest(capabilities=[["gpu"]], device_ids=self._devices),
]
LOGGER.info(f"Triton environment: {json.dumps(self._environment, indent=4)}")
LOGGER.info(f"Starting Triton container {self.name}.")
self._container = self._docker_client.containers.run(
image=self._image,
name=self.name,
device_requests=devices,
detach=True,
tty=True,
shm_size=self._shm_size,
ulimits=[
Ulimit(name="memlock", soft=-1, hard=-1),
Ulimit(name="stack", soft=67108864, hard=67108864),
],
volumes=self._volumes,
environment=self._environment,
network_mode=self._network,
auto_remove=True,
ipc_mode="host",
)
LOGGER.info(f"Triton command:")
LOGGER.info(f" {self._command}")
LOGGER.info(f"Starting Triton Server {self.name}.")
self._triton_exec = self._docker_api_client.exec_create(
container=self._container.id,
cmd=self._command,
)
stream_generator = self._docker_api_client.exec_start(exec_id=self._triton_exec["Id"], stream=True)
self._logging_thread = Thread(target=TritonServerContainer._logging, args=(self, stream_generator), daemon=True)
self._logging_thread.start()
def stop(self) -> None:
"""
Stop Triton Server Container and save logs to file
"""
if self._container is not None:
triton_result = self._docker_api_client.exec_inspect(self._triton_exec["Id"])
if triton_result.get("ExitCode") not in (0, None):
LOGGER.info(
f"Triton Inference Server instance {self.name} failed. Exit code: {triton_result.get('ExitCode')}"
)
LOGGER.info(f"Stopping triton server {self.name}.")
self._container.stop()
self._container = None
self._docker_client.close()
self._docker_api_client.close()
def run(self, command: str) -> ExecResult:
"""
Run command in container
Args:
command: Command to execute
Returns:
ExecResult
"""
if not self._container:
raise ContainerNotStarted("Triton Server Container is not running. Use .start() first.")
return self._container.exec_run(command)
def _logging(self, generator: Generator) -> None:
"""Triton logging thread for Triton Inference Server
Args:
generator (string generator): Triton log stream.
"""
with open(self._log_file_path, mode="w") as file:
try:
while True:
log = next(generator)
txt = log.decode("utf-8")
file.write(txt)
except StopIteration:
LOGGER.info(f"Saving Triton Inference Server {self.name} logs in {self._log_file_path}.")
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer/docker/containers/triton_server_container.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import numpy as np
import time
import logging
from tqdm import tqdm
from .evaluation_metrics import METRICS
from .evaluator import MetricEvaluator
from triton.run_inference_on_triton import AsyncGRPCTritonRunner
import tritonclient.grpc as triton_grpc
import xgboost as xgb
class TritonEvaluator(MetricEvaluator):
def __init__(self, config):
self.output_selector = config.get("output_selector", None)
self.metrics = []
preprocessor_state = pickle.load(open(config.preprocessor_state_path, "rb"))
self.scalers = preprocessor_state["scalers"]
self.save_predictions = config.get("save_predictions", False)
self.example_history = []
for name in config.metrics:
if name not in METRICS:
raise ValueError(f"No metric of name: {name}")
self.metrics.append(METRICS[name]())
self.config = config
def predict(self, dataloader, model_name, server_url="localhost:8001"):
LOGGER = logging.getLogger("run_inference_on_triton")
runner = AsyncGRPCTritonRunner(
server_url,
model_name,
"1",
dataloader=dataloader(),
verbose=False,
resp_wait_s=120,
max_unresponded_reqs=128,
)
start = time.time()
preds_full = []
labels_full = []
weights_full = []
ids_full = []
for ids, x, y_pred, y_real in tqdm(runner, unit="batch", mininterval=10):
if self.save_predictions:
self.example_history.append(x['target__6'][:,:self.config.encoder_length])
ids_full.append(ids)
preds_full.append(y_pred['target__0'])
labels_full.append(y_real['target__0'][:,:,0][:,:,np.newaxis])
weights_full.append(x['weight__9'])
stop = time.time()
preds_full = np.concatenate(preds_full, axis=0)
labels_full = np.concatenate(labels_full, axis=0)
weights_full = np.concatenate(weights_full, axis=0)
if np.isnan(weights_full).any():
weights_full = np.empty([0])
ids_full = np.concatenate(ids_full, axis=0)
LOGGER.info(f"\nThe inference took {stop - start:0.3f}s")
if self.save_predictions:
self.example_history = np.concatenate(self.example_history, axis=0)
return preds_full, labels_full, ids_full, weights_full
def predict_xgboost(self, dataloader, max_batch_size, server_url="localhost:8001"):
grpc_client = triton_grpc.InferenceServerClient(
url=server_url,
verbose = False
)
out = []
labels = []
ids = []
weights = []
for i, (test_step, test_label) in enumerate(dataloader):
labels.append(test_label.to_numpy())
ids.append(test_step['_id_'].to_numpy())
data = test_step.to_numpy().astype('float32')
weights.append([])
test_len = len(data)
            # ceil division: avoids issuing an empty request when test_len is an exact multiple of max_batch_size
            num_iters = (test_len + max_batch_size - 1) // max_batch_size
temp_out = []
for j in range(num_iters):
sliced_data = data[j*max_batch_size:(j+1)*max_batch_size]
dims = sliced_data.shape
triton_input_grpc = triton_grpc.InferInput(
'input__0',
dims,
'FP32'
)
triton_input_grpc.set_data_from_numpy(sliced_data)
triton_output_grpc = triton_grpc.InferRequestedOutput('output__0')
request_grpc = grpc_client.infer(
f'xgb_{i+1}',
model_version='1',
inputs=[triton_input_grpc],
outputs=[triton_output_grpc]
)
outt = request_grpc.as_numpy('output__0')
temp_out = np.hstack((temp_out, outt))
out.append(temp_out)
outtemp = np.vstack(out).transpose()
labels_temp = np.hstack(labels)
ids_temp = np.vstack(ids).transpose()
if len(outtemp.shape) == 2:
outtemp = outtemp[:,:,np.newaxis]
if len(labels_temp.shape) == 2:
labels_temp = labels_temp[:, :, np.newaxis]
if self.save_predictions:
labels_ids = dataloader.data[['_id_', dataloader.target[0]]]
for n, g in labels_ids.groupby("_id_"):
labels_all = g[dataloader.target[0]].to_numpy().round(6)
windows_labels = np.lib.stride_tricks.sliding_window_view(labels_all, dataloader.example_length)
self.example_history.append(windows_labels.copy()[:, :dataloader.encoder_length])
self.example_history = np.concatenate(self.example_history, axis=0)[:, :, np.newaxis]
return outtemp, labels_temp, ids_temp[:,0], np.stack(weights)
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/evaluators/triton_evaluator.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from abc import ABC, abstractmethod
from sklearn.metrics import r2_score
class AbstractMetric(ABC):
@staticmethod
@abstractmethod
def __call__(pred, label, weights):
pass
class SMAPE(AbstractMetric):
name = "SMAPE"
@staticmethod
def __call__(preds, labels, weights):
if not weights.size:
weights = None
return 100 * np.average(2 * np.abs(preds - labels) / (np.abs(labels) + np.abs(preds)), weights=weights)
def normalised_quantile_loss(y_pred, y, quantile, weights=None):
    """Implementation of the q-Risk function from https://arxiv.org/pdf/1912.09363.pdf
    q-Risk = 2 * sum(QL_q(y, y_pred)) / sum(|y|), where QL_q is the pinball loss at quantile q.
    """
prediction_underflow = y - y_pred
weighted_errors = quantile * np.maximum(prediction_underflow, 0.0) + (1.0 - quantile) * np.maximum(
-prediction_underflow, 0.0
)
if weights is not None and weights.size:
weighted_errors = weighted_errors * weights
y = y * weights
loss = weighted_errors.sum()
normaliser = abs(y).sum()
return 2 * loss / normaliser
class P50_loss(AbstractMetric):
    name = "P50"
    selector = 1
    @staticmethod
    def __call__(preds, labels, weights):
        return normalised_quantile_loss(preds, labels, 0.5, weights)
class P90_loss(AbstractMetric):
    name = "P90"
    selector = 2
    @staticmethod
    def __call__(preds, labels, weights):
        return normalised_quantile_loss(preds, labels, 0.9, weights)
# Normalized Deviation
class ND(AbstractMetric):
name = "ND"
@staticmethod
def __call__(preds, labels, weights):
diff = np.abs(labels - preds)
if not weights.size:
return np.sum(diff) / np.sum(np.abs(labels))
else:
return np.sum(diff * weights) / np.sum(np.abs(labels) * weights)
class MAE(AbstractMetric):
name = "MAE"
@staticmethod
def __call__(preds, labels, weights, return_individual=False):
if not weights.size:
weights = None
if return_individual:
return np.average(np.abs(preds - labels), weights=weights, axis=0)
else:
return np.average(np.abs(preds - labels), weights=weights)
class MSE(AbstractMetric):
name = "MSE"
@staticmethod
def __call__(preds, labels, weights, return_individual=False):
if not weights.size:
weights = None
if return_individual:
return np.average((preds - labels)**2, weights=weights, axis=0)
else:
return np.average((preds - labels)**2, weights=weights)
class RMSE(AbstractMetric):
name = "RMSE"
@staticmethod
def __call__(preds, labels, weights):
if not weights.size:
weights = None
return np.sqrt(np.average((preds - labels)**2, weights=weights))
class R_Squared(AbstractMetric):
name = "R_Squared"
@staticmethod
def __call__(preds, labels, weights, return_individual=False):
if not weights.size:
if return_individual:
return r2_score(preds, labels, multioutput="raw_values")
return r2_score(preds, labels)
else:
values = r2_score(preds, labels, multioutput="raw_values")
if return_individual:
return values * weights
return np.sum(values * weights) / np.sum(weights)
class WMSMAPE(AbstractMetric):
name = "WMSMAPE"
@staticmethod
def __call__(preds, labels, weights, return_individual=False):
if weights.size:
if return_individual:
return 2 * weights * np.abs(preds - labels) / (np.maximum(labels, 1) + np.abs(preds))
else:
return (
100.0
/ np.sum(weights)
* np.sum(2 * weights * np.abs(preds - labels) / (np.maximum(labels, 1) + np.abs(preds)))
)
if return_individual:
return 2 * np.abs(preds - labels) / (np.maximum(labels, 1) + np.abs(preds))
else:
return 100.0 / len(labels) * np.sum(2 * np.abs(preds - labels) / (np.maximum(labels, 1) + np.abs(preds)))
METRICS = {
"SMAPE": SMAPE,
"WMSMAPE": WMSMAPE,
"MSE": MSE,
"MAE": MAE,
"P50": P50_loss,
"P90": P90_loss,
"RMSE": RMSE,
"R_Squared": R_Squared,
"ND": ND,
}
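# A minimal usage sketch (assuming 1-D numpy arrays of equal length; an empty
# weights array means "unweighted"):
#   metric = METRICS["SMAPE"]()
#   score = metric(np.array([1.0, 2.0]), np.array([1.5, 2.5]), np.empty(0))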
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/evaluators/evaluation_metrics.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from abc import ABC
import dgl
import numpy as np
import torch
from data.datasets import get_collate_fn
from distributed_utils import get_mp_context
from torch.utils.data import DataLoader
from training.utils import to_device
from .evaluation_metrics import METRICS
import pandas as pd
class MetricEvaluator(ABC):
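    """Base class for evaluators: loads the preprocessor state (scalers), instantiates the
    metrics requested in the config, and computes them on unscaled predictions.
    Subclasses provide predict().
    """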
def __init__(self, config):
self.output_selector = config.get("output_selector", None)
self.metrics = []
preprocessor_state = pickle.load(open(config.preprocessor_state_path, "rb"))
self.scalers = preprocessor_state["scalers"]
self.save_predictions = config.get("save_predictions", False)
self.example_history = []
for name in config.metrics:
if name not in METRICS:
raise ValueError(f"No metric of name: {name}")
self.metrics.append(METRICS[name]())
self.config = config
def predict(self, *args, **kwargs):
raise NotImplementedError
def save_preds(self, preds, ids):
all_examples = self.example_history
all_examples = all_examples.transpose(2,0,1).reshape(-1, all_examples.shape[1])
if len(preds.shape) == 4:
tgt_ords = np.arange(preds.shape[2]).repeat(preds.shape[0])
tgt_ords = pd.DataFrame(tgt_ords, columns=['#target'])
preds = preds.transpose(2,0,1,3).reshape(-1,preds.shape[1], preds.shape[3])
ids = ids.transpose().reshape(-1)
else:
tgt_ords = None
all_examples = self.scalers.inverse_transform_targets(all_examples, ids)
hist_df = pd.DataFrame(all_examples, columns=[f't{i+1}' for i in range(-self.config.encoder_length, 0)])
ids = pd.DataFrame(ids, columns=['id'])
col_labels = [f'Estimator{j}_t{i:+}' for j in range(preds.shape[2]) for i in range(preds.shape[1])]
preds_df = pd.DataFrame(preds.reshape(preds.shape[0],-1, order='F'), columns=col_labels)
df = pd.concat([ids, tgt_ords, hist_df, preds_df], axis=1)
df.to_csv('predictions.csv')
def evaluate(self, preds, labels, ids, weights):
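        """Computes all configured metrics on unscaled predictions. `preds` is expected to be
        shaped BS x T x H (single target) or BS x T x F x H (multi target, where each target is
        treated as a separate example); the last axis indexes prediction heads, from which each
        metric picks its head via `selector`.
        """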
results = {}
# In multi target case we treat each target as a separate example.
# Then we can reduce it to a single target case setting BS = prev_BS * num_targets
if len(preds.shape) == 4:
if self.scalers.scale_per_id:
ids = np.arange(preds.shape[-2])
ids = np.repeat(ids, preds.shape[0])
else:
ids = None
# TODO: this causes a memory movement. Rewrite this with views!
preds = np.concatenate([preds[:, :, i] for i in range(preds.shape[-2])], axis=0)
labels = np.concatenate([labels[:, :, i] for i in range(labels.shape[-1])], axis=0)
weights = np.concatenate([weights[:, :, i] for i in range(weights.shape[-1])], axis=0)
elif len(preds.shape) == 3:
labels = labels.squeeze(-1)
if weights.size:
weights = weights.squeeze(-1)
else:
raise ValueError("Expected shape of predictions is either BSxTxFxH or BSxTxH")
upreds = np.stack([self.scalers.inverse_transform_targets(preds[..., i], ids) for i in range(preds.shape[-1])],
axis=-1)
labels = self.scalers.inverse_transform_targets(labels, ids)
if self.save_predictions:
self.save_preds(upreds, ids)
for metric in self.metrics:
selector = getattr(metric, 'selector', self.output_selector)
preds = upreds[..., selector]
results[metric.name] = metric(preds, labels, weights) if np.all(np.isfinite(preds)) else np.NaN
results = {k: float(v) for k, v in results.items()}
return results
class CTLMetricEvaluator(MetricEvaluator):
def __init__(self, test_data, config):
super().__init__(config)
self.device = config.device
if test_data is not None:
mp_context = get_mp_context()
self.dataloader = DataLoader(
test_data,
batch_size=self.config.batch_size,
num_workers=1,
pin_memory=True,
collate_fn=get_collate_fn(config.model_type, config.encoder_length, test=True),
multiprocessing_context=mp_context
)
else:
self.dataloader = None
def prep_data(self, batch):
ids = batch.ndata['id'] if isinstance(batch, dgl.DGLGraph) else batch["id"]
ids = ids[:, 0, ...] # Shape BS x T x F [x H]
weights = batch.ndata['weight'] if isinstance(batch, dgl.DGLGraph) else batch['weight']
weights = weights[:, self.config.encoder_length:,
:] if weights is not None and weights.numel() else torch.empty(0)
batch = to_device(batch, device=self.device)
return batch, weights, ids
def predict(self, model, dataloader=None):
if not dataloader:
dataloader = self.dataloader
        assert dataloader is not None, (
            "Dataloader cannot be None, either pass in a valid dataloader or "
            "initialize evaluator with valid test_data"
        )
test_method_name = 'predict' if hasattr(model, "predict") else '__call__'
test_method = getattr(model, test_method_name)
model.eval()
with torch.no_grad():
preds_full = []
labels_full = []
weights_full = []
ids_full = []
for i, (batch, labels, _) in enumerate(dataloader):
if self.save_predictions:
self.example_history.append(batch['target'][:,:self.config.encoder_length].detach().cpu())
batch, weights, ids = self.prep_data(batch)
labels_full.append(labels)
weights_full.append(weights)
preds = test_method(batch)
ids_full.append(ids)
preds_full.append(preds)
preds_full = torch.cat(preds_full, dim=0).cpu().numpy()
labels_full = torch.cat(labels_full, dim=0).cpu().numpy()
weights_full = torch.cat(weights_full).cpu().numpy()
ids_full = torch.cat(ids_full).cpu().numpy()
if self.save_predictions:
self.example_history = torch.cat(self.example_history, dim=0).cpu().numpy()
return preds_full, labels_full, ids_full, weights_full
class StatMetricEvaluator(MetricEvaluator):
def __init__(self, test_data, config):
super().__init__(config)
self.dataloader = test_data
def predict(self, model, dataloader=None):
dataloader = dataloader or self.dataloader
assert dataloader, "Test dataloader not provided"
preds_full = []
labels_full = []
weights_full = []
ids_full = []
for i, test_batch in enumerate(dataloader):
labels = test_batch["endog"]
ids = test_batch["id"].iloc[0]
preds = np.array(model.predict(test_batch["exog"], i))
labels_full.append(labels)
weights_full.append(test_batch.get('weight', []))
ids_full.append(ids)
preds_full.append(preds)
preds_full = np.stack(preds_full)
labels_full = np.stack(labels_full)
weights_full = np.stack(weights_full)
ids_full = np.stack(ids_full)
if len(preds_full.shape) == 2:
preds_full = preds_full[:, :, np.newaxis]
return preds_full, labels_full, ids_full, weights_full
class XGBMetricEvaluator(MetricEvaluator):
def __init__(self, test_data, config):
super().__init__(config)
self.dataloader = test_data
def predict(self, model, dataloader=None):
dataloader = dataloader or self.dataloader
assert dataloader, "Test dataloader not provided"
out = []
labels = []
ids = []
weights = []
for i, (test_step, test_label) in enumerate(dataloader):
labels.append(test_label.to_numpy())
ids.append(test_step['_id_'].to_numpy())
outt = model.predict(test_step, i)
weights.append([])
out.append(outt)
outtemp = np.vstack(out).transpose()
labels_temp = np.hstack(labels)
ids_temp = np.vstack(ids).transpose()[:, 0]
if len(outtemp.shape) == 2:
outtemp = outtemp[:, :, np.newaxis]
if len(labels_temp.shape) == 2:
labels_temp = labels_temp[:, :, np.newaxis]
if self.save_predictions:
labels_ids = self.dataloader.data[['_id_', self.dataloader.target[0]]]
for n, g in labels_ids.groupby("_id_"):
labels_all = g[self.dataloader.target[0]].to_numpy().round(6)
windows_labels = np.lib.stride_tricks.sliding_window_view(labels_all, self.dataloader.example_length)
self.example_history.append(windows_labels.copy()[:, :self.dataloader.encoder_length])
self.example_history = np.concatenate(self.example_history, axis=0)[:, :, np.newaxis]
return outtemp, labels_temp, ids_temp, np.stack(weights)
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/evaluators/evaluator.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import subprocess
def run_server_launch(config):
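    """Restores the Triton model-store from a converted checkpoint if necessary and
    launches the inference server via inference/deploy.sh.
    """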
cfg = config
    # locate the deployment artifacts produced by the converter
output_path = os.path.join(cfg.checkpoint, "deployment")
tspp_main_dir = os.path.sep + os.path.join(*(os.getcwd().split(os.path.sep)[:-3]))
# get the actual model name
if not os.path.isdir(os.path.join(output_path, "navigator_workspace")) or not os.path.isdir(
os.path.join(output_path, "navigator_workspace/model-store")
):
if os.path.isdir(os.path.join(output_path, "navigator_workspace/final-model-store")):
shutil.copytree(os.path.join(output_path, "navigator_workspace/final-model-store"), os.path.join(output_path, "navigator_workspace/model-store"))
        else:
            raise ValueError(
                "This checkpoint directory is not configured correctly, there should be a dir/deployment/navigator_workspace/model-store/ directory"
            )
    files_in_store = list(os.listdir(os.path.join(output_path, "navigator_workspace/model-store")))
    if len(files_in_store) < 1:
        raise ValueError("There needs to be at least one model in the model-store directory")
model_name = cfg.get("model_name") if cfg.get("model_name", None) else files_in_store[0]
# deploy
subprocess.run(["bash", "inference/deploy.sh", output_path, str(cfg.gpu)], cwd=tspp_main_dir, check=True)
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/inference/launch_inference_server.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import yaml
import conf.conf_utils
from triton.xgboost_triton import run_XGBoost_triton
from omegaconf import OmegaConf
def run_converter(config, export, convert):
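    """Deployment pipeline for a trained checkpoint: exports the model (TorchScript or ONNX),
    converts it with model-navigator (optionally profiling via `run`), generates a Triton
    model-store entry, and checks accuracy of the exported and converted models against the
    native one. XGBoost checkpoints are routed to run_XGBoost_triton instead.
    """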
cfg = config
with open(os.path.join(cfg.checkpoint, ".hydra/config.yaml"), "rb") as f:
config = OmegaConf.load(f)
config.inference = cfg
with open(os.path.join(cfg.checkpoint, ".hydra/config_merged.yaml"), "wb") as f:
OmegaConf.resolve(config)
OmegaConf.save(config=config, f=f.name)
if config.dataset.config.get('xgb', False):
return run_XGBoost_triton(cfg, config)
if config.dataset.config.get('stat', False):
raise ValueError("Stat models not supported in deployment")
model_name = config.model._target_.split(".")[1]
precision = cfg.precision
assert precision in ["fp16", "fp32"], "Precision needs to be either fp32 or fp16"
# export model
output_path = os.path.join(cfg.checkpoint, "deployment")
os.makedirs(output_path, exist_ok=True)
tspp_main_dir = os.path.sep + os.path.join(*(os.getcwd().split(os.path.sep)[:-3]))
model_format = "torchscript" if export.config.type != "onnx" else export.config.type
subprocess.run(
[
"python",
"triton/export_model.py",
"--input-path",
"triton/model.py",
"--input-type",
"pyt",
"--output-path",
"{}/exported_model.pt".format(output_path),
"--output-type",
"{}".format(export.config.type),
"--dataloader",
"triton/dataloader.py",
"--batch-size",
"{}".format(cfg.batch_size),
"--model-dir",
"{}".format(cfg.checkpoint),
"--onnx-opset",
"13",
"--ignore-unknown-parameters",
],
cwd=tspp_main_dir,
check=True,
)
if model_format == "torchscript":
with open(output_path + "/exported_model.pt.yaml", "r") as stream:
var_config = yaml.safe_load(stream)
var_config_list = []
for arg in ["--value-ranges", "--max-shapes", "--dtypes", "--min-shapes"]:
var_config_list.append(arg)
if arg == "--value-ranges":
for k, v in var_config["inputs"].items():
var_config_list.append(k + "=0,1")
elif arg == "--max-shapes":
for k, v in var_config["inputs"].items():
var_config_list.append(k + "=" + ",".join([str(cfg.batch_size)] + [str(x) for x in v["shape"][1:]]))
elif arg == "--min-shapes":
for k, v in var_config["inputs"].items():
var_config_list.append(k + "=" + ",".join([str(x) for x in v["shape"]]))
else:
for k, v in var_config["inputs"].items():
var_config_list.append(k + "=" + v["dtype"])
else:
var_config_list = []
# model-navigator run
if cfg.optimize:
subprocess.run(
[
"model-navigator",
"run",
"--model-name",
model_name,
"--model-path",
"{}/exported_model.pt".format(output_path),
"--config-path",
"{}/exported_model.pt.yaml".format(output_path),
"--override-workspace",
"--workspace-path",
"{}/navigator_workspace".format(output_path),
"--verbose",
"--target-formats",
"{}".format(convert.config.type),
"--model-format",
model_format,
"--config-search-concurrency",
"1",
"32",
"1024",
"--triton-launch-mode",
"docker",
"--max-workspace-size",
"10000000000",
"--max-batch-size",
"{}".format(cfg.batch_size),
"--gpus",
"{}".format(cfg.gpu),
"--atol",
"1e-3",
"--rtol",
"100",
"--onnx-opsets",
"13",
"--container-version",
"21.12",
]
+ var_config_list,
cwd=tspp_main_dir,
check=True,
)
else:
subprocess.run(
[
"model-navigator",
"convert",
"--model-name",
model_name,
"--model-path",
"{}/exported_model.pt".format(output_path),
"--override-workspace",
"--workspace-path",
"{}/navigator_workspace".format(output_path),
"--output-path",
"{}/converted_model".format(output_path),
"--verbose",
"--target-formats",
"{}".format(convert.config.type),
"--model-format",
model_format,
"--launch-mode",
"local",
"--max-workspace-size",
"10000000000",
"--max-batch-size",
"{}".format(cfg.batch_size),
"--target-precisions",
precision,
"--gpus",
"{}".format(cfg.gpu),
"--atol",
"1e-3",
"--rtol",
"100",
"--onnx-opsets",
"13",
"--container-version",
"21.12",
]
+ var_config_list,
cwd=tspp_main_dir,
check=True,
)
subprocess.run(
[
"model-navigator",
"triton-config-model",
"--model-name",
model_name,
"--model-path",
"{}/converted_model".format(output_path),
"--model-version",
"1",
"--model-format",
"{}".format(convert.config.type),
"--model-repository",
"{}/navigator_workspace/model-store/".format(output_path),
"--backend-accelerator",
cfg.accelerator,
"--max-batch-size",
"{}".format(cfg.batch_size),
"--engine-count-per-device",
"gpu=2",
"--tensorrt-precision",
precision,
"--tensorrt-capture-cuda-graph",
"--verbose",
],
cwd=tspp_main_dir,
check=True,
)
convert_type = (
convert.config.type if convert.config.type != "torchscript" else export.config.type
)
subprocess.run(
[
"python",
"triton/check_accuracy.py",
"--native-model",
cfg.checkpoint,
"--native-type",
"pyt",
"--export-model",
"{}/exported_model.pt".format(output_path),
"--export-type",
export.config.type,
"--convert-model",
"{}/converted_model".format(output_path),
"--convert-type",
convert_type,
"--dataloader",
"triton/dataloader.py",
"--batch-size",
"{}".format(1),
"--model-dir",
"{}".format(cfg.checkpoint),
],
cwd=tspp_main_dir,
check=True,
) | DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/inference/converter.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import subprocess
import hydra
from omegaconf import OmegaConf
from triton.dataloader import get_dataloader_fn
from loggers.log_helper import setup_logger
import dllogger
from data.data_utils import Preprocessor
def run_inference_triton(config):
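    """Evaluates a deployed checkpoint against a running Triton server: restores the
    model-store if needed, feeds the test split through the evaluator's Triton client,
    and logs the resulting metrics.
    """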
cfg = config
with open(os.path.join(cfg.checkpoint, ".hydra/config_merged.yaml"), "rb") as f:
config = OmegaConf.load(f)
config.evaluator = OmegaConf.merge(config.evaluator, cfg.evaluator)
if cfg.get("dataset_dir", None):
if not os.path.isdir(config.dataset.config.dest_path):
raise ValueError("dataset_dir must be a directory")
config.dataset.config.dest_path = cfg.dataset_dir
config.inference = cfg
with open(os.path.join(cfg.checkpoint, ".hydra/config_merged.yaml"), "wb") as f:
OmegaConf.resolve(config)
OmegaConf.save(config=config, f=f.name)
output_path = os.path.join(cfg.checkpoint, "deployment")
tspp_main_dir = os.path.sep + os.path.join(*(os.getcwd().split(os.path.sep)[:-3]))
# get the actual model name
if not os.path.isdir(os.path.join(output_path, "navigator_workspace")) or not os.path.isdir(
os.path.join(output_path, "navigator_workspace/model-store")
):
if os.path.isdir(os.path.join(output_path, "navigator_workspace/final-model-store")):
shutil.copytree(os.path.join(output_path, "navigator_workspace/final-model-store"), os.path.join(output_path, "navigator_workspace/model-store"))
        else:
            raise ValueError(
                "This checkpoint directory is not configured correctly, there should be a dir/deployment/navigator_workspace/model-store/ directory"
            )
    files_in_store = list(os.listdir(os.path.join(output_path, "navigator_workspace/model-store")))
    if len(files_in_store) < 1:
        raise ValueError("There needs to be at least one model in the model-store directory")
evaluator = hydra.utils.call(config.evaluator)
if config.dataset.config.get('xgb', False):
if cfg.get("dataset_path", None):
preprocessor = Preprocessor(config.dataset.config)
if cfg.get("preproc_state_path", None):
preprocessor_state_file = cfg.preproc_state_path
else:
preprocessor_state_file = None
preprocessor.load_state(preprocessor_state_file)
test_df = preprocessor.preprocess_test(dataset=cfg.dataset_path)
test_df = preprocessor.apply_scalers(test_df)
test_df = preprocessor.impute(test_df)
train, valid, test = hydra.utils.call(config.dataset, input_df=test_df)
else:
train, valid, test = hydra.utils.call(config.dataset)
del train, valid
preds_full, labels_full, ids_full, weights_full = evaluator.predict_xgboost(test, max_batch_size=cfg.batch_size)
elif config.dataset.config.get('stat', False):
raise ValueError("Stat models not supported on triton")
else:
model_name = cfg.get("model_name") if cfg.get("model_name", None) else files_in_store[0]
dataloader = get_dataloader_fn(cfg.checkpoint, cfg.batch_size)
preds_full, labels_full, ids_full, weights_full = evaluator.predict(dataloader, model_name)
#Need to merge the eval configs here
metrics = evaluator.evaluate(preds_full, labels_full, ids_full, weights_full)
logger = setup_logger(cfg)
logger.log(step=[], data={k: float(v) for k, v in metrics.items()}, verbosity=dllogger.Verbosity.VERBOSE)
logger.log(step='event', data={"String": "Evaluation Metrics: {}".format(metrics)}, verbosity=dllogger.Verbosity.DEFAULT)
print(metrics) | DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/inference/inference_triton.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import dllogger
import hydra
import torch
from apex import amp
from omegaconf import OmegaConf
import conf.conf_utils
from loggers.log_helper import setup_logger
from data.data_utils import Preprocessor
def run_inference(config):
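    """Native (non-Triton) inference: rebuilds datasets from the checkpoint's config
    (optionally preprocessing a user-supplied dataset), restores the model, and logs
    the configured evaluation metrics.
    """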
cfg = config
with open(os.path.join(cfg.checkpoint, ".hydra/config.yaml"), "rb") as f:
config = OmegaConf.load(f)
if cfg.get("evaluator", None) is not None:
config.evaluator.config = OmegaConf.merge(config.evaluator.config, cfg.evaluator.config)
if cfg.get("dataset_dir", None):
if not os.path.isdir(config.dataset.config.dest_path):
raise ValueError("dataset_dir must be a directory")
config.dataset.config.dest_path = cfg.dataset_dir
config.evaluator.config.device = cfg.device
if cfg.get("dataset_path", None):
preprocessor = Preprocessor(config.dataset.config)
if cfg.get("preproc_state_path", None):
preprocessor_state_file = cfg.preproc_state_path
else:
preprocessor_state_file = None
preprocessor.load_state(preprocessor_state_file)
test_df = preprocessor.preprocess_test(dataset=cfg.dataset_path)
test_df = preprocessor.apply_scalers(test_df)
test_df = preprocessor.impute(test_df)
train, valid, test = hydra.utils.call(config.dataset, input_df=test_df)
else:
train, valid, test = hydra.utils.call(config.dataset)
del train, valid
evaluator = hydra.utils.instantiate(config.evaluator, test_data=test)
model = hydra.utils.instantiate(config.model)
if not (config.dataset.config.get('xgb', False) or config.dataset.config.get('stat', False)):
state_dict = torch.load(os.path.join(cfg.checkpoint, "best_checkpoint.zip"))['model_state_dict']
model.load_state_dict(state_dict)
device = torch.device(cfg.device) # maybe change depending on evaluator
model.to(device=device)
precision = cfg.precision
assert precision in ["fp16", "fp32"], "Precision needs to be either fp32 or fp16"
if precision == "fp16":
model = amp.initialize(model, opt_level="O2")
else:
model.load(cfg.checkpoint)
preds_full, labels_full, ids_full, weights_full = evaluator.predict(model)
eval_metrics = evaluator.evaluate(preds_full, labels_full, ids_full, weights_full)
logger = setup_logger(cfg)
logger.log(step=[], data={k: float(v) for k, v in eval_metrics.items()}, verbosity=dllogger.Verbosity.VERBOSE)
logger.log(step='event', data={"String": "Evaluation Metrics: {}".format(eval_metrics)}, verbosity=dllogger.Verbosity.DEFAULT)
return eval_metrics
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/inference/inference.py |
# Copyright 2022 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import os
def select_per_group(df, start, end):
    '''
    Groups the dataframe by _id_ and takes the slice from start to end of each group.
    The slices are concatenated back into a single dataframe.
    '''
    result = []
    for _, g in df.groupby("_id_"):
        result.append(g[start:end])
    return pd.concat(result)
def select_test_group(df, encoder, example):
    '''
    Creates the dataframe passed to the xgboost predict. After grouping by _id_, each group is
    sliced so that exactly one row remains per complete window of length `example` (the row at
    the last encoder step of each window).
    '''
    final = []
    for _, g in df.groupby("_id_"):
        final.append(g[encoder - 1: encoder + len(g) - example])
    return pd.concat(final)
def load_xgb_df(dest_path, features, ds_type):
'''
Loads and does some light preprocessing on the train, valid and test.
First the csvs are read for each, then the features not present in the feature spec are dropped,
and finally the features with datatype as object are dropped. The final step is to prevent issues with
xgboost training and cuDF casting.
'''
path = dest_path
if not isinstance(path, pd.DataFrame):
df = pd.read_csv(os.path.join(path, f"{ds_type}.csv"))
else:
df = path
all_features = [f.name for f in features] + ['_id_']
all_read = df.columns
to_drop = [c for c in all_read if c not in all_features]
df.drop(columns=to_drop, inplace=True)
object_columns = [c for c, d in zip(df.columns, df.dtypes) if d == "object"]
df.drop(columns=object_columns, inplace=True)
return df
def xgb_multiID_preprocess(df, features, time_series_count):
    '''
    Widens the target: each series' target is added as a separate column ({target}_{i}),
    merged on the time column, so every row can see the targets of all time series.
    '''
    date = [feature.name for feature in features if feature.feature_type == "TIME"][0]
    target = [feature.name for feature in features if feature.feature_type == "TARGET"][0]
target_values = []
for _, g in df.groupby("_id_"):
target_values.append(g[[date, target]])
final = target_values[0]
final.rename(columns={target: f'{target}_{0}'}, inplace=True)
for i in range(1, time_series_count):
target_values[i].rename(columns={target: f'{target}_{i}'}, inplace=True)
final = final.merge(target_values[i], on=date, how='outer')
df = df.merge(final, on=date, how='outer')
return df
def feat_adder(df, lag_feats, rolling_feats):
    '''
    Main data preprocessing function for xgboost. lag_feats and rolling_feats are both
    dictionaries mapping feature names to lists of window sizes. After grouping by _id_,
    each lag feature is shifted i steps into the past, and each rolling feature is summed
    over the previous i time steps (a rolling sum, not a mean, despite the moving-average
    naming upstream). The new columns are named {feature_name}_{i}_lag and
    {feature_name}_{i}_rolling; see the sketch after this function.
    '''
final = []
for _, g in df.groupby("_id_"):
for f, v in lag_feats.items():
for i in v:
g['{}_{}_lag'.format(f, i)] = g[f].shift(i)
for f, v in rolling_feats.items():
for i in v:
g['{}_{}_rolling'.format(f, i)] = g[f].rolling(i).sum()
final.append(g)
    return pd.concat(final)
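# A minimal sketch of feat_adder's output columns (hypothetical feature name "sales"):
#   feat_adder(df, lag_feats={"sales": [1, 2]}, rolling_feats={"sales": [7]})
# adds, per _id_ group, the columns "sales_1_lag", "sales_2_lag" and "sales_7_rolling".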
def data_label_split(df, target):
    '''
    Drops rows whose target value is NaN, then separates the labels from
    the data with an in-place drop of the target columns.
    '''
    df.dropna(subset=target, inplace=True)
    labels = df[target]
    df.drop(columns=target, inplace=True)
    return labels
def target_shift(df, target, feat, i):
    '''
    Pulls features from (i+1) time steps in the future into the current row; currently these
    are the target and the known/static variables. The future target is the value predicted
    by the trainer. The shifted columns get a _target suffix appended to their name.
    '''
    in_feat = target + feat
    out_feat = [f'{f}_target' for f in in_feat]
    df[out_feat] = df.groupby("_id_")[in_feat].shift(-1 * (i + 1))
    return df
# Copyright 2021-2022 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import pickle
from bisect import bisect
import dgl
import numpy as np
import pandas as pd
import torch
from dgl.transform import metis_partition_assignment
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
from data.data_utils import InputTypes, DataTypes, FEAT_NAMES, FEAT_ORDER, DTYPE_MAP, translate_features, group_ids
from data.xgb_util import load_xgb_df, feat_adder, data_label_split, select_test_group, target_shift, \
    xgb_multiID_preprocess
class TSBaseDataset(Dataset):
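    """Common base for sliding-window time-series datasets: stores the feature spec and the
    window parameters (example_length > encoder_length), and defers data loading to load().
    """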
def __init__(self, features, df, encoder_length, example_length, stride=1, **kwargs):
super().__init__()
assert example_length > encoder_length
self.features = features
self.encoder_length = encoder_length
self.example_length = example_length
self.stride = stride
self.df = df
self.load()
self.features = [i for i in self.features if i.feature_type != InputTypes.TIME]
self.feature_type_col_map = [
[i for i, f in enumerate(self.features) if (f.feature_type, f.feature_embed_type) == x] for x in FEAT_ORDER
]
def load(self):
raise NotImplementedError
class TSDataset(TSBaseDataset):
def __init__(self, features, df=None, encoder_length=52, example_length=54, stride=1, **kwargs):
super().__init__(features, df, encoder_length, example_length, stride)
self.grouped = [x for x in self.grouped if x.shape[0] >= self.example_length]
self.group_lens = [(g.shape[0] - self.example_length + 1) // self.stride for g in self.grouped]
self._cum_examples_in_group = np.cumsum(self.group_lens)
self.grouped = [
[
arr[:, idxs].view(dtype=np.float32).astype(DTYPE_MAP[t[1]])
for t, idxs in zip(FEAT_ORDER, self.feature_type_col_map)
]
for arr in self.grouped
]
def load(self):
if isinstance(self.df, pd.DataFrame):
data = self.df
else:
data = pd.read_csv(self.df, index_col=0)
self.grouped = group_ids(data, self.features)
def get_probabilities(self):
sampled = []
for i in range(len(self.grouped)):
group_len = self.group_lens[i]
group = self.grouped[i]
sample_weights = group[-1]
sampled.append(sample_weights[np.arange(0, self.stride * group_len, self.stride)])
sampled = np.concatenate(sampled)
return sampled
def __len__(self):
return self._cum_examples_in_group[-1]
def __getitem__(self, idx):
g_idx = bisect(self._cum_examples_in_group, idx)
e_idx = idx - self._cum_examples_in_group[g_idx - 1] if g_idx else idx
group = self.grouped[g_idx]
tensors = [
torch.from_numpy(feat[e_idx * self.stride: e_idx * self.stride + self.example_length])
if feat.size
else torch.empty(0)
for feat in group
]
out = dict(zip(FEAT_NAMES, tensors))
out["id"] = out["id"][0, :]
return out
class TSBinaryDataset(TSDataset):
def load(self):
if isinstance(self.df, pd.DataFrame):
data = self.df
self.grouped = group_ids(data, self.features)
else:
self.grouped = pickle.load(open(self.df, "rb"))
class TSMultiIDDatasetBase(TSBaseDataset):
def __init__(self,
features,
df=None,
encoder_length=52,
example_length=54,
stride=1,
collumns_to_collapse=None,
**kwargs
):
super().__init__(features, df, encoder_length, example_length, stride)
        # This part is tricky: we want to do this only for the training dataset and then apply the same changes to the valid and test
        # splits to maintain coherence. We can't do this in the preprocessing step because many different dataset classes rely on the
        # same csv file. Thus, the first time a dataset is created, we pass an empty list of columns to collapse and populate it here.
        # This list is part of the common argument set for the train, valid and test splits, so it is maintained throughout
        # construction of all the splits.
if collumns_to_collapse is not None:
if not collumns_to_collapse:
for name, df in self.tables.items():
if df.eq(df.iloc[:, 0], axis=0).all().all():
self.tables[name] = df.iloc[:, :1]
collumns_to_collapse.append(name)
                # Append a dummy value to indicate that this operation has already been performed.
                # This alleviates an edge case in which the train split collapses no columns and would otherwise pass an empty list,
                # allowing columns to be collapsed in the valid and test sets.
collumns_to_collapse.append(None)
else:
for name in collumns_to_collapse:
if name is not None:
self.tables[name] = self.tables[name].iloc[:, :1]
self.data = {}
for fname, ftype in zip(FEAT_NAMES, FEAT_ORDER):
names = [f.name for f in self.features if (f.feature_type, f.feature_embed_type) == ftype]
if names:
df = pd.concat([v for k,v in self.tables.items() if k in names], axis=1)
self.data[fname] = df.values.astype(dtype=DTYPE_MAP[ftype[1]])
else:
self.data[fname] = None
del self.tables
self._n_timeslices = (next(len(df) for df in self.data.values() if df is not None) - self.example_length + 1) // self.stride
def load(self):
time_col_name = next(x.name for x in self.features if x.feature_type == InputTypes.TIME)
id_col_name = next(x.name for x in self.features if x.feature_type == InputTypes.ID)
if isinstance(self.df, pd.DataFrame):
data = self.df
else:
data = pd.read_csv(self.df, index_col=0)
self.tables = {}
for f in self.features:
self.tables[f.name] = data.pivot(index=time_col_name, columns=id_col_name, values=f.name)
class TSMultiTargetDataset(TSMultiIDDatasetBase):
def __len__(self):
return self._n_timeslices
def __getitem__(self, idx):
if idx < 0:
idx = idx + len(self)
if idx >= len(self) or idx < 0:
raise IndexError
out = {
k: torch.from_numpy(v[idx * self.stride : idx * self.stride + self.example_length])
if v is not None else torch.empty(0)
for k,v in self.data.items()
}
return out
class TSMultiIDDataset(TSMultiIDDatasetBase):
def __init__(self, features, df=None, encoder_length=52, example_length=54, stride=1, collumns_to_collapse=None, **kwargs):
super().__init__(features, df, encoder_length, example_length, stride, collumns_to_collapse)
def __len__(self):
return self._n_timeslices * self.data['id'].shape[1]
def __getitem__(self, idx):
g_idx = idx // self._n_timeslices
e_idx = idx - g_idx * self._n_timeslices
targets = torch.from_numpy(self.data['target'][e_idx * self.stride : e_idx * self.stride + self.example_length])
out = {
k: torch.from_numpy(v[e_idx * self.stride : e_idx * self.stride + self.example_length, :])
if v is not None else torch.empty(0)
for k,v in self.data.items()
}
out['o_cont'] = torch.cat([out['o_cont'], targets], dim=-1)
out['s_cat'] = out['s_cat'][:, g_idx].unsqueeze(1) if out['s_cat'].numel() else out['s_cat']
out['s_cont'] = out['s_cont'][:, g_idx].unsqueeze(1) if out['s_cont'].numel() else out['s_cont']
out['id'] = out['id'][:, g_idx]
out['target'] = out['target'][:, g_idx].unsqueeze(1)
out['weight'] = out['weight'][:, g_idx].unsqueeze(1) if out['weight'].numel() else out['weight']
return out
class StatDataset(Dataset):
def __init__(self, features, path_stat, df=None, encoder_length=52, example_length=54, stride=1, split=None, split_feature=None, ds_type=None):
self.ds_type = ds_type
if ds_type == "valid":
return
super().__init__()
assert example_length > encoder_length, "Length of example longer than encoder length"
assert split, "Split not given"
assert ds_type in ["train", "test"]
self.features = features
self.time_feature = split_feature
self.weight_features = [feature.name for feature in self.features if feature.feature_type == InputTypes.WEIGHT]
self.encoder_length = encoder_length
self.example_length = example_length
self.horizon = self.example_length - self.encoder_length
self.stride = stride
self.split = split
self.id_col_name = next(x.name for x in self.features if x.feature_type == InputTypes.ID)
self.col_dtypes = {v.name: DTYPE_MAP[v.feature_embed_type] for v in self.features}
if isinstance(df, pd.DataFrame):
self.data = df.astype(self.col_dtypes)
else:
self.data = pd.read_csv(os.path.join(path_stat, "full.csv"), dtype=self.col_dtypes)
self.data = self.data.groupby(self.id_col_name).filter(lambda group: len(group) >= self.example_length)
self.grouped = list(self.data.groupby(self.id_col_name))
self.endog = [feature.name for feature in self.features if feature.feature_type == InputTypes.TARGET]
self.exog = [
feature.name
for feature in self.features
if feature.feature_type in [InputTypes.KNOWN, InputTypes.OBSERVED, InputTypes.STATIC]
and feature.feature_embed_type == DataTypes.CONTINUOUS
]
self.grouped = [group[1] for group in self.grouped]
self.grouped = [
group
for group in self.grouped
if len(group[group[self.time_feature] <= self.split]) >= self.encoder_length
and len(group[group[self.time_feature] > self.split]) >= self.horizon
]
self._cum_examples_in_group = np.cumsum(
[(len(group[group[self.time_feature] > split]) - self.horizon) // self.stride + 1 for group in self.grouped]
)
def __len__(self):
if self.ds_type == "valid":
raise ValueError
return self._cum_examples_in_group[-1]
def __getitem__(self, idx):
if self.ds_type == "valid":
raise ValueError
        if idx >= self._cum_examples_in_group[-1]:
raise StopIteration
g_idx = bisect(self._cum_examples_in_group, idx)
e_idx = idx - self._cum_examples_in_group[g_idx - 1] if g_idx else idx
group = self.grouped[g_idx]
test = group[group[self.time_feature] > self.split]
if self.ds_type == "test":
test_slice = test[self.stride * e_idx: self.stride * e_idx + self.horizon]
test_out = {"endog": test_slice[self.endog], "exog": test_slice[self.exog], "id": test_slice[self.id_col_name]}
if len(self.weight_features):
test_out["weight"] = test_slice[self.weight_features]
return test_out
else:
train = group[group[self.time_feature] <= self.split]
            if (self.encoder_length - self.stride * e_idx) > 0:
                train_slice = pd.concat([
                    train[-(self.encoder_length - self.stride * e_idx):],
                    test[max(0, self.stride * e_idx - self.encoder_length): self.stride * e_idx],
                ])
else:
train_slice = test[max(0, self.stride * e_idx - self.encoder_length): self.stride * e_idx]
train_out = {"endog": train_slice[self.endog], "exog": train_slice[self.exog]}
return train_out
class XGBDataset(Dataset):
def __init__(self, df, path_xgb, features_xgb, lag_features, moving_average_features, example_length, encoder_length, time_series_count, MultiID, ds_type, **kwargs):
self.ds_type = ds_type
features = features_xgb
dest_path = df if isinstance(df, pd.DataFrame) else path_xgb
self.encoder_length = encoder_length
self.example_length = example_length
lag_features_conf = lag_features
self.lag_features = {}
for feat in lag_features_conf:
assert feat.get("min_value", None) is not None or feat.get("value", None) is not None
if feat.get("min_value", None) is not None:
assert feat.get("max_value", None) is not None and feat.get("min_value") > 0 and feat.get(
"max_value") > feat.get("min_value")
self.lag_features[feat.name] = list(range(feat.get("min_value"), feat.get("max_value") + 1))
else:
self.lag_features[feat.name] = list(feat.value)
moving_average_features_conf = moving_average_features
self.moving_average_features = {}
for feat in moving_average_features_conf:
assert feat.get("window_size", None) is not None
self.moving_average_features[feat.name] = self.moving_average_features.get(feat.name, []) + [
feat.window_size]
self.horizon = example_length - encoder_length
self.target = [feature.name for feature in features if
feature.feature_type == "TARGET"]
self.observed = [feature.name for feature in features if
feature.feature_type == "OBSERVED"]
self.known = [feature.name for feature in features if
feature.feature_type in ["KNOWN", "STATIC"]]
assert len(self.target) == 1, "Only 1 target feature is currently supported with xgboost"
self.data = load_xgb_df(dest_path, features, ds_type)
self.extra_columns = [[f'{k}_{i}' for i in v] for k, v in self.lag_features.items()]
if MultiID:
target = self.target[0]
lag_target_value = self.lag_features.pop(target, [])
for i in range(time_series_count):
self.lag_features[f'{target}_{i}'] = lag_target_value
self.moving_average_features[f'{target}_{i}'] = self.moving_average_features.pop(target, [])
self.data = xgb_multiID_preprocess(self.data, features, time_series_count) # XXX need to work with
self.data = feat_adder(self.data, self.lag_features, self.moving_average_features)
def __getitem__(self, idx):
if idx >= self.horizon:
raise StopIteration
data_step = self.data.copy()
data_step = target_shift(data_step, self.target, self.known, idx)
if self.ds_type == 'test':
data_step = select_test_group(data_step, self.encoder_length, self.example_length)
labels = data_label_split(data_step, [f'{i}_target' for i in self.target])
return data_step, labels
def __len__(self):
return self.horizon
class ClusteredGraphDataset(Dataset):
def __init__(self, graph, graph_partitions=10, partition_joining_coef=2, **kwargs):
if isinstance(graph, str):
self.graph = pickle.load(open(graph, "rb"))
else:
self.graph = graph
assert isinstance(graph_partitions, int) and graph_partitions > 0
assert partition_joining_coef <= graph_partitions
self.part_count = graph_partitions
if graph_partitions > 1:
self.partition = metis_partition_assignment(self.graph, self.part_count)
else:
self.partition = torch.zeros(self.graph.num_nodes(), dtype=torch.int64)
self.joining_coef = partition_joining_coef
def __len__(self):
return math.comb(self.part_count, self.joining_coef)
def __getitem__(self, idx):
indicator = self.idx_to_combination(self.part_count, self.joining_coef, idx)
c_ids = np.nonzero(indicator)[0]
subgraph = self.get_subgraph(c_ids)
return subgraph
def get_subgraph(self, c_ids):
ids = sum([self.partition == i for i in c_ids]).bool()
return self.graph.subgraph(ids)
def idx_to_combination(self, n, r, m):
"""
n: int total number of elements
r: int number of elements in combination
m: int 0-based index of combination in reverse-lexicographic order
Returns list - indicator vector of chosen elements
"""
assert m < math.comb(n, r), "Index out of range"
out = [0] * n
while n > 0:
if n > r and r >= 0:
y = math.comb(n - 1, r)
else:
y = 0
if m >= y:
m -= y
out[n - 1] = 1
r -= 1
n -= 1
return out
class TemporalClusteredGraphDataset(ClusteredGraphDataset):
def __init__(self, features, graph, df=None, encoder_length=52, example_length=54, stride=1, **kwargs):
super().__init__(graph, **kwargs)
assert example_length > encoder_length
self.features = [i for i in features if i.feature_type != InputTypes.TIME]
self.encoder_length = encoder_length
self.example_length = example_length
self.stride = stride
self.df = df
self.feature_type_col_map = [
np.array([i for i, f in enumerate(self.features) if (f.feature_type, f.feature_embed_type) == x])
for x in FEAT_ORDER
]
if isinstance(df, pd.DataFrame):
data = self.df
grouped = group_ids(data, self.features)
else:
grouped = pickle.load(open(self.df, "rb"))
# We assume that all the time series are of the same length and have the same set of features
assert all([x.shape == grouped[0].shape for x in grouped])
ndata = np.stack(grouped)
self.ndata = {
name: ndata[:, :, ids].view(dtype=np.float32).astype(DTYPE_MAP[f[1]])
if not ids.size == 0
else np.empty((*ndata.shape[:-1], 0))
for name, f, ids in zip(FEAT_NAMES, FEAT_ORDER, self.feature_type_col_map)
}
self.t_dim = ndata.shape[1]
self.n_timeslices = (self.t_dim - self.example_length + 1) // self.stride
def __len__(self):
# the number of possible subgraphs times the number of possible time slices
return super().__len__() * self.n_timeslices
def __getitem__(self, idx):
g_idx = idx // self.n_timeslices
t_idx = idx - g_idx * self.n_timeslices
subgraph = super().__getitem__(g_idx)
node_ids = np.array(subgraph.ndata["_ID"])
for k, v in self.ndata.items():
subgraph.ndata[k] = torch.from_numpy(
v[node_ids, t_idx * self.stride: t_idx * self.stride + self.example_length, :]
)
return subgraph
def create_datasets(config, input_df=None):
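    """Builds the train/valid/test datasets, selecting the dataset class from config flags
    (xgb, stat, binarized, construct_graph, MultiID, single_target). If input_df is provided,
    only a test dataset is built from the already-preprocessed frame and train/valid are None.
    """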
def select_dataset_class(config):
binarized = config.get("binarized", False)
graph_dataset = config.get("construct_graph", False)
multi_id_dataset = config.get("MultiID", False)
single_target = config.get('single_target', False)
if config.get("xgb", False):
specific_args = {
"path_xgb": config.dest_path,
"features_xgb": config.features,
"lag_features": config.get("lag_features", []),
"moving_average_features": config.get("moving_average_features", []),
"time_series_count": config.time_series_count,
"MultiID": config.get("MultiID", False)
}
return XGBDataset, specific_args
if config.get("stat", False):
specific_args = {
"path_stat": config.dest_path,
"split": config.test_range[0],
"split_feature": config.time_ids
}
return StatDataset, specific_args
if binarized and graph_dataset:
specific_args = {
"graph": os.path.join(config.dest_path, "graph.bin"),
"graph_partitions": config.graph_partitions,
"partition_joining_coef": config.partition_joining_coef,
}
return TemporalClusteredGraphDataset, specific_args
elif binarized and multi_id_dataset:
raise NotImplementedError
elif binarized:
return TSBinaryDataset, {}
elif not binarized and graph_dataset:
raise NotImplementedError
elif not binarized and multi_id_dataset and not single_target:
specific_args = {}
if config.get('collapse_identical_columns', False):
specific_args['collumns_to_collapse'] = []
return TSMultiTargetDataset, specific_args
elif not binarized and multi_id_dataset and single_target:
specific_args = {}
if config.get('collapse_identical_columns', False):
specific_args['collumns_to_collapse'] = []
return TSMultiIDDataset, specific_args
else:
return TSDataset, {}
common_args = {
"features": translate_features(config.features),
"encoder_length": config.encoder_length,
"example_length": config.example_length,
"stride": config.get("stride", 1),
}
dataset_class, specific_args = select_dataset_class(config)
if input_df is not None:
print("Input DataFrame provided to create_datasets functions")
print("Warning: Please make sure the dataframe is preprocessed")
test = dataset_class(df=input_df, **common_args, **specific_args, ds_type='test')
train = None
valid = None
else:
path_template = os.path.join(config.dest_path, "{{subset}}.{extension}")
path_template = path_template.format(extension="bin" if config.get("binarized", False) else "csv")
train = dataset_class(df=path_template.format(subset="train"), **common_args, **specific_args, ds_type="train")
valid = dataset_class(df=path_template.format(subset="valid"), **common_args, **specific_args, ds_type="valid")
test = dataset_class(df=path_template.format(subset="test"), **common_args, **specific_args, ds_type="test")
if not (config.get("xgb", False) or config.get("stat", False)):
train = sample_data(train, config.get("train_samples", -1))
valid = sample_data(valid, config.get("valid_samples", -1))
return train, valid, test
def sample_data(dataset, num_samples):
if num_samples < 0:
return dataset
else:
return torch.utils.data.Subset(dataset,
np.random.choice(np.arange(len(dataset)), size=num_samples, replace=False))
def get_collate_fn(model_type, encoder_length, test=False):
allowed_types = ['default', 'graph', 'autoregressive']
if model_type not in allowed_types:
raise ValueError(f'Model type has to be one of {allowed_types}')
def collate_graph(samples):
"""A collater used for GNNs"""
batch = dgl.batch(samples)
labels = batch.ndata["target"][:, encoder_length:, :]
weights = batch.ndata['weight']
if weights is not None and weights.numel():
weights = weights[:, encoder_length :, :]
return batch, labels, weights
def collate_ar(samples):
batch = default_collate(samples)
labels = batch["target"]
weights = batch['weight']
return batch, labels, weights
def collate_dict(samples):
"""Default TSPP collater"""
batch = default_collate(samples)
labels = batch["target"][:, encoder_length:, :]
weights = batch['weight']
if weights is not None and weights.numel():
weights = weights[:, encoder_length:, :]
return batch, labels, weights
if model_type == 'graph':
return collate_graph
elif model_type == 'autoregressive' and not test:
return collate_ar
else:
return collate_dict
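# A minimal wiring sketch (assuming a config exposing model_type, encoder_length and batch_size):
#   collate = get_collate_fn(config.model_type, config.encoder_length, test=True)
#   loader = torch.utils.data.DataLoader(test_dataset, batch_size=config.batch_size, collate_fn=collate)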
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/data/datasets.py |
# Copyright 2021-2022 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import enum
import os
import pickle
import hydra
import numpy as np
import pandas as pd
from omegaconf.listconfig import ListConfig
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import FunctionTransformer
from typing import Union
class DataTypes(enum.IntEnum):
"""Defines numerical types of each column."""
CONTINUOUS = 0
CATEGORICAL = 1
DATE = 2
STR = 3
DTYPE_MAP = {
DataTypes.CONTINUOUS: np.float32,
DataTypes.CATEGORICAL: np.int64,
DataTypes.DATE: np.datetime64,
DataTypes.STR: str,
}
class InputTypes(enum.IntEnum):
"""Defines input types of each column."""
TARGET = 0
OBSERVED = 1
KNOWN = 2
STATIC = 3
ID = 4 # Single column used as an entity identifier
TIME = 5 # Single column exclusively used as a time index
WEIGHT = 6
SAMPLE_WEIGHT = 7
class FeatureSpec:
enabled_attributes = ["name", "feature_type", "feature_embed_type", "cardinality", "scaler"]
def __init__(self, input_dict):
for key in input_dict:
if key in self.enabled_attributes:
setattr(self, key, input_dict[key])
else:
raise ValueError("Attribute not enabled: {attr}".format(attr=key))
self.name = input_dict["name"]
self.feature_type = InputTypes[input_dict["feature_type"]]
self.feature_embed_type = DataTypes[input_dict["feature_embed_type"]]
def get(self, key, value=None):
if hasattr(self, key):
return getattr(self, key)
else:
return value
def __str__(self):
return str((self.name, self.feature_type, self.feature_embed_type))
def __repr__(self):
return str(self)
FEAT_ORDER = [
(InputTypes.STATIC, DataTypes.CATEGORICAL),
(InputTypes.STATIC, DataTypes.CONTINUOUS),
(InputTypes.KNOWN, DataTypes.CATEGORICAL),
(InputTypes.KNOWN, DataTypes.CONTINUOUS),
(InputTypes.OBSERVED, DataTypes.CATEGORICAL),
(InputTypes.OBSERVED, DataTypes.CONTINUOUS),
(InputTypes.TARGET, DataTypes.CONTINUOUS),
(InputTypes.WEIGHT, DataTypes.CONTINUOUS),
(InputTypes.SAMPLE_WEIGHT, DataTypes.CONTINUOUS),
(InputTypes.ID, DataTypes.CATEGORICAL),
]
FEAT_NAMES = ["s_cat", "s_cont", "k_cat", "k_cont", "o_cat", "o_cont", "target", "weight", "sample_weight", "id"]
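# FEAT_NAMES[i] is the key under which all features matching the (input type, data type) pair FEAT_ORDER[i] are grouped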
def group_ids(df, features):
col_names = ["_id_"] + [
x.name
for x in features
if x.feature_embed_type != DataTypes.STR
and x.feature_type != InputTypes.TIME
and x.feature_type != InputTypes.ID
]
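    # Values are bit-reinterpreted (.view), not converted, from float32 to int32 here; dataset classes
    # reverse this later with .view(dtype=np.float32) before casting each feature to its final dtype.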
grouped = [x[1][col_names].values.astype(np.float32).view(dtype=np.int32) for x in df.groupby("_id_")]
return grouped
def translate_features(features, preproc=False):
all_features = [FeatureSpec(feature) for feature in features]
if preproc:
return all_features
return [FeatureSpec({"name": "_id_", "feature_type": "ID", "feature_embed_type": "CATEGORICAL"})] + [
feature for feature in all_features if feature.feature_type != InputTypes.ID
]
def map_dt(dt):
    if isinstance(dt, ListConfig):
        dt = datetime.datetime(*dt)
    elif isinstance(dt, str):
        dt = datetime.datetime.strptime(dt, "%Y-%m-%d")
    return dt
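# Sketch of the accepted formats (integer time indices pass through unchanged):
#
#     map_dt(100)                             # -> 100
#     map_dt("2014-01-01")                    # -> datetime.datetime(2014, 1, 1)
#     map_dt(OmegaConf.create([2014, 1, 1]))  # ListConfig -> datetime.datetime(2014, 1, 1)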
def impute(df, config):
if not (config.get("missing_data_label", False)):
return df, None
else:
imp = SimpleImputer(missing_values=config.missing_data_label, strategy="mean")
        mask = df.applymap(lambda x: x == config.missing_data_label)
data = df.values
col_mask = (data == config.missing_data_label).all(axis=0)
data[:, ~col_mask] = imp.fit_transform(data)
return data, mask
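# Note on the contract above: impute returns (df, None) untouched when no
# missing_data_label is configured; otherwise it returns the imputed ndarray
# together with a boolean mask marking the originally missing cells.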
def map_scalers(features):
mapping = {}
for feature in features:
if feature.get("scaler", None):
if mapping.get(feature.scaler, None):
mapping[feature.scaler].append(feature.name)
else:
mapping[feature.scaler] = [feature.name]
return mapping
class Log1pScaler(FunctionTransformer):
@staticmethod
def _inverse(x):
return np.expm1(x)
def __init__(self):
super().__init__(func=np.log1p, inverse_func=Log1pScaler._inverse, validate=False)
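# Round-trip sketch (illustrative, not part of the original file):
#
#     s = Log1pScaler()
#     y = s.fit_transform(np.array([[0.0], [9.0]]))  # log1p -> [[0.0], [2.3026]]
#     s.inverse_transform(y)                         # expm1 -> [[0.0], [9.0]]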
class CompositeScaler:
def __init__(self, target_features, input_continuous, scale_per_id):
self.target_mapping = map_scalers(target_features)
self.continuous_mapping = map_scalers(input_continuous)
self.target_features = target_features
self.input_continuous = input_continuous
self.scale_per_id = scale_per_id
self.continuous_scalers = {}
self.target_scalers = {}
def fit(self, df):
for k, v in self.continuous_mapping.items():
self.continuous_scalers[k] = {}
if self.scale_per_id:
for identifier, sliced in df.groupby("_id_"):
scaler = hydra.utils.instantiate(k).fit(sliced[v])
self.continuous_scalers[k][identifier] = scaler
else:
scaler = hydra.utils.instantiate(k).fit(df[v])
self.continuous_scalers[k][""] = scaler
for k, v in self.target_mapping.items():
self.target_scalers[k] = {}
if self.scale_per_id:
for identifier, sliced in df.groupby("_id_"):
scaler = hydra.utils.instantiate(k).fit(sliced[v])
self.target_scalers[k][identifier] = scaler
else:
scaler = hydra.utils.instantiate(k).fit(df[v])
self.target_scalers[k][""] = scaler
def apply_scalers(self, df, name=None):
if name is None:
name = df.name
for k, v in self.continuous_mapping.items():
df[v] = self.continuous_scalers[k][name].transform(df[v])
for k, v in self.target_mapping.items():
df[v] = self.target_scalers[k][name].transform(df[v])
return df
def transform(self, df):
if self.scale_per_id:
df = df.groupby("_id_").apply(self.apply_scalers)
else:
df = self.apply_scalers(df, name="")
return df
def inverse_transform_targets(self, values, ids=None):
        # TODO: Assuming single targets for now. This has to be adapted to multi-target
if len(self.target_scalers) > 0:
shape = values.shape
scalers = list(self.target_scalers.values())[0]
if self.scale_per_id:
assert ids is not None
flat_values = values.flatten()
flat_ids = np.repeat(ids, values.shape[1])
df = pd.DataFrame({"id": flat_ids, "value": flat_values})
df_list = []
for identifier, sliced in df.groupby("id"):
df_list.append(np.stack(
[scalers[identifier].inverse_transform(sliced["value"].values.reshape(-1, 1)).flatten(),
sliced.index.values], axis=-1))
tmp = np.concatenate(df_list)
tmp = tmp[tmp[:, -1].argsort()]
return tmp[:, 0].reshape(shape)
else:
flat_values = values.reshape(-1, 1)
flat_values = scalers[""].inverse_transform(flat_values)
return flat_values.reshape(shape)
return values
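# Usage sketch (hypothetical setup; each feature's `scaler` entry is assumed to be
# a hydra config that `hydra.utils.instantiate` can turn into an sklearn-style scaler):
#
#     scaler = CompositeScaler(target_features, input_continuous, scale_per_id=False)
#     scaler.fit(train_df)                              # one scaler per (spec, id) group
#     train_df = scaler.transform(train_df)
#     preds = scaler.inverse_transform_targets(preds)   # back to original units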
class Preprocessor:
def __init__(self, config):
self.config = config
self.features = translate_features(self.config["features"], preproc=True)
self.feat_splits = self._get_feature_splits()
self.cont_features_names = [continuous.name for continuous in self.feat_splits["input_continuous"]]
self.dest_path = self.config.dest_path
self.source_path = self.config.source_path
self.preprocessor_state = {}
def _get_feature_splits(self):
splits = {}
splits["dates"] = [feature for feature in self.features if feature.feature_embed_type == DataTypes.DATE]
splits["target_features"] = [feature for feature in self.features if feature.feature_type == InputTypes.TARGET]
splits["time_feature"] = [feature for feature in self.features if feature.feature_type == InputTypes.TIME][0]
splits["id_features"] = [feature for feature in self.features if feature.feature_type == InputTypes.ID]
splits["input_categoricals"] = [
feature
for feature in self.features
if feature.feature_embed_type == DataTypes.CATEGORICAL
and feature.feature_type in [InputTypes.STATIC, InputTypes.KNOWN, InputTypes.OBSERVED]
]
splits["input_continuous"] = [
feature
for feature in self.features
if feature.feature_embed_type == DataTypes.CONTINUOUS
and feature.feature_type in [InputTypes.STATIC, InputTypes.KNOWN, InputTypes.OBSERVED]
]
return splits
def _map_ids(self, df):
print("Mapping nodes")
id_features = [feature.name for feature in self.feat_splits["id_features"]]
if "id_mappings" in self.preprocessor_state:
id_features_df = self.preprocessor_state["id_mappings"]
id_features_dict = id_features_df.set_index(id_features).to_dict()["_id_"]
def id_map_funct(x):
var = tuple(x[id_features])
if len(var) == 1:
var = var[0]
return id_features_dict.get(var, np.nan)
df["_id_"] = df.apply(lambda x: id_map_funct(x), axis=1)
else:
id_features = [feature.name for feature in self.feat_splits["id_features"]]
current_id = df[id_features[0]].astype("category").cat.codes + 1
for additional_id in id_features[1:]:
current_id = df[additional_id].astype("category").cat.codes * (current_id.max() + 1) + current_id + 1
df["_id_"] = current_id.astype("category").cat.codes
id_features_df = df[id_features + ["_id_"]]
id_features_df = id_features_df.drop_duplicates(subset=None).reset_index(drop=True)
self.preprocessor_state["id_mappings"] = id_features_df
def _map_categoricals(self, df):
print("Mapping categoricals to bounded range")
if "categorical_mappings" in self.preprocessor_state:
categorical_mappings = self.preprocessor_state["categorical_mappings"]
for categorical in self.feat_splits['input_categoricals']:
df[categorical.name] = df[categorical.name].map(categorical_mappings[categorical.name])
else:
input_categorical_map_dict = {}
for categorical in self.feat_splits['input_categoricals']:
cat_feature = df[categorical.name].astype("category")
input_categorical_map_dict[categorical.name] = dict(zip([np.nan] + cat_feature.cat.categories.tolist(),
range(0, len(cat_feature.cat.categories)+1)))
df[categorical.name] = cat_feature.cat.codes + 1
self.preprocessor_state["categorical_mappings"] = input_categorical_map_dict
def _get_dataset_splits(self, df):
print("Splitting datasets")
if hasattr(self.config, "valid_boundary") and self.config.valid_boundary is not None:
forecast_len = self.config.example_length - self.config.encoder_length
            # The valid split is shifted from the train split by the number of forecast steps into the future.
            # The test split is shifted from the valid split by the same number of forecast steps.
valid_boundary = map_dt(self.config.valid_boundary)
grouped = df.groupby('_id_')
train_mask = grouped[self.config.time_ids].apply(lambda dates: dates < valid_boundary)
train = df[train_mask]
print('Calculated train.')
train_sizes = train.groupby('_id_').size()
valid_indexes = grouped[self.config.time_ids].apply(
lambda dates: dates.iloc[(train_sizes[dates.name] - self.config.encoder_length):
(train_sizes[dates.name] + forecast_len)].index
if dates.name in train_sizes else pd.Series()
)
valid = df.loc[np.concatenate(valid_indexes)]
print('Calculated valid.')
test_indexes = grouped[self.config.time_ids].apply(
lambda dates: dates.iloc[(train_sizes[dates.name] - self.config.encoder_length + forecast_len):
(train_sizes[dates.name] + 2 * forecast_len)].index
if dates.name in train_sizes else pd.Series()
)
test = df.loc[np.concatenate(test_indexes)]
print('Calculated test.')
        elif df.dtypes[self.config.time_ids] not in [np.float64, int]:
index = df[self.config.time_ids]
train = df.loc[(index >= map_dt(self.config.train_range[0])) & (index < map_dt(self.config.train_range[1]))]
valid = df.loc[(index >= map_dt(self.config.valid_range[0])) & (index < map_dt(self.config.valid_range[1]))]
test = df.loc[(index >= map_dt(self.config.test_range[0])) & (index < map_dt(self.config.test_range[1]))]
else:
index = df[self.config.time_ids]
train = df.loc[(index >= self.config.train_range[0]) & (index < self.config.train_range[1])]
valid = df.loc[(index >= self.config.valid_range[0]) & (index < self.config.valid_range[1])]
test = df.loc[(index >= self.config.test_range[0]) & (index < self.config.test_range[1])]
train = train[(train.groupby('_id_').size()[train['_id_']] > self.config.encoder_length).values]
valid = valid[(valid.groupby('_id_').size()[valid['_id_']] > self.config.encoder_length).values]
test = test[(test.groupby('_id_').size()[test['_id_']] > self.config.encoder_length).values]
return train, valid, test
def _recombine_datasets(self, train, valid, test):
if hasattr(self.config, "valid_boundary") and self.config.valid_boundary is not None:
forecast_len = self.config.example_length - self.config.encoder_length
            # The valid split is shifted from the train split by the number of forecast steps into the future.
            # The test split is shifted from the valid split by the same number of forecast steps.
train_temp = []
valid_temp = []
for g0, g1 in zip(train.groupby("_id_"), valid.groupby("_id_")):
_train = g0[1].iloc[: -self.config.encoder_length]
_valid = g1[1].iloc[:forecast_len]
train_temp.append(_train)
valid_temp.append(_valid)
train = pd.concat(train_temp, axis=0)
valid = pd.concat(valid_temp, axis=0)
        elif train.dtypes[self.config.time_ids] not in [np.float64, int]:
train = train[train[self.config.time_ids] < map_dt(self.config.valid_range[0])]
valid = valid[valid[self.config.time_ids] < map_dt(self.config.test_range[0])]
else:
train = train[train[self.config.time_ids] < self.config.valid_range[0]]
valid = valid[valid[self.config.time_ids] < self.config.test_range[0]]
return pd.concat((train, valid, test))
def _drop_unseen_categoricals(self, train, valid, test, drop_unseen=True):
# TODO: Handle this for inference preprocess function
if self.config.get("drop_unseen", False):
print("Dropping unseen categoricals")
if not drop_unseen:
print("Warning: Assuming that inference dataset only has the input categoricals from the training set")
return train, valid, test
if hasattr(self.config, "valid_boundary") and self.config.valid_boundary is not None:
arriter = ["_id_"]
else:
arriter = [cat.name for cat in self.feat_splits["input_categoricals"]] + ["_id_"]
if train is not None:
for categorical in arriter:
seen_values = train[categorical].unique()
valid = valid[valid[categorical].isin(seen_values)]
test = test[test[categorical].isin(seen_values)]
return train, valid, test
def fit_scalers(self, df):
print("Calculating scalers")
self.scaler = CompositeScaler(
self.feat_splits["target_features"], self.feat_splits["input_continuous"], scale_per_id=self.config.get('scale_per_id', False)
)
self.scaler.fit(df)
self.preprocessor_state["scalers"] = self.scaler
def apply_scalers(self, df):
print("Applying scalers")
return self.preprocessor_state["scalers"].transform(df)
def save_datasets(self, train, valid, test):
print(F"Saving processed data at {self.dest_path}")
os.makedirs(self.dest_path, exist_ok=True)
train.to_csv(os.path.join(self.dest_path, "train.csv"))
valid.to_csv(os.path.join(self.dest_path, "valid.csv"))
test.to_csv(os.path.join(self.dest_path, "test.csv"))
self._recombine_datasets(train, valid, test).to_csv(os.path.join(self.dest_path, "full.csv"))
# Save relevant columns in binary form for faster dataloading
        # IMPORTANT: We always expect id to be a single column indicating the complete timeseries.
        # We also expect a copy of id in the form of a static categorical input!
if self.config.get("binarized", False):
grouped_train = group_ids(train, self.features)
grouped_valid = group_ids(valid, self.features)
grouped_test = group_ids(test, self.features)
            with open(os.path.join(self.dest_path, "train.bin"), "wb") as f:
                pickle.dump(grouped_train, f)
            with open(os.path.join(self.dest_path, "valid.bin"), "wb") as f:
                pickle.dump(grouped_valid, f)
            with open(os.path.join(self.dest_path, "test.bin"), "wb") as f:
                pickle.dump(grouped_test, f)
def save_state(self):
filepath = os.path.join(self.dest_path, "tspp_preprocess.bin")
print(F"Saving preprocessor state at {filepath}")
with open(filepath, "wb") as f:
pickle.dump(self.preprocessor_state, f)
def load_state(self, preprocessor_state_file):
filepath = os.path.join(self.config.dest_path, "tspp_preprocess.bin")
if preprocessor_state_file:
filepath = preprocessor_state_file
if not os.path.exists(filepath):
raise ValueError(F"Invalid preprocessor state file: {filepath}")
print(F"Reading preprocessor state binary file: {filepath}")
        with open(filepath, "rb") as f:
            self.preprocessor_state = pickle.load(f)
required_keys = ("id_mappings", "categorical_mappings", "scalers")
if not all(k in self.preprocessor_state for k in required_keys):
raise ValueError(F"preprocessor state binary file at :{filepath} must have keys={required_keys} but found={self.preprocessor_state.keys()}")
def impute(self, df):
print("Fixing any nans in continuous features")
        df[self.cont_features_names] = df[self.cont_features_names].replace(np.nan, 10 ** 9)
return df
def _init_setup(self, dataset=None, drop_na=True):
if dataset is None:
print(F"Reading in data from CSV File: {self.source_path}")
df = pd.read_csv(self.source_path, parse_dates=[d.name for d in self.feat_splits["dates"]])
elif isinstance(dataset, str) and dataset.endswith(".csv"):
print(F"Reading in data from CSV File: {dataset}")
df = pd.read_csv(dataset, parse_dates=[d.name for d in self.feat_splits["dates"]])
elif isinstance(dataset, pd.DataFrame):
print("Input DataFrame provided for preprocessing")
#TODO: check support for parse dates as done during read csv
# Currently date related features are only used for dataset splits during training
df = dataset.copy()
else:
raise ValueError(F"Function either accepts a path to a csv file or a dataframe")
print("Sorting on time feature")
#TODO: Check if we sort df for inference only case
df = df.sort_values([self.feat_splits["time_feature"].name])
f_names = [feature.name for feature in self.features] + [self.config.time_ids]
df = df[list(dict.fromkeys(f_names))]
if self.config.get("missing_data_label", False):
            df = df.replace(self.config.get("missing_data_label"), np.nan)
if drop_na:
df = df.dropna(subset=[t.name for t in self.feat_splits["target_features"]])
return df
def preprocess(self):
df = self._init_setup()
self._map_ids(df)
self._map_categoricals(df)
train, valid, test = self._get_dataset_splits(df)
train, valid, test = self._drop_unseen_categoricals(train, valid, test)
return train, valid, test
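    # Typical training-time flow (sketch): after preprocess(), the caller is
    # expected to fit_scalers(train), apply_scalers(...) to each split, then
    # save_datasets(...) and save_state().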
def preprocess_test(self, dataset: Union[str, pd.DataFrame]) -> pd.DataFrame:
df = self._init_setup(dataset=dataset, drop_na=False)
self._map_ids(df)
self._map_categoricals(df)
#TODO: this is a workaround and maybe needs to be handled properly in the future
_, _, df = self._drop_unseen_categoricals(None, None, df, drop_unseen=False)
return df | DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/data/data_utils.py |
# Copyright 2021-2022 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Only downloads data if the csv files are present, unless the "force_download"
argument is supplied. For new datasets, the download_and_unzip(.) can be reused
to pull csv files from an online repository, but may require subsequent
dataset-specific processing.
Usage:
python3 script_download_data --dataset {DATASET} --output_dir {DIR}
Command line args:
DATASET: Name of dataset to download {e.g. electricity}
DIR: Path to main dataset diredtory
"""
from __future__ import absolute_import, division, print_function
import argparse
import gc
import os
import sys
import warnings
import numpy as np
import pandas as pd
import pyunpack
import wget
import pickle
from datetime import date, timedelta, datetime
from scipy.spatial import distance_matrix
import dgl
import torch
warnings.filterwarnings("ignore")
# General functions for data downloading & aggregation.
def download_from_url(url, output_path):
"""Downloads a file froma url."""
print("Pulling data from {} to {}".format(url, output_path))
wget.download(url, output_path)
print("done")
def unzip(zip_path, output_file, data_folder):
"""Unzips files and checks successful completion."""
print("Unzipping file: {}".format(zip_path))
pyunpack.Archive(zip_path).extractall(data_folder)
# Checks if unzip was successful
if not os.path.exists(output_file):
raise ValueError(
"Error in unzipping process! {} not found.".format(output_file)
)
def download_and_unzip(url, zip_path, csv_path, data_folder):
"""Downloads and unzips an online csv file.
Args:
url: Web address
zip_path: Path to download zip file
csv_path: Expected path to csv file
data_folder: Folder in which data is stored.
"""
download_from_url(url, zip_path)
unzip(zip_path, csv_path, data_folder)
print("Done.")
# Dataset specific download routines.
def download_electricity(data_folder):
"""Downloads electricity dataset from UCI repository."""
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00321/LD2011_2014.txt.zip"
csv_path = os.path.join(data_folder, "LD2011_2014.txt")
zip_path = csv_path + ".zip"
download_and_unzip(url, zip_path, csv_path, data_folder)
print("Aggregating to hourly data")
df = pd.read_csv(csv_path, index_col=0, sep=";", decimal=",")
df.index = pd.to_datetime(df.index)
df.sort_index(inplace=True)
# Used to determine the start and end dates of a series
output = df.resample("1h").mean().replace(0.0, np.nan)
earliest_time = output.index.min()
# Filter to match range used by other academic papers
output = output[(output.index >= '2014-01-01') & (output.index < '2014-09-08')]
df_list = []
for label in output:
srs = output[label]
if srs.isna().all():
continue
start_date = min(srs.fillna(method="ffill").dropna().index)
end_date = max(srs.fillna(method="bfill").dropna().index)
srs = output[label].fillna(0.0)
tmp = pd.DataFrame({"power_usage": srs})
date = tmp.index
tmp["t"] = (date - earliest_time).seconds / 60 / 60 + (
date - earliest_time
).days * 24
tmp["days_from_start"] = (date - earliest_time).days
tmp["categorical_id"] = label
tmp["date"] = date
tmp["id"] = label
tmp["hour"] = date.hour
tmp["day"] = date.day
tmp["day_of_week"] = date.dayofweek
tmp["month"] = date.month
tmp["power_usage_weight"] = ((date >= start_date) & (date <= end_date))
df_list.append(tmp)
output = pd.concat(df_list, axis=0, join="outer").reset_index(drop=True)
output["categorical_id"] = output["id"].copy()
output["hours_from_start"] = output["t"]
output["categorical_day_of_week"] = output["day_of_week"].copy()
output["categorical_hour"] = output["hour"].copy()
output["power_usage_weight"] = output["power_usage_weight"].apply(lambda b: 1 if b else 0)
output.to_csv(data_folder + "/electricity.csv")
print("Done.")
def download_traffic(data_folder):
"""Downloads traffic dataset from UCI repository."""
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00204/PEMS-SF.zip"
csv_path = os.path.join(data_folder, "PEMS_train")
zip_path = os.path.join(data_folder, "PEMS-SF.zip")
download_and_unzip(url, zip_path, csv_path, data_folder)
print("Aggregating to hourly data")
def process_list(s, variable_type=int, delimiter=None):
"""Parses a line in the PEMS format to a list."""
if delimiter is None:
parsed_list = [
variable_type(i)
for i in s.replace("[", "").replace("]", "").split()
]
else:
parsed_list = [
variable_type(i)
for i in s.replace("[", "").replace("]", "").split(delimiter)
]
return parsed_list
def read_single_list(filename):
"""Returns single list from a file in the PEMS-custom format."""
with open(os.path.join(data_folder, filename), "r") as dat:
parsed_list_from_file = process_list(dat.readlines()[0])
return parsed_list_from_file
def read_matrix(filename):
"""Returns a matrix from a file in the PEMS-custom format."""
array_list = []
with open(os.path.join(data_folder, filename), "r") as dat:
lines = dat.readlines()
for i, line in enumerate(lines):
array = [
process_list(row_split, variable_type=float, delimiter=None)
for row_split in process_list(
line, variable_type=str, delimiter=";"
)
]
array_list.append(array)
return array_list
shuffle_order = np.array(read_single_list("randperm")) - 1 # index from 0
train_dayofweek = read_single_list("PEMS_trainlabels")
train_tensor = read_matrix("PEMS_train")
test_dayofweek = read_single_list("PEMS_testlabels")
test_tensor = read_matrix("PEMS_test")
    # Invert the shuffle-order permutation
print("Shuffling")
inverse_mapping = {
new_location: previous_location
for previous_location, new_location in enumerate(shuffle_order)
}
reverse_shuffle_order = np.array(
[
inverse_mapping[new_location]
for new_location, _ in enumerate(shuffle_order)
]
)
    # Group and reorder based on the permutation matrix
    print("Reordering")
day_of_week = np.array(train_dayofweek + test_dayofweek)
combined_tensor = np.array(train_tensor + test_tensor)
day_of_week = day_of_week[reverse_shuffle_order]
combined_tensor = combined_tensor[reverse_shuffle_order]
# Put everything back into a dataframe
print("Parsing as dataframe")
labels = ["traj_{}".format(i) for i in read_single_list("stations_list")]
hourly_list = []
for day, day_matrix in enumerate(combined_tensor):
# Hourly data
hourly = pd.DataFrame(day_matrix.T, columns=labels)
hourly["hour_on_day"] = [
int(i / 6) for i in hourly.index
] # sampled at 10 min intervals
if hourly["hour_on_day"].max() > 23 or hourly["hour_on_day"].min() < 0:
raise ValueError(
"Invalid hour! {}-{}".format(
hourly["hour_on_day"].min(), hourly["hour_on_day"].max()
)
)
hourly = hourly.groupby("hour_on_day", as_index=True).mean()[labels]
hourly["sensor_day"] = day
hourly["time_on_day"] = hourly.index
hourly["day_of_week"] = day_of_week[day]
hourly_list.append(hourly)
hourly_frame = pd.concat(hourly_list, axis=0, ignore_index=True, sort=False)
    # Flatten such that each entity uses one row in dataframe
store_columns = [c for c in hourly_frame.columns if "traj" in c]
other_columns = [c for c in hourly_frame.columns if "traj" not in c]
flat_df = pd.DataFrame(
columns=["values", "prev_values", "next_values"]
+ other_columns
+ ["id"]
)
def format_index_string(x):
"""Returns formatted string for key."""
if x < 10:
return "00" + str(x)
elif x < 100:
return "0" + str(x)
elif x < 1000:
return str(x)
raise ValueError("Invalid value of x {}".format(x))
for store in store_columns:
sliced = hourly_frame[[store] + other_columns].copy()
sliced.columns = ["values"] + other_columns
sliced["id"] = int(store.replace("traj_", ""))
# Sort by Sensor-date-time
key = (
sliced["id"].apply(str)
+ sliced["sensor_day"].apply(lambda x: "_" + format_index_string(x))
+ sliced["time_on_day"].apply(
lambda x: "_" + format_index_string(x)
)
)
sliced = sliced.set_index(key).sort_index()
sliced["values"] = sliced["values"].fillna(method="ffill")
sliced["prev_values"] = sliced["values"].shift(1)
sliced["next_values"] = sliced["values"].shift(-1)
        flat_df = pd.concat([flat_df, sliced.dropna()], ignore_index=True, sort=False)
# Filter to match range used by other academic papers
index = flat_df["sensor_day"]
flat_df = flat_df[index < 173].copy()
    # Creating columns for categorical inputs
flat_df["categorical_id"] = flat_df["id"].copy()
flat_df["hours_from_start"] = (
flat_df["time_on_day"] + flat_df["sensor_day"] * 24.0
)
flat_df["categorical_day_of_week"] = flat_df["day_of_week"].copy()
flat_df["categorical_time_on_day"] = flat_df["time_on_day"].copy()
flat_df.to_csv(data_folder + "/traffic.csv")
def construct_graph(nodes_loc, k=0.8):
"""
Constructs a graph based on a physical location of nodes
nodes_loc: 2D array num_nodes x dim
features: list of node features
"""
dist_mx = distance_matrix(nodes_loc, nodes_loc)
std = dist_mx.std()
adj_mx = np.exp(-np.square(dist_mx / std))
adj_mx[adj_mx < k] = 0
np.fill_diagonal(adj_mx, 0)
edges = np.nonzero(adj_mx)
graph = dgl.graph(edges, num_nodes=nodes_loc.shape[0])
return graph
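# Illustrative call (sketch; locations are hypothetical):
#
#     locs = np.random.rand(10, 2)       # 10 sensors in 2-D space
#     g = construct_graph(locs, k=0.8)   # DGLGraph with thresholded Gaussian-kernel edges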
def main(args):
    """Runs main download routine.
    Args:
    args: Parsed command line arguments with `dataset` and `output_dir` fields.
    """
print("#### Running download script ###")
download_function = DOWNLOAD_FUNCTIONS[args.dataset]
print("Getting {} data...".format(args.dataset))
subdir = os.path.join(args.output_dir, args.dataset)
print(subdir)
if os.path.exists(subdir):
print(f"Warning: Path {subdir} exists. Overwritting files!", file=sys.stderr)
os.makedirs(subdir, exist_ok=True)
download_function(subdir)
print("Download completed.")
DOWNLOAD_FUNCTIONS = {
"electricity": download_electricity,
"traffic": download_traffic,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Data download configs")
parser.add_argument(
"--dataset",
metavar="DATASET",
type=str,
choices=DOWNLOAD_FUNCTIONS.keys(),
required=True,
help="Dataset name"
)
parser.add_argument(
"--output_dir",
metavar="DIR",
type=str,
default=".",
help="Path to folder for data download",
)
args = parser.parse_args()
main(args)
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/data/script_download_data.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf import OmegaConf
from data.data_utils import InputTypes, DataTypes, FeatureSpec
OmegaConf.register_new_resolver("and", lambda x, y: x and y, use_cache=True)
OmegaConf.register_new_resolver("feature.selector",
lambda x,feat_type,embed_type:
OmegaConf.create([elem for elem in x if elem.feature_type == feat_type and elem.feature_embed_type == embed_type])
)
OmegaConf.register_new_resolver("add", lambda x,y: x + y)
OmegaConf.register_new_resolver("if", lambda x,y,z: y if x else z)
OmegaConf.register_new_resolver("feature.cardinalities", lambda x: OmegaConf.create([elem.cardinality for elem in x]))
OmegaConf.register_new_resolver("len", len)
OmegaConf.register_new_resolver("cmp", lambda x, y: x == y)
OmegaConf.register_new_resolver("cont.lower", lambda x, y: y.lower() in x.lower())
# XXX I don't know whether it is the best idea to allow user to sum over nested structure without checks
def sum_nested(*args):
if len(args) == 1 and isinstance(args[0], (int, float)):
return args[0]
return sum(arg if isinstance(arg, (int, float)) else sum_nested(*arg) for arg in args)
OmegaConf.register_new_resolver("sum", sum_nested)
| DeepLearningExamples-master | Tools/PyTorch/TimeSeriesPredictionPlatform/conf/conf_utils.py |
# Copyright 2017-2018 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" train fit utility """
import logging
import math
import glob
import os
import random
import sys
import time
import re
from itertools import starmap
import signal
import pickle
import dllogger
import horovod.mxnet as hvd
import mxnet as mx
import mxnet.contrib.amp as amp
import numpy as np
from mxnet import autograd as ag
from mxnet import gluon
import data
from benchmarking import BenchmarkingDataIter
from global_metrics import CompositeMeter, MaxMeter, MinMeter, AvgMeter, PercentileMeter
class PartitionSignalHandler():
def __init__(self, sync_freq: int = 10):
self.step = 0
self.freq = sync_freq
self.t = mx.nd.array([0])
signal.signal(signal.SIGUSR1, self._signal_handler)
signal.signal(signal.SIGTERM, self._signal_handler)
def sync(self) -> bool:
if self.step % self.freq == 0:
new_sync = hvd.allreduce(self.t, average=False)
if new_sync[0] > 0:
self.t[0] = 1
self.step += 1
return self.should_end()
def should_end(self) -> bool:
return bool(self.t[0] > 0)
def _signal_handler(self, signum, frame):
print("Signal received")
self.t[0] = 1
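# Usage sketch: poll once per iteration so that a SIGUSR1/SIGTERM received by
# any worker eventually stops all of them (the flag is OR-reduced via allreduce):
#
#     handler = PartitionSignalHandler(sync_freq=10)
#     for batch in data:
#         ...  # one training step
#         if handler.sync():
#             break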
def add_fit_args(parser):
def int_list(x):
return list(map(int, x.split(',')))
def float_list(x):
return list(map(float, x.split(',')))
train = parser.add_argument_group('Training')
train.add_argument('--mode', default='train_val', choices=('train_val', 'train', 'val', 'pred'),
help='mode')
train.add_argument('--seed', type=int, default=None,
help='random seed')
train.add_argument('--gpus', type=int_list, default=[0],
help='list of gpus to run, e.g. 0 or 0,2,5')
train.add_argument('--kv-store', type=str, default='device', choices=('device', 'horovod'),
help='key-value store type')
train.add_argument('--dtype', type=str, default='float16', choices=('float32', 'float16'),
help='precision')
train.add_argument('--amp', action='store_true',
help='If enabled, turn on AMP (Automatic Mixed Precision)')
train.add_argument('--batch-size', type=int, default=192,
help='the batch size')
train.add_argument('--num-epochs', type=int, default=90,
help='number of epochs')
train.add_argument('--run-epochs', type=int, default=-1,
help='number of epochs to run in single run')
train.add_argument('--lr', type=float, default=0.1,
help='initial learning rate')
train.add_argument('--lr-schedule', choices=('multistep', 'cosine'), default='cosine',
help='learning rate schedule')
train.add_argument('--lr-factor', type=float, default=0.256,
help='the ratio to reduce lr on each step')
train.add_argument('--lr-steps', type=float_list, default=[],
help='the epochs to reduce the lr, e.g. 30,60')
train.add_argument('--warmup-epochs', type=int, default=5,
help='the epochs to ramp-up lr to scaled large-batch value')
train.add_argument('--optimizer', type=str, default='sgd',
help='the optimizer type')
train.add_argument('--mom', type=float, default=0.875,
help='momentum for sgd')
train.add_argument('--wd', type=float, default=1 / 32768,
help='weight decay for sgd')
train.add_argument('--label-smoothing', type=float, default=0.1,
help='label smoothing factor')
train.add_argument('--mixup', type=float, default=0,
help='alpha parameter for mixup (if 0 then mixup is not applied)')
train.add_argument('--disp-batches', type=int, default=20,
help='show progress for every n batches')
train.add_argument('--model-prefix', type=str, default='model',
help='model checkpoint prefix')
train.add_argument('--save-frequency', type=int, default=-1,
help='frequency of saving model in epochs (--model-prefix must be specified). '
'If -1 then save only best model. If 0 then do not save anything.')
train.add_argument('--begin-epoch', type=int, default=0,
help='start the model from an epoch')
train.add_argument('--load', help='checkpoint to load')
train.add_argument('--test-io', action='store_true',
help='test reading speed without training')
train.add_argument('--test-io-mode', default='train', choices=('train', 'val'),
help='data to test')
train.add_argument('--log', type=str, default='log.log',
help='file where to save the log from the experiment')
train.add_argument('--dllogger-log', type=str, default='dllogger_log.log',
help='file where to save the dllogger log from the experiment')
train.add_argument('--workspace', type=str, default='./',
help='path to directory where results will be stored')
train.add_argument('--logdir', type=str, default=None,
help="path to directory where logs will be stored")
train.add_argument('--no-metrics', action='store_true',
help='do not calculate evaluation metrics (for benchmarking)')
train.add_argument('--benchmark-iters', type=int, default=None,
help='run only benchmark-iters iterations from each epoch')
return train
def get_epoch_size(args, kv):
return math.ceil(args.num_examples / args.batch_size)
def get_lr_scheduler(args):
def multistep_schedule(x):
lr = args.lr * \
(args.lr_factor ** (len(list(filter(lambda step: step <= x, args.lr_steps)))))
warmup_coeff = min(1, x / args.warmup_epochs)
return warmup_coeff * lr
def cosine_schedule(x):
steps = args.lr_steps
if not steps or steps[0] > args.warmup_epochs:
steps = [args.warmup_epochs] + steps
elif not steps or steps[0] != 0:
steps = [0] + steps
if steps[-1] != args.num_epochs:
steps.append(args.num_epochs)
if x < args.warmup_epochs:
return args.lr * x / args.warmup_epochs
for i, (step, next_step) in enumerate(zip(steps, steps[1:])):
if next_step > x:
return args.lr * 0.5 * (1 + math.cos(math.pi * (x - step) / (next_step - step))) * (args.lr_factor ** i)
return 0
schedules = {
'multistep': multistep_schedule,
'cosine': cosine_schedule,
}
return schedules[args.lr_schedule]
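# Worked example (sketch): with lr=0.1, warmup_epochs=5 and the cosine schedule,
# the warmup is linear, so schedule(2.5) == 0.1 * 2.5 / 5 == 0.05; past warmup the
# rate follows a cosine decay between consecutive steps.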
def load_model(args, model):
file = list(glob.glob(
f"{args.workspace}/{args.model_prefix}_*.params"))
if len(file) == 0:
return -1
file = [x for x in sorted(file) if "best.params" not in x]
if len(file) == 0:
return -1
file = file[-1]
    epoch = re.match(rf".*{args.model_prefix}_([0-9]*)\.params", file)
if epoch is None:
return -1
epoch = int(epoch.group(1))
model.load_parameters(file)
logging.info('Loaded model {}'.format(file))
return epoch
def save_checkpoint(net, epoch, top1, best_acc, model_prefix, workspace, save_frequency, kvstore, force_save=False):
if model_prefix is None or save_frequency == 0 or ('horovod' in kvstore and hvd.rank() != 0):
return
if (save_frequency > 0 and (epoch + 1) % save_frequency == 0) or force_save:
fname = '{}_{:04}.params'.format(model_prefix, epoch)
fname = os.path.join(workspace, fname)
net.save_parameters(fname)
logging.info('[Epoch {}] Saving checkpoint to {} with Accuracy: {:.4f}'.format(
epoch, fname, top1))
if top1 > best_acc:
fname = os.path.join(workspace, f'{model_prefix}_best.params')
net.save_parameters(fname)
logging.info('[Epoch {}] Saving checkpoint to {} with Accuracy: {:.4f}'.format(
epoch, fname, top1))
def model_pred(args, model, image):
from imagenet_classes import classes
output = model(image.reshape(-1, *image.shape)
)[0].softmax().as_in_context(mx.cpu())
top = output.argsort(is_ascend=False)[:10]
for i, ind in enumerate(top):
ind = int(ind.asscalar())
logging.info('{:2d}. {:5.2f}% -> {}'.format(i + 1,
output[ind].asscalar() * 100, classes[ind]))
def reduce_metrics(args, metrics, kvstore):
if 'horovod' not in kvstore or not metrics[0] or hvd.size() == 1:
return metrics
m = mx.ndarray.array(metrics[1], ctx=mx.gpu(args.gpus[0]))
reduced = hvd.allreduce(m)
values = reduced.as_in_context(mx.cpu()).asnumpy().tolist()
return (metrics[0], values)
def model_score(args, net, val_data, metric, kvstore):
if val_data is None:
logging.info('Omitting validation: no data')
return [], []
if not isinstance(metric, mx.metric.EvalMetric):
metric = mx.metric.create(metric)
metric.reset()
val_data.reset()
total_batch_size = val_data.batch_size * val_data._num_gpus * \
(hvd.size() if 'horovod' in kvstore else 1)
durations = []
tic = time.time()
outputs = []
for batches in val_data:
# synchronize to previous iteration
for o in outputs:
o.wait_to_read()
data = [b.data[0] for b in batches]
label = [b.label[0][:len(b.data[0]) - b.pad]
for b in batches if len(b.data[0]) != b.pad]
        outputs = [net(X) for X in data]
outputs = [o[:len(b.data[0]) - b.pad]
for o, b in zip(outputs, batches) if len(b.data[0]) != b.pad]
metric.update(label, outputs)
durations.append(time.time() - tic)
tic = time.time()
metric = reduce_metrics(args, metric.get_global(), kvstore)
durations = durations[min(len(durations) // 10, 100):]
duration_stats = {
'ips': total_batch_size / np.mean(durations),
'latency_avg': np.mean(durations),
}
return metric, duration_stats, durations
class ScalarMetric(mx.metric.Loss):
def update(self, _, scalar):
self.sum_metric += scalar
self.global_sum_metric += scalar
self.num_inst += 1
self.global_num_inst += 1
def label_smoothing(labels, classes, eta):
return labels.one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
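# Worked example: with classes=10 and eta=0.1 the correct class gets
# 1 - 0.1 + 0.1/10 = 0.91 and every other class gets 0.1/10 = 0.01.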
def model_fit(args, net, train_data, eval_metric, optimizer,
optimizer_params, lr_scheduler, eval_data, global_metrics, kvstore, kv,
begin_epoch, num_epoch, run_epoch, model_prefix):
if not isinstance(eval_metric, mx.metric.EvalMetric):
eval_metric = mx.metric.create(eval_metric)
loss_metric = ScalarMetric()
if 'horovod' in kvstore:
trainer = hvd.DistributedTrainer(
net.collect_params(), optimizer, optimizer_params)
else:
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params,
kvstore=kv, update_on_kvstore=False)
if args.amp:
amp.init_trainer(trainer)
partition_handler = PartitionSignalHandler(1)
sparse_label_loss = (args.label_smoothing == 0 and args.mixup == 0)
loss = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=sparse_label_loss)
loss.hybridize(static_shape=True, static_alloc=True)
local_batch_size = train_data.batch_size
total_batch_size = local_batch_size * train_data._num_gpus * \
(hvd.size() if 'horovod' in kvstore else 1)
durations = []
epoch_size = get_epoch_size(args, kv)
run_epoch = num_epoch if (run_epoch == -1) else (begin_epoch + run_epoch)
def transform_data(images, labels):
if args.mixup != 0:
coeffs = mx.nd.array(np.random.beta(args.mixup, args.mixup, size=images.shape[0])).as_in_context(
images.context)
image_coeffs = coeffs.astype(
images.dtype, copy=False).reshape(*coeffs.shape, 1, 1, 1)
ret_images = image_coeffs * images + \
(1 - image_coeffs) * images[::-1]
ret_labels = label_smoothing(
labels, args.num_classes, args.label_smoothing)
label_coeffs = coeffs.reshape(*coeffs.shape, 1)
ret_labels = label_coeffs * ret_labels + \
(1 - label_coeffs) * ret_labels[::-1]
else:
ret_images = images
if not sparse_label_loss:
ret_labels = label_smoothing(
labels, args.num_classes, args.label_smoothing)
else:
ret_labels = labels
return ret_images, ret_labels
i = -1
best_accuracy = -1
for epoch in range(begin_epoch, min(run_epoch, num_epoch)):
tic = time.time()
btic = time.time()
etic = time.time()
train_data.reset()
eval_metric.reset()
loss_metric.reset()
logging.info('Starting epoch {}'.format(epoch))
outputs = []
if not partition_handler.should_end():
for i, batches in enumerate(train_data):
# synchronize to previous iteration
# for o in outputs:
# o.wait_to_read()
trainer.set_learning_rate(lr_scheduler(epoch + i / epoch_size))
data = [b.data[0] for b in batches]
label = [b.label[0].as_in_context(
b.data[0].context) for b in batches]
orig_label = label
data, label = zip(*starmap(transform_data, zip(data, label)))
outputs = []
Ls = []
with ag.record():
for x, y in zip(data, label):
z = net(x)
L = loss(z, y)
# store the loss and do backward after we have done forward
# on all GPUs for better speed on multiple GPUs.
Ls.append(L)
outputs.append(z)
if args.amp:
with amp.scale_loss(Ls, trainer) as scaled_loss:
ag.backward(scaled_loss)
else:
ag.backward(Ls)
if 'horovod' in kvstore:
trainer.step(local_batch_size)
else:
trainer.step(total_batch_size)
loss_metric.update(..., np.mean(
[l.asnumpy() for l in Ls]).item())
if args.disp_batches and not (i + 1) % args.disp_batches:
dllogger_it_data = {
'train.loss': loss_metric.get()[1],
'train.ips': args.disp_batches * total_batch_size / (time.time() - btic),
'train.lr': trainer.learning_rate
}
dllogger.log((epoch, i), data=dllogger_it_data)
loss_metric.reset_local()
btic = time.time()
durations.append(time.time() - tic)
tic = time.time()
else:
break
durations = durations[min(len(durations) // 10, 100):]
dllogger_epoch_data = {
'train.loss': loss_metric.get_global()[1],
'train.ips': total_batch_size / np.mean(durations)
}
should_break = partition_handler.sync()
if args.mode == 'train_val':
logging.info('Validating epoch {}'.format(epoch))
score, duration_stats, _ = model_score(
args, net, eval_data, eval_metric, kvstore)
dllogger_epoch_data.update(
starmap(lambda key, val: (
'val.{}'.format(key), val), zip(*score))
)
dllogger_epoch_data.update(
starmap(lambda key, val: ('val.{}'.format(key), val),
duration_stats.items())
)
score = dict(zip(*score))
accuracy = score.get('accuracy', -1)
save_checkpoint(net, epoch, accuracy, best_accuracy,
model_prefix, args.workspace,
args.save_frequency if args.mode == "train_val" else -1,
kvstore, force_save=should_break)
best_accuracy = max(best_accuracy, accuracy)
global_metrics.update_dict(dllogger_epoch_data)
dllogger.log(step=(epoch,), data=dllogger_epoch_data)
def fit(args, model, data_loader):
"""
train a model
    args : parsed command line arguments
    model : the neural network model
data_loader : function that returns the train and val data iterators
"""
start_time = time.time()
# select gpu for horovod process
if 'horovod' in args.kv_store:
args.gpus = [args.gpus[hvd.local_rank()]]
if args.amp:
amp.init()
if args.seed is not None:
logging.info('Setting seeds to {}'.format(args.seed))
random.seed(args.seed)
np.random.seed(args.seed)
mx.random.seed(args.seed)
# kvstore
if 'horovod' in args.kv_store:
kv = None
rank = hvd.rank()
num_workers = hvd.size()
else:
kv = mx.kvstore.create(args.kv_store)
rank = kv.rank
num_workers = kv.num_workers
if args.test_io:
train, val = data_loader(args, kv)
if args.test_io_mode == 'train':
data_iter = train
else:
data_iter = val
tic = time.time()
for i, batch in enumerate(data_iter):
if isinstance(batch, list):
for b in batch:
for j in b.data:
j.wait_to_read()
else:
for j in batch.data:
j.wait_to_read()
if (i + 1) % args.disp_batches == 0:
logging.info('Batch [{}]\tSpeed: {:.2f} samples/sec'.format(
i, args.disp_batches * args.batch_size / (time.time() - tic)))
tic = time.time()
return
start_epoch = load_model(args, model) + 1
if start_epoch == 0:
# all initializers should be specified in the model definition.
# if not, this will raise an error
model.initialize(mx.init.Initializer())
logging.info(f"starting epoch {start_epoch}")
# devices for training
devs = list(map(mx.gpu, args.gpus))
model.collect_params().reset_ctx(devs)
if args.mode == 'pred':
logging.info('Infering image {}'.format(args.data_pred))
model_pred(args, model, data.load_image(args, args.data_pred, devs[0]))
return
# learning rate
lr_scheduler = get_lr_scheduler(args)
optimizer_params = {
'learning_rate': 0,
'wd': args.wd,
'multi_precision': True,
}
# Only a limited number of optimizers have 'momentum' property
has_momentum = {'sgd', 'dcasgd', 'nag', 'signum', 'lbsgd'}
if args.optimizer in has_momentum:
optimizer_params['momentum'] = args.mom
    # evaluation metrics
if not args.no_metrics:
eval_metrics = ['accuracy']
eval_metrics.append(mx.metric.create(
'top_k_accuracy', top_k=5))
else:
eval_metrics = []
train, val = data_loader(args, kv)
train = BenchmarkingDataIter(train, args.benchmark_iters)
if val is not None:
val = BenchmarkingDataIter(val, args.benchmark_iters)
if 'horovod' in args.kv_store:
# Fetch and broadcast parameters
params = model.collect_params()
if params is not None:
hvd.broadcast_parameters(params, root_rank=0)
ctx = mx.gpu(hvd.local_rank())
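        # Presumably a warm-up: running one grouped allreduce on dummy tensors
        # initializes the Horovod communicators before real gradients flow.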
tensor1 = mx.nd.zeros(shape=(1,), dtype='float32', ctx=ctx)
tensor2 = mx.nd.zeros(shape=(1,), dtype='float32', ctx=ctx)
tensor1, tensor2 = hvd.grouped_allreduce([tensor1,tensor2])
global_metrics = CompositeMeter()
if args.mode in ['train_val', 'train']:
global_metrics.register_metric('train.loss', MinMeter())
global_metrics.register_metric('train.ips', AvgMeter())
if args.mode in ['train_val', 'val']:
global_metrics.register_metric('val.accuracy', MaxMeter())
global_metrics.register_metric('val.top_k_accuracy_5', MaxMeter())
global_metrics.register_metric('val.ips', AvgMeter())
global_metrics.register_metric('val.latency_avg', AvgMeter())
if args.mode in ['val']:
global_metrics.register_metric('val.latency_50', PercentileMeter(50))
global_metrics.register_metric('val.latency_90', PercentileMeter(90))
global_metrics.register_metric('val.latency_95', PercentileMeter(95))
global_metrics.register_metric('val.latency_99', PercentileMeter(99))
global_metrics.register_metric('val.latency_100', PercentileMeter(100))
# run
if args.mode in ['train_val', 'train']:
model_fit(
args,
model,
train,
begin_epoch=start_epoch,
num_epoch=args.num_epochs,
run_epoch=args.run_epochs,
eval_data=val,
eval_metric=eval_metrics,
global_metrics=global_metrics,
kvstore=args.kv_store,
kv=kv,
optimizer=args.optimizer,
optimizer_params=optimizer_params,
lr_scheduler=lr_scheduler,
model_prefix=args.model_prefix,
)
elif args.mode == 'val':
for epoch in range(args.num_epochs): # loop for benchmarking
score, duration_stats, durations = model_score(
args, model, val, eval_metrics, args.kv_store)
dllogger_data = dict(starmap(lambda key, val: (
'val.{}'.format(key), val), zip(*score)))
dllogger_data.update(
starmap(lambda key, val: ('val.{}'.format(key), val),
duration_stats.items())
)
global_metrics.update_dict(dllogger_data)
for percentile in [50, 90, 95, 99, 100]:
metric_name = 'val.latency_{}'.format(percentile)
dllogger_data[metric_name] = np.percentile(
durations, percentile)
global_metrics.update_metric(metric_name, durations)
dllogger.log(step=(epoch,), data=dllogger_data)
else:
raise ValueError('Wrong mode')
mx.nd.waitall()
dllogger.log(tuple(), data=global_metrics.get())
| DeepLearningExamples-master | MxNet/Classification/RN50v1.5/fit.py |
#!/usr/bin/env python3
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import json
import traceback
import numpy as np
from collections import OrderedDict
from subprocess import Popen
def int_list(x):
return list(map(int, x.split(',')))
parser = argparse.ArgumentParser(description='Benchmark',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--executable', default='./runner', help='path to runner')
parser.add_argument('-o', '--output', metavar='OUT', required=True, help="path to benchmark report")
parser.add_argument('-n', '--ngpus', metavar='N1,[N2,...]', type=int_list,
required=True, help='numbers of gpus separated by comma')
parser.add_argument('-b', '--batch-sizes', metavar='B1,[B2,...]', type=int_list,
required=True, help='batch sizes separated by comma')
parser.add_argument('-i', '--benchmark-iters', metavar='I',
type=int, default=100, help='iterations')
parser.add_argument('-e', '--epochs', metavar='E',
type=int, default=1, help='number of epochs')
parser.add_argument('-w', '--warmup', metavar='N',
type=int, default=0, help='warmup epochs')
parser.add_argument('--timeout', metavar='T',
type=str, default='inf', help='timeout for each run')
parser.add_argument('--mode', metavar='MODE', choices=('train_val', 'train', 'val'), default='train_val',
help="benchmark mode")
args, other_args = parser.parse_known_args()
latency_percentiles = [50, 90, 95, 99, 100]
harmonic_mean_metrics = ['train.ips', 'val.ips']
res = OrderedDict()
res['model'] = ''
res['ngpus'] = args.ngpus
res['bs'] = args.batch_sizes
res['metric_keys'] = []
if args.mode == 'train' or args.mode == 'train_val':
res['metric_keys'].append('train.ips')
if args.mode == 'val' or args.mode == 'train_val':
res['metric_keys'].append('val.ips')
res['metric_keys'].append('val.latency_avg')
if args.mode == 'val':
for percentile in latency_percentiles:
res['metric_keys'].append('val.latency_{}'.format(percentile))
res['metrics'] = OrderedDict()
for n in args.ngpus:
res['metrics'][str(n)] = OrderedDict()
for bs in args.batch_sizes:
res['metrics'][str(n)][str(bs)] = OrderedDict()
log_file = args.output + '-{},{}'.format(n, bs)
Popen(['timeout', args.timeout, args.executable, '-n', str(n), '-b', str(bs),
'--benchmark-iters', str(args.benchmark_iters),
'-e', str(args.epochs), '--dllogger-log', log_file,
'--mode', args.mode, '--no-metrics'] + other_args,
stdout=sys.stderr).wait()
try:
with open(log_file, 'r') as f:
lines = [line for line in f.read().splitlines() if 'step' in line]
log_data = [json.loads(line[5:]) for line in lines]
epochs_report = list(filter(lambda x: len(x['step']) == 1, log_data))
if len(epochs_report) != args.epochs:
            raise ValueError('Wrong number of epochs in report')
epochs_report = epochs_report[args.warmup:]
for metric in res['metric_keys']:
data = list(map(lambda x: x['data'][metric], epochs_report))
if metric in harmonic_mean_metrics:
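                # Throughputs are averaged harmonically, matching total work over total
                # time: e.g. epochs at 100 and 50 img/s give 2 / (1/100 + 1/50) ~= 66.7.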
avg = len(data) / sum(map(lambda x: 1 / x, data))
else:
avg = np.mean(data)
res['metrics'][str(n)][str(bs)][metric] = avg
except Exception as e:
traceback.print_exc()
for metric in res['metric_keys']:
res['metrics'][str(n)][str(bs)][metric] = float('nan')
column_len = 11
for m in res['metric_keys']:
print(m, file=sys.stderr)
print(' ' * column_len, end='|', file=sys.stderr)
for bs in args.batch_sizes:
print(str(bs).center(column_len), end='|', file=sys.stderr)
print(file=sys.stderr)
print('-' * (len(args.batch_sizes) + 1) * (column_len + 1), file=sys.stderr)
for n in args.ngpus:
print(str(n).center(column_len), end='|', file=sys.stderr)
for bs in args.batch_sizes:
print('{:.5g}'.format(res['metrics'][str(n)][str(bs)][m]).center(column_len), end='|', file=sys.stderr)
print(file=sys.stderr)
print(file=sys.stderr)
with open(args.output, 'w') as f:
json.dump(res, f, indent=4)
| DeepLearningExamples-master | MxNet/Classification/RN50v1.5/benchmark.py |
# Copyright 2017-2018 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import mxnet as mx
from mxnet.gluon.block import HybridBlock
from mxnet.gluon import nn
def add_model_args(parser):
model = parser.add_argument_group('Model')
model.add_argument('--arch', default='resnetv15',
choices=['resnetv1', 'resnetv15',
'resnextv1', 'resnextv15',
'xception'],
help='model architecture')
model.add_argument('--num-layers', type=int, default=50,
help='number of layers in the neural network, \
required by some networks such as resnet')
model.add_argument('--num-groups', type=int, default=32,
help='number of groups for grouped convolutions, \
required by some networks such as resnext')
model.add_argument('--num-classes', type=int, default=1000,
help='the number of classes')
model.add_argument('--batchnorm-eps', type=float, default=1e-5,
help='the amount added to the batchnorm variance to prevent output explosion.')
model.add_argument('--batchnorm-mom', type=float, default=0.9,
                       help='the leaky-integrator factor controlling the batchnorm mean and variance.')
model.add_argument('--fuse-bn-relu', type=int, default=0,
help='have batchnorm kernel perform activation relu')
model.add_argument('--fuse-bn-add-relu', type=int, default=0,
help='have batchnorm kernel perform add followed by activation relu')
return model
class Builder:
def __init__(self, dtype, input_layout, conv_layout, bn_layout,
pooling_layout, bn_eps, bn_mom, fuse_bn_relu, fuse_bn_add_relu):
self.dtype = dtype
self.input_layout = input_layout
self.conv_layout = conv_layout
self.bn_layout = bn_layout
self.pooling_layout = pooling_layout
self.bn_eps = bn_eps
self.bn_mom = bn_mom
self.fuse_bn_relu = fuse_bn_relu
self.fuse_bn_add_relu = fuse_bn_add_relu
self.act_type = 'relu'
self.bn_gamma_initializer = lambda last: 'zeros' if last else 'ones'
self.linear_initializer = lambda groups=1: mx.init.Xavier(rnd_type='gaussian', factor_type="in",
magnitude=2 * (groups ** 0.5))
self.last_layout = self.input_layout
def copy(self):
return copy.copy(self)
def batchnorm(self, last=False):
gamma_initializer = self.bn_gamma_initializer(last)
bn_axis = 3 if self.bn_layout == 'NHWC' else 1
return self.sequence(
self.transpose(self.bn_layout),
nn.BatchNorm(axis=bn_axis, momentum=self.bn_mom, epsilon=self.bn_eps,
gamma_initializer=gamma_initializer,
running_variance_initializer=gamma_initializer)
)
def batchnorm_add_relu(self, last=False):
gamma_initializer = self.bn_gamma_initializer(last)
if self.fuse_bn_add_relu:
bn_axis = 3 if self.bn_layout == 'NHWC' else 1
return self.sequence(
self.transpose(self.bn_layout),
BatchNormAddRelu(axis=bn_axis, momentum=self.bn_mom,
epsilon=self.bn_eps, act_type=self.act_type,
gamma_initializer=gamma_initializer,
running_variance_initializer=gamma_initializer)
)
return NonFusedBatchNormAddRelu(self, last=last)
def batchnorm_relu(self, last=False):
gamma_initializer = self.bn_gamma_initializer(last)
if self.fuse_bn_relu:
bn_axis = 3 if self.bn_layout == 'NHWC' else 1
return self.sequence(
self.transpose(self.bn_layout),
nn.BatchNorm(axis=bn_axis, momentum=self.bn_mom,
epsilon=self.bn_eps, act_type=self.act_type,
gamma_initializer=gamma_initializer,
running_variance_initializer=gamma_initializer)
)
return self.sequence(self.batchnorm(last=last), self.activation())
def activation(self):
return nn.Activation(self.act_type)
def global_avg_pool(self):
return self.sequence(
self.transpose(self.pooling_layout),
nn.GlobalAvgPool2D(layout=self.pooling_layout)
)
def max_pool(self, pool_size, strides=1, padding=True):
padding = pool_size // 2 if padding is True else int(padding)
return self.sequence(
self.transpose(self.pooling_layout),
nn.MaxPool2D(pool_size, strides=strides, padding=padding,
layout=self.pooling_layout)
)
def conv(self, channels, kernel_size, padding=True, strides=1, groups=1, in_channels=0):
padding = kernel_size // 2 if padding is True else int(padding)
initializer = self.linear_initializer(groups=groups)
return self.sequence(
self.transpose(self.conv_layout),
nn.Conv2D(channels, kernel_size=kernel_size, strides=strides,
padding=padding, use_bias=False, groups=groups,
in_channels=in_channels, layout=self.conv_layout,
weight_initializer=initializer)
)
def separable_conv(self, channels, kernel_size, in_channels, padding=True, strides=1):
return self.sequence(
self.conv(in_channels, kernel_size, padding=padding,
strides=strides, groups=in_channels, in_channels=in_channels),
self.conv(channels, 1, in_channels=in_channels)
)
def dense(self, units, in_units=0):
return nn.Dense(units, in_units=in_units,
weight_initializer=self.linear_initializer())
def transpose(self, to_layout):
if self.last_layout == to_layout:
return None
ret = Transpose(self.last_layout, to_layout)
self.last_layout = to_layout
return ret
def sequence(self, *seq):
seq = list(filter(lambda x: x is not None, seq))
if len(seq) == 1:
return seq[0]
ret = nn.HybridSequential()
ret.add(*seq)
return ret
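# Hedged usage sketch (added for illustration; not part of the original file).
# It shows how the Builder above composes layout-aware blocks; the constructor
# arguments below are ordinary defaults, not values taken from this repository.
def _example_builder_stem():
    builder = Builder(dtype='float32', input_layout='NCHW', conv_layout='NCHW',
                      bn_layout='NCHW', pooling_layout='NCHW',
                      bn_eps=1e-5, bn_mom=0.9, fuse_bn_relu=0, fuse_bn_add_relu=0)
    # With matching layouts, transpose() returns None and sequence() filters it
    # out, so the stem reduces to conv -> batchnorm+relu -> max-pool.
    return builder.sequence(builder.conv(64, 7, strides=2),
                            builder.batchnorm_relu(),
                            builder.max_pool(3, 2))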
class Transpose(HybridBlock):
def __init__(self, from_layout, to_layout):
super().__init__()
supported_layouts = ['NCHW', 'NHWC']
if from_layout not in supported_layouts:
raise ValueError('Not prepared to handle layout: {}'.format(from_layout))
if to_layout not in supported_layouts:
raise ValueError('Not prepared to handle layout: {}'.format(to_layout))
self.from_layout = from_layout
self.to_layout = to_layout
def hybrid_forward(self, F, x):
# Insert transpose if from_layout and to_layout don't match
if self.from_layout == 'NCHW' and self.to_layout == 'NHWC':
return F.transpose(x, axes=(0, 2, 3, 1))
elif self.from_layout == 'NHWC' and self.to_layout == 'NCHW':
return F.transpose(x, axes=(0, 3, 1, 2))
else:
return x
def __repr__(self):
s = '{name}({content})'
if self.from_layout == self.to_layout:
content = 'passthrough ' + self.from_layout
else:
content = self.from_layout + ' -> ' + self.to_layout
return s.format(name=self.__class__.__name__,
content=content)
class LayoutWrapper(HybridBlock):
def __init__(self, op, io_layout, op_layout, **kwargs):
super(LayoutWrapper, self).__init__(**kwargs)
with self.name_scope():
self.layout1 = Transpose(io_layout, op_layout)
self.op = op
self.layout2 = Transpose(op_layout, io_layout)
def hybrid_forward(self, F, *x):
return self.layout2(self.op(*(self.layout1(y) for y in x)))
class BatchNormAddRelu(nn.BatchNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._kwargs.pop('act_type') != 'relu':
raise ValueError('BatchNormAddRelu can be used only with ReLU as activation')
def hybrid_forward(self, F, x, y, gamma, beta, running_mean, running_var):
return F.BatchNormAddRelu(data=x, addend=y, gamma=gamma, beta=beta,
moving_mean=running_mean, moving_var=running_var, name='fwd', **self._kwargs)
class NonFusedBatchNormAddRelu(HybridBlock):
def __init__(self, builder, **kwargs):
super().__init__()
self.bn = builder.batchnorm(**kwargs)
self.act = builder.activation()
def hybrid_forward(self, F, x, y):
return self.act(self.bn(x) + y)
# Blocks
class ResNetBasicBlock(HybridBlock):
def __init__(self, builder, channels, stride, downsample=False, in_channels=0,
version='1', resnext_groups=None, **kwargs):
super().__init__()
assert not resnext_groups
self.transpose = builder.transpose(builder.conv_layout)
builder_copy = builder.copy()
body = [
builder.conv(channels, 3, strides=stride, in_channels=in_channels),
builder.batchnorm_relu(),
builder.conv(channels, 3),
]
self.body = builder.sequence(*body)
self.bn_add_relu = builder.batchnorm_add_relu(last=True)
builder = builder_copy
if downsample:
self.downsample = builder.sequence(
builder.conv(channels, 1, strides=stride, in_channels=in_channels),
builder.batchnorm()
)
else:
self.downsample = None
def hybrid_forward(self, F, x):
if self.transpose is not None:
x = self.transpose(x)
residual = x
x = self.body(x)
if self.downsample:
residual = self.downsample(residual)
x = self.bn_add_relu(x, residual)
return x
class ResNetBottleNeck(HybridBlock):
def __init__(self, builder, channels, stride, downsample=False, in_channels=0,
version='1', resnext_groups=None):
super().__init__()
stride1 = stride if version == '1' else 1
stride2 = 1 if version == '1' else stride
mult = 2 if resnext_groups else 1
groups = resnext_groups or 1
self.transpose = builder.transpose(builder.conv_layout)
builder_copy = builder.copy()
body = [
builder.conv(channels * mult // 4, 1, strides=stride1, in_channels=in_channels),
builder.batchnorm_relu(),
builder.conv(channels * mult // 4, 3, strides=stride2),
builder.batchnorm_relu(),
builder.conv(channels, 1)
]
self.body = builder.sequence(*body)
self.bn_add_relu = builder.batchnorm_add_relu(last=True)
builder = builder_copy
if downsample:
self.downsample = builder.sequence(
builder.conv(channels, 1, strides=stride, in_channels=in_channels),
builder.batchnorm()
)
else:
self.downsample = None
def hybrid_forward(self, F, x):
if self.transpose is not None:
x = self.transpose(x)
residual = x
x = self.body(x)
if self.downsample:
residual = self.downsample(residual)
x = self.bn_add_relu(x, residual)
return x
class XceptionBlock(HybridBlock):
def __init__(self, builder, definition, in_channels, relu_at_beginning=True):
super().__init__()
self.transpose = builder.transpose(builder.conv_layout)
builder_copy = builder.copy()
body = []
if relu_at_beginning:
body.append(builder.activation())
last_channels = in_channels
for channels1, channels2 in zip(definition, definition[1:] + [0]):
if channels1 > 0:
body.append(builder.separable_conv(channels1, 3, in_channels=last_channels))
if channels2 > 0:
body.append(builder.batchnorm_relu())
else:
body.append(builder.batchnorm(last=True))
last_channels = channels1
else:
body.append(builder.max_pool(3, 2))
self.body = builder.sequence(*body)
builder = builder_copy
if any(map(lambda x: x <= 0, definition)):
self.shortcut = builder.sequence(
builder.conv(last_channels, 1, strides=2, in_channels=in_channels),
builder.batchnorm(),
)
else:
self.shortcut = builder.sequence()
def hybrid_forward(self, F, x):
return self.shortcut(x) + self.body(x)
# Nets
class ResNet(HybridBlock):
def __init__(self, builder, block, layers, channels, classes=1000,
version='1', resnext_groups=None):
super().__init__()
assert len(layers) == len(channels) - 1
self.version = version
with self.name_scope():
features = [
builder.conv(channels[0], 7, strides=2),
builder.batchnorm_relu(),
builder.max_pool(3, 2),
]
for i, num_layer in enumerate(layers):
stride = 1 if i == 0 else 2
features.append(self.make_layer(builder, block, num_layer, channels[i+1],
stride, in_channels=channels[i],
resnext_groups=resnext_groups))
features.append(builder.global_avg_pool())
self.features = builder.sequence(*features)
self.output = builder.dense(classes, in_units=channels[-1])
def make_layer(self, builder, block, layers, channels, stride,
in_channels=0, resnext_groups=None):
layer = []
layer.append(block(builder, channels, stride, channels != in_channels,
in_channels=in_channels, version=self.version,
resnext_groups=resnext_groups))
for _ in range(layers-1):
layer.append(block(builder, channels, 1, False, in_channels=channels,
version=self.version, resnext_groups=resnext_groups))
return builder.sequence(*layer)
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
class Xception(HybridBlock):
def __init__(self, builder,
definition=([32, 64],
[[128, 128, 0], [256, 256, 0], [728, 728, 0],
*([[728, 728, 728]] * 8), [728, 1024, 0]],
[1536, 2048]),
classes=1000):
super().__init__()
definition1, definition2, definition3 = definition
with self.name_scope():
features = []
last_channels = 0
for i, channels in enumerate(definition1):
features += [
builder.conv(channels, 3, strides=(2 if i == 0 else 1), in_channels=last_channels),
builder.batchnorm_relu(),
]
last_channels = channels
for i, block_definition in enumerate(definition2):
features.append(XceptionBlock(builder, block_definition, in_channels=last_channels,
relu_at_beginning=False if i == 0 else True))
last_channels = list(filter(lambda x: x > 0, block_definition))[-1]
for i, channels in enumerate(definition3):
features += [
builder.separable_conv(channels, 3, in_channels=last_channels),
builder.batchnorm_relu(),
]
last_channels = channels
features.append(builder.global_avg_pool())
self.features = builder.sequence(*features)
self.output = builder.dense(classes, in_units=last_channels)
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
resnet_spec = {18: (ResNetBasicBlock, [2, 2, 2, 2], [64, 64, 128, 256, 512]),
34: (ResNetBasicBlock, [3, 4, 6, 3], [64, 64, 128, 256, 512]),
50: (ResNetBottleNeck, [3, 4, 6, 3], [64, 256, 512, 1024, 2048]),
101: (ResNetBottleNeck, [3, 4, 23, 3], [64, 256, 512, 1024, 2048]),
152: (ResNetBottleNeck, [3, 8, 36, 3], [64, 256, 512, 1024, 2048])}
def create_resnet(builder, version, num_layers=50, resnext=False, classes=1000, num_groups=32):
    assert num_layers in resnet_spec, \
        "Invalid number of layers: {}. Options are {}".format(
            num_layers, str(resnet_spec.keys()))
    block_class, layers, channels = resnet_spec[num_layers]
    assert not resnext or num_layers >= 50, \
        "Cannot create a resnext with fewer than 50 layers"
    net = ResNet(builder, block_class, layers, channels, classes=classes, version=version,
                 resnext_groups=num_groups if resnext else None)
    return net
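# Hedged example (added for illustration; not part of the original file):
# building a plain ResNet-50 through create_resnet(); every argument here is an
# ordinary default rather than a value taken from this repository.
def _example_create_resnet50():
    builder = Builder(dtype='float32', input_layout='NCHW', conv_layout='NCHW',
                      bn_layout='NCHW', pooling_layout='NCHW', bn_eps=1e-5,
                      bn_mom=0.9, fuse_bn_relu=0, fuse_bn_add_relu=0)
    return create_resnet(builder, version='1.5', num_layers=50)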
class fp16_model(mx.gluon.block.HybridBlock):
def __init__(self, net, **kwargs):
super(fp16_model, self).__init__(**kwargs)
with self.name_scope():
self._net = net
def hybrid_forward(self, F, x):
y = self._net(x)
y = F.cast(y, dtype='float32')
return y
def get_model(arch, num_classes, num_layers, image_shape, dtype, amp,
input_layout, conv_layout, batchnorm_layout, pooling_layout,
batchnorm_eps, batchnorm_mom, fuse_bn_relu, fuse_bn_add_relu, **kwargs):
builder = Builder(
dtype = dtype,
input_layout = input_layout,
conv_layout = conv_layout,
bn_layout = batchnorm_layout,
pooling_layout = pooling_layout,
bn_eps = batchnorm_eps,
bn_mom = batchnorm_mom,
fuse_bn_relu = fuse_bn_relu,
fuse_bn_add_relu = fuse_bn_add_relu,
)
if arch.startswith('resnet') or arch.startswith('resnext'):
version = '1' if arch in {'resnetv1', 'resnextv1'} else '1.5'
        net = create_resnet(
            builder = builder,
            version = version,
            resnext = arch.startswith('resnext'),
            num_layers = num_layers,
            classes = num_classes,
            num_groups = kwargs.get('num_groups', 32),
        )
elif arch == 'xception':
net = Xception(builder, classes=num_classes)
else:
raise ValueError('Wrong model architecture')
net.hybridize(static_shape=True, static_alloc=True)
if not amp:
net.cast(dtype)
if dtype == 'float16':
net = fp16_model(net)
return net
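# Hedged usage sketch (added for illustration; not part of the original file):
# get_model() is normally driven by the argparse namespace produced with
# add_model_args(); called directly it looks like this.
def _example_get_model():
    return get_model(arch='resnetv15', num_classes=1000, num_layers=50,
                     image_shape=[3, 224, 224], dtype='float32', amp=False,
                     input_layout='NCHW', conv_layout='NCHW',
                     batchnorm_layout='NCHW', pooling_layout='NCHW',
                     batchnorm_eps=1e-5, batchnorm_mom=0.9,
                     fuse_bn_relu=0, fuse_bn_add_relu=0)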
| DeepLearningExamples-master | MxNet/Classification/RN50v1.5/models.py |
import logging
import os
import dllogger
import horovod.mxnet as hvd
def format_step(step):
if isinstance(step, str):
return step
s = ""
if len(step) > 0:
s += "Epoch: {} ".format(step[0])
if len(step) > 1:
s += "Iteration: {} ".format(step[1])
if len(step) > 2:
s += "Validation Iteration: {} ".format(step[2])
if len(step) == 0:
s = "Summary:"
return s
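# Example behavior (added for illustration): format_step((1, 100)) returns
# "Epoch: 1 Iteration: 100 " and format_step(()) returns "Summary:".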
def setup_logging(args):
logging.basicConfig(level=logging.DEBUG, format='{asctime}:{levelname}: {message}', style='{')
if hvd.rank() == 0:
logging_dir = args.logdir if args.logdir is not None else args.workspace
dllogger.init(backends=[
dllogger.StdOutBackend(dllogger.Verbosity.DEFAULT, step_format=format_step),
dllogger.JSONStreamBackend(
dllogger.Verbosity.VERBOSE, os.path.join(logging_dir, args.dllogger_log)),
])
else:
dllogger.init([])
dllogger.metadata("val.accuracy", {"unit": None})
dllogger.metadata("val.top_k_accuracy_5", {"unit": None})
dllogger.metadata("train.ips", {"unit": "images/s"})
dllogger.metadata("val.ips", {"unit": "images/s"})
dllogger.metadata("val.latency_50", {"unit": "s"})
dllogger.metadata("val.latency_90", {"unit": "s"})
dllogger.metadata("val.latency_avg", {"unit": "s"})
| DeepLearningExamples-master | MxNet/Classification/RN50v1.5/log_utils.py |
import numpy as np
class CompositeMeter:
def __init__(self):
self.register = {}
def register_metric(self, name, metric):
self.register[name] = metric
def _validate(self, metric_name):
if metric_name not in self.register:
            raise ValueError('{} is not a registered metric'.format(metric_name))
def update_metric(self, metric_name, value):
self._validate(metric_name)
self.register[metric_name].update(value)
def update_dict(self, dict_metric):
for name, val in dict_metric.items():
if name in self.register.keys():
self.update_metric(name, val)
def get(self, metric_name=None):
if metric_name is not None:
self._validate(metric_name)
return self.register[metric_name].get()
res_dict = {name: metric.get() for name, metric in self.register.items()}
return res_dict
class MaxMeter:
def __init__(self):
self.max = None
self.n = 0
def reset(self):
self.max = None
self.n = 0
def update(self, val):
if self.max is None:
self.max = val
else:
self.max = max(self.max, val)
def get(self):
return self.max
class MinMeter:
def __init__(self):
self.min = None
self.n = 0
def reset(self):
self.min = None
self.n = 0
def update(self, val):
if self.min is None:
self.min = val
else:
self.min = min(self.min, val)
def get(self):
return self.min
class AvgMeter:
def __init__(self):
self.sum = 0
self.n = 0
def reset(self):
self.sum = 0
self.n = 0
def update(self, val):
self.sum += val
self.n += 1
def get(self):
return self.sum / self.n
class PercentileMeter:
def __init__(self, q):
self.data = []
self.q = q
def reset(self):
self.data = []
def update(self, data):
self.data.extend(data)
def get(self):
return np.percentile(self.data, self.q)
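# Hedged usage sketch (added for illustration; not part of the original file):
# training code registers meters once, then feeds whole result dicts; keys that
# were never registered are silently ignored by update_dict().
def _example_composite_meter():
    meters = CompositeMeter()
    meters.register_metric('val.accuracy', MaxMeter())
    meters.register_metric('val.latency_avg', AvgMeter())
    meters.update_dict({'val.accuracy': 0.76, 'val.latency_avg': 0.012})
    return meters.get()  # -> {'val.accuracy': 0.76, 'val.latency_avg': 0.012}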
| DeepLearningExamples-master | MxNet/Classification/RN50v1.5/global_metrics.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from packaging.version import Version
from nvidia import dali
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
from nvidia.dali.plugin.mxnet import DALIClassificationIterator
import horovod.mxnet as hvd
def add_dali_args(parser):
group = parser.add_argument_group('DALI data backend', 'entire group applies only to dali data backend')
    group.add_argument('--dali-separ-val', action='store_true',
                       help='each process will perform independent validation on the whole val-set')
    group.add_argument('--dali-threads', type=int, default=6,
                       help="number of threads per GPU for DALI")
    group.add_argument('--dali-validation-threads', type=int, default=10,
                       help="number of threads per GPU for DALI for validation")
group.add_argument('--dali-prefetch-queue', type=int, default=5, help="DALI prefetch queue depth")
group.add_argument('--dali-nvjpeg-memory-padding', type=int, default=64, help="Memory padding value for nvJPEG (in MB)")
group.add_argument('--dali-fuse-decoder', type=int, default=1, help="0 or 1 whether to fuse decoder or not")
group.add_argument('--dali-nvjpeg-width-hint', type=int, default=5980, help="Width hint value for nvJPEG (in pixels)")
group.add_argument('--dali-nvjpeg-height-hint', type=int, default=6430, help="Height hint value for nvJPEG (in pixels)")
group.add_argument('--dali-dont-use-mmap', default=False, action='store_true', help="Use plain I/O instead of MMAP for datasets")
return parser
class HybridTrainPipe(Pipeline):
def __init__(self, args, batch_size, num_threads, device_id, rec_path, idx_path,
shard_id, num_shards, crop_shape, nvjpeg_padding, prefetch_queue=3,
output_layout=types.NCHW, pad_output=True, dtype='float16', dali_cpu=False,
nvjpeg_width_hint=5980, nvjpeg_height_hint=6430,
):
super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id, prefetch_queue_depth = prefetch_queue)
self.input = ops.MXNetReader(path=[rec_path], index_path=[idx_path],
random_shuffle=True, shard_id=shard_id, num_shards=num_shards,
dont_use_mmap=args.dali_dont_use_mmap)
if dali_cpu:
dali_device = "cpu"
decoder_device = "cpu"
else:
dali_device = "gpu"
decoder_device = "mixed"
dali_kwargs_fallback = {}
if Version(dali.__version__) >= Version("1.2.0"):
dali_kwargs_fallback = {
"preallocate_width_hint": nvjpeg_width_hint,
"preallocate_height_hint": nvjpeg_height_hint,
}
if args.dali_fuse_decoder:
self.decode = ops.ImageDecoderRandomCrop(device=decoder_device, output_type=types.RGB,
device_memory_padding=nvjpeg_padding,
host_memory_padding=nvjpeg_padding,
**dali_kwargs_fallback)
else:
self.decode = ops.ImageDecoder(device=decoder_device, output_type=types.RGB,
device_memory_padding=nvjpeg_padding,
host_memory_padding=nvjpeg_padding,
**dali_kwargs_fallback)
if args.dali_fuse_decoder:
self.resize = ops.Resize(device=dali_device, resize_x=crop_shape[1], resize_y=crop_shape[0])
else:
self.resize = ops.RandomResizedCrop(device=dali_device, size=crop_shape)
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT16 if dtype == 'float16' else types.FLOAT,
output_layout=output_layout, crop=crop_shape, pad_output=pad_output,
image_type=types.RGB, mean=args.rgb_mean, std=args.rgb_std)
self.coin = ops.CoinFlip(probability=0.5)
def define_graph(self):
rng = self.coin()
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
images = self.resize(images)
output = self.cmnp(images.gpu(), mirror=rng)
return [output, self.labels]
class HybridValPipe(Pipeline):
def __init__(self, args, batch_size, num_threads, device_id, rec_path, idx_path,
shard_id, num_shards, crop_shape, nvjpeg_padding, prefetch_queue=3, resize_shp=None,
output_layout=types.NCHW, pad_output=True, dtype='float16', dali_cpu=False,
nvjpeg_width_hint=5980, nvjpeg_height_hint=6430):
super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id, prefetch_queue_depth=prefetch_queue)
self.input = ops.MXNetReader(path=[rec_path], index_path=[idx_path],
random_shuffle=False, shard_id=shard_id, num_shards=num_shards,
dont_use_mmap=args.dali_dont_use_mmap)
if dali_cpu:
dali_device = "cpu"
decoder_device = "cpu"
else:
dali_device = "gpu"
decoder_device = "mixed"
dali_kwargs_fallback = {}
if Version(dali.__version__) >= Version("1.2.0"):
dali_kwargs_fallback = {
"preallocate_width_hint": nvjpeg_width_hint,
"preallocate_height_hint": nvjpeg_height_hint
}
self.decode = ops.ImageDecoder(device=decoder_device, output_type=types.RGB,
device_memory_padding=nvjpeg_padding,
host_memory_padding=nvjpeg_padding,
**dali_kwargs_fallback)
self.resize = ops.Resize(device=dali_device, resize_shorter=resize_shp) if resize_shp else None
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT16 if dtype == 'float16' else types.FLOAT,
output_layout=output_layout, crop=crop_shape, pad_output=pad_output,
image_type=types.RGB, mean=args.rgb_mean, std=args.rgb_std)
def define_graph(self):
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
if self.resize:
images = self.resize(images)
output = self.cmnp(images.gpu())
return [output, self.labels]
def get_rec_iter(args, kv=None, dali_cpu=False):
gpus = args.gpus
num_threads = args.dali_threads
num_validation_threads = args.dali_validation_threads
pad_output = (args.image_shape[0] == 4)
# the input_layout w.r.t. the model is the output_layout of the image pipeline
output_layout = types.NHWC if args.input_layout == 'NHWC' else types.NCHW
if 'horovod' in args.kv_store:
rank = hvd.rank()
nWrk = hvd.size()
else:
rank = kv.rank if kv else 0
nWrk = kv.num_workers if kv else 1
batch_size = args.batch_size // nWrk // len(gpus)
trainpipes = [HybridTrainPipe(args = args,
batch_size = batch_size,
num_threads = num_threads,
device_id = gpu_id,
rec_path = args.data_train,
idx_path = args.data_train_idx,
shard_id = gpus.index(gpu_id) + len(gpus)*rank,
num_shards = len(gpus)*nWrk,
crop_shape = args.image_shape[1:],
output_layout = output_layout,
dtype = args.dtype,
pad_output = pad_output,
dali_cpu = dali_cpu,
nvjpeg_padding = args.dali_nvjpeg_memory_padding * 1024 * 1024,
prefetch_queue = args.dali_prefetch_queue,
nvjpeg_width_hint = args.dali_nvjpeg_width_hint,
nvjpeg_height_hint = args.dali_nvjpeg_height_hint) for gpu_id in gpus]
if args.data_val:
valpipes = [HybridValPipe(args = args,
batch_size = batch_size,
num_threads = num_validation_threads,
device_id = gpu_id,
rec_path = args.data_val,
idx_path = args.data_val_idx,
shard_id = 0 if args.dali_separ_val
else gpus.index(gpu_id) + len(gpus)*rank,
num_shards = 1 if args.dali_separ_val else len(gpus)*nWrk,
crop_shape = args.image_shape[1:],
resize_shp = args.data_val_resize,
output_layout = output_layout,
dtype = args.dtype,
pad_output = pad_output,
dali_cpu = dali_cpu,
nvjpeg_padding = args.dali_nvjpeg_memory_padding * 1024 * 1024,
prefetch_queue = args.dali_prefetch_queue,
nvjpeg_width_hint = args.dali_nvjpeg_width_hint,
nvjpeg_height_hint = args.dali_nvjpeg_height_hint) for gpu_id in gpus] if args.data_val else None
trainpipes[0].build()
if args.data_val:
valpipes[0].build()
worker_val_examples = valpipes[0].epoch_size("Reader")
if not args.dali_separ_val:
worker_val_examples = worker_val_examples // nWrk
if rank < valpipes[0].epoch_size("Reader") % nWrk:
worker_val_examples += 1
if args.num_examples < trainpipes[0].epoch_size("Reader"):
warnings.warn("{} training examples will be used, although full training set contains {} examples".format(args.num_examples, trainpipes[0].epoch_size("Reader")))
dali_train_iter = DALIClassificationIterator(trainpipes, args.num_examples // nWrk)
if args.data_val:
dali_val_iter = DALIClassificationIterator(valpipes, worker_val_examples, fill_last_batch = False) if args.data_val else None
else:
dali_val_iter = None
return dali_train_iter, dali_val_iter
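# Hedged usage sketch (added for illustration; not part of the original file):
# given an argparse namespace carrying the flags above plus the data/model
# options referenced in this function, the returned iterators are consumed like
# regular MXNet data iterators, e.g.
#
#   train_iter, val_iter = get_rec_iter(args, kv)
#   for batches in train_iter:          # one element per per-GPU pipeline
#       data = batches[0].data[0]       # decoded, normalized image batch
#       label = batches[0].label[0]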
| DeepLearningExamples-master | MxNet/Classification/RN50v1.5/dali.py |
classes = {
0: 'tench, Tinca tinca',
1: 'goldfish, Carassius auratus',
2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
3: 'tiger shark, Galeocerdo cuvieri',
4: 'hammerhead, hammerhead shark',
5: 'electric ray, crampfish, numbfish, torpedo',
6: 'stingray',
7: 'cock',
8: 'hen',
9: 'ostrich, Struthio camelus',
10: 'brambling, Fringilla montifringilla',
11: 'goldfinch, Carduelis carduelis',
12: 'house finch, linnet, Carpodacus mexicanus',
13: 'junco, snowbird',
14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
15: 'robin, American robin, Turdus migratorius',
16: 'bulbul',
17: 'jay',
18: 'magpie',
19: 'chickadee',
20: 'water ouzel, dipper',
21: 'kite',
22: 'bald eagle, American eagle, Haliaeetus leucocephalus',
23: 'vulture',
24: 'great grey owl, great gray owl, Strix nebulosa',
25: 'European fire salamander, Salamandra salamandra',
26: 'common newt, Triturus vulgaris',
27: 'eft',
28: 'spotted salamander, Ambystoma maculatum',
29: 'axolotl, mud puppy, Ambystoma mexicanum',
30: 'bullfrog, Rana catesbeiana',
31: 'tree frog, tree-frog',
32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
33: 'loggerhead, loggerhead turtle, Caretta caretta',
34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
35: 'mud turtle',
36: 'terrapin',
37: 'box turtle, box tortoise',
38: 'banded gecko',
39: 'common iguana, iguana, Iguana iguana',
40: 'American chameleon, anole, Anolis carolinensis',
41: 'whiptail, whiptail lizard',
42: 'agama',
43: 'frilled lizard, Chlamydosaurus kingi',
44: 'alligator lizard',
45: 'Gila monster, Heloderma suspectum',
46: 'green lizard, Lacerta viridis',
47: 'African chameleon, Chamaeleo chamaeleon',
48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis',
49: 'African crocodile, Nile crocodile, Crocodylus niloticus',
50: 'American alligator, Alligator mississipiensis',
51: 'triceratops',
52: 'thunder snake, worm snake, Carphophis amoenus',
53: 'ringneck snake, ring-necked snake, ring snake',
54: 'hognose snake, puff adder, sand viper',
55: 'green snake, grass snake',
56: 'king snake, kingsnake',
57: 'garter snake, grass snake',
58: 'water snake',
59: 'vine snake',
60: 'night snake, Hypsiglena torquata',
61: 'boa constrictor, Constrictor constrictor',
62: 'rock python, rock snake, Python sebae',
63: 'Indian cobra, Naja naja',
64: 'green mamba',
65: 'sea snake',
66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus',
68: 'sidewinder, horned rattlesnake, Crotalus cerastes',
69: 'trilobite',
70: 'harvestman, daddy longlegs, Phalangium opilio',
71: 'scorpion',
72: 'black and gold garden spider, Argiope aurantia',
73: 'barn spider, Araneus cavaticus',
74: 'garden spider, Aranea diademata',
75: 'black widow, Latrodectus mactans',
76: 'tarantula',
77: 'wolf spider, hunting spider',
78: 'tick',
79: 'centipede',
80: 'black grouse',
81: 'ptarmigan',
82: 'ruffed grouse, partridge, Bonasa umbellus',
83: 'prairie chicken, prairie grouse, prairie fowl',
84: 'peacock',
85: 'quail',
86: 'partridge',
87: 'African grey, African gray, Psittacus erithacus',
88: 'macaw',
89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
90: 'lorikeet',
91: 'coucal',
92: 'bee eater',
93: 'hornbill',
94: 'hummingbird',
95: 'jacamar',
96: 'toucan',
97: 'drake',
98: 'red-breasted merganser, Mergus serrator',
99: 'goose',
100: 'black swan, Cygnus atratus',
101: 'tusker',
102: 'echidna, spiny anteater, anteater',
103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus',
104: 'wallaby, brush kangaroo',
105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
106: 'wombat',
107: 'jellyfish',
108: 'sea anemone, anemone',
109: 'brain coral',
110: 'flatworm, platyhelminth',
111: 'nematode, nematode worm, roundworm',
112: 'conch',
113: 'snail',
114: 'slug',
115: 'sea slug, nudibranch',
116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore',
117: 'chambered nautilus, pearly nautilus, nautilus',
118: 'Dungeness crab, Cancer magister',
119: 'rock crab, Cancer irroratus',
120: 'fiddler crab',
121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica',
122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
124: 'crayfish, crawfish, crawdad, crawdaddy',
125: 'hermit crab',
126: 'isopod',
127: 'white stork, Ciconia ciconia',
128: 'black stork, Ciconia nigra',
129: 'spoonbill',
130: 'flamingo',
131: 'little blue heron, Egretta caerulea',
132: 'American egret, great white heron, Egretta albus',
133: 'bittern',
134: 'crane',
135: 'limpkin, Aramus pictus',
136: 'European gallinule, Porphyrio porphyrio',
137: 'American coot, marsh hen, mud hen, water hen, Fulica americana',
138: 'bustard',
139: 'ruddy turnstone, Arenaria interpres',
140: 'red-backed sandpiper, dunlin, Erolia alpina',
141: 'redshank, Tringa totanus',
142: 'dowitcher',
143: 'oystercatcher, oyster catcher',
144: 'pelican',
145: 'king penguin, Aptenodytes patagonica',
146: 'albatross, mollymawk',
147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus',
148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
149: 'dugong, Dugong dugon',
150: 'sea lion',
151: 'Chihuahua',
152: 'Japanese spaniel',
153: 'Maltese dog, Maltese terrier, Maltese',
154: 'Pekinese, Pekingese, Peke',
155: 'Shih-Tzu',
156: 'Blenheim spaniel',
157: 'papillon',
158: 'toy terrier',
159: 'Rhodesian ridgeback',
160: 'Afghan hound, Afghan',
161: 'basset, basset hound',
162: 'beagle',
163: 'bloodhound, sleuthhound',
164: 'bluetick',
165: 'black-and-tan coonhound',
166: 'Walker hound, Walker foxhound',
167: 'English foxhound',
168: 'redbone',
169: 'borzoi, Russian wolfhound',
170: 'Irish wolfhound',
171: 'Italian greyhound',
172: 'whippet',
173: 'Ibizan hound, Ibizan Podenco',
174: 'Norwegian elkhound, elkhound',
175: 'otterhound, otter hound',
176: 'Saluki, gazelle hound',
177: 'Scottish deerhound, deerhound',
178: 'Weimaraner',
179: 'Staffordshire bullterrier, Staffordshire bull terrier',
180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier',
181: 'Bedlington terrier',
182: 'Border terrier',
183: 'Kerry blue terrier',
184: 'Irish terrier',
185: 'Norfolk terrier',
186: 'Norwich terrier',
187: 'Yorkshire terrier',
188: 'wire-haired fox terrier',
189: 'Lakeland terrier',
190: 'Sealyham terrier, Sealyham',
191: 'Airedale, Airedale terrier',
192: 'cairn, cairn terrier',
193: 'Australian terrier',
194: 'Dandie Dinmont, Dandie Dinmont terrier',
195: 'Boston bull, Boston terrier',
196: 'miniature schnauzer',
197: 'giant schnauzer',
198: 'standard schnauzer',
199: 'Scotch terrier, Scottish terrier, Scottie',
200: 'Tibetan terrier, chrysanthemum dog',
201: 'silky terrier, Sydney silky',
202: 'soft-coated wheaten terrier',
203: 'West Highland white terrier',
204: 'Lhasa, Lhasa apso',
205: 'flat-coated retriever',
206: 'curly-coated retriever',
207: 'golden retriever',
208: 'Labrador retriever',
209: 'Chesapeake Bay retriever',
210: 'German short-haired pointer',
211: 'vizsla, Hungarian pointer',
212: 'English setter',
213: 'Irish setter, red setter',
214: 'Gordon setter',
215: 'Brittany spaniel',
216: 'clumber, clumber spaniel',
217: 'English springer, English springer spaniel',
218: 'Welsh springer spaniel',
219: 'cocker spaniel, English cocker spaniel, cocker',
220: 'Sussex spaniel',
221: 'Irish water spaniel',
222: 'kuvasz',
223: 'schipperke',
224: 'groenendael',
225: 'malinois',
226: 'briard',
227: 'kelpie',
228: 'komondor',
229: 'Old English sheepdog, bobtail',
230: 'Shetland sheepdog, Shetland sheep dog, Shetland',
231: 'collie',
232: 'Border collie',
233: 'Bouvier des Flandres, Bouviers des Flandres',
234: 'Rottweiler',
235: 'German shepherd, German shepherd dog, German police dog, alsatian',
236: 'Doberman, Doberman pinscher',
237: 'miniature pinscher',
238: 'Greater Swiss Mountain dog',
239: 'Bernese mountain dog',
240: 'Appenzeller',
241: 'EntleBucher',
242: 'boxer',
243: 'bull mastiff',
244: 'Tibetan mastiff',
245: 'French bulldog',
246: 'Great Dane',
247: 'Saint Bernard, St Bernard',
248: 'Eskimo dog, husky',
249: 'malamute, malemute, Alaskan malamute',
250: 'Siberian husky',
251: 'dalmatian, coach dog, carriage dog',
252: 'affenpinscher, monkey pinscher, monkey dog',
253: 'basenji',
254: 'pug, pug-dog',
255: 'Leonberg',
256: 'Newfoundland, Newfoundland dog',
257: 'Great Pyrenees',
258: 'Samoyed, Samoyede',
259: 'Pomeranian',
260: 'chow, chow chow',
261: 'keeshond',
262: 'Brabancon griffon',
263: 'Pembroke, Pembroke Welsh corgi',
264: 'Cardigan, Cardigan Welsh corgi',
265: 'toy poodle',
266: 'miniature poodle',
267: 'standard poodle',
268: 'Mexican hairless',
269: 'timber wolf, grey wolf, gray wolf, Canis lupus',
270: 'white wolf, Arctic wolf, Canis lupus tundrarum',
271: 'red wolf, maned wolf, Canis rufus, Canis niger',
272: 'coyote, prairie wolf, brush wolf, Canis latrans',
273: 'dingo, warrigal, warragal, Canis dingo',
274: 'dhole, Cuon alpinus',
275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
276: 'hyena, hyaena',
277: 'red fox, Vulpes vulpes',
278: 'kit fox, Vulpes macrotis',
279: 'Arctic fox, white fox, Alopex lagopus',
280: 'grey fox, gray fox, Urocyon cinereoargenteus',
281: 'tabby, tabby cat',
282: 'tiger cat',
283: 'Persian cat',
284: 'Siamese cat, Siamese',
285: 'Egyptian cat',
286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
287: 'lynx, catamount',
288: 'leopard, Panthera pardus',
289: 'snow leopard, ounce, Panthera uncia',
290: 'jaguar, panther, Panthera onca, Felis onca',
291: 'lion, king of beasts, Panthera leo',
292: 'tiger, Panthera tigris',
293: 'cheetah, chetah, Acinonyx jubatus',
294: 'brown bear, bruin, Ursus arctos',
295: 'American black bear, black bear, Ursus americanus, Euarctos americanus',
296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
297: 'sloth bear, Melursus ursinus, Ursus ursinus',
298: 'mongoose',
299: 'meerkat, mierkat',
300: 'tiger beetle',
301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
302: 'ground beetle, carabid beetle',
303: 'long-horned beetle, longicorn, longicorn beetle',
304: 'leaf beetle, chrysomelid',
305: 'dung beetle',
306: 'rhinoceros beetle',
307: 'weevil',
308: 'fly',
309: 'bee',
310: 'ant, emmet, pismire',
311: 'grasshopper, hopper',
312: 'cricket',
313: 'walking stick, walkingstick, stick insect',
314: 'cockroach, roach',
315: 'mantis, mantid',
316: 'cicada, cicala',
317: 'leafhopper',
318: 'lacewing, lacewing fly',
319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
320: 'damselfly',
321: 'admiral',
322: 'ringlet, ringlet butterfly',
323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
324: 'cabbage butterfly',
325: 'sulphur butterfly, sulfur butterfly',
326: 'lycaenid, lycaenid butterfly',
327: 'starfish, sea star',
328: 'sea urchin',
329: 'sea cucumber, holothurian',
330: 'wood rabbit, cottontail, cottontail rabbit',
331: 'hare',
332: 'Angora, Angora rabbit',
333: 'hamster',
334: 'porcupine, hedgehog',
335: 'fox squirrel, eastern fox squirrel, Sciurus niger',
336: 'marmot',
337: 'beaver',
338: 'guinea pig, Cavia cobaya',
339: 'sorrel',
340: 'zebra',
341: 'hog, pig, grunter, squealer, Sus scrofa',
342: 'wild boar, boar, Sus scrofa',
343: 'warthog',
344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius',
345: 'ox',
346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
347: 'bison',
348: 'ram, tup',
349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
350: 'ibex, Capra ibex',
351: 'hartebeest',
352: 'impala, Aepyceros melampus',
353: 'gazelle',
354: 'Arabian camel, dromedary, Camelus dromedarius',
355: 'llama',
356: 'weasel',
357: 'mink',
358: 'polecat, fitch, foulmart, foumart, Mustela putorius',
359: 'black-footed ferret, ferret, Mustela nigripes',
360: 'otter',
361: 'skunk, polecat, wood pussy',
362: 'badger',
363: 'armadillo',
364: 'three-toed sloth, ai, Bradypus tridactylus',
365: 'orangutan, orang, orangutang, Pongo pygmaeus',
366: 'gorilla, Gorilla gorilla',
367: 'chimpanzee, chimp, Pan troglodytes',
368: 'gibbon, Hylobates lar',
369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus',
370: 'guenon, guenon monkey',
371: 'patas, hussar monkey, Erythrocebus patas',
372: 'baboon',
373: 'macaque',
374: 'langur',
375: 'colobus, colobus monkey',
376: 'proboscis monkey, Nasalis larvatus',
377: 'marmoset',
378: 'capuchin, ringtail, Cebus capucinus',
379: 'howler monkey, howler',
380: 'titi, titi monkey',
381: 'spider monkey, Ateles geoffroyi',
382: 'squirrel monkey, Saimiri sciureus',
383: 'Madagascar cat, ring-tailed lemur, Lemur catta',
384: 'indri, indris, Indri indri, Indri brevicaudatus',
385: 'Indian elephant, Elephas maximus',
386: 'African elephant, Loxodonta africana',
387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
389: 'barracouta, snoek',
390: 'eel',
391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
392: 'rock beauty, Holocanthus tricolor',
393: 'anemone fish',
394: 'sturgeon',
395: 'gar, garfish, garpike, billfish, Lepisosteus osseus',
396: 'lionfish',
397: 'puffer, pufferfish, blowfish, globefish',
398: 'abacus',
399: 'abaya',
400: "academic gown, academic robe, judge's robe",
401: 'accordion, piano accordion, squeeze box',
402: 'acoustic guitar',
403: 'aircraft carrier, carrier, flattop, attack aircraft carrier',
404: 'airliner',
405: 'airship, dirigible',
406: 'altar',
407: 'ambulance',
408: 'amphibian, amphibious vehicle',
409: 'analog clock',
410: 'apiary, bee house',
411: 'apron',
412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
413: 'assault rifle, assault gun',
414: 'backpack, back pack, knapsack, packsack, rucksack, haversack',
415: 'bakery, bakeshop, bakehouse',
416: 'balance beam, beam',
417: 'balloon',
418: 'ballpoint, ballpoint pen, ballpen, Biro',
419: 'Band Aid',
420: 'banjo',
421: 'bannister, banister, balustrade, balusters, handrail',
422: 'barbell',
423: 'barber chair',
424: 'barbershop',
425: 'barn',
426: 'barometer',
427: 'barrel, cask',
428: 'barrow, garden cart, lawn cart, wheelbarrow',
429: 'baseball',
430: 'basketball',
431: 'bassinet',
432: 'bassoon',
433: 'bathing cap, swimming cap',
434: 'bath towel',
435: 'bathtub, bathing tub, bath, tub',
436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
437: 'beacon, lighthouse, beacon light, pharos',
438: 'beaker',
439: 'bearskin, busby, shako',
440: 'beer bottle',
441: 'beer glass',
442: 'bell cote, bell cot',
443: 'bib',
444: 'bicycle-built-for-two, tandem bicycle, tandem',
445: 'bikini, two-piece',
446: 'binder, ring-binder',
447: 'binoculars, field glasses, opera glasses',
448: 'birdhouse',
449: 'boathouse',
450: 'bobsled, bobsleigh, bob',
451: 'bolo tie, bolo, bola tie, bola',
452: 'bonnet, poke bonnet',
453: 'bookcase',
454: 'bookshop, bookstore, bookstall',
455: 'bottlecap',
456: 'bow',
457: 'bow tie, bow-tie, bowtie',
458: 'brass, memorial tablet, plaque',
459: 'brassiere, bra, bandeau',
460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
461: 'breastplate, aegis, egis',
462: 'broom',
463: 'bucket, pail',
464: 'buckle',
465: 'bulletproof vest',
466: 'bullet train, bullet',
467: 'butcher shop, meat market',
468: 'cab, hack, taxi, taxicab',
469: 'caldron, cauldron',
470: 'candle, taper, wax light',
471: 'cannon',
472: 'canoe',
473: 'can opener, tin opener',
474: 'cardigan',
475: 'car mirror',
476: 'carousel, carrousel, merry-go-round, roundabout, whirligig',
477: "carpenter's kit, tool kit",
478: 'carton',
479: 'car wheel',
480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
481: 'cassette',
482: 'cassette player',
483: 'castle',
484: 'catamaran',
485: 'CD player',
486: 'cello, violoncello',
487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone',
488: 'chain',
489: 'chainlink fence',
490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour',
491: 'chain saw, chainsaw',
492: 'chest',
493: 'chiffonier, commode',
494: 'chime, bell, gong',
495: 'china cabinet, china closet',
496: 'Christmas stocking',
497: 'church, church building',
498: 'cinema, movie theater, movie theatre, movie house, picture palace',
499: 'cleaver, meat cleaver, chopper',
500: 'cliff dwelling',
501: 'cloak',
502: 'clog, geta, patten, sabot',
503: 'cocktail shaker',
504: 'coffee mug',
505: 'coffeepot',
506: 'coil, spiral, volute, whorl, helix',
507: 'combination lock',
508: 'computer keyboard, keypad',
509: 'confectionery, confectionary, candy store',
510: 'container ship, containership, container vessel',
511: 'convertible',
512: 'corkscrew, bottle screw',
513: 'cornet, horn, trumpet, trump',
514: 'cowboy boot',
515: 'cowboy hat, ten-gallon hat',
516: 'cradle',
517: 'crane',
518: 'crash helmet',
519: 'crate',
520: 'crib, cot',
521: 'Crock Pot',
522: 'croquet ball',
523: 'crutch',
524: 'cuirass',
525: 'dam, dike, dyke',
526: 'desk',
527: 'desktop computer',
528: 'dial telephone, dial phone',
529: 'diaper, nappy, napkin',
530: 'digital clock',
531: 'digital watch',
532: 'dining table, board',
533: 'dishrag, dishcloth',
534: 'dishwasher, dish washer, dishwashing machine',
535: 'disk brake, disc brake',
536: 'dock, dockage, docking facility',
537: 'dogsled, dog sled, dog sleigh',
538: 'dome',
539: 'doormat, welcome mat',
540: 'drilling platform, offshore rig',
541: 'drum, membranophone, tympan',
542: 'drumstick',
543: 'dumbbell',
544: 'Dutch oven',
545: 'electric fan, blower',
546: 'electric guitar',
547: 'electric locomotive',
548: 'entertainment center',
549: 'envelope',
550: 'espresso maker',
551: 'face powder',
552: 'feather boa, boa',
553: 'file, file cabinet, filing cabinet',
554: 'fireboat',
555: 'fire engine, fire truck',
556: 'fire screen, fireguard',
557: 'flagpole, flagstaff',
558: 'flute, transverse flute',
559: 'folding chair',
560: 'football helmet',
561: 'forklift',
562: 'fountain',
563: 'fountain pen',
564: 'four-poster',
565: 'freight car',
566: 'French horn, horn',
567: 'frying pan, frypan, skillet',
568: 'fur coat',
569: 'garbage truck, dustcart',
570: 'gasmask, respirator, gas helmet',
571: 'gas pump, gasoline pump, petrol pump, island dispenser',
572: 'goblet',
573: 'go-kart',
574: 'golf ball',
575: 'golfcart, golf cart',
576: 'gondola',
577: 'gong, tam-tam',
578: 'gown',
579: 'grand piano, grand',
580: 'greenhouse, nursery, glasshouse',
581: 'grille, radiator grille',
582: 'grocery store, grocery, food market, market',
583: 'guillotine',
584: 'hair slide',
585: 'hair spray',
586: 'half track',
587: 'hammer',
588: 'hamper',
589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier',
590: 'hand-held computer, hand-held microcomputer',
591: 'handkerchief, hankie, hanky, hankey',
592: 'hard disc, hard disk, fixed disk',
593: 'harmonica, mouth organ, harp, mouth harp',
594: 'harp',
595: 'harvester, reaper',
596: 'hatchet',
597: 'holster',
598: 'home theater, home theatre',
599: 'honeycomb',
600: 'hook, claw',
601: 'hoopskirt, crinoline',
602: 'horizontal bar, high bar',
603: 'horse cart, horse-cart',
604: 'hourglass',
605: 'iPod',
606: 'iron, smoothing iron',
607: "jack-o'-lantern",
608: 'jean, blue jean, denim',
609: 'jeep, landrover',
610: 'jersey, T-shirt, tee shirt',
611: 'jigsaw puzzle',
612: 'jinrikisha, ricksha, rickshaw',
613: 'joystick',
614: 'kimono',
615: 'knee pad',
616: 'knot',
617: 'lab coat, laboratory coat',
618: 'ladle',
619: 'lampshade, lamp shade',
620: 'laptop, laptop computer',
621: 'lawn mower, mower',
622: 'lens cap, lens cover',
623: 'letter opener, paper knife, paperknife',
624: 'library',
625: 'lifeboat',
626: 'lighter, light, igniter, ignitor',
627: 'limousine, limo',
628: 'liner, ocean liner',
629: 'lipstick, lip rouge',
630: 'Loafer',
631: 'lotion',
632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
633: "loupe, jeweler's loupe",
634: 'lumbermill, sawmill',
635: 'magnetic compass',
636: 'mailbag, postbag',
637: 'mailbox, letter box',
638: 'maillot',
639: 'maillot, tank suit',
640: 'manhole cover',
641: 'maraca',
642: 'marimba, xylophone',
643: 'mask',
644: 'matchstick',
645: 'maypole',
646: 'maze, labyrinth',
647: 'measuring cup',
648: 'medicine chest, medicine cabinet',
649: 'megalith, megalithic structure',
650: 'microphone, mike',
651: 'microwave, microwave oven',
652: 'military uniform',
653: 'milk can',
654: 'minibus',
655: 'miniskirt, mini',
656: 'minivan',
657: 'missile',
658: 'mitten',
659: 'mixing bowl',
660: 'mobile home, manufactured home',
661: 'Model T',
662: 'modem',
663: 'monastery',
664: 'monitor',
665: 'moped',
666: 'mortar',
667: 'mortarboard',
668: 'mosque',
669: 'mosquito net',
670: 'motor scooter, scooter',
671: 'mountain bike, all-terrain bike, off-roader',
672: 'mountain tent',
673: 'mouse, computer mouse',
674: 'mousetrap',
675: 'moving van',
676: 'muzzle',
677: 'nail',
678: 'neck brace',
679: 'necklace',
680: 'nipple',
681: 'notebook, notebook computer',
682: 'obelisk',
683: 'oboe, hautboy, hautbois',
684: 'ocarina, sweet potato',
685: 'odometer, hodometer, mileometer, milometer',
686: 'oil filter',
687: 'organ, pipe organ',
688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO',
689: 'overskirt',
690: 'oxcart',
691: 'oxygen mask',
692: 'packet',
693: 'paddle, boat paddle',
694: 'paddlewheel, paddle wheel',
695: 'padlock',
696: 'paintbrush',
697: "pajama, pyjama, pj's, jammies",
698: 'palace',
699: 'panpipe, pandean pipe, syrinx',
700: 'paper towel',
701: 'parachute, chute',
702: 'parallel bars, bars',
703: 'park bench',
704: 'parking meter',
705: 'passenger car, coach, carriage',
706: 'patio, terrace',
707: 'pay-phone, pay-station',
708: 'pedestal, plinth, footstall',
709: 'pencil box, pencil case',
710: 'pencil sharpener',
711: 'perfume, essence',
712: 'Petri dish',
713: 'photocopier',
714: 'pick, plectrum, plectron',
715: 'pickelhaube',
716: 'picket fence, paling',
717: 'pickup, pickup truck',
718: 'pier',
719: 'piggy bank, penny bank',
720: 'pill bottle',
721: 'pillow',
722: 'ping-pong ball',
723: 'pinwheel',
724: 'pirate, pirate ship',
725: 'pitcher, ewer',
726: "plane, carpenter's plane, woodworking plane",
727: 'planetarium',
728: 'plastic bag',
729: 'plate rack',
730: 'plow, plough',
731: "plunger, plumber's helper",
732: 'Polaroid camera, Polaroid Land camera',
733: 'pole',
734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
735: 'poncho',
736: 'pool table, billiard table, snooker table',
737: 'pop bottle, soda bottle',
738: 'pot, flowerpot',
739: "potter's wheel",
740: 'power drill',
741: 'prayer rug, prayer mat',
742: 'printer',
743: 'prison, prison house',
744: 'projectile, missile',
745: 'projector',
746: 'puck, hockey puck',
747: 'punching bag, punch bag, punching ball, punchball',
748: 'purse',
749: 'quill, quill pen',
750: 'quilt, comforter, comfort, puff',
751: 'racer, race car, racing car',
752: 'racket, racquet',
753: 'radiator',
754: 'radio, wireless',
755: 'radio telescope, radio reflector',
756: 'rain barrel',
757: 'recreational vehicle, RV, R.V.',
758: 'reel',
759: 'reflex camera',
760: 'refrigerator, icebox',
761: 'remote control, remote',
762: 'restaurant, eating house, eating place, eatery',
763: 'revolver, six-gun, six-shooter',
764: 'rifle',
765: 'rocking chair, rocker',
766: 'rotisserie',
767: 'rubber eraser, rubber, pencil eraser',
768: 'rugby ball',
769: 'rule, ruler',
770: 'running shoe',
771: 'safe',
772: 'safety pin',
773: 'saltshaker, salt shaker',
774: 'sandal',
775: 'sarong',
776: 'sax, saxophone',
777: 'scabbard',
778: 'scale, weighing machine',
779: 'school bus',
780: 'schooner',
781: 'scoreboard',
782: 'screen, CRT screen',
783: 'screw',
784: 'screwdriver',
785: 'seat belt, seatbelt',
786: 'sewing machine',
787: 'shield, buckler',
788: 'shoe shop, shoe-shop, shoe store',
789: 'shoji',
790: 'shopping basket',
791: 'shopping cart',
792: 'shovel',
793: 'shower cap',
794: 'shower curtain',
795: 'ski',
796: 'ski mask',
797: 'sleeping bag',
798: 'slide rule, slipstick',
799: 'sliding door',
800: 'slot, one-armed bandit',
801: 'snorkel',
802: 'snowmobile',
803: 'snowplow, snowplough',
804: 'soap dispenser',
805: 'soccer ball',
806: 'sock',
807: 'solar dish, solar collector, solar furnace',
808: 'sombrero',
809: 'soup bowl',
810: 'space bar',
811: 'space heater',
812: 'space shuttle',
813: 'spatula',
814: 'speedboat',
815: "spider web, spider's web",
816: 'spindle',
817: 'sports car, sport car',
818: 'spotlight, spot',
819: 'stage',
820: 'steam locomotive',
821: 'steel arch bridge',
822: 'steel drum',
823: 'stethoscope',
824: 'stole',
825: 'stone wall',
826: 'stopwatch, stop watch',
827: 'stove',
828: 'strainer',
829: 'streetcar, tram, tramcar, trolley, trolley car',
830: 'stretcher',
831: 'studio couch, day bed',
832: 'stupa, tope',
833: 'submarine, pigboat, sub, U-boat',
834: 'suit, suit of clothes',
835: 'sundial',
836: 'sunglass',
837: 'sunglasses, dark glasses, shades',
838: 'sunscreen, sunblock, sun blocker',
839: 'suspension bridge',
840: 'swab, swob, mop',
841: 'sweatshirt',
842: 'swimming trunks, bathing trunks',
843: 'swing',
844: 'switch, electric switch, electrical switch',
845: 'syringe',
846: 'table lamp',
847: 'tank, army tank, armored combat vehicle, armoured combat vehicle',
848: 'tape player',
849: 'teapot',
850: 'teddy, teddy bear',
851: 'television, television system',
852: 'tennis ball',
853: 'thatch, thatched roof',
854: 'theater curtain, theatre curtain',
855: 'thimble',
856: 'thresher, thrasher, threshing machine',
857: 'throne',
858: 'tile roof',
859: 'toaster',
860: 'tobacco shop, tobacconist shop, tobacconist',
861: 'toilet seat',
862: 'torch',
863: 'totem pole',
864: 'tow truck, tow car, wrecker',
865: 'toyshop',
866: 'tractor',
867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi',
868: 'tray',
869: 'trench coat',
870: 'tricycle, trike, velocipede',
871: 'trimaran',
872: 'tripod',
873: 'triumphal arch',
874: 'trolleybus, trolley coach, trackless trolley',
875: 'trombone',
876: 'tub, vat',
877: 'turnstile',
878: 'typewriter keyboard',
879: 'umbrella',
880: 'unicycle, monocycle',
881: 'upright, upright piano',
882: 'vacuum, vacuum cleaner',
883: 'vase',
884: 'vault',
885: 'velvet',
886: 'vending machine',
887: 'vestment',
888: 'viaduct',
889: 'violin, fiddle',
890: 'volleyball',
891: 'waffle iron',
892: 'wall clock',
893: 'wallet, billfold, notecase, pocketbook',
894: 'wardrobe, closet, press',
895: 'warplane, military plane',
896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
897: 'washer, automatic washer, washing machine',
898: 'water bottle',
899: 'water jug',
900: 'water tower',
901: 'whiskey jug',
902: 'whistle',
903: 'wig',
904: 'window screen',
905: 'window shade',
906: 'Windsor tie',
907: 'wine bottle',
908: 'wing',
909: 'wok',
910: 'wooden spoon',
911: 'wool, woolen, woollen',
912: 'worm fence, snake fence, snake-rail fence, Virginia fence',
913: 'wreck',
914: 'yawl',
915: 'yurt',
916: 'web site, website, internet site, site',
917: 'comic book',
918: 'crossword puzzle, crossword',
919: 'street sign',
920: 'traffic light, traffic signal, stoplight',
921: 'book jacket, dust cover, dust jacket, dust wrapper',
922: 'menu',
923: 'plate',
924: 'guacamole',
925: 'consomme',
926: 'hot pot, hotpot',
927: 'trifle',
928: 'ice cream, icecream',
929: 'ice lolly, lolly, lollipop, popsicle',
930: 'French loaf',
931: 'bagel, beigel',
932: 'pretzel',
933: 'cheeseburger',
934: 'hotdog, hot dog, red hot',
935: 'mashed potato',
936: 'head cabbage',
937: 'broccoli',
938: 'cauliflower',
939: 'zucchini, courgette',
940: 'spaghetti squash',
941: 'acorn squash',
942: 'butternut squash',
943: 'cucumber, cuke',
944: 'artichoke, globe artichoke',
945: 'bell pepper',
946: 'cardoon',
947: 'mushroom',
948: 'Granny Smith',
949: 'strawberry',
950: 'orange',
951: 'lemon',
952: 'fig',
953: 'pineapple, ananas',
954: 'banana',
955: 'jackfruit, jak, jack',
956: 'custard apple',
957: 'pomegranate',
958: 'hay',
959: 'carbonara',
960: 'chocolate sauce, chocolate syrup',
961: 'dough',
962: 'meat loaf, meatloaf',
963: 'pizza, pizza pie',
964: 'potpie',
965: 'burrito',
966: 'red wine',
967: 'espresso',
968: 'cup',
969: 'eggnog',
970: 'alp',
971: 'bubble',
972: 'cliff, drop, drop-off',
973: 'coral reef',
974: 'geyser',
975: 'lakeside, lakeshore',
976: 'promontory, headland, head, foreland',
977: 'sandbar, sand bar',
978: 'seashore, coast, seacoast, sea-coast',
979: 'valley, vale',
980: 'volcano',
981: 'ballplayer, baseball player',
982: 'groom, bridegroom',
983: 'scuba diver',
984: 'rapeseed',
985: 'daisy',
986: "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
987: 'corn',
988: 'acorn',
989: 'hip, rose hip, rosehip',
990: 'buckeye, horse chestnut, conker',
991: 'coral fungus',
992: 'agaric',
993: 'gyromitra',
994: 'stinkhorn, carrion fungus',
995: 'earthstar',
996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa',
997: 'bolete',
998: 'ear, spike, capitulum',
999: 'toilet tissue, toilet paper, bathroom tissue'
}
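# Hedged usage example (added for illustration; not part of the original file):
# mapping a predicted class index back to its human-readable ImageNet label.
if __name__ == '__main__':
    print(classes[207])  # -> 'golden retriever'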
| DeepLearningExamples-master | MxNet/Classification/RN50v1.5/imagenet_classes.py |
# Copyright 2017-2018 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import dllogger
import horovod.mxnet as hvd
import dali
import data
import fit
import models
from log_utils import setup_logging
def parse_args():
parser = argparse.ArgumentParser(description="Train classification models on ImageNet",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
models.add_model_args(parser)
fit.add_fit_args(parser)
data.add_data_args(parser)
dali.add_dali_args(parser)
data.add_data_aug_args(parser)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
if 'horovod' in args.kv_store:
hvd.init()
setup_logging(args)
dllogger.log(step='PARAMETER', data=vars(args))
model = models.get_model(**vars(args))
data_loader = data.get_data_loader(args)
fit.fit(args, model, data_loader)
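# Hedged example invocation (added for illustration): --arch, --num-layers and
# --num-classes come from models.add_model_args() above; the remaining flags
# are defined in the fit/data/dali modules.
#
#   python train.py --arch resnetv15 --num-layers 50 --num-classes 1000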
| DeepLearningExamples-master | MxNet/Classification/RN50v1.5/train.py |