# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name
from ...runner.pipeline import Pipeline
pipeline = Pipeline()
pipeline.model_export(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
--torch-jit ${TORCH_JIT} \
\
--config /workspace/gpunet/configs/batch1/GV100/1.75ms.json \
--checkpoint ${CHECKPOINT_DIR}/1.75ms.pth.tar \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--val-path ${DATASETS_DIR}/ \
--is-prunet False \
--batch-size 1
""",
)
)
pipeline.model_conversion(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size ${MAX_BATCH_SIZE} \
--container-version 21.12 \
--max-workspace-size 10000000000 \
--atol OUTPUT__0=100 \
--rtol OUTPUT__0=100
""",
)
)
pipeline.model_deploy(
commands=(
r"""
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${FORMAT} \
--model-control-mode explicit \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--tensorrt-max-workspace-size 10000000000 \
--max-batch-size ${MAX_BATCH_SIZE} \
--batching ${MODEL_BATCHING} \
--preferred-batch-sizes ${MAX_BATCH_SIZE} \
--engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES}
""",
)
)
pipeline.triton_performance_offline_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 2 4 8 16 32 64 \
--concurrency 1 \
--evaluation-mode offline \
--measurement-request-count 10 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_offline.csv",
)
pipeline.triton_performance_online_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 \
--concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 192 200 208 216 224 232 240 248 256 \
--evaluation-mode online \
--measurement-request-count 500 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_online.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_online.csv",
)

# Source: DeepLearningExamples-master / PyTorch/Classification/GPUNet/triton/175ms/runner/pipeline_impl.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pathlib
from typing import List
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name
from ...runner.config import Config
from ...runner.executor import Executor
from ...runner.finalizer import ExperimentFinalizer
from ...runner.maintainer import DockerMaintainer
from ...runner.preparer import ExperimentPreparer
from ...runner.runner_proxy import RunnerProxy
from .pipeline_impl import pipeline
class ExperimentRunner(RunnerProxy):
    """
    Experiment Runner proxy for runner wrapper
    """

    maintainer_cls = DockerMaintainer
    executor_cls = Executor
    preparer_cls = ExperimentPreparer
    finalizer_cls = ExperimentFinalizer


def execute(config_path: str, devices: List[str]):
    if len(devices) == 0:
        devices = ["0"]

    config = Config.from_file(config_path)
    runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices)
    runner.start()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.")
    parser.add_argument(
        "--devices", type=str, nargs="*", required=False, help="List of GPU device ids on which the experiment is executed."
    )
    args = parser.parse_args()

    config_path = args.config_path
    devices = args.devices or []  # guard against None when --devices is omitted

    execute(config_path, devices)

# Source: DeepLearningExamples-master / PyTorch/Classification/GPUNet/triton/175ms/runner/__main__.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name
from ...runner.pipeline import Pipeline
pipeline = Pipeline()
pipeline.model_export(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
--torch-jit ${TORCH_JIT} \
\
--config /workspace/gpunet/configs/batch1/GV100/2.25ms-D.json \
--checkpoint ${CHECKPOINT_DIR}/2.25ms-D.pth.tar \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--val-path ${DATASETS_DIR}/ \
--is-prunet False \
--batch-size 1
""",
)
)
pipeline.model_conversion(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size ${MAX_BATCH_SIZE} \
--container-version 21.12 \
--max-workspace-size 10000000000 \
--atol OUTPUT__0=100 \
--rtol OUTPUT__0=100
""",
)
)
pipeline.model_deploy(
commands=(
r"""
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${FORMAT} \
--model-control-mode explicit \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--tensorrt-max-workspace-size 10000000000 \
--max-batch-size ${MAX_BATCH_SIZE} \
--batching ${MODEL_BATCHING} \
--preferred-batch-sizes ${MAX_BATCH_SIZE} \
--engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES}
""",
)
)
pipeline.triton_performance_offline_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 2 4 8 16 32 64 \
--concurrency 1 \
--evaluation-mode offline \
--measurement-request-count 10 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_offline.csv",
)
pipeline.triton_performance_online_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 \
--concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 192 200 208 216 224 232 240 248 256 \
--evaluation-mode online \
--measurement-request-count 500 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_online.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_online.csv",
)

# Source: DeepLearningExamples-master / PyTorch/Classification/GPUNet/triton/225ms-D/runner/pipeline_impl.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pathlib
from typing import List
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name
from ...runner.config import Config
from ...runner.executor import Executor
from ...runner.finalizer import ExperimentFinalizer
from ...runner.maintainer import DockerMaintainer
from ...runner.preparer import ExperimentPreparer
from ...runner.runner_proxy import RunnerProxy
from .pipeline_impl import pipeline
class ExperimentRunner(RunnerProxy):
    """
    Experiment Runner proxy for runner wrapper
    """

    maintainer_cls = DockerMaintainer
    executor_cls = Executor
    preparer_cls = ExperimentPreparer
    finalizer_cls = ExperimentFinalizer


def execute(config_path: str, devices: List[str]):
    if len(devices) == 0:
        devices = ["0"]

    config = Config.from_file(config_path)
    runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices)
    runner.start()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.")
    parser.add_argument(
        "--devices", type=str, nargs="*", required=False, help="List of GPU device ids on which the experiment is executed."
    )
    args = parser.parse_args()

    config_path = args.config_path
    devices = args.devices or []  # guard against None when --devices is omitted

    execute(config_path, devices)

# Source: DeepLearningExamples-master / PyTorch/Classification/GPUNet/triton/225ms-D/runner/__main__.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name
from ...runner.pipeline import Pipeline
pipeline = Pipeline()
pipeline.model_export(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
--torch-jit ${TORCH_JIT} \
\
--config /workspace/gpunet/configs/batch1/GV100/0.65ms.json \
--checkpoint ${CHECKPOINT_DIR}/0.65ms.pth.tar \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--val-path ${DATASETS_DIR}/ \
--is-prunet False \
--batch-size 1
""",
)
)
pipeline.model_conversion(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size ${MAX_BATCH_SIZE} \
--container-version 21.12 \
--max-workspace-size 10000000000 \
--atol OUTPUT__0=100 \
--rtol OUTPUT__0=100
""",
)
)
pipeline.model_deploy(
commands=(
r"""
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${FORMAT} \
--model-control-mode explicit \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--tensorrt-max-workspace-size 10000000000 \
--max-batch-size ${MAX_BATCH_SIZE} \
--batching ${MODEL_BATCHING} \
--preferred-batch-sizes ${MAX_BATCH_SIZE} \
--engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES}
""",
)
)
pipeline.triton_performance_offline_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 2 4 8 16 32 64 \
--concurrency 1 \
--evaluation-mode offline \
--measurement-request-count 10 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_offline.csv",
)
pipeline.triton_performance_online_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 \
--concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 192 200 208 216 224 232 240 248 256 \
--evaluation-mode online \
--measurement-request-count 500 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_online.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_online.csv",
)

# Source: DeepLearningExamples-master / PyTorch/Classification/GPUNet/triton/065ms/runner/pipeline_impl.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pathlib
from typing import List
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name
from ...runner.config import Config
from ...runner.executor import Executor
from ...runner.finalizer import ExperimentFinalizer
from ...runner.maintainer import DockerMaintainer
from ...runner.preparer import ExperimentPreparer
from ...runner.runner_proxy import RunnerProxy
from .pipeline_impl import pipeline
class ExperimentRunner(RunnerProxy):
    """
    Experiment Runner proxy for runner wrapper
    """

    maintainer_cls = DockerMaintainer
    executor_cls = Executor
    preparer_cls = ExperimentPreparer
    finalizer_cls = ExperimentFinalizer


def execute(config_path: str, devices: List[str]):
    if len(devices) == 0:
        devices = ["0"]

    config = Config.from_file(config_path)
    runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices)
    runner.start()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.")
    parser.add_argument(
        "--devices", type=str, nargs="*", required=False, help="List of GPU device ids on which the experiment is executed."
    )
    args = parser.parse_args()

    config_path = args.config_path
    devices = args.devices or []  # guard against None when --devices is omitted

    execute(config_path, devices)

# Source: DeepLearningExamples-master / PyTorch/Classification/GPUNet/triton/065ms/runner/__main__.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name
from ...runner.pipeline import Pipeline
pipeline = Pipeline()
pipeline.model_export(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
--torch-jit ${TORCH_JIT} \
\
--config /workspace/gpunet/configs/batch1/GV100/0.8ms-D.json \
--checkpoint ${CHECKPOINT_DIR}/0.8ms-D.pth.tar \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--val-path ${DATASETS_DIR}/ \
--is-prunet True \
--batch-size 1
""",
)
)
pipeline.model_conversion(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size ${MAX_BATCH_SIZE} \
--container-version 21.12 \
--max-workspace-size 10000000000 \
--atol OUTPUT__0=100 \
--rtol OUTPUT__0=100
""",
)
)
pipeline.model_deploy(
commands=(
r"""
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${FORMAT} \
--model-control-mode explicit \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--tensorrt-max-workspace-size 10000000000 \
--max-batch-size ${MAX_BATCH_SIZE} \
--batching ${MODEL_BATCHING} \
--preferred-batch-sizes ${MAX_BATCH_SIZE} \
--engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES}
""",
)
)
pipeline.triton_performance_offline_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 2 4 8 16 32 64 \
--concurrency 1 \
--evaluation-mode offline \
--measurement-request-count 10 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_offline.csv",
)
pipeline.triton_performance_online_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 \
--concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 192 200 208 216 224 232 240 248 256 \
--evaluation-mode online \
--measurement-request-count 500 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_online.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_online.csv",
)

# Source: DeepLearningExamples-master / PyTorch/Classification/GPUNet/triton/08ms-D/runner/pipeline_impl.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pathlib
from typing import List
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name
from ...runner.config import Config
from ...runner.executor import Executor
from ...runner.finalizer import ExperimentFinalizer
from ...runner.maintainer import DockerMaintainer
from ...runner.preparer import ExperimentPreparer
from ...runner.runner_proxy import RunnerProxy
from .pipeline_impl import pipeline
class ExperimentRunner(RunnerProxy):
    """
    Experiment Runner proxy for runner wrapper
    """

    maintainer_cls = DockerMaintainer
    executor_cls = Executor
    preparer_cls = ExperimentPreparer
    finalizer_cls = ExperimentFinalizer


def execute(config_path: str, devices: List[str]):
    if len(devices) == 0:
        devices = ["0"]

    config = Config.from_file(config_path)
    runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices)
    runner.start()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.")
    parser.add_argument(
        "--devices", type=str, nargs="*", required=False, help="List of GPU device ids on which the experiment is executed."
    )
    args = parser.parse_args()

    config_path = args.config_path
    devices = args.devices or []  # guard against None when --devices is omitted

    execute(config_path, devices)

# Source: DeepLearningExamples-master / PyTorch/Classification/GPUNet/triton/08ms-D/runner/__main__.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name
from ...runner.pipeline import Pipeline
pipeline = Pipeline()
pipeline.model_export(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
--torch-jit ${TORCH_JIT} \
\
--config /workspace/gpunet/configs/batch1/GV100/1.25ms-D.json \
--checkpoint ${CHECKPOINT_DIR}/1.25ms-D.pth.tar \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--val-path ${DATASETS_DIR}/ \
--is-prunet False \
--batch-size 1
""",
)
)
pipeline.model_conversion(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size ${MAX_BATCH_SIZE} \
--container-version 21.12 \
--max-workspace-size 10000000000 \
--atol OUTPUT__0=100 \
--rtol OUTPUT__0=100
""",
)
)
pipeline.model_deploy(
commands=(
r"""
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${FORMAT} \
--model-control-mode explicit \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--tensorrt-max-workspace-size 10000000000 \
--max-batch-size ${MAX_BATCH_SIZE} \
--batching ${MODEL_BATCHING} \
--preferred-batch-sizes ${MAX_BATCH_SIZE} \
--engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES}
""",
)
)
pipeline.triton_performance_offline_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 2 4 8 16 32 64 \
--concurrency 1 \
--evaluation-mode offline \
--measurement-request-count 10 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_offline.csv",
)
pipeline.triton_performance_online_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 \
--concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 192 200 208 216 224 232 240 248 256 \
--evaluation-mode online \
--measurement-request-count 500 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_online.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_online.csv",
)

# Source: DeepLearningExamples-master / PyTorch/Classification/GPUNet/triton/125ms-D/runner/pipeline_impl.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pathlib
from typing import List
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name
from ...runner.config import Config
from ...runner.executor import Executor
from ...runner.finalizer import ExperimentFinalizer
from ...runner.maintainer import DockerMaintainer
from ...runner.preparer import ExperimentPreparer
from ...runner.runner_proxy import RunnerProxy
from .pipeline_impl import pipeline
class ExperimentRunner(RunnerProxy):
    """
    Experiment Runner proxy for runner wrapper
    """

    maintainer_cls = DockerMaintainer
    executor_cls = Executor
    preparer_cls = ExperimentPreparer
    finalizer_cls = ExperimentFinalizer


def execute(config_path: str, devices: List[str]):
    if len(devices) == 0:
        devices = ["0"]

    config = Config.from_file(config_path)
    runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices)
    runner.start()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.")
    parser.add_argument(
        "--devices", type=str, nargs="*", required=False, help="List of GPU device ids on which the experiment is executed."
    )
    args = parser.parse_args()

    config_path = args.config_path
    devices = args.devices or []  # guard against None when --devices is omitted

    execute(config_path, devices)

# Source: DeepLearningExamples-master / PyTorch/Classification/GPUNet/triton/125ms-D/runner/__main__.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name
from ...runner.pipeline import Pipeline
pipeline = Pipeline()
pipeline.model_export(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
--torch-jit ${TORCH_JIT} \
\
--config /workspace/gpunet/configs/batch1/GV100/0.85ms.json \
--checkpoint ${CHECKPOINT_DIR}/0.85ms.pth.tar \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--val-path ${DATASETS_DIR}/ \
--is-prunet False \
--batch-size 1
""",
)
)
pipeline.model_conversion(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size ${MAX_BATCH_SIZE} \
--container-version 21.12 \
--max-workspace-size 10000000000 \
--atol OUTPUT__0=100 \
--rtol OUTPUT__0=100
""",
)
)
pipeline.model_deploy(
commands=(
r"""
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${FORMAT} \
--model-control-mode explicit \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--tensorrt-max-workspace-size 10000000000 \
--max-batch-size ${MAX_BATCH_SIZE} \
--batching ${MODEL_BATCHING} \
--preferred-batch-sizes ${MAX_BATCH_SIZE} \
--engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES}
""",
)
)
pipeline.triton_performance_offline_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 2 4 8 16 32 64 \
--concurrency 1 \
--evaluation-mode offline \
--measurement-request-count 10 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_offline.csv",
)
pipeline.triton_performance_online_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 \
--concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 192 200 208 216 224 232 240 248 256 \
--evaluation-mode online \
--measurement-request-count 500 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_online.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_online.csv",
)

# Source: DeepLearningExamples-master / PyTorch/Classification/GPUNet/triton/085ms/runner/pipeline_impl.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pathlib
from typing import List
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name
from ...runner.config import Config
from ...runner.executor import Executor
from ...runner.finalizer import ExperimentFinalizer
from ...runner.maintainer import DockerMaintainer
from ...runner.preparer import ExperimentPreparer
from ...runner.runner_proxy import RunnerProxy
from .pipeline_impl import pipeline
class ExperimentRunner(RunnerProxy):
    """
    Experiment Runner proxy for runner wrapper
    """

    maintainer_cls = DockerMaintainer
    executor_cls = Executor
    preparer_cls = ExperimentPreparer
    finalizer_cls = ExperimentFinalizer


def execute(config_path: str, devices: List[str]):
    if len(devices) == 0:
        devices = ["0"]

    config = Config.from_file(config_path)
    runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices)
    runner.start()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.")
    parser.add_argument(
        "--devices", type=str, nargs="*", required=False, help="List of GPU device ids on which the experiment is executed."
    )
    args = parser.parse_args()

    config_path = args.config_path
    devices = args.devices or []  # guard against None when --devices is omitted

    execute(config_path, devices)

# Source: DeepLearningExamples-master / PyTorch/Classification/GPUNet/triton/085ms/runner/__main__.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from collections import OrderedDict
import torch
import torch.nn as nn
from timm.data import create_dataset, create_loader
from timm.models.helpers import load_checkpoint
from timm.utils import AverageMeter, accuracy
from .gpunet_modules import (
ConvBnAct,
EdgeResidual,
Epilogue,
EpilogueD,
Fused_IRB,
Inverted_Residual_Block,
InvertedResidual,
Prologue,
PrologueD,
PrologueLargeD,
)
class GPUNet(nn.Module):
    def __init__(self, imgRes):
        super(GPUNet, self).__init__()
        self.imgRes = imgRes
        self.network = nn.Sequential()

    def add_layer(self, name, layer):
        self.network.add_module(name, layer)

    def forward(self, x):
        return self.network(x)
class GPUNet_Builder:
def config_checker(self, layerConfig):
assert "layer_type" in layerConfig.keys()
layerType = layerConfig["layer_type"]
if layerType == "head":
assert "num_in_channels" in layerConfig.keys()
assert "num_out_channels" in layerConfig.keys()
elif layerType == "tail":
assert "num_in_channels" in layerConfig.keys()
assert "num_out_channels" in layerConfig.keys()
assert "num_classes" in layerConfig.keys()
elif layerType == "irb":
assert "num_in_channels" in layerConfig.keys()
assert "num_out_channels" in layerConfig.keys()
assert "kernel_size" in layerConfig.keys()
assert "expansion" in layerConfig.keys()
assert "stride" in layerConfig.keys()
def test_model(
self,
model: nn.Module = None,
testBatch: int = 10,
checkpoint: str = "./pth",
imgRes: tuple = (3, 224, 224),
dtype: str = "fp16",
val_path: str = "/mnt/dldata/",
crop_pct: float = 0.942,
is_prunet: bool = False,
):
assert model is not None
if dtype == "fp16":
dtype = torch.float16
elif dtype == "fp32":
dtype = torch.float32
else:
raise NotImplementedError
errMsg = "checkpoint not found at {}, ".format(checkpoint)
errMsg += "retrieve with get_config_and_checkpoint_files "
assert os.path.isfile(checkpoint) is True, errMsg
if is_prunet:
model.load_state_dict(torch.load(checkpoint))
else:
load_checkpoint(model, checkpoint, use_ema=True)
model = model.to("cuda", dtype)
imagenet_val_path = val_path
dataset = create_dataset(
root=imagenet_val_path,
name="",
split="validation",
load_bytes=False,
class_map="",
)
criterion = nn.CrossEntropyLoss().cuda()
data_config = {
"input_size": (3, imgRes[1], imgRes[2]),
"interpolation": "bicubic",
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
"crop_pct": crop_pct,
}
print("data_config:", data_config)
batch_size = testBatch
loader = create_loader(
dataset,
input_size=data_config["input_size"],
batch_size=batch_size,
use_prefetcher=True,
interpolation=data_config["interpolation"],
mean=data_config["mean"],
std=data_config["std"],
num_workers=1,
crop_pct=data_config["crop_pct"],
pin_memory=False,
tf_preprocessing=False,
)
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
input = torch.randn((batch_size,) + tuple(data_config["input_size"])).to(
"cuda", dtype
)
with torch.no_grad():
# warmup, reduce variability of first batch time
# especially for comparing torchscript
model(input)
end = time.time()
for batch_idx, (input, target) in enumerate(loader):
target = target.to("cuda")
input = input.to("cuda", dtype)
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1.item(), input.size(0))
top5.update(acc5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % 10 == 0:
print(
"Test: [{0:>4d}/{1}] "
"Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) "
"Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) "
"Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) "
"Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})".format(
batch_idx,
len(loader),
batch_time=batch_time,
rate_avg=input.size(0) / batch_time.avg,
loss=losses,
top1=top1,
top5=top5,
)
)
top1a, top5a = top1.avg, top5.avg
results = OrderedDict(
top1=round(top1a, 4),
top1_err=round(100 - top1a, 4),
top5=round(top5a, 4),
top5_err=round(100 - top5a, 4),
img_size=data_config["input_size"][-1],
interpolation=data_config["interpolation"],
)
print(
" * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})".format(
results["top1"],
results["top1_err"],
results["top5"],
results["top5_err"],
)
)
return results
def export_onnx(self, model: GPUNet = None, name: str = "gpunet.onnx"):
assert model is not None, "please input the model"
x = torch.rand((1, 3, model.imgRes, model.imgRes))
torch.onnx.export(model, x, name, export_params=True, opset_version=10)
def get_model(self, config: list = None):
msg = "the model json needs specify whether a distilled model or not."
assert "distill" in config[0].keys(), msg
if config[0]["distill"]:
return self._get_distill_model(config)
else:
return self._get_model(config)
def _get_model(self, config: list = None):
assert len(config) > 0
dataLayer = config[0]
assert dataLayer["layer_type"] == "data"
assert dataLayer["img_resolution"] > 0
imgRes = dataLayer["img_resolution"]
net = GPUNet(imgRes)
dropPathRateBase = 0.2
layerCount = len(config) - 1
layerCounter = 0
for layerConfig in config:
dropPathRate = dropPathRateBase * layerCounter / layerCount
layerCounter = layerCounter + 1
assert "layer_type" in layerConfig.keys()
self.config_checker(layerConfig)
layerType = layerConfig["layer_type"]
if layerType == "head":
name = "head: " + str(layerCounter)
layer = Prologue(
num_in_channels=layerConfig["num_in_channels"],
num_out_channels=layerConfig["num_out_channels"],
act_layer=layerConfig.get("act", "swish"),
)
net.add_layer(name, layer)
elif layerType == "tail":
name = " layer" + str(layerCounter)
layer = Epilogue(
num_in_channels=layerConfig["num_in_channels"],
num_out_channels=layerConfig["num_out_channels"],
num_classes=layerConfig["num_classes"],
)
net.add_layer(name, layer)
elif layerType == "conv":
name = "stage: " + str(layerConfig["stage"]) + " layer"
name += str(layerCounter)
layer = ConvBnAct(
in_chs=layerConfig["num_in_channels"],
out_chs=layerConfig["num_out_channels"],
kernel_size=layerConfig["kernel_size"],
stride=layerConfig["stride"],
act_layer=layerConfig["act"],
drop_path_rate=dropPathRate,
)
net.add_layer(name, layer)
elif layerType == "irb":
name = "stage: " + str(layerConfig["stage"]) + " layer"
name += str(layerCounter)
layer = InvertedResidual(
in_chs=layerConfig["num_in_channels"],
out_chs=layerConfig["num_out_channels"],
dw_kernel_size=layerConfig["kernel_size"],
stride=layerConfig["stride"],
exp_ratio=layerConfig["expansion"],
use_se=layerConfig["use_se"],
act_layer=layerConfig["act"],
drop_path_rate=dropPathRate,
)
net.add_layer(name, layer)
elif layerType == "fused_irb":
name = "stage: " + str(layerConfig["stage"]) + " layer"
name += str(layerCounter)
layer = EdgeResidual(
in_chs=layerConfig["num_in_channels"],
out_chs=layerConfig["num_out_channels"],
exp_kernel_size=layerConfig["kernel_size"],
stride=layerConfig["stride"],
dilation=1,
pad_type="same",
exp_ratio=layerConfig["expansion"],
use_se=layerConfig["use_se"],
act_layer=layerConfig["act"],
drop_path_rate=dropPathRate,
)
net.add_layer(name, layer)
elif layerType == "data":
net.imgRes = layerConfig["img_resolution"]
else:
raise NotImplementedError
net.eval()
return net
def _get_distill_model(self, config: list = None):
assert config is not None
# json -> model
dataLayer = config[0]
assert dataLayer["layer_type"] == "data"
assert dataLayer["img_resolution"] > 0
imgRes = dataLayer["img_resolution"]
net = GPUNet(imgRes)
irbCounter = 0
for layerConfig in config:
irbCounter = irbCounter + 1
assert "layer_type" in layerConfig.keys()
self.config_checker(layerConfig)
layerType = layerConfig["layer_type"]
if layerType == "head":
name = "head:"
layer = PrologueD(
num_in_channels=layerConfig["num_in_channels"],
num_out_channels=layerConfig["num_out_channels"],
)
net.add_layer(name, layer)
elif layerType == "head_large":
name = "head:"
layer = PrologueLargeD(
num_in_channels=layerConfig["num_in_channels"],
num_out_channels=layerConfig["num_out_channels"],
)
net.add_layer(name, layer)
elif layerType == "tail":
name = "tail:"
layer = EpilogueD(
num_in_channels=layerConfig["num_in_channels"],
num_out_channels=layerConfig["num_out_channels"],
num_classes=layerConfig["num_classes"],
)
net.add_layer(name, layer)
elif layerType == "irb":
name = "stage: " + str(layerConfig["stage"]) + " irb"
name += str(irbCounter)
layer = Inverted_Residual_Block(
num_in_channels=layerConfig["num_in_channels"],
num_out_channels=layerConfig["num_out_channels"],
kernel_size=layerConfig["kernel_size"],
stride=layerConfig["stride"],
expansion=layerConfig["expansion"],
groups=layerConfig["groups"],
)
net.add_layer(name, layer)
elif layerType == "fused_irb":
name = "stage: " + str(layerConfig["stage"]) + " fused_irb"
name += str(irbCounter)
layer = Fused_IRB(
num_in_channels=layerConfig["num_in_channels"],
num_out_channels=layerConfig["num_out_channels"],
kernel_size=layerConfig["kernel_size"],
stride=layerConfig["stride"],
expansion=layerConfig["expansion"],
groups=layerConfig["groups"],
)
net.add_layer(name, layer)
elif layerType == "data":
net.imgRes = layerConfig["img_resolution"]
else:
raise NotImplementedError
return net
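# Illustrative sketch (not part of the upstream file): how the builder is typically driven.
# The JSON path below is a hypothetical relative path used only for illustration; the repo
# ships such files under configs/batch1/GV100/, and config[0] is the "data" layer carrying
# the "distill" flag that get_model() checks.
#
#     import json
#     with open("configs/batch1/GV100/0.65ms.json") as fp:   # hypothetical path
#         config = json.load(fp)
#     builder = GPUNet_Builder()
#     model = builder.get_model(config)        # dispatches to _get_model / _get_distill_model
#     builder.export_onnx(model, "gpunet.onnx")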
# Source: DeepLearningExamples-master / PyTorch/Classification/GPUNet/models/gpunet_builder.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import List, Tuple
import torch
import torch.nn as nn
from timm.models.layers import create_act_layer
from torch.nn import functional as F
# Calculate symmetric padding for a convolution
def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int:
    padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
    return padding
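# Illustrative sketch (not part of the upstream file): worked values of the symmetric
# padding formula above, which keeps the spatial size unchanged for stride-1 convs:
#   get_padding(3)                       -> ((1-1) + 1*(3-1)) // 2 = 1
#   get_padding(5)                       -> 2
#   get_padding(3, stride=1, dilation=2) -> ((1-1) + 2*(3-1)) // 2 = 2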
# Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution
def get_same_padding(x: int, k: int, s: int, d: int):
    return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
# Can SAME padding for given args be done statically?
def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_):
    return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0
# Dynamically pad input x with 'SAME' padding for conv with specified args
def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0):
    ih, iw = x.size()[-2:]
    pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(
        iw, k[1], s[1], d[1]
    )
    if pad_h > 0 or pad_w > 0:
        x = F.pad(
            x,
            [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2],
            value=value,
        )
    return x
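# Illustrative sketch (not part of the upstream file): dynamic 'SAME' padding for a
# stride-2, kernel-3 conv on a 7x7 input. From get_same_padding:
#   get_same_padding(7, k=3, s=2, d=1) = max((ceil(7/2)-1)*2 + (3-1) + 1 - 7, 0) = 2
# so pad_same() adds one pixel per side and a k=3, s=2 conv would then produce 4x4.
def _pad_same_example():
    x = torch.randn(1, 3, 7, 7)
    x = pad_same(x, k=[3, 3], s=[2, 2])
    print(x.shape)  # torch.Size([1, 3, 9, 9])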
def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]:
    dynamic = False
    if isinstance(padding, str):
        # for any string padding, the padding will be calculated for you, one of three ways
        padding = padding.lower()
        if padding == "same":
            # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
            if is_static_pad(kernel_size, **kwargs):
                # static case, no extra overhead
                padding = get_padding(kernel_size, **kwargs)
            else:
                # dynamic 'SAME' padding, has runtime/GPU memory overhead
                padding = 0
                dynamic = True
        elif padding == "valid":
            # 'VALID' padding, same as padding=0
            padding = 0
        else:
            # Default to PyTorch style 'same'-ish symmetric padding
            padding = get_padding(kernel_size, **kwargs)
    return padding, dynamic
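# Illustrative sketch (not part of the upstream file): how the three string cases above
# resolve for a 3x3 kernel.
def _padding_value_example():
    print(get_padding_value("same", 3, stride=1))   # (1, False)  static SAME -> plain symmetric padding
    print(get_padding_value("same", 3, stride=2))   # (0, True)   would need runtime pad_same()
    print(get_padding_value("valid", 3))            # (0, False)
    print(get_padding_value("", 3))                 # (1, False)  default PyTorch-style symmetric padding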
def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs):
    padding = kwargs.pop("padding", "")
    kwargs.setdefault("bias", False)
    padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs)
    return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
def create_conv2d(in_channels, out_channels, kernel_size, **kwargs):
    """Select a 2d convolution implementation based on arguments
    Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.
    Used extensively by EfficientNet, MobileNetv3 and related networks.
    """
    if isinstance(kernel_size, list):
        raise NotImplementedError
    else:
        depthwise = kwargs.pop("depthwise", False)
        # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0
        groups = in_channels if depthwise else kwargs.pop("groups", 1)
        if "num_experts" in kwargs and kwargs["num_experts"] > 0:
            raise NotImplementedError
        else:
            m = create_conv2d_pad(
                in_channels, out_channels, kernel_size, groups=groups, **kwargs
            )
    return m
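# Illustrative sketch (not part of the upstream file): the two paths through create_conv2d
# that the GPUNet blocks below rely on - a plain pointwise conv and a depthwise conv.
def _create_conv2d_example():
    pw = create_conv2d(32, 64, kernel_size=1)                   # 1x1 pointwise conv, bias disabled by default
    dw = create_conv2d(64, 64, kernel_size=3, depthwise=True)   # depthwise: groups == in_channels == 64
    x = torch.randn(2, 32, 14, 14)
    y = dw(pw(x))
    print(y.shape, pw.bias is None, dw.groups)  # torch.Size([2, 64, 14, 14]) True 64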
def get_act(actType: str = ""):
    if actType == "swish":
        return nn.SiLU
    elif actType == "relu":
        return nn.ReLU
    else:
        raise NotImplementedError
def make_divisible(v, divisor=8, min_value=None, round_limit=0.9):
    min_value = min_value or divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than 10%.
    if new_v < round_limit * v:
        new_v += divisor
    return new_v
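# Illustrative sketch (not part of the upstream file): make_divisible rounds hidden widths
# to a multiple of the divisor without shrinking them by more than ~10%:
#   make_divisible(30)              -> 32   (nearest multiple of 8)
#   make_divisible(100)             -> 104
#   make_divisible(37, divisor=32)  -> 64   (32 would shrink it by >10%, so bump up one step)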
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
    'survival rate' as the argument.
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (
        x.ndim - 1
    )  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize
    output = x.div(keep_prob) * random_tensor
    return output
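# Illustrative sketch (not part of the upstream file): per-sample stochastic depth.
# With drop_prob=0.25 each sample in the batch is zeroed with probability 0.25 and the
# survivors are rescaled by 1/0.75 so the expected value is unchanged.
def _drop_path_example():
    x = torch.ones(8, 16, 4, 4)
    print(torch.equal(drop_path(x, 0.25, training=False), x))  # True - identity outside training
    y = drop_path(x, 0.25, training=True)
    # every sample is either all-zero or uniformly scaled by 1/keep_prob (~1.333)
    print(y.flatten(1).max(dim=1).values.unique().tolist())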
class SqueezeExcite(nn.Module):
"""Squeeze-and-Excitation w/ specific features for EfficientNet/MobileNet family
Args:
in_chs (int): input channels to layer
rd_ratio (float): ratio of squeeze reduction
act_layer (nn.Module): activation layer of containing block
gate_layer (Callable): attention gate function
force_act_layer (nn.Module): override block's activation fn if this is set/bound
rd_round_fn (Callable): specify a fn to calculate rounding of reduced chs
"""
def __init__(
self,
in_chs,
rd_ratio=0.25,
rd_channels=None,
act_layer=nn.ReLU,
gate_layer=nn.Sigmoid,
force_act_layer=None,
rd_round_fn=None,
):
super(SqueezeExcite, self).__init__()
if rd_channels is None:
rd_round_fn = rd_round_fn or round
rd_channels = rd_round_fn(in_chs * rd_ratio)
act_layer = force_act_layer or act_layer
self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True)
self.act1 = create_act_layer(act_layer, inplace=True)
self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True)
self.gate = create_act_layer(gate_layer)
def forward(self, x):
x_se = x.mean((2, 3), keepdim=True)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
return x * self.gate(x_se)
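# Illustrative sketch (not part of the upstream file): the SE block is shape-preserving;
# it pools to a per-channel descriptor, passes it through a 1x1-conv bottleneck
# (64 -> 16 -> 64 channels here), and rescales the input with a sigmoid gate.
def _squeeze_excite_example():
    se = SqueezeExcite(in_chs=64, rd_ratio=0.25)
    x = torch.randn(2, 64, 28, 28)
    print(se(x).shape)  # torch.Size([2, 64, 28, 28])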
class ConvBnAct(nn.Module):
"""Conv + Norm Layer + Activation w/ optional skip connection"""
def __init__(
self,
in_chs,
out_chs,
kernel_size,
stride=1,
dilation=1,
pad_type="",
skip=False,
act_layer="relu",
norm_layer=nn.BatchNorm2d,
drop_path_rate=0.0,
):
super(ConvBnAct, self).__init__()
self.has_residual = skip and stride == 1 and in_chs == out_chs
self.drop_path_rate = drop_path_rate
self.conv = create_conv2d(
in_chs,
out_chs,
kernel_size,
stride=stride,
dilation=dilation,
padding=pad_type,
)
self.bn1 = norm_layer(out_chs, eps=0.001)
self.act1 = get_act(act_layer)(inplace=True)
# for representation.
self.in_channels = in_chs
self.out_channels = out_chs
self.kernel_size = kernel_size
self.stride = stride
self.act_layer = act_layer
def feature_info(self, location):
if location == "expansion":
# output of conv after act, same as block coutput
info = dict(
module="act1", hook_type="forward", num_chs=self.conv.out_channels
)
else:
info = dict(module="", hook_type="", num_chs=self.conv.out_channels)
return info
def __repr__(self):
name = "conv_k{}_i{}_o{}_s{}_{}".format(
self.kernel_size,
self.in_channels,
self.out_channels,
self.stride,
self.act_layer,
)
return name
def forward(self, x):
shortcut = x
x = self.conv(x)
x = self.bn1(x)
x = self.act1(x)
if self.has_residual:
if self.drop_path_rate > 0.0:
x = drop_path(x, self.drop_path_rate, self.training)
x += shortcut
return x
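# Illustrative sketch (not part of the upstream file): ConvBnAct as used for the GPUNet
# "conv" layer type - a strided 3x3 conv + BN + activation; no residual here since stride != 1.
def _conv_bn_act_example():
    block = ConvBnAct(in_chs=32, out_chs=64, kernel_size=3, stride=2, act_layer="swish")
    x = torch.randn(2, 32, 56, 56)
    print(block(x).shape)  # torch.Size([2, 64, 28, 28])
    print(block)           # conv_k3_i32_o64_s2_swish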
class DepthwiseSeparableConv(nn.Module):
"""DepthwiseSeparable block
Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion
(factor of 1.0). This is an alternative to having a IR with an optional first pw conv.
"""
def __init__(
self,
in_chs,
out_chs,
dw_kernel_size=3,
stride=1,
dilation=1,
pad_type="",
noskip=False,
pw_kernel_size=1,
pw_act=False,
act_layer="relu",
norm_layer=nn.BatchNorm2d,
se_layer=None,
drop_path_rate=0.0,
):
super(DepthwiseSeparableConv, self).__init__()
self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
self.has_pw_act = pw_act # activation after point-wise conv
self.drop_path_rate = drop_path_rate
self.conv_dw = create_conv2d(
in_chs,
in_chs,
dw_kernel_size,
stride=stride,
dilation=dilation,
padding=pad_type,
depthwise=True,
)
self.bn1 = norm_layer(in_chs, eps=0.001)
self.act1 = get_act(act_layer)(inplace=True)
# Squeeze-and-excitation
self.se = (
se_layer(in_chs, act_layer=get_act(act_layer))
if se_layer
else nn.Identity()
)
self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
self.bn2 = norm_layer(out_chs, eps=0.001)
self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity()
def feature_info(self, location):
if location == "expansion": # after SE, input to PW
info = dict(
module="conv_pw",
hook_type="forward_pre",
num_chs=self.conv_pw.in_channels,
)
else: # location == 'bottleneck', block output
info = dict(module="", hook_type="", num_chs=self.conv_pw.out_channels)
return info
def forward(self, x):
shortcut = x
x = self.conv_dw(x)
x = self.bn1(x)
x = self.act1(x)
x = self.se(x)
x = self.conv_pw(x)
x = self.bn2(x)
x = self.act2(x)
if self.has_residual:
if self.drop_path_rate > 0.0:
x = drop_path(x, self.drop_path_rate, self.training)
x += shortcut
return x
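# Illustrative sketch (not part of the upstream file): a depthwise 3x3 conv followed by a
# pointwise 1x1 projection; with stride=1 and in_chs == out_chs the skip connection is active.
def _depthwise_separable_example():
    block = DepthwiseSeparableConv(in_chs=32, out_chs=32, dw_kernel_size=3, act_layer="relu")
    x = torch.randn(2, 32, 28, 28)
    print(block(x).shape, block.has_residual)  # torch.Size([2, 32, 28, 28]) True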
class InvertedResidual(nn.Module):
"""Inverted residual block w/ optional SE
Originally used in MobileNet-V2 - https://arxiv.org/abs/1801.04381v4, this layer is often
referred to as 'MBConv' for (Mobile inverted bottleneck conv) and is also used in
* MNasNet - https://arxiv.org/abs/1807.11626
* EfficientNet - https://arxiv.org/abs/1905.11946
* MobileNet-V3 - https://arxiv.org/abs/1905.02244
"""
def __init__(
self,
in_chs,
out_chs,
dw_kernel_size=3,
stride=1,
dilation=1,
pad_type="",
noskip=False,
exp_ratio=1.0,
exp_kernel_size=1,
pw_kernel_size=1,
act_layer="relu",
norm_layer=nn.BatchNorm2d,
use_se=None,
se_ratio=0.25,
conv_kwargs=None,
drop_path_rate=0.0,
):
super(InvertedResidual, self).__init__()
conv_kwargs = conv_kwargs or {}
mid_chs = make_divisible(in_chs * exp_ratio)
self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
self.drop_path_rate = drop_path_rate
# Point-wise expansion
self.conv_pw = create_conv2d(
in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs
)
self.bn1 = norm_layer(mid_chs, eps=0.001)
self.act1 = get_act(act_layer)(inplace=True)
# Depth-wise convolution
self.conv_dw = create_conv2d(
mid_chs,
mid_chs,
dw_kernel_size,
stride=stride,
dilation=dilation,
padding=pad_type,
depthwise=True,
**conv_kwargs
)
self.bn2 = norm_layer(mid_chs, eps=0.001)
self.act2 = get_act(act_layer)(inplace=True)
# Squeeze-and-excitation
self.use_se = use_se
if use_se:
rd_ratio = se_ratio / exp_ratio
self.se = SqueezeExcite(
mid_chs, act_layer=get_act(act_layer), rd_ratio=rd_ratio
)
else:
self.se = nn.Identity()
# Point-wise linear projection
self.conv_pwl = create_conv2d(
mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs
)
self.bn3 = norm_layer(out_chs, eps=0.001)
# For representation
self.in_channels = in_chs
self.out_channels = out_chs
self.kernel_size = dw_kernel_size
self.expansion = exp_ratio
self.stride = stride
self.act_layer = act_layer
def feature_info(self, location):
if location == "expansion": # after SE, input to PWL
info = dict(
module="conv_pwl",
hook_type="forward_pre",
num_chs=self.conv_pwl.in_channels,
)
else: # location == 'bottleneck', block output
info = dict(module="", hook_type="", num_chs=self.conv_pwl.out_channels)
return info
def __repr__(self):
name = "irb_k{}_e{}_i{}_o{}_s{}_{}_se_{}".format(
self.kernel_size,
self.expansion,
self.in_channels,
self.out_channels,
self.stride,
self.act_layer,
self.use_se,
)
return name
def forward(self, x):
shortcut = x
# Point-wise expansion
x = self.conv_pw(x)
x = self.bn1(x)
x = self.act1(x)
# Depth-wise convolution
x = self.conv_dw(x)
x = self.bn2(x)
x = self.act2(x)
# Squeeze-and-excitation
x = self.se(x)
# Point-wise linear projection
x = self.conv_pwl(x)
x = self.bn3(x)
if self.has_residual:
if self.drop_path_rate > 0.0:
x = drop_path(x, self.drop_path_rate, self.training)
x += shortcut
return x
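# Illustrative usage sketch for InvertedResidual (MBConv). The hidden width is
# mid_chs = make_divisible(in_chs * exp_ratio), e.g. 32 * 6.0 -> 192 below:
#
#     import torch
#     block = InvertedResidual(in_chs=32, out_chs=32, dw_kernel_size=3, stride=1,
#                              exp_ratio=6.0, act_layer="relu", use_se=True)
#     y = block(torch.randn(1, 32, 56, 56))
#     assert y.shape == (1, 32, 56, 56)  # residual path is active for stride 1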
class EdgeResidual(nn.Module):
"""Residual block with expansion convolution followed by pointwise-linear w/ stride
Originally introduced in `EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML`
- https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html
This layer is also called FusedMBConv in the MobileDet, EfficientNet-X, and EfficientNet-V2 papers
* MobileDet - https://arxiv.org/abs/2004.14525
* EfficientNet-X - https://arxiv.org/abs/2102.05610
* EfficientNet-V2 - https://arxiv.org/abs/2104.00298
"""
def __init__(
self,
in_chs,
out_chs,
exp_kernel_size=3,
stride=1,
dilation=1,
pad_type="",
force_in_chs=0,
noskip=False,
exp_ratio=1.0,
pw_kernel_size=1,
act_layer="relu",
norm_layer=nn.BatchNorm2d,
use_se=False,
se_ratio=0.25,
drop_path_rate=0.0,
):
super(EdgeResidual, self).__init__()
if force_in_chs > 0:
mid_chs = make_divisible(force_in_chs * exp_ratio)
else:
mid_chs = make_divisible(in_chs * exp_ratio)
self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
self.drop_path_rate = drop_path_rate
# Expansion convolution
self.conv_exp = create_conv2d(
in_chs,
mid_chs,
exp_kernel_size,
stride=stride,
dilation=dilation,
padding=pad_type,
)
self.bn1 = norm_layer(mid_chs, eps=0.001)
self.act1 = get_act(act_layer)(inplace=True)
# Squeeze-and-excitation
self.use_se = use_se
if use_se:
rd_ratio = se_ratio / exp_ratio
self.se = SqueezeExcite(
mid_chs, act_layer=get_act(act_layer), rd_ratio=rd_ratio
)
else:
self.se = nn.Identity()
# Point-wise linear projection
self.conv_pwl = create_conv2d(
mid_chs, out_chs, pw_kernel_size, padding=pad_type
)
self.bn2 = norm_layer(out_chs, eps=0.001)
self.kernel_size = exp_kernel_size
self.expansion = exp_ratio
self.in_channels = in_chs
self.out_channels = out_chs
self.stride = stride
self.act_layer = act_layer
def feature_info(self, location):
if location == "expansion": # after SE, before PWL
info = dict(
module="conv_pwl",
hook_type="forward_pre",
num_chs=self.conv_pwl.in_channels,
)
else: # location == 'bottleneck', block output
info = dict(module="", hook_type="", num_chs=self.conv_pwl.out_channels)
return info
def __repr__(self):
name = "er_k{}_e{}_i{}_o{}_s{}_{}_se_{}".format(
self.kernel_size,
self.expansion,
self.in_channels,
self.out_channels,
self.stride,
self.act_layer,
self.use_se,
)
return name
def forward(self, x):
shortcut = x
# Expansion convolution
x = self.conv_exp(x)
x = self.bn1(x)
x = self.act1(x)
# Squeeze-and-excitation
x = self.se(x)
# Point-wise linear projection
x = self.conv_pwl(x)
x = self.bn2(x)
if self.has_residual:
if self.drop_path_rate > 0.0:
x = drop_path(x, self.drop_path_rate, self.training)
x += shortcut
return x
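# Illustrative usage sketch for EdgeResidual (FusedMBConv): a single k x k expansion
# conv replaces the separate point-wise expansion + depth-wise conv of MBConv:
#
#     import torch
#     block = EdgeResidual(in_chs=24, out_chs=24, exp_kernel_size=3, stride=1,
#                          exp_ratio=4.0, act_layer="relu")
#     y = block(torch.randn(1, 24, 112, 112))
#     assert y.shape == (1, 24, 112, 112)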
class ProloguePool(nn.Module):
def __init__(self, num_in_channels, num_out_channels, act_layer="relu"):
super().__init__()
self.num_in_channels = num_in_channels
self.num_out_channels = num_out_channels
self.net = nn.Sequential(
nn.Conv2d(
self.num_in_channels,
self.num_out_channels,
3,
padding=1,
stride=2,
bias=False,
),
nn.BatchNorm2d(self.num_out_channels, eps=1e-03),
get_act(act_layer)(inplace=True),
nn.MaxPool2d(
kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False
),
)
# for representation
self.num_in_channels = num_in_channels
self.num_out_channels = num_out_channels
self.act_layer = act_layer
def __repr__(self):
name = "prologue_i{}_o{}_s{}_{}".format(
self.num_in_channels, self.num_out_channels, 2, self.act_layer
)
return name
def forward(self, x):
return self.net(x)
class Prologue(nn.Module):
def __init__(self, num_in_channels, num_out_channels, act_layer="relu"):
super().__init__()
self.num_in_channels = num_in_channels
self.num_out_channels = num_out_channels
self.net = nn.Sequential(
nn.Conv2d(
self.num_in_channels,
self.num_out_channels,
3,
padding=1,
stride=2,
bias=False,
),
nn.BatchNorm2d(self.num_out_channels, eps=1e-03),
get_act(act_layer)(inplace=True),
)
# for representation
self.num_in_channels = num_in_channels
self.num_out_channels = num_out_channels
self.act_layer = act_layer
def __repr__(self):
name = "prologue_i{}_o{}_s{}_{}".format(
self.num_in_channels, self.num_out_channels, 2, self.act_layer
)
return name
def forward(self, x):
return self.net(x)
class Epilogue(nn.Module):
def __init__(
self, num_in_channels, num_out_channels, num_classes, act_layer="relu"
):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(num_in_channels, num_out_channels, 1, bias=False),
nn.BatchNorm2d(num_out_channels, eps=1e-03),
get_act(act_layer)(inplace=True),
nn.AdaptiveAvgPool2d(1),
nn.Flatten(),
nn.Dropout(p=0.2),
nn.Linear(num_out_channels, num_classes),
)
# for representation
self.num_in_channels = num_in_channels
self.num_out_channels = num_out_channels
self.act_layer = act_layer
def __repr__(self):
name = "epilogue_i{}_o{}_s{}_{}".format(
self.num_in_channels, self.num_out_channels, 1, self.act_layer
)
return name
def forward(self, x):
x = self.net(x)
return x
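# Illustrative composition sketch: Prologue (stem) and Epilogue (classifier head)
# sit before and after the block sequence; expected shapes for a 224x224 input:
#
#     import torch
#     stem = Prologue(num_in_channels=3, num_out_channels=32, act_layer="relu")
#     head = Epilogue(num_in_channels=32, num_out_channels=1280, num_classes=1000)
#     feats = stem(torch.randn(1, 3, 224, 224))  # (1, 32, 112, 112): 3x3 conv, stride 2
#     logits = head(feats)                       # (1, 1000): 1x1 conv -> pool -> dropout -> linear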
# modules for distilled GPUNet
class PrologueD(nn.Module):
def __init__(self, num_in_channels, num_out_channels):
super().__init__()
self.num_in_channels = num_in_channels
self.num_out_channels = num_out_channels
self.net = nn.Sequential(
nn.Conv2d(
self.num_in_channels,
self.num_out_channels,
3,
padding=1,
stride=2,
bias=False,
),
nn.BatchNorm2d(self.num_out_channels),
nn.ReLU(),
)
def __repr__(self):
return "Prologue"
def forward(self, x):
return self.net(x)
class PrologueLargeD(nn.Module):
def __init__(self, num_in_channels, num_out_channels):
super().__init__()
self.num_in_channels = num_in_channels
self.num_out_channels = num_out_channels
self.net = nn.Sequential(
nn.Conv2d(
self.num_in_channels,
self.num_out_channels,
3,
padding=1,
stride=2,
bias=False,
),
nn.BatchNorm2d(self.num_out_channels),
nn.ReLU(),
nn.Conv2d(
self.num_out_channels,
self.num_out_channels,
3,
padding=1,
stride=1,
bias=False,
),
nn.BatchNorm2d(self.num_out_channels),
nn.ReLU(),
nn.Conv2d(
self.num_out_channels,
self.num_out_channels,
3,
padding=1,
stride=1,
bias=False,
),
nn.BatchNorm2d(self.num_out_channels),
nn.ReLU(),
)
def __repr__(self):
return "PrologueLarge"
def forward(self, x):
return self.net(x)
class Fused_IRB(nn.Module):
def __init__(
self,
num_in_channels: int = 1,
num_out_channels: int = 1,
kernel_size: int = 3,
stride: int = 1,
expansion: int = 1,
groups: int = 1,
):
super().__init__()
self.drop_connect_rate = 0.0
self.in_channels = num_in_channels
self.out_channels = num_out_channels
self.kernel_size = kernel_size
self.stride = stride
self.expansion = expansion
self.groups = groups
self.body = nn.Sequential(
# merge pw and dw
nn.Conv2d(
in_channels=self.in_channels,
out_channels=self.in_channels * self.expansion,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
groups=1,
bias=False,
),
nn.BatchNorm2d(self.in_channels * self.expansion, eps=0.001),
nn.ReLU(),
# pw
nn.Conv2d(
in_channels=self.in_channels * self.expansion,
out_channels=self.out_channels,
kernel_size=1,
stride=1,
groups=1,
bias=False,
),
nn.BatchNorm2d(self.out_channels, eps=0.001),
)
if self.stride == 1 and self.in_channels == self.out_channels:
self.shortcut = nn.Identity()
else:
self.shortcut = None
def drop_connect(self, inputs, training=False, drop_connect_rate=0.0):
"""Apply drop connect."""
if not training:
return inputs
keep_prob = 1 - drop_connect_rate
random_tensor = keep_prob + torch.rand(
(inputs.size()[0], 1, 1, 1), dtype=inputs.dtype, device=inputs.device
)
random_tensor.floor_() # binarize
output = inputs.div(keep_prob) * random_tensor
return output
def forward(self, x):
res = self.body(x)
if self.shortcut is not None:
if self.drop_connect_rate > 0 and self.training:
res = self.drop_connect(res, self.training, self.drop_connect_rate)
res = res + self.shortcut(x)
return res
else:
return res
def __repr__(self):
name = "k{}_e{}_g{}_i{}_o{}_s{}".format(
self.kernel_size,
self.expansion,
self.groups,
self.in_channels,
self.out_channels,
self.stride,
)
return name
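# Illustrative usage sketch for the distilled-GPUNet Fused_IRB: its first conv fuses
# the point-wise expansion and the depth-wise conv into one k x k convolution:
#
#     import torch
#     block = Fused_IRB(num_in_channels=32, num_out_channels=32, kernel_size=3,
#                       stride=1, expansion=4, groups=1)
#     y = block(torch.randn(1, 32, 56, 56))  # identity shortcut is added here
#     assert y.shape == (1, 32, 56, 56)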
class Inverted_Residual_Block(nn.Module):
def __init__(
self, num_in_channels, num_out_channels, kernel_size, stride, expansion, groups
):
super().__init__()
self.drop_connect_rate = 0.0
self.in_channels = num_in_channels
self.out_channels = num_out_channels
self.kernel_size = kernel_size
self.stride = stride
self.expansion = expansion
self.groups = groups
self.body = nn.Sequential(
nn.Conv2d(
self.in_channels,
self.in_channels * self.expansion,
1,
groups=groups,
bias=False,
),
nn.BatchNorm2d(self.in_channels * self.expansion),
nn.ReLU(),
nn.Conv2d(
self.in_channels * self.expansion,
self.in_channels * self.expansion,
kernel_size,
padding=kernel_size // 2,
stride=stride,
groups=self.in_channels * self.expansion,
bias=False,
),
nn.BatchNorm2d(self.in_channels * self.expansion),
nn.ReLU(),
nn.Conv2d(
self.in_channels * self.expansion,
self.out_channels,
1,
groups=groups,
bias=False,
),
nn.BatchNorm2d(self.out_channels),
)
if self.stride == 1 and self.in_channels == self.out_channels:
self.shortcut = nn.Identity()
else:
self.shortcut = None
def drop_connect(self, inputs, training=False, drop_connect_rate=0.0):
"""Apply drop connect."""
if not training:
return inputs
keep_prob = 1 - drop_connect_rate
random_tensor = keep_prob + torch.rand(
(inputs.size()[0], 1, 1, 1), dtype=inputs.dtype, device=inputs.device
)
random_tensor.floor_() # binarize
output = inputs.div(keep_prob) * random_tensor
return output
def forward(self, x):
res = self.body(x)
if self.shortcut is not None:
if self.drop_connect_rate > 0 and self.training:
res = self.drop_connect(res, self.training, self.drop_connect_rate)
res = res + self.shortcut(x)
return res
else:
return res
def __repr__(self):
name = "k{}_e{}_g{}_i{}_o{}_s{}".format(
self.kernel_size,
self.expansion,
self.groups,
self.in_channels,
self.out_channels,
self.stride,
)
return name
class EpilogueD(nn.Module):
def __init__(self, num_in_channels, num_out_channels, num_classes):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(num_in_channels, 1152, 1, bias=False),
nn.BatchNorm2d(1152),
nn.ReLU(),
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(1152, num_out_channels, 1, bias=False),
nn.ReLU(),
nn.Flatten(),
nn.Dropout(p=0.2),
nn.Linear(num_out_channels, num_classes),
)
def __repr__(self):
return "Epilogue"
def forward(self, x):
x = self.net(x)
return x
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/models/gpunet_modules.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pathlib
from pathlib import Path
import shutil
import urllib.request
from typing import Any, Callable
from zipfile import ZipFile
from tqdm.auto import tqdm
# Predefined NGC checkpoint download URLs (batch size 1); matching JSON configs live under ./configs
MODEL_ZOO_KEYS_B1_NGC = {}
MODEL_ZOO_KEYS_B1_NGC["GV100"] = {}
# GPUNet-0: 0.62ms on GV100
MODEL_ZOO_KEYS_B1_NGC["GV100"]["0.65ms"] = "https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_0_pyt_ckpt/versions/21.12.0_amp/zip"
# GPUNet-1: 0.85ms on GV100
MODEL_ZOO_KEYS_B1_NGC["GV100"]["0.85ms"] = "https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_1_pyt_ckpt/versions/21.12.0_amp/zip"
# GPUNet-2: 1.76ms on GV100
MODEL_ZOO_KEYS_B1_NGC["GV100"]["1.75ms"] = "https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_2_pyt_ckpt/versions/21.12.0_amp/zip"
# GPUNet-D1: 1.25ms on GV100
MODEL_ZOO_KEYS_B1_NGC["GV100"]["1.25ms-D"] = "https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_d1_pyt_ckpt/versions/21.12.0_amp/zip"
# GPUNet-D2: 2.25ms on GV100
MODEL_ZOO_KEYS_B1_NGC["GV100"]["2.25ms-D"] = "https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_d2_pyt_ckpt/versions/21.12.0_amp/zip"
# GPUNet-P0: 0.5ms on GV100
MODEL_ZOO_KEYS_B1_NGC["GV100"]["0.5ms-D"] = "https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_p0_pyt_ckpt/versions/21.12.0_amp/zip"
# GPUNet-P1: 0.8ms on GV100
MODEL_ZOO_KEYS_B1_NGC["GV100"]["0.8ms-D"] = "https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_p1_pyt_ckpt/versions/21.12.0_amp/zip"
MODEL_ZOO_BATCH_NGC = {
"1": MODEL_ZOO_KEYS_B1_NGC,
}
MODEL_ZOO_NAME2TYPE_B1 = {}
MODEL_ZOO_NAME2TYPE_B1["GPUNet-0"] = "0.65ms"
MODEL_ZOO_NAME2TYPE_B1["GPUNet-1"] = "0.85ms"
MODEL_ZOO_NAME2TYPE_B1["GPUNet-2"] = "1.75ms"
MODEL_ZOO_NAME2TYPE_B1["GPUNet-P0"] = "0.5ms-D"
MODEL_ZOO_NAME2TYPE_B1["GPUNet-P1"] = "0.8ms-D"
MODEL_ZOO_NAME2TYPE_B1["GPUNet-D1"] = "1.25ms-D"
MODEL_ZOO_NAME2TYPE_B1["GPUNet-D2"] = "2.25ms-D"
def get_model_list(batch: int = 1):
"""Get a list of models in model zoo."""
batch = str(batch)
err_msg = "Batch {} is not yet optimized.".format(batch)
assert batch in MODEL_ZOO_BATCH_NGC.keys(), err_msg
return list(MODEL_ZOO_BATCH_NGC[batch].keys())
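# Illustrative call: with the zoo defined above, get_model_list(1) returns the
# latency keys optimized for batch size 1, i.e.
# ["0.65ms", "0.85ms", "1.75ms", "1.25ms-D", "2.25ms-D", "0.5ms-D", "0.8ms-D"].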
def get_configs(
batch: int = 1,
latency: str = "GPUNet_1ms",
gpuType: str = "GV100",
config_root_dir: str = "./configs",
download: bool = True
):
"""Get file with model config (downloads if necessary)."""
batch = str(batch)
errMsg0 = "Batch {} not found, available batches are {}".format(
batch, list(MODEL_ZOO_BATCH_NGC.keys())
)
assert batch in MODEL_ZOO_BATCH_NGC.keys(), errMsg0
availGPUs = list(MODEL_ZOO_BATCH_NGC[batch].keys())
errMsg1 = "GPU {} not found, available GPUs are {}".format(gpuType, availGPUs)
assert gpuType in availGPUs, errMsg1
errMsg2 = "Latency {} not found, available Latencies are {}".format(
latency, list(MODEL_ZOO_BATCH_NGC[batch][gpuType])
)
assert latency in MODEL_ZOO_BATCH_NGC[batch][gpuType].keys(), errMsg2
print("testing:", " batch=", batch, " latency=", latency, " gpu=", gpuType)
configPath = config_root_dir + "/batch" + str(batch)
configPath += "/" + gpuType + "/" + latency + ".json"
checkpointPath = config_root_dir + "/batch" + str(batch) + "/"
checkpointPath += gpuType + "/"
ngcCheckpointPath = Path(checkpointPath)
checkpointPath += latency + ".pth.tar"
ngcUrl = MODEL_ZOO_BATCH_NGC[batch][gpuType][latency]
if download:
download_checkpoint_ngc(ngcUrl, ngcCheckpointPath)
with open(configPath) as configFile:
modelJSON = json.load(configFile)
return modelJSON, checkpointPath
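# Illustrative call: fetch the JSON config and (optionally) download the matching
# NGC checkpoint for the 0.65ms GPUNet measured on GV100 at batch size 1:
#
#     model_json, ckpt_path = get_configs(batch=1, latency="0.65ms", gpuType="GV100",
#                                         config_root_dir="./configs", download=True)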
def unzip(checkpoint_path: pathlib.Path, archive_path: pathlib.Path) -> None:
"""
Unzip archive to the provided path
Args:
checkpoint_path: Path where the archive has to be unpacked
archive_path: Path to the archive file
Returns:
None
"""
checkpoint_path.mkdir(parents=True, exist_ok=True)
with ZipFile(archive_path, "r") as zf:
zf.extractall(path=checkpoint_path)
archive_path.unlink()
def download_progress(t: Any) -> Callable:
"""
Build a reporthook callback for urllib.request.urlretrieve that updates a tqdm progress bar
Args:
t: tqdm progress bar instance
Returns:
Callable suitable for the `reporthook` argument of urlretrieve
"""
last_b = [0]
def update_to(b: int = 1, bsize: int = 1, tsize: int = None):
if tsize not in (None, -1):
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return update_to
def download_checkpoint_ngc(checkpoint_url: str, checkpoint_path: pathlib.Path) -> None:
"""
Download checkpoint from given url to provided path
Args:
checkpoint_url: Url from which checkpoint has to be downloaded
checkpoint_path: Path where checkpoint has to be stored
Returns:
None
"""
with tqdm(unit="B") as t:
reporthook = download_progress(t)
result = urllib.request.urlretrieve(checkpoint_url, reporthook=reporthook)
filename = result[0]
file_path = pathlib.Path(filename)
assert file_path.is_file() or file_path.is_dir(), "Checkpoint was not downloaded"
shutil.move(file_path, checkpoint_path.parent / file_path.name)
archive_path = checkpoint_path.parent / file_path.name
unzip(checkpoint_path, archive_path)
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/configs/model_hub.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
def nvidia_gpunet(pretrained=True, **kwargs):
"""Constructs a gpunet model (nn.module with additional infer(input) method).
For detailed information on model input and output, training recipes, inference and performance
visit: github.com/NVIDIA/DeepLearningExamples and/or ngc.nvidia.com
Args (type[, default value]):
pretrained (bool, True): If True, returns a pretrained model; pretrained checkpoints are available for all GPUNet variants listed in MODEL_ZOO_NAME2TYPE_B1.
model_math (str, 'fp32'): returns the model in the given precision ('fp32' or 'fp16'); defaults to fp32.
model_type (str, 'GPUNet-0'): selects the model variant: GPUNet-0/1/2, GPUNet-P0/P1 or GPUNet-D1/D2. Defaults to GPUNet-0.
"""
from ..models.gpunet_builder import GPUNet_Builder
from .model_hub import get_configs, MODEL_ZOO_NAME2TYPE_B1
from timm.models.helpers import load_checkpoint
modelType = kwargs.get('model_type', 'GPUNet-0')
print("model_type=", modelType)
errMsg = "model_type {} not found, available models are {}".format(
modelType, list(MODEL_ZOO_NAME2TYPE_B1.keys())
)
assert modelType in MODEL_ZOO_NAME2TYPE_B1.keys(), errMsg
is_prunet = False
if "GPUNet-P0" in modelType or "GPUNet-P1" in modelType:
is_prunet = True
modelLatency = MODEL_ZOO_NAME2TYPE_B1[modelType]
print("mapped model latency=", modelLatency)
modelJSON, cpkPath = get_configs(batch=1, latency=modelLatency, gpuType="GV100", download=pretrained, config_root_dir=os.path.dirname(__file__))
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
if pretrained:
errMsg = "checkpoint not found at {}, ".format(cpkPath)
errMsg += "retrieve with get_config_and_checkpoint_files "
assert os.path.isfile(cpkPath) is True, errMsg
if is_prunet:
model.load_state_dict(torch.load(cpkPath))
else:
load_checkpoint(model, cpkPath, use_ema=True)
modelMath = kwargs.get('model_math', 'fp32')
if modelMath == "fp16":
model.half()
return model
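# Illustrative usage sketch: this entry point is typically called through torch.hub
# (the exact hub tag below is an assumption; adjust it to the published hubconf):
#
#     import torch
#     model = torch.hub.load("NVIDIA/DeepLearningExamples:torchhub", "nvidia_gpunet",
#                            pretrained=True, model_type="GPUNet-0", model_math="fp32")
#     model.eval()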
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/configs/gpunet_torchhub.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PIL import Image
import argparse
import numpy as np
import json
import torch
from torch.cuda.amp import autocast
import torch.backends.cudnn as cudnn
from image_classification import models
import torchvision.transforms as transforms
from image_classification.models import (
resnet50,
resnext101_32x4d,
se_resnext101_32x4d,
efficientnet_b0,
efficientnet_b4,
efficientnet_widese_b0,
efficientnet_widese_b4,
efficientnet_quant_b0,
efficientnet_quant_b4,
)
def available_models():
models = {
m.name: m
for m in [
resnet50,
resnext101_32x4d,
se_resnext101_32x4d,
efficientnet_b0,
efficientnet_b4,
efficientnet_widese_b0,
efficientnet_widese_b4,
efficientnet_quant_b0,
efficientnet_quant_b4,
]
}
return models
def add_parser_arguments(parser):
model_names = available_models().keys()
parser.add_argument("--image-size", default="224", type=int)
parser.add_argument(
"--arch",
"-a",
metavar="ARCH",
default="resnet50",
choices=model_names,
help="model architecture: " + " | ".join(model_names) + " (default: resnet50)",
)
parser.add_argument(
"--precision", metavar="PREC", default="AMP", choices=["AMP", "FP32"]
)
parser.add_argument("--cpu", action="store_true", help="perform inference on CPU")
parser.add_argument("--image", metavar="<path>", help="path to classified image")
def load_jpeg_from_file(path, image_size, cuda=True):
img_transforms = transforms.Compose(
[
transforms.Resize(image_size + 32),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
]
)
img = img_transforms(Image.open(path))
with torch.no_grad():
# mean and std are not multiplied by 255 as they are in training script
# torch dataloader reads data into bytes whereas loading directly
# through PIL creates a tensor with floats in [0,1] range
mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
if cuda:
mean = mean.cuda()
std = std.cuda()
img = img.cuda()
img = img.float()
input = img.unsqueeze(0).sub_(mean).div_(std)
return input
def check_quant_weight_correctness(checkpoint_path, model):
state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
state_dict = {
k[len("module.") :] if k.startswith("module.") else k: v
for k, v in state_dict.items()
}
quantizers_sd_keys = {
f"{n[0]}._amax" for n in model.named_modules() if "quantizer" in n[0]
}
sd_all_keys = quantizers_sd_keys | set(model.state_dict().keys())
assert set(state_dict.keys()) == sd_all_keys, (
f"Passed quantized architecture, but following keys are missing in "
f"checkpoint: {list(sd_all_keys - set(state_dict.keys()))}"
)
def main(args, model_args):
imgnet_classes = np.array(json.load(open("./LOC_synset_mapping.json", "r")))
try:
model = available_models()[args.arch](**model_args.__dict__)
except RuntimeError as e:
print_in_box(
"Error when creating model, did you forget to run checkpoint2model script?"
)
raise e
if args.arch in ["efficientnet-quant-b0", "efficientnet-quant-b4"]:
check_quant_weight_correctness(model_args.pretrained_from_file, model)
if not args.cpu:
model = model.cuda()
model.eval()
input = load_jpeg_from_file(args.image, args.image_size, cuda=not args.cpu)
with torch.no_grad(), autocast(enabled=args.precision == "AMP"):
output = torch.nn.functional.softmax(model(input), dim=1)
output = output.float().cpu().view(-1).numpy()
top5 = np.argsort(output)[-5:][::-1]
print(args.image)
for c, v in zip(imgnet_classes[top5], output[top5]):
print(f"{c}: {100*v:.1f}%")
def print_in_box(msg):
print("#" * (len(msg) + 10))
print(f"#### {msg} ####")
print("#" * (len(msg) + 10))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="PyTorch ImageNet Classification")
add_parser_arguments(parser)
args, rest = parser.parse_known_args()
model_args, rest = available_models()[args.arch].parser().parse_known_args(rest)
assert len(rest) == 0, f"Unknown args passed: {rest}"
cudnn.benchmark = True
main(args, model_args)
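# Illustrative invocation (weight-loading flags such as --pretrained-from-file come
# from the per-architecture parser and are assumptions here):
#
#     python classify.py --arch resnet50 --precision AMP \
#         --pretrained-from-file ./resnet50_weights.pth --image ./dog.jpg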
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/classify.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
def add_parser_arguments(parser):
parser.add_argument(
"--checkpoint-path", metavar="<path>", help="checkpoint filename"
)
parser.add_argument(
"--weight-path", metavar="<path>", help="name of file in which to store weights"
)
parser.add_argument("--ema", action="store_true", default=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
add_parser_arguments(parser)
args = parser.parse_args()
checkpoint = torch.load(args.checkpoint_path, map_location=torch.device("cpu"))
# training checkpoints store EMA weights under "state_dict_ema" (see main.py's resume path);
# select them when --ema is passed, and strip any DistributedDataParallel "module." prefix
key = "state_dict" if not args.ema else "state_dict_ema"
model_state_dict = {
k[len("module.") :] if k.startswith("module.") else k: v
for k, v in checkpoint[key].items()
}
print(f"Loaded model, acc : {checkpoint['best_prec1']}")
torch.save(model_state_dict, args.weight_path)
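# Illustrative invocation: extract plain weights (or EMA weights with --ema) from a
# training checkpoint produced by main.py:
#
#     python checkpoint2model.py --checkpoint-path ./checkpoint.pth.tar \
#         --weight-path ./resnet50_weights.pth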
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/checkpoint2model.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION
# Copyright (c) 2017- Facebook, Inc
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import random
from copy import deepcopy
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from image_classification.training import *
from image_classification.utils import *
from image_classification.quantization import *
from image_classification.models import efficientnet_quant_b0, efficientnet_quant_b4
from main import prepare_for_training, add_parser_arguments as parse_training
import dllogger
def available_models():
models = {
m.name: m
for m in [
efficientnet_quant_b0,
efficientnet_quant_b4,
]
}
return models
def parse_quantization(parser):
model_names = available_models().keys()
parser.add_argument(
"--arch",
"-a",
metavar="ARCH",
default="efficientnet-quant-b0",
choices=model_names,
help="model architecture: "
+ " | ".join(model_names)
+ " (default: efficientnet-quant-b0)",
)
parser.add_argument(
"--skip-calibration",
action="store_true",
help="skip calibration before training, (default: false)",
)
def parse_training_args(parser):
from main import add_parser_arguments
return add_parser_arguments(parser)
def main(args, model_args, model_arch):
exp_start_time = time.time()
global best_prec1
best_prec1 = 0
skip_calibration = args.skip_calibration or args.evaluate or args.resume is not None
select_default_calib_method()
# prepare_for_training (imported from main.py) also returns the checkpoint's best_prec1
(
trainer,
lr_policy,
train_loader,
train_loader_len,
val_loader,
logger,
start_epoch,
best_prec1,
) = prepare_for_training(args, model_args, model_arch)
print(f"RUNNING QUANTIZATION")
if not skip_calibration:
calibrate(trainer.model_and_loss.model, train_loader, logger, calib_iter=10)
train_loop(
trainer,
lr_policy,
train_loader,
train_loader_len,
val_loader,
logger,
should_backup_checkpoint(args),
start_epoch=start_epoch,
end_epoch=min((start_epoch + args.run_epochs), args.epochs)
if args.run_epochs != -1
else args.epochs,
best_prec1=best_prec1,
prof=args.prof,
skip_training=args.evaluate,
skip_validation=args.training_only,
save_checkpoints=args.save_checkpoints,
checkpoint_dir=args.workspace,
checkpoint_filename="quantized_" + args.checkpoint_filename,
)
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
logger.end()
print("Experiment ended")
if __name__ == "__main__":
epilog = [
"Based on the architecture picked by --arch flag, you may use the following options:\n"
]
for model, ep in available_models().items():
model_help = "\n".join(ep.parser().format_help().split("\n")[2:])
epilog.append(model_help)
parser = argparse.ArgumentParser(
description="PyTorch ImageNet Training",
epilog="\n".join(epilog),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parse_quantization(parser)
parse_training(parser, skip_arch=True)
args, rest = parser.parse_known_args()
model_arch = available_models()[args.arch]
model_args, rest = model_arch.parser().parse_known_args(rest)
print(model_args)
assert len(rest) == 0, f"Unknown args passed: {rest}"
cudnn.benchmark = True
main(args, model_args, model_arch)
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/quant_main.py |
import os
from pathlib import Path
from dataclasses import dataclass
from typing import Dict, Any
import yaml
from main import main, add_parser_arguments, available_models
import torch.backends.cudnn as cudnn
import argparse
def get_config_path():
return Path(os.path.dirname(os.path.abspath(__file__))) / "configs.yml"
if __name__ == "__main__":
yaml_cfg_parser = argparse.ArgumentParser(add_help=False)
yaml_cfg_parser.add_argument(
"--cfg_file",
default=get_config_path(),
type=str,
help="path to yaml config file",
)
yaml_cfg_parser.add_argument("--model", default=None, type=str, required=True)
yaml_cfg_parser.add_argument("--mode", default=None, type=str, required=True)
yaml_cfg_parser.add_argument("--precision", default=None, type=str, required=True)
yaml_cfg_parser.add_argument("--platform", default=None, type=str, required=True)
yaml_args, rest = yaml_cfg_parser.parse_known_args()
with open(yaml_args.cfg_file, "r") as cfg_file:
config = yaml.load(cfg_file, Loader=yaml.FullLoader)
cfg = {
**config["precision"][yaml_args.precision],
**config["platform"][yaml_args.platform],
**config["models"][yaml_args.model][yaml_args.platform][yaml_args.precision],
**config["mode"][yaml_args.mode],
}
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
add_parser_arguments(parser)
parser.set_defaults(**cfg)
args, rest = parser.parse_known_args(rest)
model_arch = available_models()[args.arch]
model_args, rest = model_arch.parser().parse_known_args(rest)
assert len(rest) == 0, f"Unknown args passed: {rest}"
cudnn.benchmark = True
main(args, model_args, model_arch)
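# Illustrative invocation (the valid --model/--precision/--platform/--mode values are
# whatever configs.yml defines; the names below are assumptions):
#
#     python launch.py --model resnet50 --precision AMP --platform DGX1V \
#         --mode convergence /path/to/imagenet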
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/launch.py |
import argparse
import torch
import pytorch_quantization
from image_classification.models import (
resnet50,
resnext101_32x4d,
se_resnext101_32x4d,
efficientnet_b0,
efficientnet_b4,
efficientnet_widese_b0,
efficientnet_widese_b4,
efficientnet_quant_b0,
efficientnet_quant_b4,
)
def available_models():
models = {
m.name: m
for m in [
resnet50,
resnext101_32x4d,
se_resnext101_32x4d,
efficientnet_b0,
efficientnet_b4,
efficientnet_widese_b0,
efficientnet_widese_b4,
efficientnet_quant_b0,
efficientnet_quant_b4,
]
}
return models
def parse_args(parser):
"""
Parse commandline arguments.
"""
model_names = available_models().keys()
parser.add_argument("--arch", "-a", metavar="ARCH", default="resnet50", choices=model_names,
help="model architecture: " + " | ".join(model_names) + " (default: resnet50)")
parser.add_argument("--device", metavar="DEVICE", default="cuda", choices=['cpu', 'cuda'],
help="device on which model is settled: cpu, cuda (default: cuda)")
parser.add_argument("--image-size", default=None, type=int, help="resolution of image")
parser.add_argument('--output', type=str, help='Path to converted model')
parser.add_argument("-b", "--batch-size", default=256, type=int, metavar="N",
help="mini-batch size (default: 256) per gpu")
return parser
def final_name(base_name):
splitted = base_name.split('.')
if 'pt' in splitted:
fin_name = base_name.replace('pt', 'onnx')
elif 'pth' in splitted:
fin_name = base_name.replace('pth', 'onnx')
elif len(splitted) > 1:
fin_name = '.'.join(splitted[:-1] + ['onnx'])
else:
fin_name = base_name + '.onnx'
return fin_name
def get_dataloader(image_size, bs, num_classes):
"""return dataloader for inference"""
from image_classification.dataloaders import get_synthetic_loader
def data_loader():
loader, _ = get_synthetic_loader(None, image_size, bs, num_classes, False)
for inp, _ in loader:
yield inp
break
return data_loader()
def prepare_inputs(dataloader, device):
"""load sample inputs to device"""
inputs = []
for batch in dataloader:
if type(batch) is torch.Tensor:
batch_d = batch.to(device)
batch_d = (batch_d, )
inputs.append(batch_d)
else:
batch_d = []
for x in batch:
assert type(x) is torch.Tensor, "input is not a tensor"
batch_d.append(x.to(device))
batch_d = tuple(batch_d)
inputs.append(batch_d)
return inputs
def check_quant_weight_correctness(checkpoint_path, model):
state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
state_dict = {k[len("module."):] if k.startswith("module.") else k: v for k, v in state_dict.items()}
quantizers_sd_keys = {f'{n[0]}._amax' for n in model.named_modules() if 'quantizer' in n[0]}
sd_all_keys = quantizers_sd_keys | set(model.state_dict().keys())
assert set(state_dict.keys()) == sd_all_keys, (f'Passed quantized architecture, but following keys are missing in '
f'checkpoint: {list(sd_all_keys - set(state_dict.keys()))}')
def main(args, model_args, model_arch):
quant_arch = args.arch in ['efficientnet-quant-b0', 'efficientnet-quant-b4']
if quant_arch:
pytorch_quantization.nn.modules.tensor_quantizer.TensorQuantizer.use_fb_fake_quant = True
model = model_arch(**model_args.__dict__)
if quant_arch and model_args.pretrained_from_file is not None:
check_quant_weight_correctness(model_args.pretrained_from_file, model)
image_size = args.image_size if args.image_size is not None else model.arch.default_image_size
train_loader = get_dataloader(image_size, args.batch_size, model_args.num_classes)
inputs = prepare_inputs(train_loader, args.device)
final_model_path = args.output if args.output is not None else final_name(model_args.pretrained_from_file)
model.to(args.device)
model.eval()
with torch.no_grad():
torch.onnx.export(model,
inputs[0],
final_model_path,
verbose=True,
opset_version=13,
enable_onnx_checker=True,
do_constant_folding=True)
if __name__ == '__main__':
epilog = [
"Based on the architecture picked by --arch flag, you may use the following options:\n"
]
for model, ep in available_models().items():
model_help = "\n".join(ep.parser().format_help().split("\n")[2:])
epilog.append(model_help)
parser = argparse.ArgumentParser(
description="PyTorch ImageNet Training",
epilog="\n".join(epilog),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser = parse_args(parser)
args, rest = parser.parse_known_args()
model_arch = available_models()[args.arch]
model_args, rest = model_arch.parser().parse_known_args(rest)
assert len(rest) == 0, f"Unknown args passed: {rest}"
main(args, model_args, model_arch)
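# Illustrative invocation (--pretrained-from-file is provided by the per-architecture
# parser and is an assumption here):
#
#     python model2onnx.py --arch efficientnet-quant-b0 --batch-size 1 \
#         --pretrained-from-file ./quant_weights.pth --output ./model.onnx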
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/model2onnx.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION
# Copyright (c) 2017- Facebook, Inc
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
os.environ[
"KMP_AFFINITY"
] = "disabled" # We need to do this before importing anything else as a workaround for this bug: https://github.com/pytorch/pytorch/issues/28389
import argparse
import random
from copy import deepcopy
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import image_classification.logger as log
from image_classification.smoothing import LabelSmoothing
from image_classification.mixup import NLLMultiLabelSmooth, MixUpWrapper
from image_classification.dataloaders import *
from image_classification.training import *
from image_classification.utils import *
from image_classification.models import (
resnet50,
resnext101_32x4d,
se_resnext101_32x4d,
efficientnet_b0,
efficientnet_b4,
efficientnet_widese_b0,
efficientnet_widese_b4,
)
from image_classification.optimizers import (
get_optimizer,
lr_cosine_policy,
lr_linear_policy,
lr_step_policy,
)
from image_classification.gpu_affinity import set_affinity, AffinityMode
import dllogger
def available_models():
models = {
m.name: m
for m in [
resnet50,
resnext101_32x4d,
se_resnext101_32x4d,
efficientnet_b0,
efficientnet_b4,
efficientnet_widese_b0,
efficientnet_widese_b4,
]
}
return models
def add_parser_arguments(parser, skip_arch=False):
parser.add_argument("data", metavar="DIR", help="path to dataset")
parser.add_argument(
"--data-backend",
metavar="BACKEND",
default="dali-cpu",
choices=DATA_BACKEND_CHOICES,
help="data backend: "
+ " | ".join(DATA_BACKEND_CHOICES)
+ " (default: dali-cpu)",
)
parser.add_argument(
"--interpolation",
metavar="INTERPOLATION",
default="bilinear",
help="interpolation type for resizing images: bilinear, bicubic or triangular (DALI only)",
)
if not skip_arch:
model_names = available_models().keys()
parser.add_argument(
"--arch",
"-a",
metavar="ARCH",
default="resnet50",
choices=model_names,
help="model architecture: "
+ " | ".join(model_names)
+ " (default: resnet50)",
)
parser.add_argument(
"-j",
"--workers",
default=5,
type=int,
metavar="N",
help="number of data loading workers (default: 5)",
)
parser.add_argument(
"--prefetch",
default=2,
type=int,
metavar="N",
help="number of samples prefetched by each loader",
)
parser.add_argument(
"--epochs",
default=90,
type=int,
metavar="N",
help="number of total epochs to run",
)
parser.add_argument(
"--run-epochs",
default=-1,
type=int,
metavar="N",
help="run only N epochs, used for checkpointing runs",
)
parser.add_argument(
"--early-stopping-patience",
default=-1,
type=int,
metavar="N",
help="early stopping after N epochs without validation accuracy improving",
)
parser.add_argument(
"--image-size", default=None, type=int, help="resolution of image"
)
parser.add_argument(
"-b",
"--batch-size",
default=256,
type=int,
metavar="N",
help="mini-batch size (default: 256) per gpu",
)
parser.add_argument(
"--optimizer-batch-size",
default=-1,
type=int,
metavar="N",
help="total effective batch size; used to simulate bigger batches with gradient accumulation",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.1,
type=float,
metavar="LR",
help="initial learning rate",
)
parser.add_argument(
"--lr-schedule",
default="step",
type=str,
metavar="SCHEDULE",
choices=["step", "linear", "cosine"],
help="Type of LR schedule: {}, {}, {}".format("step", "linear", "cosine"),
)
parser.add_argument("--end-lr", default=0, type=float)
parser.add_argument(
"--warmup", default=0, type=int, metavar="E", help="number of warmup epochs"
)
parser.add_argument(
"--label-smoothing",
default=0.0,
type=float,
metavar="S",
help="label smoothing",
)
parser.add_argument(
"--mixup", default=0.0, type=float, metavar="ALPHA", help="mixup alpha"
)
parser.add_argument(
"--optimizer", default="sgd", type=str, choices=("sgd", "rmsprop")
)
parser.add_argument(
"--momentum", default=0.9, type=float, metavar="M", help="momentum"
)
parser.add_argument(
"--weight-decay",
"--wd",
default=1e-4,
type=float,
metavar="W",
help="weight decay (default: 1e-4)",
)
parser.add_argument(
"--bn-weight-decay",
action="store_true",
help="use weight_decay on batch normalization learnable parameters, (default: false)",
)
parser.add_argument(
"--rmsprop-alpha",
default=0.9,
type=float,
help="value of alpha parameter in rmsprop optimizer (default: 0.9)",
)
parser.add_argument(
"--rmsprop-eps",
default=1e-3,
type=float,
help="value of eps parameter in rmsprop optimizer (default: 1e-3)",
)
parser.add_argument(
"--nesterov",
action="store_true",
help="use nesterov momentum, (default: false)",
)
parser.add_argument(
"--print-freq",
"-p",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--resume",
default=None,
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"--static-loss-scale",
type=float,
default=1,
help="Static loss scale; positive power-of-2 values can improve AMP convergence.",
)
parser.add_argument(
"--prof", type=int, default=-1, metavar="N", help="Run only N iterations"
)
parser.add_argument(
"--amp",
action="store_true",
help="Run model AMP (automatic mixed precision) mode.",
)
parser.add_argument(
"--seed", default=None, type=int, help="random seed used for numpy and pytorch"
)
parser.add_argument(
"--gather-checkpoints",
default="0",
type=int,
help=(
"Gather N last checkpoints throughout the training,"
" without this flag only best and last checkpoints will be stored. "
"Use -1 for all checkpoints"
),
)
parser.add_argument(
"--raport-file",
default="experiment_raport.json",
type=str,
help="file in which to store the JSON experiment report",
)
parser.add_argument(
"--evaluate", action="store_true", help="evaluate checkpoint/model"
)
parser.add_argument("--training-only", action="store_true", help="do not evaluate")
parser.add_argument(
"--no-checkpoints",
action="store_false",
dest="save_checkpoints",
help="do not store any checkpoints, useful for benchmarking",
)
parser.add_argument(
"--jit",
type=str,
default="no",
choices=["no", "script"],
help="no -> do not use torch.jit; script -> use torch.jit.script",
)
parser.add_argument("--checkpoint-filename", default="checkpoint.pth.tar", type=str)
parser.add_argument(
"--workspace",
type=str,
default="./",
metavar="DIR",
help="path to directory where checkpoints will be stored",
)
parser.add_argument(
"--memory-format",
type=str,
default="nchw",
choices=["nchw", "nhwc"],
help="memory layout, nchw or nhwc",
)
parser.add_argument("--use-ema", default=None, type=float, help="use EMA")
parser.add_argument(
"--augmentation",
type=str,
default=None,
choices=[None, "autoaugment"],
help="augmentation method",
)
parser.add_argument(
"--gpu-affinity",
type=str,
default="none",
required=False,
choices=[am.name for am in AffinityMode],
)
parser.add_argument(
"--topk",
type=int,
default=5,
required=False,
)
def prepare_for_training(args, model_args, model_arch):
args.distributed = False
if "WORLD_SIZE" in os.environ:
args.distributed = int(os.environ["WORLD_SIZE"]) > 1
args.local_rank = int(os.environ["LOCAL_RANK"])
else:
args.local_rank = 0
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank % torch.cuda.device_count()
torch.cuda.set_device(args.gpu)
dist.init_process_group(backend="nccl", init_method="env://")
args.world_size = torch.distributed.get_world_size()
affinity = set_affinity(args.gpu, mode=args.gpu_affinity)
print(f"Training process {args.local_rank} affinity: {affinity}")
if args.seed is not None:
print("Using seed = {}".format(args.seed))
torch.manual_seed(args.seed + args.local_rank)
torch.cuda.manual_seed(args.seed + args.local_rank)
np.random.seed(seed=args.seed + args.local_rank)
random.seed(args.seed + args.local_rank)
def _worker_init_fn(id):
# Worker process should inherit its affinity from parent
affinity = os.sched_getaffinity(0)
print(f"Process {args.local_rank} Worker {id} set affinity to: {affinity}")
np.random.seed(seed=args.seed + args.local_rank + id)
random.seed(args.seed + args.local_rank + id)
else:
def _worker_init_fn(id):
# Worker process should inherit its affinity from parent
affinity = os.sched_getaffinity(0)
print(f"Process {args.local_rank} Worker {id} set affinity to: {affinity}")
if args.static_loss_scale != 1.0:
if not args.amp:
print("Warning: if --amp is not used, static_loss_scale will be ignored.")
if args.optimizer_batch_size < 0:
batch_size_multiplier = 1
else:
tbs = args.world_size * args.batch_size
if args.optimizer_batch_size % tbs != 0:
print(
"Warning: simulated batch size {} is not divisible by actual batch size {}".format(
args.optimizer_batch_size, tbs
)
)
batch_size_multiplier = int(args.optimizer_batch_size / tbs)
print("BSM: {}".format(batch_size_multiplier))
start_epoch = 0
best_prec1 = 0
# optionally resume from a checkpoint
if args.resume is not None:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(
args.resume, map_location=lambda storage, loc: storage.cuda(args.gpu)
)
start_epoch = checkpoint["epoch"]
best_prec1 = checkpoint["best_prec1"]
model_state = checkpoint["state_dict"]
optimizer_state = checkpoint["optimizer"]
if "state_dict_ema" in checkpoint:
model_state_ema = checkpoint["state_dict_ema"]
print(
"=> loaded checkpoint '{}' (epoch {})".format(
args.resume, checkpoint["epoch"]
)
)
if start_epoch >= args.epochs:
print(
f"Launched training for {args.epochs}, checkpoint already run {start_epoch}"
)
exit(1)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
model_state = None
model_state_ema = None
optimizer_state = None
else:
model_state = None
model_state_ema = None
optimizer_state = None
loss = nn.CrossEntropyLoss
if args.mixup > 0.0:
loss = lambda: NLLMultiLabelSmooth(args.label_smoothing)
elif args.label_smoothing > 0.0:
loss = lambda: LabelSmoothing(args.label_smoothing)
memory_format = (
torch.channels_last if args.memory_format == "nhwc" else torch.contiguous_format
)
model = model_arch(
**{
k: v
if k != "pretrained"
else v and (not args.distributed or dist.get_rank() == 0)
for k, v in model_args.__dict__.items()
}
)
image_size = (
args.image_size
if args.image_size is not None
else model.arch.default_image_size
)
scaler = torch.cuda.amp.GradScaler(
init_scale=args.static_loss_scale,
growth_factor=2,
backoff_factor=0.5,
growth_interval=100,
enabled=args.amp,
)
executor = Executor(
model,
loss(),
cuda=True,
memory_format=memory_format,
amp=args.amp,
scaler=scaler,
divide_loss=batch_size_multiplier,
ts_script=args.jit == "script",
)
# Create data loaders and optimizers as needed
if args.data_backend == "pytorch":
get_train_loader = get_pytorch_train_loader
get_val_loader = get_pytorch_val_loader
elif args.data_backend == "dali-gpu":
get_train_loader = get_dali_train_loader(dali_cpu=False)
get_val_loader = get_dali_val_loader()
elif args.data_backend == "dali-cpu":
get_train_loader = get_dali_train_loader(dali_cpu=True)
get_val_loader = get_dali_val_loader()
elif args.data_backend == "synthetic":
get_val_loader = get_synthetic_loader
get_train_loader = get_synthetic_loader
else:
print("Bad databackend picked")
exit(1)
train_loader, train_loader_len = get_train_loader(
args.data,
image_size,
args.batch_size,
model_args.num_classes,
args.mixup > 0.0,
interpolation=args.interpolation,
augmentation=args.augmentation,
start_epoch=start_epoch,
workers=args.workers,
_worker_init_fn=_worker_init_fn,
memory_format=memory_format,
prefetch_factor=args.prefetch,
)
if args.mixup != 0.0:
train_loader = MixUpWrapper(args.mixup, train_loader)
val_loader, val_loader_len = get_val_loader(
args.data,
image_size,
args.batch_size,
model_args.num_classes,
False,
interpolation=args.interpolation,
workers=args.workers,
_worker_init_fn=_worker_init_fn,
memory_format=memory_format,
prefetch_factor=args.prefetch,
)
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
logger = log.Logger(
args.print_freq,
[
dllogger.StdOutBackend(
dllogger.Verbosity.DEFAULT, step_format=log.format_step
),
dllogger.JSONStreamBackend(
dllogger.Verbosity.VERBOSE,
os.path.join(args.workspace, args.raport_file),
),
],
start_epoch=start_epoch - 1,
)
else:
logger = log.Logger(args.print_freq, [], start_epoch=start_epoch - 1)
logger.log_parameter(args.__dict__, verbosity=dllogger.Verbosity.DEFAULT)
logger.log_parameter(
{f"model.{k}": v for k, v in model_args.__dict__.items()},
verbosity=dllogger.Verbosity.DEFAULT,
)
optimizer = get_optimizer(
list(executor.model.named_parameters()),
args.lr,
args=args,
state=optimizer_state,
)
if args.lr_schedule == "step":
lr_policy = lr_step_policy(args.lr, [30, 60, 80], 0.1, args.warmup)
elif args.lr_schedule == "cosine":
lr_policy = lr_cosine_policy(
args.lr, args.warmup, args.epochs, end_lr=args.end_lr
)
elif args.lr_schedule == "linear":
lr_policy = lr_linear_policy(args.lr, args.warmup, args.epochs)
if args.distributed:
executor.distributed(args.gpu)
if model_state is not None:
executor.model.load_state_dict(model_state)
trainer = Trainer(
executor,
optimizer,
grad_acc_steps=batch_size_multiplier,
ema=args.use_ema,
)
if (args.use_ema is not None) and (model_state_ema is not None):
trainer.ema_executor.model.load_state_dict(model_state_ema)
return (
trainer,
lr_policy,
train_loader,
train_loader_len,
val_loader,
logger,
start_epoch,
best_prec1,
)
def main(args, model_args, model_arch):
exp_start_time = time.time()
(
trainer,
lr_policy,
train_loader,
train_loader_len,
val_loader,
logger,
start_epoch,
best_prec1,
) = prepare_for_training(args, model_args, model_arch)
train_loop(
trainer,
lr_policy,
train_loader,
train_loader_len,
val_loader,
logger,
start_epoch=start_epoch,
end_epoch=min((start_epoch + args.run_epochs), args.epochs)
if args.run_epochs != -1
else args.epochs,
early_stopping_patience=args.early_stopping_patience,
best_prec1=best_prec1,
prof=args.prof,
skip_training=args.evaluate,
skip_validation=args.training_only,
save_checkpoints=args.save_checkpoints and not args.evaluate,
checkpoint_dir=args.workspace,
checkpoint_filename=args.checkpoint_filename,
keep_last_n_checkpoints=args.gather_checkpoints,
topk=args.topk,
)
exp_duration = time.time() - exp_start_time
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
logger.end()
print("Experiment ended")
if __name__ == "__main__":
epilog = [
"Based on the architecture picked by --arch flag, you may use the following options:\n"
]
for model, ep in available_models().items():
model_help = "\n".join(ep.parser().format_help().split("\n")[2:])
epilog.append(model_help)
parser = argparse.ArgumentParser(
description="PyTorch ImageNet Training",
epilog="\n".join(epilog),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
add_parser_arguments(parser)
args, rest = parser.parse_known_args()
model_arch = available_models()[args.arch]
model_args, rest = model_arch.parser().parse_known_args(rest)
print(model_args)
assert len(rest) == 0, f"Unknown args passed: {rest}"
cudnn.benchmark = True
main(args, model_args, model_arch)
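# Illustrative single-node invocation on 8 GPUs via multiproc.py (paths and
# hyper-parameters below are placeholders):
#
#     python ./multiproc.py --nproc_per_node 8 ./main.py /path/to/imagenet \
#         --arch resnet50 --amp --static-loss-scale 128 -b 128 --epochs 90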
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/main.py |
# From PyTorch:
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2016- Facebook, Inc (Adam Paszke)
# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (Clement Farabet)
# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
#
# From Caffe2:
#
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
#
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
#
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
#
# All contributions by Yangqing Jia:
# Copyright (c) 2015 Yangqing Jia
# All rights reserved.
#
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
#
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
#
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import subprocess
import os
import socket
import time
from argparse import ArgumentParser, REMAINDER
import torch
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(
description="PyTorch distributed training launch "
"helper utilty that will spawn up "
"multiple distributed processes"
)
# Optional arguments for the launch helper
parser.add_argument(
"--nnodes",
type=int,
default=1,
help="The number of nodes to use for distributed " "training",
)
parser.add_argument(
"--node_rank",
type=int,
default=0,
help="The rank of the node for multi-node distributed " "training",
)
parser.add_argument(
"--nproc_per_node",
type=int,
default=1,
help="The number of processes to launch on each node, "
"for GPU training, this is recommended to be set "
"to the number of GPUs in your system so that "
"each process can be bound to a single GPU.",
)
parser.add_argument(
"--master_addr",
default="127.0.0.1",
type=str,
help="Master node (rank 0)'s address, should be either "
"the IP address or the hostname of node 0, for "
"single node multi-proc training, the "
"--master_addr can simply be 127.0.0.1",
)
parser.add_argument(
"--master_port",
default=29500,
type=int,
help="Master node (rank 0)'s free port that needs to "
"be used for communciation during distributed "
"training",
)
# positional
parser.add_argument(
"training_script",
type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script",
)
# rest from the training program
parser.add_argument("training_script_args", nargs=REMAINDER)
return parser.parse_args()
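# Example invocation (illustrative; the training script and its arguments
# depend on your setup):
#
#     python ./multiproc.py --nnodes 1 --node_rank 0 --nproc_per_node 8 \
#         ./main.py <training-script-args...>
#
# Each spawned process receives MASTER_ADDR, MASTER_PORT, WORLD_SIZE, RANK and
# LOCAL_RANK through its environment, as set up in main() below.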
def main():
args = parse_args()
# world size in terms of number of processes
dist_world_size = args.nproc_per_node * args.nnodes
# set PyTorch distributed related environmental variables
current_env = os.environ.copy()
current_env["MASTER_ADDR"] = args.master_addr
current_env["MASTER_PORT"] = str(args.master_port)
current_env["WORLD_SIZE"] = str(dist_world_size)
processes = []
for local_rank in range(0, args.nproc_per_node):
# each process's rank
dist_rank = args.nproc_per_node * args.node_rank + local_rank
current_env["RANK"] = str(dist_rank)
current_env["LOCAL_RANK"] = str(local_rank)
# spawn the processes
cmd = [sys.executable, "-u", args.training_script] + args.training_script_args
print(cmd)
stdout = (
None if local_rank == 0 else open("GPU_" + str(local_rank) + ".log", "w")
)
process = subprocess.Popen(cmd, env=current_env, stdout=stdout, stderr=stdout)
processes.append(process)
try:
up = True
error = False
while up and not error:
up = False
for p in processes:
ret = p.poll()
if ret is None:
up = True
elif ret != 0:
error = True
time.sleep(1)
if error:
for p in processes:
if p.poll() is None:
p.terminate()
exit(1)
except KeyboardInterrupt:
for p in processes:
p.terminate()
raise
except SystemExit:
for p in processes:
p.terminate()
raise
except:
for p in processes:
p.terminate()
raise
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/multiproc.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Using the `calculate_metrics.py` script, you can obtain model accuracy/error metrics computed with a user-defined `MetricsCalculator` class.
Data provided to `MetricsCalculator` is obtained from npz dump files
stored in the directory pointed to by the `--dump-dir` argument.
These files are prepared by the `run_inference_on_fw.py` and `run_inference_on_triton.py` scripts.
Output data is stored in the csv file pointed to by the `--csv` argument.
Example call:
```shell script
python ./triton/calculate_metrics.py \
--dump-dir /results/dump_triton \
--csv /results/accuracy_results.csv \
--metrics metrics.py \
--metric-class-param1 value
```
"""
import argparse
import csv
import logging
import string
from pathlib import Path
import numpy as np
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import BaseMetricsCalculator, load_from_file
from .deployment_toolkit.dump import pad_except_batch_axis
LOGGER = logging.getLogger("calculate_metrics")
TOTAL_COLUMN_NAME = "_total_"
def get_data(dump_dir, prefix):
"""Loads and concatenates dump files for given prefix (ex. inputs, outputs, labels, ids)"""
dump_dir = Path(dump_dir)
npz_files = sorted(dump_dir.glob(f"{prefix}*.npz"))
data = None
if npz_files:
# assume that all npz files with given prefix contain same set of names
names = list(np.load(npz_files[0].as_posix()).keys())
# calculate target shape
target_shape = {
name: tuple(np.max([np.load(npz_file.as_posix())[name].shape for npz_file in npz_files], axis=0))
for name in names
}
# pad and concatenate data
data = {
name: np.concatenate(
[pad_except_batch_axis(np.load(npz_file.as_posix())[name], target_shape[name]) for npz_file in npz_files]
)
for name in names
}
return data
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Run models with given dataloader", allow_abbrev=False)
parser.add_argument("--metrics", help=f"Path to python module containing metrics calculator", required=True)
parser.add_argument("--csv", help="Path to csv file", required=True)
parser.add_argument("--dump-dir", help="Path to directory with dumped outputs (and labels)", required=True)
args, *_ = parser.parse_known_args()
MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator")
ArgParserGenerator(MetricsCalculator).update_argparser(parser)
args = parser.parse_args()
LOGGER.info(f"args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator")
metrics_calculator: BaseMetricsCalculator = ArgParserGenerator(MetricsCalculator).from_args(args)
ids = get_data(args.dump_dir, "ids")["ids"]
x = get_data(args.dump_dir, "inputs")
y_true = get_data(args.dump_dir, "labels")
y_pred = get_data(args.dump_dir, "outputs")
common_keys = list({k for k in (y_true or [])} & {k for k in (y_pred or [])})
for key in common_keys:
if y_true[key].shape != y_pred[key].shape:
LOGGER.warning(
f"Model predictions and labels shall have equal shapes. "
f"y_pred[{key}].shape={y_pred[key].shape} != "
f"y_true[{key}].shape={y_true[key].shape}"
)
metrics = metrics_calculator.calc(ids=ids, x=x, y_pred=y_pred, y_real=y_true)
metrics = {TOTAL_COLUMN_NAME: len(ids), **metrics}
metric_names_with_space = [name for name in metrics if any([c in string.whitespace for c in name])]
if metric_names_with_space:
raise ValueError(f"Metric names shall have no spaces; Incorrect names: {', '.join(metric_names_with_space)}")
csv_path = Path(args.csv)
csv_path.parent.mkdir(parents=True, exist_ok=True)
with csv_path.open("w") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=list(metrics.keys()))
writer.writeheader()
writer.writerow(metrics)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/calculate_metrics.py |
#!/usr/bin/python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import torch
import argparse
import triton.deployer_lib as deployer_lib
def get_model_args(model_args):
""" the arguments initialize_model will receive """
parser = argparse.ArgumentParser()
## Required parameters by the model.
parser.add_argument(
"--config",
default="resnet50",
type=str,
required=True,
help="Network to deploy",
)
parser.add_argument(
"--checkpoint", default=None, type=str, help="The checkpoint of the model. "
)
parser.add_argument(
"--batch_size", default=1000, type=int, help="Batch size for inference"
)
parser.add_argument(
"--fp16", default=False, action="store_true", help="FP16 inference"
)
parser.add_argument(
"--dump_perf_data",
type=str,
default=None,
help="Directory to dump perf data sample for testing",
)
return parser.parse_args(model_args)
def initialize_model(args):
""" return model, ready to trace """
from image_classification.resnet import build_resnet
model = build_resnet(args.config, "fanin", 1000, fused_se=False)
if args.checkpoint:
state_dict = torch.load(args.checkpoint, map_location="cpu")
        model.load_state_dict(
            {k.replace("module.", ""): v for k, v in state_dict.items()}
        )
return model.half() if args.fp16 else model
def get_dataloader(args):
""" return dataloader for inference """
from image_classification.dataloaders import get_synthetic_loader
def data_loader():
loader, _ = get_synthetic_loader(None, 128, 1000, True, fp16=args.fp16)
processed = 0
for inp, _ in loader:
yield inp
processed += 1
if processed > 10:
break
return data_loader()
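# Example invocation (illustrative; arguments before the "--" separator are
# consumed by deployer_lib.create_deployer, those after it are forwarded to
# get_model_args above; paths and the model name are placeholders):
#
#     python triton/deployer.py --trt --triton-model-name resnet50-trt \
#         --triton-max-batch-size 64 --save-dir ./triton_models \
#         -- --config resnet50 --checkpoint ./checkpoint.pth --batch_size 64 --fp16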
if __name__ == "__main__":
# don't touch this!
deployer, model_argv = deployer_lib.create_deployer(
sys.argv[1:]
    )  # returns the deployer object and the CLI arguments it did not consume
model_args = get_model_args(model_argv)
model = initialize_model(model_args)
dataloader = get_dataloader(model_args)
if model_args.dump_perf_data:
input_0 = next(iter(dataloader))
if model_args.fp16:
input_0 = input_0.half()
os.makedirs(model_args.dump_perf_data, exist_ok=True)
input_0.detach().cpu().numpy()[0].tofile(
os.path.join(model_args.dump_perf_data, "input__0")
)
deployer.deploy(dataloader, model)
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/deployer.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To infer the model deployed on Triton, you can use the `run_inference_on_triton.py` script.
It sends requests with data obtained from the pointed data loader and dumps the received data into npz files.
Those files are stored in the directory pointed to by the `--output-dir` argument.
Currently, the client communicates with the Triton server asynchronously using the GRPC protocol.
Example call:
```shell script
python ./triton/run_inference_on_triton.py \
--server-url localhost:8001 \
--model-name ResNet50 \
--model-version 1 \
--dump-labels \
--output-dir /results/dump_triton
```
"""
import argparse
import functools
import logging
import queue
import threading
import time
from pathlib import Path
from typing import Optional
from tqdm import tqdm
# pytype: disable=import-error
try:
from tritonclient import utils as client_utils # noqa: F401
from tritonclient.grpc import (
InferenceServerClient,
InferInput,
InferRequestedOutput,
)
except ImportError:
import tritongrpcclient as grpc_client
from tritongrpcclient import (
InferenceServerClient,
InferInput,
InferRequestedOutput,
)
# pytype: enable=import-error
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import DATALOADER_FN_NAME, load_from_file
from .deployment_toolkit.dump import NpzWriter
LOGGER = logging.getLogger("run_inference_on_triton")
class AsyncGRPCTritonRunner:
DEFAULT_MAX_RESP_WAIT_S = 120
DEFAULT_MAX_UNRESP_REQS = 128
DEFAULT_MAX_FINISH_WAIT_S = 900 # 15min
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
dataloader,
verbose=False,
resp_wait_s: Optional[float] = None,
max_unresponded_reqs: Optional[int] = None,
):
self._server_url = server_url
self._model_name = model_name
self._model_version = model_version
self._dataloader = dataloader
self._verbose = verbose
self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s
self._max_unresp_reqs = self.DEFAULT_MAX_UNRESP_REQS if max_unresponded_reqs is None else max_unresponded_reqs
self._results = queue.Queue()
self._processed_all = False
self._errors = []
self._num_waiting_for = 0
self._sync = threading.Condition()
self._req_thread = threading.Thread(target=self.req_loop, daemon=True)
def __iter__(self):
self._req_thread.start()
timeout_s = 0.050 # check flags processed_all and error flags every 50ms
while True:
try:
ids, x, y_pred, y_real = self._results.get(timeout=timeout_s)
yield ids, x, y_pred, y_real
except queue.Empty:
shall_stop = self._processed_all or self._errors
if shall_stop:
break
LOGGER.debug("Waiting for request thread to stop")
self._req_thread.join()
if self._errors:
error_msg = "\n".join(map(str, self._errors))
raise RuntimeError(error_msg)
def _on_result(self, ids, x, y_real, output_names, result, error):
with self._sync:
if error:
self._errors.append(error)
else:
y_pred = {name: result.as_numpy(name) for name in output_names}
self._results.put((ids, x, y_pred, y_real))
self._num_waiting_for -= 1
self._sync.notify_all()
def req_loop(self):
client = InferenceServerClient(self._server_url, verbose=self._verbose)
self._errors = self._verify_triton_state(client)
if self._errors:
return
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {model_config}")
LOGGER.info(f"Model metadata {model_metadata}")
inputs = {tm.name: tm for tm in model_metadata.inputs}
outputs = {tm.name: tm for tm in model_metadata.outputs}
output_names = list(outputs)
outputs_req = [InferRequestedOutput(name) for name in outputs]
self._num_waiting_for = 0
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
infer_input = InferInput(name, data.shape, inputs[name].datatype)
target_np_dtype = client_utils.triton_to_np_dtype(inputs[name].datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
with self._sync:
def _check_can_send():
return self._num_waiting_for < self._max_unresp_reqs
can_send = self._sync.wait_for(_check_can_send, timeout=self._response_wait_t)
if not can_send:
error_msg = f"Runner could not send new requests for {self._response_wait_t}s"
self._errors.append(error_msg)
break
callback = functools.partial(AsyncGRPCTritonRunner._on_result, self, ids, x, y_real, output_names)
client.async_infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
callback=callback,
)
self._num_waiting_for += 1
# wait till receive all requested data
with self._sync:
def _all_processed():
LOGGER.debug(f"wait for {self._num_waiting_for} unprocessed jobs")
return self._num_waiting_for == 0
self._processed_all = self._sync.wait_for(_all_processed, self.DEFAULT_MAX_FINISH_WAIT_S)
if not self._processed_all:
error_msg = f"Runner {self._response_wait_t}s timeout received while waiting for results from server"
self._errors.append(error_msg)
LOGGER.debug("Finished request thread")
def _verify_triton_state(self, triton_client):
errors = []
if not triton_client.is_server_live():
errors.append(f"Triton server {self._server_url} is not live")
elif not triton_client.is_server_ready():
errors.append(f"Triton server {self._server_url} is not ready")
elif not triton_client.is_model_ready(self._model_name, self._model_version):
errors.append(f"Model {self._model_name}:{self._model_version} is not ready")
return errors
def _parse_args():
parser = argparse.ArgumentParser(description="Infer model on Triton server", allow_abbrev=False)
parser.add_argument(
"--server-url", type=str, default="localhost:8001", help="Inference server URL (default localhost:8001)"
)
parser.add_argument("--model-name", help="The name of the model used for inference.", required=True)
parser.add_argument("--model-version", help="The version of the model used for inference.", required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument("--output-dir", required=True, help="Path to directory where outputs will be saved")
parser.add_argument("--response-wait-time", required=False, help="Maximal time to wait for response", default=120)
    parser.add_argument(
        "--max-unresponded-requests", required=False, type=int, help="Maximal number of unresponded requests", default=128
    )
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
args = parser.parse_args()
return args
def main():
args = _parse_args()
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
log_level = logging.INFO if not args.verbose else logging.DEBUG
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info(f"args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
runner = AsyncGRPCTritonRunner(
args.server_url,
args.model_name,
args.model_version,
dataloader=dataloader_fn(),
verbose=False,
resp_wait_s=args.response_wait_time,
max_unresponded_reqs=args.max_unresponded_requests,
)
with NpzWriter(output_dir=args.output_dir) as writer:
start = time.time()
for ids, x, y_pred, y_real in tqdm(runner, unit="batch", mininterval=10):
data = _verify_and_format_dump(args, ids, x, y_pred, y_real)
writer.write(**data)
stop = time.time()
LOGGER.info(f"\nThe inference took {stop - start:0.3f}s")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/run_inference_on_triton.py |
#!/usr/bin/python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import json
import torch
import argparse
import statistics
from collections import Counter
torch_type_to_triton_type = {
torch.bool: "TYPE_BOOL",
torch.int8: "TYPE_INT8",
torch.int16: "TYPE_INT16",
torch.int32: "TYPE_INT32",
torch.int64: "TYPE_INT64",
torch.uint8: "TYPE_UINT8",
torch.float16: "TYPE_FP16",
torch.float32: "TYPE_FP32",
torch.float64: "TYPE_FP64",
}
CONFIG_TEMPLATE = r"""
name: "{model_name}"
platform: "{platform}"
max_batch_size: {max_batch_size}
input [
{spec_inputs}
]
output [
{spec_outputs}
]
{dynamic_batching}
{model_optimizations}
instance_group [
{{
count: {engine_count}
kind: KIND_GPU
gpus: [ {gpu_list} ]
}}
]"""
INPUT_TEMPLATE = r"""
{{
name: "input__{num}"
data_type: {type}
dims: {dims}
{reshape}
}},"""
OUTPUT_TEMPLATE = r"""
{{
name: "output__{num}"
data_type: {type}
dims: {dims}
{reshape}
}},"""
MODEL_OPTIMIZATION_TEMPLATE = r"""
optimization {{
{execution_accelerator}
cuda {{
graphs: {capture_cuda_graph}
}}
}}"""
EXECUTION_ACCELERATOR_TEMPLATE = r"""
execution_accelerators {{
gpu_execution_accelerator: [
{{
name: "tensorrt"
}}
]
}},"""
def remove_empty_lines(text):
""" removes empty lines from text, returns the result """
ret = "".join([s for s in text.strip().splitlines(True) if s.strip()])
return ret
def create_deployer(argv):
""" takes a list of arguments, returns a deployer object and the list of unused arguments """
parser = argparse.ArgumentParser()
# required args
method = parser.add_mutually_exclusive_group(required=True)
method.add_argument(
"--ts-script",
action="store_true",
help="convert to torchscript using torch.jit.script",
)
method.add_argument(
"--ts-trace",
action="store_true",
help="convert to torchscript using torch.jit.trace",
)
method.add_argument(
"--onnx", action="store_true", help="convert to onnx using torch.onnx.export"
)
method.add_argument(
"--trt", action="store_true", help="convert to trt using tensorrt"
)
# triton related args
arguments = parser.add_argument_group("triton related flags")
arguments.add_argument(
"--triton-no-cuda", action="store_true", help="Use the CPU for tracing."
)
arguments.add_argument(
"--triton-model-name",
type=str,
default="model",
help="exports to appropriate directory structure for TRITON",
)
arguments.add_argument(
"--triton-model-version",
type=int,
default=1,
help="exports to appropriate directory structure for TRITON",
)
arguments.add_argument(
"--triton-max-batch-size",
type=int,
default=8,
help="Specifies the 'max_batch_size' in the TRITON model config.\
See the TRITON documentation for more info.",
)
arguments.add_argument(
"--triton-dyn-batching-delay",
type=float,
default=0,
help="Determines the dynamic_batching queue delay in milliseconds(ms) for\
the TRITON model config. Use '0' or '-1' to specify static batching.\
See the TRITON documentation for more info.",
)
arguments.add_argument(
"--triton-engine-count",
type=int,
default=1,
help="Specifies the 'instance_group' count value in the TRITON model config.\
See the TRITON documentation for more info.",
)
arguments.add_argument(
"--save-dir", type=str, default="./triton_models", help="Saved model directory"
)
# optimization args
arguments = parser.add_argument_group("optimization flags")
arguments.add_argument(
"--max_workspace_size",
type=int,
default=512 * 1024 * 1024,
help="set the size of the workspace for trt export",
)
arguments.add_argument(
"--trt-fp16",
action="store_true",
help="trt flag ---- export model in mixed precision mode",
)
arguments.add_argument(
"--capture-cuda-graph",
type=int,
default=1,
help="capture cuda graph for obtaining speedup. possible values: 0, 1. default: 1. ",
)
# remainder args
arguments.add_argument(
"model_arguments",
nargs=argparse.REMAINDER,
help="arguments that will be ignored by deployer lib and will be forwarded to your deployer script",
)
#
args = parser.parse_args(argv)
deployer = Deployer(args)
#
return deployer, args.model_arguments[1:]
class DeployerLibrary:
def __init__(self, args):
self.args = args
self.platform = None
def set_platform(self, platform):
""" sets the platform
:: platform :: "pytorch_libtorch" or "onnxruntime_onnx" or "tensorrt_plan"
"""
self.platform = platform
def build_trt_engine(self, model_file, shapes):
""" takes a path to an onnx file, and shape information, returns a trt engine
:: model_file :: path to an onnx model
:: shapes :: dictionary containing min shape, max shape, opt shape for the trt engine
"""
import tensorrt as trt
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(TRT_LOGGER)
builder.fp16_mode = self.args.trt_fp16
builder.max_batch_size = self.args.triton_max_batch_size
#
config = builder.create_builder_config()
config.max_workspace_size = self.args.max_workspace_size
if self.args.trt_fp16:
config.flags |= 1 << int(trt.BuilderFlag.FP16)
profile = builder.create_optimization_profile()
for s in shapes:
profile.set_shape(s["name"], min=s["min"], opt=s["opt"], max=s["max"])
config.add_optimization_profile(profile)
explicit_batch = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network = builder.create_network(explicit_batch)
#
with trt.OnnxParser(network, TRT_LOGGER) as parser:
with open(model_file, "rb") as model:
parser.parse(model.read())
for i in range(parser.num_errors):
e = parser.get_error(i)
print("||||e", e)
engine = builder.build_engine(network, config=config)
return engine
def load_engine(self, engine_filepath):
""" loads a trt engine from engine_filepath, returns it """
import tensorrt as trt
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
with open(engine_filepath, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(f.read())
return engine
def prepare_inputs(self, dataloader, device):
""" load sample inputs to device """
inputs = []
for batch in dataloader:
if type(batch) is torch.Tensor:
batch_d = batch.to(device)
batch_d = (batch_d,)
inputs.append(batch_d)
else:
batch_d = []
for x in batch:
assert type(x) is torch.Tensor, "input is not a tensor"
batch_d.append(x.to(device))
batch_d = tuple(batch_d)
inputs.append(batch_d)
return inputs
def get_list_of_shapes(self, l, fun):
""" returns the list of min/max shapes, depending on fun
:: l :: list of tuples of tensors
:: fun :: min or max
"""
tensor_tuple = l[0]
shapes = [list(x.shape) for x in tensor_tuple]
for tensor_tuple in l:
assert len(tensor_tuple) == len(
shapes
), "tensors with varying shape lengths are not supported"
for i, x in enumerate(tensor_tuple):
for j in range(len(x.shape)):
shapes[i][j] = fun(shapes[i][j], x.shape[j])
return shapes # a list of shapes
def get_tuple_of_min_shapes(self, l):
""" returns the tuple of min shapes
:: l :: list of tuples of tensors """
shapes = self.get_list_of_shapes(l, min)
min_batch = 1
shapes = [[min_batch, *shape[1:]] for shape in shapes]
shapes = tuple(shapes)
return shapes # tuple of min shapes
def get_tuple_of_max_shapes(self, l):
""" returns the tuple of max shapes
:: l :: list of tuples of tensors """
shapes = self.get_list_of_shapes(l, max)
max_batch = max(2, shapes[0][0])
shapes = [[max_batch, *shape[1:]] for shape in shapes]
shapes = tuple(shapes)
return shapes # tuple of max shapes
def get_tuple_of_opt_shapes(self, l):
""" returns the tuple of opt shapes
:: l :: list of tuples of tensors """
counter = Counter()
for tensor_tuple in l:
shapes = [tuple(x.shape) for x in tensor_tuple]
shapes = tuple(shapes)
counter[shapes] += 1
shapes = counter.most_common(1)[0][0]
        return shapes  # tuple of the most commonly occurring shapes
def get_tuple_of_dynamic_shapes(self, l):
""" returns a tuple of dynamic shapes: variable tensor dimensions
(for ex. batch size) occur as -1 in the tuple
:: l :: list of tuples of tensors """
tensor_tuple = l[0]
shapes = [list(x.shape) for x in tensor_tuple]
for tensor_tuple in l:
err_msg = "tensors with varying shape lengths are not supported"
assert len(tensor_tuple) == len(shapes), err_msg
for i, x in enumerate(tensor_tuple):
for j in range(len(x.shape)):
if shapes[i][j] != x.shape[j] or j == 0:
shapes[i][j] = -1
shapes = tuple(shapes)
return shapes # tuple of dynamic shapes
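    # Worked example for the shape helpers above (illustrative): for a dataloader
    # yielding single-tensor batches of shape (32, 3, 224, 224) and (16, 3, 224, 224),
    #   get_tuple_of_min_shapes     -> ([1, 3, 224, 224],)
    #   get_tuple_of_max_shapes     -> ([32, 3, 224, 224],)
    #   get_tuple_of_dynamic_shapes -> ([-1, 3, 224, 224],)
    # get_tuple_of_opt_shapes picks the most frequently occurring shape instead.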
def run_models(self, models, inputs):
""" run the models on inputs, return the outputs and execution times """
ret = []
for model in models:
torch.cuda.synchronize()
time_start = time.time()
outputs = []
for input in inputs:
with torch.no_grad():
output = model(*input)
if type(output) is torch.Tensor:
output = [output]
outputs.append(output)
torch.cuda.synchronize()
time_end = time.time()
t = time_end - time_start
ret.append(outputs)
ret.append(t)
return ret
def compute_tensor_stats(self, tensor):
return {
"std": tensor.std().item(),
"mean": tensor.mean().item(),
"max": tensor.max().item(),
"min": tensor.min().item(),
}
def compute_errors(self, outputs_A, outputs_B):
""" returns dictionary with errors statistics """
device = outputs_A[0][0][0].device
dtype = outputs_A[0][0][0].dtype
x_values = torch.zeros(0, device=device, dtype=dtype)
y_values = torch.zeros(0, device=device, dtype=dtype)
d_values = torch.zeros(0, device=device, dtype=dtype)
for output_A, output_B in zip(outputs_A, outputs_B):
for x, y in zip(output_A, output_B):
d = abs(x - y)
x_values = torch.cat((x_values, x), 0)
y_values = torch.cat((y_values, y), 0)
d_values = torch.cat((d_values, d), 0)
Error_stats = {
"Original": self.compute_tensor_stats(x_values),
"Converted": self.compute_tensor_stats(y_values),
"Absolute difference": self.compute_tensor_stats(d_values),
}
return Error_stats
def print_errors(self, Error_stats):
""" print various statistcs of Linf errors """
print()
print("conversion correctness test results")
print("-----------------------------------")
import pandas as pd
print(pd.DataFrame(Error_stats))
def write_config(
self, config_filename, input_shapes, input_types, output_shapes, output_types
):
""" writes TRTIS config file
:: config_filename :: the file to write the config file into
:: input_shapes :: tuple of dynamic shapes of the input tensors
:: input_types :: tuple of torch types of the input tensors
:: output_shapes :: tuple of dynamic shapes of the output tensors
:: output_types :: tuple of torch types of the output tensors
"""
assert self.platform is not None, "error - platform is not set"
config_template = CONFIG_TEMPLATE
input_template = INPUT_TEMPLATE
optimization_template = MODEL_OPTIMIZATION_TEMPLATE
accelerator_template = EXECUTION_ACCELERATOR_TEMPLATE
spec_inputs = r""""""
for i, (shape, typ) in enumerate(zip(input_shapes, input_types)):
d = {
"num": str(i),
"type": torch_type_to_triton_type[typ],
"dims": str([1])
if len(shape) == 1
else str(list(shape)[1:]), # first dimension is the batch size
}
d["reshape"] = "reshape: { shape: [ ] }" if len(shape) == 1 else ""
spec_inputs += input_template.format_map(d)
spec_inputs = spec_inputs[:-1]
output_template = OUTPUT_TEMPLATE
spec_outputs = r""""""
for i, (shape, typ) in enumerate(zip(output_shapes, output_types)):
d = {
"num": str(i),
"type": torch_type_to_triton_type[typ],
"dims": str([1])
if len(shape) == 1
else str(list(shape)[1:]), # first dimension is the batch size
}
d["reshape"] = "reshape: { shape: [ ] }" if len(shape) == 1 else ""
spec_outputs += output_template.format_map(d)
spec_outputs = spec_outputs[:-1]
batching_str = ""
max_batch_size = self.args.triton_max_batch_size
if self.args.triton_dyn_batching_delay >= 0:
# Use only full and half full batches
pref_batch_size = [int(max_batch_size / 2.0), max_batch_size]
if self.args.triton_dyn_batching_delay > 0:
dyn_batch_delay_str = f"max_queue_delay_microseconds: {int(self.args.triton_dyn_batching_delay * 1000.0)}"
else:
dyn_batch_delay_str = ""
batching_str = r"""
dynamic_batching {{
preferred_batch_size: [{0}]
{1}
}}""".format(
", ".join([str(x) for x in pref_batch_size]), dyn_batch_delay_str
)
accelerator_str = ""
d = {
"execution_accelerator": accelerator_str,
"capture_cuda_graph": str(self.args.capture_cuda_graph),
}
optimization_str = optimization_template.format_map(d)
config_values = {
"model_name": self.args.triton_model_name,
"platform": self.platform,
"max_batch_size": max_batch_size,
"spec_inputs": spec_inputs,
"spec_outputs": spec_outputs,
"dynamic_batching": batching_str,
"model_optimizations": optimization_str,
"gpu_list": ", ".join([str(x) for x in range(torch.cuda.device_count())]),
"engine_count": self.args.triton_engine_count,
}
# write config
with open(config_filename, "w") as file:
final_config_str = config_template.format_map(config_values)
final_config_str = remove_empty_lines(final_config_str)
file.write(final_config_str)
class Deployer:
def __init__(self, args):
self.args = args
self.lib = DeployerLibrary(args)
def deploy(self, dataloader, model):
""" deploy the model and test for correctness with dataloader """
if self.args.ts_script or self.args.ts_trace:
self.lib.set_platform("pytorch_libtorch")
print(
"deploying model "
+ self.args.triton_model_name
+ " in format "
+ self.lib.platform
)
self.to_triton_torchscript(dataloader, model)
elif self.args.onnx:
self.lib.set_platform("onnxruntime_onnx")
print(
"deploying model "
+ self.args.triton_model_name
+ " in format "
+ self.lib.platform
)
self.to_triton_onnx(dataloader, model)
elif self.args.trt:
self.lib.set_platform("tensorrt_plan")
print(
"deploying model "
+ self.args.triton_model_name
+ " in format "
+ self.lib.platform
)
self.to_triton_trt(dataloader, model)
else:
assert False, "error"
print("done")
def to_triton_trt(self, dataloader, model):
""" export the model to trt and test correctness on dataloader """
import tensorrt as trt
# setup device
if self.args.triton_no_cuda:
device = torch.device("cpu")
else:
device = torch.device("cuda")
# prepare model
model.to(device)
model.eval()
assert not model.training, "internal error - model should be in eval() mode! "
# prepare inputs
inputs = self.lib.prepare_inputs(dataloader, device)
# generate outputs
outputs = []
for input in inputs:
with torch.no_grad():
output = model(*input)
if type(output) is torch.Tensor:
output = [output]
outputs.append(output)
# generate input shapes - dynamic tensor shape support
input_shapes = self.lib.get_tuple_of_dynamic_shapes(inputs)
# generate output shapes - dynamic tensor shape support
output_shapes = self.lib.get_tuple_of_dynamic_shapes(outputs)
# generate input types
input_types = [x.dtype for x in inputs[0]]
# generate output types
output_types = [x.dtype for x in outputs[0]]
# get input names
rng = range(len(input_types))
input_names = ["input__" + str(num) for num in rng]
# get output names
rng = range(len(output_types))
output_names = ["output__" + str(num) for num in rng]
# prepare save path
model_folder = os.path.join(self.args.save_dir, self.args.triton_model_name)
version_folder = os.path.join(model_folder, str(self.args.triton_model_version))
if not os.path.exists(version_folder):
os.makedirs(version_folder)
final_model_path = os.path.join(version_folder, "model.plan")
# get indices of dynamic input and output shapes
dynamic_axes = {}
for input_name, shape in zip(input_names, input_shapes):
dynamic_axes[input_name] = [i for i, x in enumerate(shape) if x == -1]
for output_name, shape in zip(output_names, output_shapes):
dynamic_axes[output_name] = [i for i, x in enumerate(shape) if x == -1]
# export the model to onnx first
with torch.no_grad():
torch.onnx.export(
model,
inputs[0],
final_model_path,
verbose=False,
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
opset_version=11,
)
# get shapes
min_shapes = self.lib.get_tuple_of_min_shapes(inputs)
opt_shapes = self.lib.get_tuple_of_opt_shapes(inputs)
max_shapes = self.lib.get_tuple_of_max_shapes(inputs)
zipped = zip(input_names, min_shapes, opt_shapes, max_shapes)
shapes = []
for name, min_shape, opt_shape, max_shape in zipped:
d = {"name": name, "min": min_shape, "opt": opt_shape, "max": max_shape}
shapes.append(d)
# build trt engine
engine = self.lib.build_trt_engine(final_model_path, shapes)
assert engine is not None, " trt export failure "
# write trt engine
with open(final_model_path, "wb") as f:
f.write(engine.serialize())
# load the model
engine = self.lib.load_engine(final_model_path)
class TRT_model:
def __init__(self, engine, input_names, output_names, output_types, device):
self.engine = engine
self.context = self.engine.create_execution_context()
self.input_names = input_names
self.output_names = output_names
self.output_types = output_types
self.device = device
def is_dimension_dynamic(self, dim):
return dim is None or dim <= 0
def is_shape_dynamic(self, shape):
return any([self.is_dimension_dynamic(dim) for dim in shape])
def __call__(self, *inputs):
# get input shapes
input_shapes = [x.shape for x in inputs]
# bindings
bindings = [None] * self.engine.num_bindings
# set input shapes, bind input tensors
zipped = zip(self.input_names, inputs)
for key, input in zipped:
idx = self.engine.get_binding_index(key)
bindings[idx] = input.data_ptr()
if self.engine.is_shape_binding(idx) and self.is_shape_dynamic(
self.context.get_shape(idx)
):
self.context.set_shape_input(idx, input)
elif self.is_shape_dynamic(self.engine.get_binding_shape(idx)):
self.context.set_binding_shape(idx, input.shape)
assert self.context.all_binding_shapes_specified, "trt error"
assert self.context.all_shape_inputs_specified, "trt error"
# calculate output shapes, allocate output tensors and bind them
outputs = []
zipped = zip(self.output_names, self.output_types)
for key, dtype in zipped:
idx = self.engine.get_binding_index(key)
shape = self.context.get_binding_shape(idx)
shape = tuple(shape)
assert -1 not in shape, "trt error"
tensor = torch.zeros(shape, dtype=dtype, device=self.device)
outputs.append(tensor)
bindings[idx] = outputs[-1].data_ptr()
# run inference
self.context.execute_v2(bindings=bindings)
# return the result
if len(outputs) == 1:
outputs = outputs[0]
return outputs
model_trt = TRT_model(engine, input_names, output_names, output_types, device)
# run both models on inputs
assert not model.training, "internal error - model should be in eval() mode! "
models = (model, model_trt)
outputs, time_model, outputs_trt, time_model_trt = self.lib.run_models(
models, inputs
)
# check for errors
Error_stats = self.lib.compute_errors(outputs, outputs_trt)
self.lib.print_errors(Error_stats)
print("time of error check of native model: ", time_model, "seconds")
print("time of error check of trt model: ", time_model_trt, "seconds")
print()
# write TRTIS config
config_filename = os.path.join(model_folder, "config.pbtxt")
self.lib.write_config(
config_filename, input_shapes, input_types, output_shapes, output_types
)
def name_onnx_nodes(self, model_path):
"""
Name all unnamed nodes in ONNX model
        parameter model_path: path to the ONNX model
return: none
"""
model = onnx.load(model_path)
node_id = 0
for node in model.graph.node:
if len(node.name) == 0:
node.name = "unnamed_node_%d" % node_id
node_id += 1
# This check partially validates model
onnx.checker.check_model(model)
onnx.save(model, model_path)
# Only inference really checks ONNX model for some issues
# like duplicated node names
onnxruntime.InferenceSession(model_path, None)
def to_triton_onnx(self, dataloader, model):
""" export the model to onnx and test correctness on dataloader """
import onnx as local_onnx
global onnx
onnx = local_onnx
import onnxruntime as local_onnxruntime
global onnxruntime
onnxruntime = local_onnxruntime
# setup device
if self.args.triton_no_cuda:
device = torch.device("cpu")
else:
device = torch.device("cuda")
# prepare model
model.to(device)
model.eval()
assert not model.training, "internal error - model should be in eval() mode! "
# prepare inputs
inputs = self.lib.prepare_inputs(dataloader, device)
# generate outputs
outputs = []
for input in inputs:
with torch.no_grad():
output = model(*input)
if type(output) is torch.Tensor:
output = [output]
outputs.append(output)
# generate input shapes - dynamic tensor shape support
input_shapes = self.lib.get_tuple_of_dynamic_shapes(inputs)
# generate output shapes - dynamic tensor shape support
output_shapes = self.lib.get_tuple_of_dynamic_shapes(outputs)
# generate input types
input_types = [x.dtype for x in inputs[0]]
# generate output types
output_types = [x.dtype for x in outputs[0]]
# get input names
rng = range(len(input_types))
input_names = ["input__" + str(num) for num in rng]
# get output names
rng = range(len(output_types))
output_names = ["output__" + str(num) for num in rng]
# prepare save path
model_folder = os.path.join(self.args.save_dir, self.args.triton_model_name)
version_folder = os.path.join(model_folder, str(self.args.triton_model_version))
if not os.path.exists(version_folder):
os.makedirs(version_folder)
final_model_path = os.path.join(version_folder, "model.onnx")
# get indices of dynamic input and output shapes
dynamic_axes = {}
for input_name, input_shape in zip(input_names, input_shapes):
dynamic_axes[input_name] = [i for i, x in enumerate(input_shape) if x == -1]
for output_name, output_shape in zip(output_names, output_shapes):
dynamic_axes[output_name] = [
i for i, x in enumerate(output_shape) if x == -1
]
# export the model
assert not model.training, "internal error - model should be in eval() mode! "
with torch.no_grad():
torch.onnx.export(
model,
inputs[0],
final_model_path,
verbose=True,
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
opset_version=11,
)
# syntactic error check
converted_model = onnx.load(final_model_path)
# check that the IR is well formed
onnx.checker.check_model(converted_model)
# Name unnamed nodes - it helps for some other processing tools
self.name_onnx_nodes(final_model_path)
converted_model = onnx.load(final_model_path)
# load the model
session = onnxruntime.InferenceSession(final_model_path, None)
class ONNX_model:
            def __init__(self, session, input_names, device):
                self.session = session
                self.input_names = input_names
                self.device = device
def to_numpy(self, tensor):
return (
tensor.detach().cpu().numpy()
if tensor.requires_grad
else tensor.cpu().numpy()
)
def __call__(self, *inputs):
inp = [
(input_name, inputs[i])
for i, input_name in enumerate(self.input_names)
]
inp = {input_name: self.to_numpy(x) for input_name, x in inp}
outputs = self.session.run(None, inp)
outputs = [torch.from_numpy(output) for output in outputs]
                outputs = [output.to(self.device) for output in outputs]
if len(outputs) == 1:
outputs = outputs[0]
return outputs
        # wrap the onnxruntime session in a callable model
model_onnx = ONNX_model(session, input_names, device)
# run both models on inputs
assert not model.training, "internal error - model should be in eval() mode! "
models = (model, model_onnx)
outputs, time_model, outputs_onnx, time_model_onnx = self.lib.run_models(
models, inputs
)
# check for errors
Error_stats = self.lib.compute_errors(outputs, outputs_onnx)
self.lib.print_errors(Error_stats)
print("time of error check of native model: ", time_model, "seconds")
print("time of error check of onnx model: ", time_model_onnx, "seconds")
print()
# write TRTIS config
config_filename = os.path.join(model_folder, "config.pbtxt")
self.lib.write_config(
config_filename, input_shapes, input_types, output_shapes, output_types
)
def to_triton_torchscript(self, dataloader, model):
""" export the model to torchscript and test correctness on dataloader """
# setup device
if self.args.triton_no_cuda:
device = torch.device("cpu")
else:
device = torch.device("cuda")
# prepare model
model.to(device)
model.eval()
assert not model.training, "internal error - model should be in eval() mode! "
# prepare inputs
inputs = self.lib.prepare_inputs(dataloader, device)
# generate input shapes - dynamic tensor shape support
input_shapes = self.lib.get_tuple_of_dynamic_shapes(inputs)
# generate input types
input_types = [x.dtype for x in inputs[0]]
# prepare save path
model_folder = os.path.join(self.args.save_dir, self.args.triton_model_name)
version_folder = os.path.join(model_folder, str(self.args.triton_model_version))
if not os.path.exists(version_folder):
os.makedirs(version_folder)
final_model_path = os.path.join(version_folder, "model.pt")
# convert the model
with torch.no_grad():
if self.args.ts_trace: # trace it
model_ts = torch.jit.trace(model, inputs[0])
if self.args.ts_script: # script it
model_ts = torch.jit.script(model)
# save the model
torch.jit.save(model_ts, final_model_path)
# load the model
model_ts = torch.jit.load(final_model_path)
        model_ts.eval()  # WAR for bug: by default, model_ts gets loaded in training mode
# run both models on inputs
assert not model.training, "internal error - model should be in eval() mode! "
assert (
not model_ts.training
), "internal error - converted model should be in eval() mode! "
models = (model, model_ts)
outputs, time_model, outputs_ts, time_model_ts = self.lib.run_models(
models, inputs
)
# check for errors
Error_stats = self.lib.compute_errors(outputs, outputs_ts)
self.lib.print_errors(Error_stats)
print("time of error check of native model: ", time_model, "seconds")
print("time of error check of ts model: ", time_model_ts, "seconds")
print()
# generate output shapes - dynamic tensor shape support
output_shapes = self.lib.get_tuple_of_dynamic_shapes(outputs)
# generate output types
output_types = [x.dtype for x in outputs[0]]
# now we build the config for TRTIS
config_filename = os.path.join(model_folder, "config.pbtxt")
self.lib.write_config(
config_filename, input_shapes, input_types, output_shapes, output_types
)
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/deployer_lib.py |
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import numpy as np
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from image_classification.dataloaders import get_pytorch_val_loader
from tqdm import tqdm
import tritongrpcclient
from tritonclientutils import InferenceServerException
def get_data_loader(batch_size, *, data_path):
valdir = os.path.join(data_path, "val-jpeg")
val_dataset = datasets.ImageFolder(
valdir,
transforms.Compose(
[transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()]
),
)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=batch_size, shuffle=False
)
return val_loader
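# Example invocation (illustrative; assumes a Triton server reachable over GRPC
# on port 8001, a deployed model named "resnet50-trt" and an ImageNet-style
# directory with a val-jpeg subfolder under --inference_data):
#
#     python triton/client.py \
#         --triton-server-url localhost:8001 \
#         --triton-model-name resnet50-trt \
#         --inference_data /data/imagenet \
#         --batch_size 32 \
#         --fp16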
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--triton-server-url",
type=str,
required=True,
help="URL adress of trtion server (with port)",
)
parser.add_argument(
"--triton-model-name",
type=str,
required=True,
help="Triton deployed model name",
)
parser.add_argument(
"-v", "--verbose", action="store_true", default=False, help="Verbose mode."
)
parser.add_argument(
"--inference_data", type=str, help="Path to file with inference data."
)
parser.add_argument(
"--batch_size", type=int, default=1, help="Inference request batch size"
)
parser.add_argument(
"--fp16",
action="store_true",
default=False,
help="Use fp16 precision for input data",
)
FLAGS = parser.parse_args()
triton_client = tritongrpcclient.InferenceServerClient(
url=FLAGS.triton_server_url, verbose=FLAGS.verbose
)
dataloader = get_data_loader(FLAGS.batch_size, data_path=FLAGS.inference_data)
inputs = []
inputs.append(
tritongrpcclient.InferInput(
"input__0",
[FLAGS.batch_size, 3, 224, 224],
"FP16" if FLAGS.fp16 else "FP32",
)
)
outputs = []
outputs.append(tritongrpcclient.InferRequestedOutput("output__0"))
all_img = 0
cor_img = 0
result_prev = None
for image, target in tqdm(dataloader):
if FLAGS.fp16:
image = image.half()
inputs[0].set_data_from_numpy(image.numpy())
result = triton_client.infer(
FLAGS.triton_model_name, inputs, outputs=outputs, headers=None
)
result = result.as_numpy("output__0")
result = np.argmax(result, axis=1)
cor_img += np.sum(result == target.numpy())
all_img += result.shape[0]
acc = cor_img / all_img
print(f"Final accuracy {acc:.04f}")
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/client.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
For models with variable-sized inputs you must provide the --input-shape argument so that perf_analyzer knows
what shape tensors to use. For example, for a model that has an input called IMAGE that has shape [ 3, N, M ],
where N and M are variable-size dimensions, use `--shape IMAGE:3,224,224` to tell perf_analyzer to send
batch-size 4 requests of shape [ 3, 224, 224 ].
"""
import argparse
import csv
import os
import sys
from pathlib import Path
from typing import Dict, List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.report import save_results, show_results, sort_results
from .deployment_toolkit.warmup import warmup
def calculate_average_latency(r):
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields])
return avg_latency
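# Worked example (illustrative values; perf_client reports latencies in
# microseconds): for a CSV row with "Server Compute" = 850 and
# "Network+Server Send/Recv" = 150 and the remaining fields absent,
# calculate_average_latency returns 850 + 150 = 1000.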
def update_performance_data(results: List, batch_size: int, performance_partial_file: str):
row: Dict = {"batch_size": batch_size}
with open(performance_partial_file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
avg_latency = calculate_average_latency(r)
row = {**row, **r, "avg latency": avg_latency}
results.append(row)
def _parse_batch_sizes(batch_sizes: str):
batches = batch_sizes.split(sep=",")
return list(map(lambda x: int(x.strip()), batches))
def offline_performance(
model_name: str,
batch_sizes: List[int],
result_path: str,
input_shapes: Optional[List[str]] = None,
profiling_data: str = "random",
triton_instances: int = 1,
server_url: str = "localhost",
measurement_window: int = 10000,
shared_memory: bool = False
):
print("\n")
print(f"==== Static batching analysis start ====")
print("\n")
input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
results: List[Dict] = list()
for batch_size in batch_sizes:
print(f"Running performance tests for batch size: {batch_size}")
performance_partial_file = f"triton_performance_partial_{batch_size}.csv"
exec_args = f"""-max-threads {triton_instances} \
-m {model_name} \
-x 1 \
-c {triton_instances} \
-t {triton_instances} \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
-f {performance_partial_file} \
--input-data {profiling_data} {input_shapes}"""
if shared_memory:
exec_args += " --shared-memory=cuda"
result = os.system(f"perf_client {exec_args}")
if result != 0:
print(f"Failed running performance tests. Perf client failed with exit code {result}")
sys.exit(1)
update_performance_data(results, batch_size, performance_partial_file)
os.remove(performance_partial_file)
results = sort_results(results=results)
save_results(filename=result_path, data=results)
show_results(results=results)
print("Performance results for static batching stored in: {0}".format(result_path))
print("\n")
print(f"==== Analysis done ====")
print("\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model-name", type=str, required=True, help="Name of the model to test")
parser.add_argument(
"--input-data", type=str, required=False, default="random", help="Input data to perform profiling."
)
parser.add_argument(
"--input-shape",
action="append",
required=False,
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument("--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.")
parser.add_argument("--result-path", type=str, required=True, help="Path where result file is going to be stored.")
parser.add_argument("--triton-instances", type=int, default=1, help="Number of Triton Server instances")
parser.add_argument("--server-url", type=str, required=False, default="localhost", help="Url to Triton server")
parser.add_argument(
"--measurement-window", required=False, help="Time which perf_analyzer will wait for results", default=10000
)
parser.add_argument("--shared-memory", help="Use shared memory for communication with Triton", action="store_true",
default=False)
args = parser.parse_args()
warmup(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
offline_performance(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
result_path=args.result_path,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/run_offline_performance_test_on_triton.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def update_argparser(parser):
parser.add_argument(
"--config", default="resnet50", type=str, required=True, help="Network to deploy")
parser.add_argument(
"--checkpoint", default=None, type=str, help="The checkpoint of the model. ")
parser.add_argument("--classes", type=int, default=1000, help="Number of classes")
parser.add_argument("--precision", type=str, default="fp32",
choices=["fp32", "fp16"], help="Inference precision")
def get_model(**model_args):
from image_classification import models
model = models.resnet50(pretrained=False)
if "checkpoint" in model_args:
print(f"loading checkpoint {model_args['checkpoint']}")
state_dict = torch.load(model_args["checkpoint"], map_location="cpu")
try:
model.load_state_dict(
{
k.replace("module.", ""): v
for k, v in state_dict.items()
}
)
except RuntimeError as RE:
if not hasattr(model, "ngc_checkpoint_remap"):
raise RE
remap_old = model.ngc_checkpoint_remap(version="20.06.0")
remap_dist = lambda k: k.replace("module.", "")
model.load_state_dict(
{
remap_old(remap_dist(k)): v
for k, v in state_dict.items()
}
)
if model_args["precision"] == "fp16":
model = model.half()
model = model.cuda()
model.eval()
tensor_names = {"inputs": ["INPUT__0"],
"outputs": ["OUTPUT__0"]}
return model, tensor_names
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/model.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, NamedTuple, Optional
import numpy as np
from deployment_toolkit.core import BaseMetricsCalculator
class MetricsCalculator(BaseMetricsCalculator):
def __init__(self):
pass
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
categories = np.argmax(y_pred["OUTPUT__0"], axis=-1)
print(categories.shape)
print(categories[:128], y_pred["OUTPUT__0"] )
print(y_real["OUTPUT__0"][:128])
return {
"accuracy": np.mean(np.argmax(y_pred["OUTPUT__0"], axis=-1) ==
np.argmax(y_real["OUTPUT__0"], axis=-1))
}
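# Minimal sanity check of the metric above (illustrative, not part of the original file); assumes
# deployment_toolkit is importable. With one-hot labels matching the prediction argmax, accuracy is 1.0.
if __name__ == "__main__":
    y_pred = {"OUTPUT__0": np.array([[0.1, 0.9], [0.8, 0.2]], dtype=np.float32)}
    y_real = {"OUTPUT__0": np.array([[0.0, 1.0], [1.0, 0.0]], dtype=np.float32)}
    print(MetricsCalculator().calc(ids=[0, 1], y_pred=y_pred, x=None, y_real=y_real))  # {'accuracy': 1.0}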
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/metric.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To run inference on the framework runtime, you can use the `run_inference_on_fw.py` script.
It runs inference locally on data obtained from the specified data loader and saves the outputs into npz files.
Those files are stored in the directory specified by the `--output-dir` argument.
Example call:
```shell script
python ./triton/run_inference_on_fw.py \
--input-path /models/exported/model.onnx \
--input-type onnx \
--dataloader triton/dataloader.py \
--data-dir /data/imagenet \
--batch-size 32 \
--output-dir /results/dump_local \
--dump-labels
```
"""
import argparse
import logging
import os
from pathlib import Path
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0"
from tqdm import tqdm
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import DATALOADER_FN_NAME, BaseLoader, BaseRunner, Format, load_from_file
from .deployment_toolkit.dump import NpzWriter
from .deployment_toolkit.extensions import loaders, runners
LOGGER = logging.getLogger("run_inference_on_fw")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
def _parse_and_validate_args():
supported_inputs = set(runners.supported_extensions) & set(loaders.supported_extensions)
parser = argparse.ArgumentParser(description="Dump local inference output of given model", allow_abbrev=False)
parser.add_argument("--input-path", help="Path to input model", required=True)
parser.add_argument("--input-type", help="Input model type", choices=supported_inputs, required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--output-dir", help="Path to dir where output files will be stored", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
Runner: BaseRunner = runners.get(args.input_type)
ArgParserGenerator(Runner).update_argparser(parser)
args = parser.parse_args()
types_requiring_io_params = []
if args.input_type in types_requiring_io_params and not all(p for p in [args.inputs, args.outputs]):
parser.error(f"For {args.input_type} input provide --inputs and --outputs parameters")
return args
def main():
args = _parse_and_validate_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info(f"args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
Loader: BaseLoader = loaders.get(args.input_type)
Runner: BaseRunner = runners.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
runner = ArgParserGenerator(Runner).from_args(args)
LOGGER.info(f"Loading {args.input_path}")
model = loader.load(args.input_path)
with runner.init_inference(model=model) as runner_session, NpzWriter(args.output_dir) as writer:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
LOGGER.info(f"Data loader initialized; Running inference")
for ids, x, y_real in tqdm(dataloader_fn(), unit="batch", mininterval=10):
y_pred = runner_session(x)
data = _verify_and_format_dump(args, ids=ids, x=x, y_pred=y_pred, y_real=y_real)
writer.write(**data)
LOGGER.info(f"Inference finished")
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/run_inference_on_fw.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
import numpy as np
from PIL import Image
LOGGER = logging.getLogger(__name__)
def get_dataloader_fn(
*, data_dir: str, batch_size: int = 1, width: int = 224, height: int = 224, images_num: int = None,
precision: str = "fp32", classes: int = 1000
):
def _dataloader():
image_extensions = [".gif", ".png", ".jpeg", ".jpg"]
image_paths = sorted([p for p in Path(data_dir).rglob("*") if p.suffix.lower() in image_extensions])
if images_num is not None:
image_paths = image_paths[:images_num]
LOGGER.info(
f"Creating PIL dataloader on data_dir={data_dir} #images={len(image_paths)} "
f"image_size=({width}, {height}) batch_size={batch_size}"
)
onehot = np.eye(classes)
batch = []
for image_path in image_paths:
img = Image.open(image_path.as_posix()).convert("RGB")
img = img.resize((width, height))
img = (np.array(img).astype(np.float32) / 255) - np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(1, 1, 3)
img = img / np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 1, 3)
true_class = np.array([int(image_path.parent.name)])
assert tuple(img.shape) == (height, width, 3)
img = img[np.newaxis, ...]
batch.append((img, image_path.as_posix(), true_class))
if len(batch) >= batch_size:
ids = [image_path for _, image_path, *_ in batch]
x = {"INPUT__0": np.ascontiguousarray(
np.transpose(np.concatenate([img for img, *_ in batch]),
(0, 3, 1, 2)).astype(np.float32 if precision == "fp32" else np.float16))}
y_real = {"OUTPUT__0": onehot[np.concatenate([class_ for *_, class_ in batch])].astype(
np.float32 if precision == "fp32" else np.float16
)}
batch = []
yield ids, x, y_real
return _dataloader
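# Minimal local check (illustrative, not part of the original file); assumes a directory laid out as
# <class_index>/<image files>, e.g. the output of triton/process_dataset.py.
if __name__ == "__main__":
    import sys
    dataloader_fn = get_dataloader_fn(data_dir=sys.argv[1], batch_size=2, images_num=4)
    for ids, x, y_real in dataloader_fn():
        print(ids, x["INPUT__0"].shape, y_real["OUTPUT__0"].shape)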
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/dataloader.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To configure a model on Triton, you can use the `config_model_on_triton.py` script.
It prepares the layout of the Model Repository, including the Model Configuration.
```shell script
python ./triton/config_model_on_triton.py \
--model-repository /model_repository \
--model-path /models/exported/model.onnx \
--model-format onnx \
--model-name ResNet50 \
--model-version 1 \
--max-batch-size 32 \
--precision fp16 \
--backend-accelerator trt \
--load-model explicit \
--timeout 120 \
--verbose
```
If the Triton server for which the model repository is prepared is running in **explicit model control mode**,
use the `--load-model` argument to send a load_model request to the Triton Inference Server.
If the server is listening on a non-default address or port, use the `--server-url` argument to point to the server control endpoint.
If the HTTP protocol is required to communicate with the Triton server, use the `--http` argument.
To improve inference throughput you can enable
[dynamic batching](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md#dynamic-batcher)
for your model by providing the `--preferred-batch-sizes` and `--max-queue-delay-us` parameters.
For models which don't support batching, set `--max-batch-size` to 0.
By default Triton will [automatically obtain inputs and outputs definitions](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md#auto-generated-model-configuration),
but for TorchScript and TF GraphDef models the script uses a file with I/O specs. This file is automatically generated
when the model is converted to a ScriptModule (either traced or scripted).
If there is a need to pass a different than default path to the I/O spec file, use the `--io-spec` CLI argument.
The I/O spec file is a YAML file with the following structure:
```yaml
- inputs:
- name: input
dtype: float32 # np.dtype name
shape: [None, 224, 224, 3]
- outputs:
- name: probabilities
dtype: float32
shape: [None, 1001]
- name: classes
dtype: int32
shape: [None, 1]
```
"""
import argparse
import logging
import time
from model_navigator import Accelerator, Format, Precision
from model_navigator.args import str2bool
from model_navigator.log import set_logger, log_dict
from model_navigator.triton import ModelConfig, TritonClient, TritonModelStore
LOGGER = logging.getLogger("config_model")
def _available_enum_values(my_enum):
return [item.value for item in my_enum]
def main():
parser = argparse.ArgumentParser(
description="Create Triton model repository and model configuration", allow_abbrev=False
)
parser.add_argument("--model-repository", required=True, help="Path to Triton model repository.")
parser.add_argument("--model-path", required=True, help="Path to model to configure")
# TODO: automation
parser.add_argument(
"--model-format",
required=True,
choices=_available_enum_values(Format),
help="Format of model to deploy",
)
parser.add_argument("--model-name", required=True, help="Model name")
parser.add_argument("--model-version", default="1", help="Version of model (default 1)")
parser.add_argument(
"--max-batch-size",
type=int,
default=32,
help="Maximum batch size allowed for inference. "
"A max_batch_size value of 0 indicates that batching is not allowed for the model",
)
# TODO: automation
parser.add_argument(
"--precision",
type=str,
default=Precision.FP16.value,
choices=_available_enum_values(Precision),
help="Model precision (parameter used only by Tensorflow backend with TensorRT optimization)",
)
# Triton Inference Server endpoint
parser.add_argument(
"--server-url",
type=str,
default="grpc://localhost:8001",
help="Inference server URL in format protocol://host[:port] (default grpc://localhost:8001)",
)
parser.add_argument(
"--load-model",
choices=["none", "poll", "explicit"],
help="Loading model while Triton Server is in given model control mode",
)
parser.add_argument(
"--timeout", default=120, help="Timeout in seconds to wait till model load (default=120)", type=int
)
# optimization related
parser.add_argument(
"--backend-accelerator",
type=str,
choices=_available_enum_values(Accelerator),
default=Accelerator.TRT.value,
help="Select Backend Accelerator used to serve model",
)
parser.add_argument("--number-of-model-instances", type=int, default=1, help="Number of model instances per GPU")
parser.add_argument(
"--preferred-batch-sizes",
type=int,
nargs="*",
help="Batch sizes that the dynamic batcher should attempt to create. "
"In case --max-queue-delay-us is set and this parameter is not, default value will be --max-batch-size",
)
parser.add_argument(
"--max-queue-delay-us",
type=int,
default=0,
help="Max delay time which dynamic batcher shall wait to form a batch (default 0)",
)
parser.add_argument(
"--capture-cuda-graph",
type=int,
default=0,
help="Use cuda capture graph (used only by TensorRT platform)",
)
parser.add_argument("-v", "--verbose", help="Provide verbose logs", type=str2bool, default=False)
args = parser.parse_args()
set_logger(verbose=args.verbose)
log_dict("args", vars(args))
config = ModelConfig.create(
model_path=args.model_path,
# model definition
model_name=args.model_name,
model_version=args.model_version,
model_format=args.model_format,
precision=args.precision,
max_batch_size=args.max_batch_size,
# optimization
accelerator=args.backend_accelerator,
gpu_engine_count=args.number_of_model_instances,
preferred_batch_sizes=args.preferred_batch_sizes or [],
max_queue_delay_us=args.max_queue_delay_us,
capture_cuda_graph=args.capture_cuda_graph,
)
model_store = TritonModelStore(args.model_repository)
model_store.deploy_model(model_config=config, model_path=args.model_path)
if args.load_model != "none":
client = TritonClient(server_url=args.server_url, verbose=args.verbose)
client.wait_for_server_ready(timeout=args.timeout)
if args.load_model == "explicit":
client.load_model(model_name=args.model_name)
if args.load_model == "poll":
time.sleep(15)
client.wait_for_model(model_name=args.model_name, model_version=args.model_version, timeout_s=args.timeout)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/config_model_on_triton.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
For models with variable-sized inputs you must provide the --input-shape argument so that perf_analyzer knows
what shape tensors to use. For example, for a model that has an input called IMAGE with shape [ 3, N, M ],
where N and M are variable-size dimensions, to tell perf_analyzer to send batch-size 4 requests of shape [ 3, 224, 224 ],
use `--input-shape IMAGE:3,224,224` (forwarded to perf_analyzer as `--shape IMAGE:3,224,224`).
"""
import argparse
import csv
import os
import sys
from pathlib import Path
from typing import List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.report import save_results, show_results, sort_results
from .deployment_toolkit.warmup import warmup
def calculate_average_latency(r):
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields])
return avg_latency
def update_performance_data(results: List, performance_file: str):
with open(performance_file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
row["avg latency"] = calculate_average_latency(row)
results.append(row)
def _parse_batch_sizes(batch_sizes: str):
batches = batch_sizes.split(sep=",")
return list(map(lambda x: int(x.strip()), batches))
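# Illustrative behaviour of the helpers above (not part of the original file):
#   _parse_batch_sizes("1, 2,4")  -> [1, 2, 4]
#   calculate_average_latency({"Server Compute": "5", "Client Recv": "2"})  -> 7 (missing fields count as 0)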
def online_performance(
model_name: str,
batch_sizes: List[int],
result_path: str,
input_shapes: Optional[List[str]] = None,
profiling_data: str = "random",
triton_instances: int = 1,
triton_gpu_engine_count: int = 1,
server_url: str = "localhost",
measurement_window: int = 10000,
shared_memory: bool = False
):
print("\n")
print(f"==== Dynamic batching analysis start ====")
print("\n")
input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
print(f"Running performance tests for dynamic batching")
performance_file = f"triton_performance_dynamic_partial.csv"
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * triton_instances * triton_gpu_engine_count
max_concurrency = min(256, max_total_requests)
batch_size = max(1, max_total_requests // 256)
step = max(1, max_concurrency // 32)
min_concurrency = step
exec_args = f"""-m {model_name} \
-x 1 \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
-f {performance_file} \
--concurrency-range {min_concurrency}:{max_concurrency}:{step} \
--input-data {profiling_data} {input_shapes}"""
if shared_memory:
exec_args += " --shared-memory=cuda"
result = os.system(f"perf_client {exec_args}")
if result != 0:
print(f"Failed running performance tests. Perf client failed with exit code {result}")
sys.exit(1)
results = list()
update_performance_data(results=results, performance_file=performance_file)
results = sort_results(results=results)
save_results(filename=result_path, data=results)
show_results(results=results)
os.remove(performance_file)
print("Performance results for dynamic batching stored in: {0}".format(result_path))
print("\n")
print(f"==== Analysis done ====")
print("\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model-name", type=str, required=True, help="Name of the model to test")
parser.add_argument(
"--input-data", type=str, required=False, default="random", help="Input data to perform profiling."
)
parser.add_argument(
"--input-shape",
action="append",
required=False,
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument("--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.")
parser.add_argument("--triton-instances", type=int, default=1, help="Number of Triton Server instances")
parser.add_argument(
"--number-of-model-instances", type=int, default=1, help="Number of models instances on Triton Server"
)
parser.add_argument("--result-path", type=str, required=True, help="Path where result file is going to be stored.")
parser.add_argument("--server-url", type=str, required=False, default="localhost", help="Url to Triton server")
parser.add_argument(
"--measurement-window", required=False, help="Time which perf_analyzer will wait for results", default=10000
)
parser.add_argument("--shared-memory", help="Use shared memory for communication with Triton", action="store_true",
default=False)
args = parser.parse_args()
warmup(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
triton_gpu_engine_count=args.number_of_model_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
online_performance(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
triton_gpu_engine_count=args.number_of_model_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
result_path=args.result_path,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/run_online_performance_test_on_triton.py |
#!/usr/bin/env python3
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tarfile
from pathlib import Path
from typing import Tuple, Dict, List
from PIL import Image
from tqdm import tqdm
DATASETS_DIR = os.environ.get("DATASETS_DIR", None)
IMAGENET_DIRNAME = "imagenet"
IMAGE_ARCHIVE_FILENAME = "ILSVRC2012_img_val.tar"
DEVKIT_ARCHIVE_FILENAME = "ILSVRC2012_devkit_t12.tar.gz"
LABELS_REL_PATH = "ILSVRC2012_devkit_t12/data/ILSVRC2012_validation_ground_truth.txt"
META_REL_PATH = "ILSVRC2012_devkit_t12/data/meta.mat"
TARGET_SIZE = (224, 224) # (width, height)
_RESIZE_MIN = 256 # resize preserving aspect ratio to where this is minimal size
def parse_meta_mat(metafile) -> Dict[int, str]:
import scipy.io
meta = scipy.io.loadmat(metafile, squeeze_me=True)["synsets"]
nums_children = list(zip(*meta))[4]
meta = [meta[idx] for idx, num_children in enumerate(nums_children) if num_children == 0]
idcs, wnids = list(zip(*meta))[:2]
idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}
return idx_to_wnid
def _process_image(image_file, target_size):
image = Image.open(image_file)
original_size = image.size
# scale image to size where minimal size is _RESIZE_MIN
scale_factor = max(_RESIZE_MIN / original_size[0], _RESIZE_MIN / original_size[1])
resize_to = int(original_size[0] * scale_factor), int(original_size[1] * scale_factor)
resized_image = image.resize(resize_to)
# central crop of image to target_size
left, upper = (resize_to[0] - target_size[0]) // 2, (resize_to[1] - target_size[1]) // 2
cropped_image = resized_image.crop((left, upper, left + target_size[0], upper + target_size[1]))
return cropped_image
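# Worked example for _process_image (illustrative): for a 500x375 input and target_size (224, 224),
# scale_factor = max(256 / 500, 256 / 375) ~= 0.6827, resize_to = (341, 256),
# and the central crop box is (58, 16, 282, 240), i.e. a 224x224 crop of the 256-min-side resize.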
def main():
import argparse
parser = argparse.ArgumentParser(description="short_description")
parser.add_argument(
"--dataset-dir",
help="Path to dataset directory where imagenet archives are stored and processed files will be saved.",
required=False,
default=DATASETS_DIR,
)
parser.add_argument(
"--target-size",
help="Size of target image. Format it as <width>,<height>.",
required=False,
default=",".join(map(str, TARGET_SIZE)),
)
args = parser.parse_args()
if args.dataset_dir is None:
raise ValueError(
"Please set $DATASETS_DIR env variable to point dataset dir with original dataset archives "
"and where processed files should be stored. Alternatively provide --dataset-dir CLI argument"
)
datasets_dir = Path(args.dataset_dir)
target_size = tuple(map(int, args.target_size.split(",")))
image_archive_path = datasets_dir / IMAGE_ARCHIVE_FILENAME
if not image_archive_path.exists():
raise RuntimeError(
f"There should be {IMAGE_ARCHIVE_FILENAME} file in {datasets_dir}."
f"You need to download the dataset from http://www.image-net.org/download."
)
devkit_archive_path = datasets_dir / DEVKIT_ARCHIVE_FILENAME
if not devkit_archive_path.exists():
raise RuntimeError(
f"There should be {DEVKIT_ARCHIVE_FILENAME} file in {datasets_dir}."
f"You need to download the dataset from http://www.image-net.org/download."
)
with tarfile.open(devkit_archive_path, mode="r") as devkit_archive_file:
labels_file = devkit_archive_file.extractfile(LABELS_REL_PATH)
labels = list(map(int, labels_file.readlines()))
# map validation labels (idxes from LABELS_REL_PATH) into WNID compatible with training set
meta_file = devkit_archive_file.extractfile(META_REL_PATH)
idx_to_wnid = parse_meta_mat(meta_file)
labels_wnid = [idx_to_wnid[idx] for idx in labels]
# remap WNID into index in sorted list of all WNIDs - this is how network outputs class
available_wnids = sorted(set(labels_wnid))
wnid_to_newidx = {wnid: new_cls for new_cls, wnid in enumerate(available_wnids)}
labels = [wnid_to_newidx[wnid] for wnid in labels_wnid]
output_dir = datasets_dir / IMAGENET_DIRNAME
with tarfile.open(image_archive_path, mode="r") as image_archive_file:
image_rel_paths = sorted(image_archive_file.getnames())
for cls, image_rel_path in tqdm(zip(labels, image_rel_paths), total=len(image_rel_paths)):
output_path = output_dir / str(cls) / image_rel_path
original_image_file = image_archive_file.extractfile(image_rel_path)
processed_image = _process_image(original_image_file, target_size)
output_path.parent.mkdir(parents=True, exist_ok=True)
processed_image.save(output_path.as_posix())
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/process_dataset.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
The `convert_model.py` script allows converting between model formats, with additional model optimizations
for faster inference.
It converts the model obtained from the results of the get_model function.
Currently supported input and output formats are:
- inputs
- `tf-estimator` - `get_model` function returning Tensorflow Estimator
- `tf-keras` - `get_model` function returning Tensorflow Keras Model
- `tf-savedmodel` - Tensorflow SavedModel binary
- `pyt` - `get_model` function returning PyTorch Module
- output
- `tf-savedmodel` - Tensorflow saved model
- `tf-trt` - TF-TRT saved model
- `ts-trace` - PyTorch traced ScriptModule
- `ts-script` - PyTorch scripted ScriptModule
- `onnx` - ONNX
- `trt` - TensorRT plan file
For tf-keras input you can use:
- --large-model flag - helps load models which exceed the maximum protobuf size of 2GB
- --tf-allow-growth flag - controls the GPU memory growth limiting feature
(https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth). By default it is disabled.
"""
import argparse
import logging
import os
from pathlib import Path
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1"
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import (
DATALOADER_FN_NAME,
BaseConverter,
BaseLoader,
BaseSaver,
Format,
Precision,
load_from_file,
)
from .deployment_toolkit.extensions import converters, loaders, savers
LOGGER = logging.getLogger("convert_model")
INPUT_MODEL_TYPES = [Format.TF_ESTIMATOR, Format.TF_KERAS, Format.TF_SAVEDMODEL, Format.PYT]
OUTPUT_MODEL_TYPES = [Format.TF_SAVEDMODEL, Format.TF_TRT, Format.ONNX, Format.TRT, Format.TS_TRACE, Format.TS_SCRIPT]
def _get_args():
parser = argparse.ArgumentParser(description="Script for conversion between model formats.", allow_abbrev=False)
parser.add_argument("--input-path", help="Path to input model file (python module or binary file)", required=True)
parser.add_argument(
"--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True
)
parser.add_argument("--output-path", help="Path to output model file", required=True)
parser.add_argument(
"--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True
)
parser.add_argument("--dataloader", help="Path to python module containing data loader")
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument(
"--ignore-unknown-parameters",
help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)",
action="store_true",
default=False,
)
args, unparsed_args = parser.parse_known_args()
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
converter_name = f"{args.input_type}--{args.output_type}"
Converter: BaseConverter = converters.get(converter_name)
if Converter is not None:
ArgParserGenerator(Converter).update_argparser(parser)
Saver: BaseSaver = savers.get(args.output_type)
ArgParserGenerator(Saver).update_argparser(parser)
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
if args.ignore_unknown_parameters:
args, unknown_args = parser.parse_known_args()
LOGGER.warning(f"Got additional args {unknown_args}")
else:
args = parser.parse_args()
return args
def main():
args = _get_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info(f"args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
requested_model_precision = Precision(args.precision)
dataloader_fn = None
# if conversion is required, temporary change model load precision to that required by converter
# it is for TensorRT converters which require fp32 models for all requested precisions
converter_name = f"{args.input_type}--{args.output_type}"
Converter: BaseConverter = converters.get(converter_name)
if Converter:
args.precision = Converter.required_source_model_precision(requested_model_precision).value
Loader: BaseLoader = loaders.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
model = loader.load(args.input_path)
LOGGER.info("inputs: %s", model.inputs)
LOGGER.info("outputs: %s", model.outputs)
if Converter: # if conversion is needed
# dataloader must match source model precision - so not recovering it yet
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
# recover precision to that requested by user
args.precision = requested_model_precision.value
if Converter:
converter = ArgParserGenerator(Converter).from_args(args)
model = converter.convert(model, dataloader_fn=dataloader_fn)
Saver: BaseSaver = savers.get(args.output_type)
saver = ArgParserGenerator(Saver).from_args(args)
saver.save(model, args.output_path)
return 0
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/convert_model.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/deployment_toolkit/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import importlib
import logging
import os
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
LOGGER = logging.getLogger(__name__)
DATALOADER_FN_NAME = "get_dataloader_fn"
GET_MODEL_FN_NAME = "get_model"
GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn"
GET_ARGPARSER_FN_NAME = "update_argparser"
class TensorSpec(NamedTuple):
name: str
dtype: str
shape: Tuple
class Parameter(Enum):
def __lt__(self, other: "Parameter") -> bool:
return self.value < other.value
class Accelerator(Parameter):
AMP = "amp"
CUDA = "cuda"
TRT = "trt"
class Precision(Parameter):
FP16 = "fp16"
FP32 = "fp32"
TF32 = "tf32" # Deprecated
class Format(Parameter):
TF_GRAPHDEF = "tf-graphdef"
TF_SAVEDMODEL = "tf-savedmodel"
TF_TRT = "tf-trt"
TF_ESTIMATOR = "tf-estimator"
TF_KERAS = "tf-keras"
ONNX = "onnx"
TRT = "trt"
TS_SCRIPT = "ts-script"
TS_TRACE = "ts-trace"
PYT = "pyt"
class Model(NamedTuple):
handle: object
precision: Optional[Precision]
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def load_from_file(file_path, label, target):
spec = importlib.util.spec_from_file_location(name=label, location=file_path)
my_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
return getattr(my_module, target, None)
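# Illustrative usage, mirroring how the CLI scripts call this helper (the path is an assumption):
#   get_dataloader_fn = load_from_file("triton/dataloader.py", label="dataloader", target=DATALOADER_FN_NAME)
# The call returns the requested attribute from the dynamically imported module, or None when it is missing.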
class BaseLoader(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
"""
Loads and process model from file based on given set of args
"""
pass
class BaseSaver(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def save(self, model: Model, model_path: Union[str, Path]) -> None:
"""
Save model to file
"""
pass
class BaseRunner(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def init_inference(self, model: Model):
raise NotImplementedError
class BaseRunnerSession(abc.ABC):
def __init__(self, model: Model):
self._model = model
@abc.abstractmethod
def __enter__(self):
raise NotImplementedError()
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError()
@abc.abstractmethod
def __call__(self, x: Dict[str, object]):
raise NotImplementedError()
def _set_env_variables(self) -> Dict[str, object]:
"""this method not remove values; fix it if needed"""
to_set = {}
old_values = {k: os.environ.pop(k, None) for k in to_set}
os.environ.update(to_set)
return old_values
def _recover_env_variables(self, old_envs: Dict[str, object]):
for name, value in old_envs.items():
if value is None:
del os.environ[name]
else:
os.environ[name] = str(value)
class BaseConverter(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def convert(self, model: Model, dataloader_fn) -> Model:
raise NotImplementedError()
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
return requested_model_precision
class BaseMetricsCalculator(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
"""
Calculates error/accuracy metrics
Args:
ids: List of ids identifying each sample in the batch
y_pred: model output as dict where key is output name and value is output value
x: model input as dict where key is input name and value is input value
y_real: input ground truth as dict where key is output name and value is output value
Returns:
dictionary where key is metric name and value is its value
"""
pass
class ShapeSpec(NamedTuple):
min: Tuple
opt: Tuple
max: Tuple
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/deployment_toolkit/core.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Dict, Iterable
import numpy as np
MB2B = 2 ** 20
B2MB = 1 / MB2B
FLUSH_THRESHOLD_B = 256 * MB2B
def pad_except_batch_axis(data: np.ndarray, target_shape_with_batch_axis: Iterable[int]):
assert all(
[current_size <= target_size for target_size, current_size in zip(target_shape_with_batch_axis, data.shape)]
), "target_shape should have equal or greater all dimensions comparing to data.shape"
padding = [(0, 0)] + [ # (0, 0) - do not pad on batch_axis (with index 0)
(0, target_size - current_size)
for target_size, current_size in zip(target_shape_with_batch_axis[1:], data.shape[1:])
]
return np.pad(data, padding, "constant", constant_values=np.nan)
class NpzWriter:
"""
Dumps dicts of numpy arrays into npz files.
It can/shall be used as a context manager:
```
with NpzWriter('mydir') as writer:
writer.write(outputs={'classes': np.zeros(8), 'probs': np.zeros((8, 4))},
labels={'classes': np.zeros(8)},
inputs={'input': np.zeros((8, 240, 240, 3))})
```
## Variable size data
Only a dynamic last axis is handled. Data is padded with the np.nan value.
Also, each generated file may have a different size of the dynamic axis.
"""
def __init__(self, output_dir, compress=False):
self._output_dir = Path(output_dir)
self._items_cache: Dict[str, Dict[str, np.ndarray]] = {}
self._items_counters: Dict[str, int] = {}
self._flush_threshold_b = FLUSH_THRESHOLD_B
self._compress = compress
@property
def cache_size(self):
return {name: sum([a.nbytes for a in data.values()]) for name, data in self._items_cache.items()}
def _append_to_cache(self, prefix, data):
if data is None:
return
if not isinstance(data, dict):
raise ValueError(f"{prefix} data to store shall be dict")
cached_data = self._items_cache.get(prefix, {})
for name, value in data.items():
assert isinstance(
value, (list, np.ndarray)
), f"Values shall be lists or np.ndarrays; current type {type(value)}"
if not isinstance(value, np.ndarray):
value = np.array(value)
assert value.dtype.kind in ["S", "U"] or not np.any(
np.isnan(value)
), f"Values with np.nan is not supported; {name}={value}"
cached_value = cached_data.get(name, None)
if cached_value is not None:
target_shape = np.max([cached_value.shape, value.shape], axis=0)
cached_value = pad_except_batch_axis(cached_value, target_shape)
value = pad_except_batch_axis(value, target_shape)
value = np.concatenate((cached_value, value))
cached_data[name] = value
self._items_cache[prefix] = cached_data
def write(self, **kwargs):
"""
Writes named list of dictionaries of np.ndarrays.
Finally keyword names will be later prefixes of npz files where those dictionaries will be stored.
ex. writer.write(inputs={'input': np.zeros((2, 10))},
outputs={'classes': np.zeros((2,)), 'probabilities': np.zeros((2, 32))},
labels={'classes': np.zeros((2,))})
Args:
**kwargs: named list of dictionaries of np.ndarrays to store
"""
for prefix, data in kwargs.items():
self._append_to_cache(prefix, data)
biggest_item_size = max(self.cache_size.values())
if biggest_item_size > self._flush_threshold_b:
self.flush()
def flush(self):
for prefix, data in self._items_cache.items():
self._dump(prefix, data)
self._items_cache = {}
def _dump(self, prefix, data):
idx = self._items_counters.setdefault(prefix, 0)
filename = f"{prefix}-{idx:012d}.npz"
output_path = self._output_dir / filename
if self._compress:
np.savez_compressed(output_path, **data)
else:
np.savez(output_path, **data)
nitems = len(list(data.values())[0])
msg_for_labels = (
"If these are correct shapes - consider moving loading of them into metrics.py."
if prefix == "labels"
else ""
)
shapes = {name: value.shape if isinstance(value, np.ndarray) else (len(value),) for name, value in data.items()}
assert all(len(v) == nitems for v in data.values()), (
f'All items in "{prefix}" shall have same size on 0 axis equal to batch size. {msg_for_labels}'
f'{", ".join(f"{name}: {shape}" for name, shape in shapes.items())}'
)
self._items_counters[prefix] += nitems
def __enter__(self):
if self._output_dir.exists() and len(list(self._output_dir.iterdir())):
raise ValueError(f"{self._output_dir.as_posix()} is not empty")
self._output_dir.mkdir(parents=True, exist_ok=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush()
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/deployment_toolkit/dump.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import os
import re
from pathlib import Path
from typing import List
LOGGER = logging.getLogger(__name__)
class ExtensionManager:
def __init__(self, name: str):
self._name = name
self._registry = {}
def register_extension(self, extension: str, clazz):
already_registered_class = self._registry.get(extension, None)
if already_registered_class and already_registered_class.__module__ != clazz.__module__:
raise RuntimeError(
f"Conflicting extension {self._name}/{extension}; "
f"{already_registered_class.__module__}.{already_registered_class.__name} "
f"and "
f"{clazz.__module__}.{clazz.__name__}"
)
elif already_registered_class is None:
clazz_full_name = f"{clazz.__module__}.{clazz.__name__}" if clazz is not None else "None"
LOGGER.debug(f"Registering extension {self._name}/{extension}: {clazz_full_name}")
self._registry[extension] = clazz
def get(self, extension):
if extension not in self._registry:
raise RuntimeError(f"Missing extension {self._name}/{extension}")
return self._registry[extension]
@property
def supported_extensions(self):
return list(self._registry)
@staticmethod
def scan_for_extensions(extension_dirs: List[Path]):
register_pattern = r".*\.register_extension\(.*"
for extension_dir in extension_dirs:
for python_path in extension_dir.rglob("*.py"):
if not python_path.is_file():
continue
payload = python_path.read_text()
if re.findall(register_pattern, payload):
import_path = python_path.relative_to(toolkit_root_dir.parent)
package = import_path.parent.as_posix().replace(os.sep, ".")
package_with_module = f"{package}.{import_path.stem}"
spec = importlib.util.spec_from_file_location(name=package_with_module, location=python_path)
my_module = importlib.util.module_from_spec(spec)
my_module.__package__ = package
try:
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
except ModuleNotFoundError as e:
LOGGER.error(
f"Could not load extensions from {import_path} due to missing python packages; {e}"
)
runners = ExtensionManager("runners")
loaders = ExtensionManager("loaders")
savers = ExtensionManager("savers")
converters = ExtensionManager("converters")
toolkit_root_dir = (Path(__file__).parent / "..").resolve()
ExtensionManager.scan_for_extensions([toolkit_root_dir])
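# Illustrative registration pattern picked up by the scan above (taken from the library modules), e.g.
#   converters.register_extension(f"{Format.ONNX.value}--{Format.TRT.value}", Onnx2TRTConverter)
# and later retrieved by the CLI scripts with e.g.
#   Converter: BaseConverter = converters.get(f"{args.input_type}--{args.output_type}")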
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/deployment_toolkit/extensions.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from typing import List, Optional
def warmup(
model_name: str,
batch_sizes: List[int],
triton_gpu_engine_count: int = 1,
triton_instances: int = 1,
profiling_data: str = "random",
input_shapes: Optional[List[str]] = None,
server_url: str = "localhost",
measurement_window: int = 10000,
shared_memory: bool = False
):
print("\n")
print(f"==== Warmup start ====")
print("\n")
input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
measurement_window = 6 * measurement_window
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * triton_instances * triton_gpu_engine_count
max_concurrency = min(256, max_total_requests)
batch_size = max(1, max_total_requests // 256)
step = max(1, max_concurrency // 2)
min_concurrency = step
exec_args = f"""-m {model_name} \
-x 1 \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
--concurrency-range {min_concurrency}:{max_concurrency}:{step} \
--input-data {profiling_data} {input_shapes}"""
if shared_memory:
exec_args += " --shared-memory=cuda"
result = os.system(f"perf_client {exec_args}")
if result != 0:
print(f"Failed running performance tests. Perf client failed with exit code {result}")
sys.exit(1)
print("\n")
print(f"==== Warmup done ====")
print("\n")
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/deployment_toolkit/warmup.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import logging
from typing import Any, Callable, Dict, Optional, Union
from .core import GET_ARGPARSER_FN_NAME, load_from_file
LOGGER = logging.getLogger(__name__)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def filter_fn_args(args: Union[dict, argparse.Namespace], fn: Callable) -> dict:
signature = inspect.signature(fn)
parameters_names = list(signature.parameters)
if isinstance(args, argparse.Namespace):
args = vars(args)
args = {k: v for k, v in args.items() if k in parameters_names}
return args
def add_args_for_fn_signature(parser, fn) -> argparse.ArgumentParser:
parser.conflict_handler = "resolve"
signature = inspect.signature(fn)
for parameter in signature.parameters.values():
if parameter.name in ["self", "args", "kwargs"]:
continue
argument_kwargs = {}
if parameter.annotation != inspect.Parameter.empty:
if parameter.annotation == bool:
argument_kwargs["type"] = str2bool
argument_kwargs["choices"] = [0, 1]
elif isinstance(parameter.annotation, type(Optional[Any])):
types = [type_ for type_ in parameter.annotation.__args__ if not isinstance(None, type_)]
if len(types) != 1:
raise RuntimeError(
f"Could not prepare argument parser for {parameter.name}: {parameter.annotation} in {fn}"
)
argument_kwargs["type"] = types[0]
else:
argument_kwargs["type"] = parameter.annotation
if parameter.default != inspect.Parameter.empty:
if parameter.annotation == bool:
argument_kwargs["default"] = str2bool(parameter.default)
else:
argument_kwargs["default"] = parameter.default
else:
argument_kwargs["required"] = True
name = parameter.name.replace("_", "-")
LOGGER.debug(f"Adding argument {name} with {argument_kwargs}")
parser.add_argument(f"--{name}", **argument_kwargs)
return parser
class ArgParserGenerator:
def __init__(self, cls_or_fn, module_path: Optional[str] = None):
self._cls_or_fn = cls_or_fn
self._handle = cls_or_fn if inspect.isfunction(cls_or_fn) else getattr(cls_or_fn, "__init__")
input_is_python_file = module_path and module_path.endswith(".py")
self._input_path = module_path if input_is_python_file else None
self._required_fn_name_for_signature_parsing = getattr(
cls_or_fn, "required_fn_name_for_signature_parsing", None
)
def update_argparser(self, parser):
name = self._handle.__name__
group_parser = parser.add_argument_group(name)
add_args_for_fn_signature(group_parser, fn=self._handle)
self._update_argparser(group_parser)
def get_args(self, args: argparse.Namespace):
filtered_args = filter_fn_args(args, fn=self._handle)
tmp_parser = argparse.ArgumentParser(allow_abbrev=False)
self._update_argparser(tmp_parser)
custom_names = [
p.dest.replace("-", "_") for p in tmp_parser._actions if not isinstance(p, argparse._HelpAction)
]
custom_params = {n: getattr(args, n) for n in custom_names}
filtered_args = {**filtered_args, **custom_params}
return filtered_args
def from_args(self, args: Union[argparse.Namespace, Dict]):
args = self.get_args(args)
LOGGER.info(f"Initializing {self._cls_or_fn.__name__}({args})")
return self._cls_or_fn(**args)
def _update_argparser(self, parser):
label = "argparser_update"
if self._input_path:
update_argparser_handle = load_from_file(self._input_path, label=label, target=GET_ARGPARSER_FN_NAME)
if update_argparser_handle:
update_argparser_handle(parser)
elif self._required_fn_name_for_signature_parsing:
fn_handle = load_from_file(
self._input_path, label=label, target=self._required_fn_name_for_signature_parsing
)
if fn_handle:
add_args_for_fn_signature(parser, fn_handle)
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/deployment_toolkit/args.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import re
from typing import Dict, List
from natsort import natsorted
from tabulate import tabulate
def sort_results(results: List):
results = natsorted(results, key=lambda item: [item[key] for key in item.keys()])
return results
def save_results(filename: str, data: List, formatted: bool = False):
data = format_data(data=data) if formatted else data
with open(filename, "a") as csvfile:
fieldnames = data[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
def format_data(data: List[Dict]) -> List[Dict]:
formatted_data = list()
for item in data:
formatted_item = format_keys(data=item)
formatted_data.append(formatted_item)
return formatted_data
def format_keys(data: Dict) -> Dict:
keys = {format_key(key=key): value for key, value in data.items()}
return keys
def format_key(key: str) -> str:
key = " ".join([k.capitalize() for k in re.split("_| ", key)])
return key
def show_results(results: List[Dict]):
headers = list(results[0].keys())
summary = map(lambda x: list(map(lambda item: item[1], x.items())), results)
print(tabulate(summary, headers=headers))
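# Illustrative end-to-end use of the helpers above (not part of the original file; the file name is an assumption):
if __name__ == "__main__":
    rows = [{"batch_size": "2", "avg latency": "10"}, {"batch_size": "1", "avg latency": "7"}]
    rows = sort_results(results=rows)
    show_results(results=rows)  # prints a small table via tabulate
    save_results(filename="results.csv", data=rows, formatted=True)  # header becomes "Batch Size", "Avg Latency"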
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/deployment_toolkit/report.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, Iterable, Optional
# pytype: disable=import-error
import onnx
import tensorrt as trt
from ..core import BaseConverter, Format, Model, Precision, ShapeSpec
from ..extensions import converters
from .utils import get_input_shapes
# pytype: enable=import-error
LOGGER = logging.getLogger(__name__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
class Onnx2TRTConverter(BaseConverter):
def __init__(self, *, max_batch_size: int, max_workspace_size: int, precision: str):
self._max_batch_size = max_batch_size
self._max_workspace_size = max_workspace_size
self._precision = Precision(precision)
def convert(self, model: Model, dataloader_fn) -> Model:
input_shapes = get_input_shapes(dataloader_fn(), self._max_batch_size)
cuda_engine = onnx2trt(
model.handle,
shapes=input_shapes,
max_workspace_size=self._max_workspace_size,
max_batch_size=self._max_batch_size,
model_precision=self._precision.value,
)
return model._replace(handle=cuda_engine)
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
# TensorRT requires source models to be in FP32 precision
return Precision.FP32
def onnx2trt(
onnx_model: onnx.ModelProto,
*,
shapes: Dict[str, ShapeSpec],
max_workspace_size: int,
max_batch_size: int,
model_precision: str,
) -> "trt.ICudaEngine":
"""
Converts onnx model to TensorRT ICudaEngine
Args:
onnx_model: onnx.Model to convert
shapes: dictionary containing min shape, max shape, opt shape for each input name
max_workspace_size: The maximum GPU temporary memory which the CudaEngine can use at execution time.
max_batch_size: The maximum batch size which can be used at execution time,
and also the batch size for which the CudaEngine will be optimized.
model_precision: precision of kernels (possible values: fp16, fp32)
Returns: TensorRT ICudaEngine
"""
# Whether or not 16-bit kernels are permitted.
# During :class:`ICudaEngine` build fp16 kernels will also be tried when this mode is enabled.
fp16_mode = "16" in model_precision
builder = trt.Builder(TRT_LOGGER)
builder.fp16_mode = fp16_mode
builder.max_batch_size = max_batch_size
builder.max_workspace_size = max_workspace_size
# In TensorRT 7.0, the ONNX parser only supports full-dimensions mode,
# meaning that your network definition must be created with the explicitBatch flag set.
# For more information, see
# https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#work_dynamic_shapes
flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network = builder.create_network(flags)
with trt.OnnxParser(network, TRT_LOGGER) as parser:
# onnx model parsing
if not parser.parse(onnx_model.SerializeToString()):
for i in range(parser.num_errors):
LOGGER.error(f"OnnxParser error {i}/{parser.num_errors}: {parser.get_error(i)}")
raise RuntimeError("Error during parsing ONNX model (see logs for details)")
        # The OnnxParser produces an FP32 TensorRT engine even for an FP16 network,
        # so force FP16 here on the first input/output
if fp16_mode:
network.get_input(0).dtype = trt.DataType.HALF
network.get_output(0).dtype = trt.DataType.HALF
# optimization
config = builder.create_builder_config()
config.flags |= bool(fp16_mode) << int(trt.BuilderFlag.FP16)
config.max_workspace_size = max_workspace_size
profile = builder.create_optimization_profile()
for name, spec in shapes.items():
profile.set_shape(name, **spec._asdict())
config.add_optimization_profile(profile)
engine = builder.build_engine(network, config=config)
return engine
converters.register_extension(f"{Format.ONNX.value}--{Format.TRT.value}", Onnx2TRTConverter)
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/deployment_toolkit/library/onnx2trt_conv.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
# pytype: disable=import-error
import onnx
import onnx.optimizer
import onnx.shape_inference
import onnxruntime
from google.protobuf import text_format
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
# pytype: enable=import-error
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
from .utils import infer_precision
LOGGER = logging.getLogger(__name__)
def _value_info2tensor_spec(value_info: onnx.ValueInfoProto):
onnx_data_type_map = {"float": "float32", "double": "float64"}
elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower()
dtype = onnx_data_type_map.get(elem_type_name, elem_type_name)
def _get_dim(dim):
which = dim.WhichOneof("value")
if which is not None: # which is None when dim is None
dim = getattr(dim, which)
return None if isinstance(dim, (str, bytes)) else dim
shape = value_info.type.tensor_type.shape
shape = tuple([_get_dim(d) for d in shape.dim])
return TensorSpec(value_info.name, dtype=dtype, shape=shape)
def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]:
import networkx as nx
# build directed graph
nx_graph = nx.DiGraph()
def _get_dtype(vi):
t = vi.type
if hasattr(t, "tensor_type"):
type_id = t.tensor_type.elem_type
else:
raise NotImplementedError("Not implemented yet")
return TENSOR_TYPE_TO_NP_TYPE[type_id]
node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info}
node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output}
node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input}
for node in onnx_graph.node:
node_dtype = node_output2type.get("+".join(node.output), None)
nx_graph.add_node(
node.name,
op=node.op_type,
attr={a.name: a for a in node.attribute},
dtype=node_dtype,
)
for input_name in node.input:
prev_node = node_outputs2node.get(input_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, node.name)
for input_node in onnx_graph.input:
input_name = input_node.name
nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node))
next_node = node_inputs2node.get(input_name, None)
if next_node:
nx_graph.add_edge(input_name, next_node.name)
for output in onnx_graph.output:
output_name = output.name
nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output))
prev_node = node_outputs2node.get(output_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, output_name)
else:
LOGGER.warning(f"Could not find previous node for {output_name}")
input_names = [n.name for n in onnx_graph.input]
output_names = [n.name for n in onnx_graph.output]
most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None))
if most_common_dtype is not None:
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype]
else:
precision = None
return precision
class OnnxLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
model = onnx.load(model_path)
onnx.checker.check_model(model)
onnx.helper.strip_doc_string(model)
model = onnx.shape_inference.infer_shapes(model)
        # TODO: modification of the ONNX model inputs/outputs probably causes an error in the optimizer
# from onnx.utils import polish_model
# model = polish_model(model) # run checker, docs strip, optimizer and shape inference
inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input}
outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output}
precision = _infer_graph_precision(model.graph)
return Model(model, precision, inputs, outputs)
class OnnxSaver(BaseSaver):
def __init__(self, as_text: bool = False):
self._as_text = as_text
def save(self, model: Model, model_path: Union[str, Path]) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
onnx_model: onnx.ModelProto = model.handle
if self._as_text:
with model_path.open("w") as f:
f.write(text_format.MessageToString(onnx_model))
else:
with model_path.open("wb") as f:
f.write(onnx_model.SerializeToString())
"""
ExecutionProviders on onnxruntime 1.4.0
['TensorrtExecutionProvider',
'CUDAExecutionProvider',
'MIGraphXExecutionProvider',
'NGRAPHExecutionProvider',
'OpenVINOExecutionProvider',
'DnnlExecutionProvider',
'NupharExecutionProvider',
'VitisAIExecutionProvider',
'ArmNNExecutionProvider',
'ACLExecutionProvider',
'CPUExecutionProvider']
"""
def _check_providers(providers):
providers = providers or []
if not isinstance(providers, (list, tuple)):
providers = [providers]
available_providers = onnxruntime.get_available_providers()
unavailable = set(providers) - set(available_providers)
if unavailable:
raise RuntimeError(f"Unavailable providers {unavailable}")
return providers
class OnnxRunner(BaseRunner):
def __init__(self, verbose_runtime_logs: bool = False):
self._providers = None
self._verbose_runtime_logs = verbose_runtime_logs
def init_inference(self, model: Model):
assert isinstance(model.handle, onnx.ModelProto)
return OnnxRunnerSession(
model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs
)
class OnnxRunnerSession(BaseRunnerSession):
def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False):
super().__init__(model)
self._input_names = None
self._output_names = None
self._session = None
self._providers = providers
self._verbose_runtime_logs = verbose_runtime_logs
self._old_env_values = {}
def __enter__(self):
self._old_env_values = self._set_env_variables()
sess_options = onnxruntime.SessionOptions() # default session options
if self._verbose_runtime_logs:
sess_options.log_severity_level = 0
sess_options.log_verbosity_level = 1
LOGGER.info(
f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}"
)
self._input_names = list(self._model.inputs)
self._output_names = list(self._model.outputs)
model_payload = self._model.handle.SerializeToString()
self._session = onnxruntime.InferenceSession(
model_payload, providers=self._providers, sess_options=sess_options
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._input_names = None
self._output_names = None
self._session = None
self._recover_env_variables(self._old_env_values)
def __call__(self, x: Dict[str, object]):
feed_dict = {k: x[k] for k in self._input_names}
y_pred = self._session.run(self._output_names, feed_dict)
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
loaders.register_extension(Format.ONNX.value, OnnxLoader)
runners.register_extension(Format.ONNX.value, OnnxRunner)
savers.register_extension(Format.ONNX.value, OnnxSaver)
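# Usage sketch (illustrative only; "model.onnx" and the `batch` dict of numpy arrays are hypothetical):
#
#   model = OnnxLoader().load("model.onnx")
#   with OnnxRunner().init_inference(model) as session:
#       y_pred = session({name: batch[name] for name in model.inputs})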
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/deployment_toolkit/library/onnx.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/deployment_toolkit/library/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from typing import Callable, Dict, List
import networkx as nx
from ..core import ShapeSpec
def infer_precision(
nx_graph: nx.Graph,
input_names: List[str],
output_names: List[str],
get_node_dtype_fn: Callable,
):
node_dtypes = [nx_graph.nodes[node_name].get("dtype", None) for node_name in nx_graph.nodes]
node_dtypes = [dt for dt in node_dtypes if dt is None or dt.kind not in ["i", "b"]]
dtypes_counter = Counter(node_dtypes)
return dtypes_counter.most_common()[0][0]
def get_shapes_with_dynamic_axes(dataloader, batch_size_dim=0):
def _set_dynamic_shapes(t, shapes):
for k, v in t.items():
shape = list(v.shape)
for dim, s in enumerate(shape):
if shapes[k][dim] != -1 and shapes[k][dim] != s:
shapes[k][dim] = -1
## get all shapes from input and output tensors
input_shapes = {}
output_shapes = {}
for batch in dataloader:
_, x, y = batch
for k, v in x.items():
input_shapes[k] = list(v.shape)
for k, v in y.items():
output_shapes[k] = list(v.shape)
break
# based on max <max_num_iters> iterations, check which
# dimensions differ to determine dynamic_axes
max_num_iters = 100
for idx, batch in enumerate(dataloader):
if idx >= max_num_iters:
break
_, x, y = batch
_set_dynamic_shapes(x, input_shapes)
_set_dynamic_shapes(y, output_shapes)
return input_shapes, output_shapes
def get_dynamic_axes(dataloader, batch_size_dim=0):
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim)
all_shapes = {**input_shapes, **output_shapes}
dynamic_axes = {}
for k, shape in all_shapes.items():
for idx, s in enumerate(shape):
if s == -1:
                # record every dynamic axis found for this tensor (not only the last one)
                dynamic_axes.setdefault(k, {})[idx] = k + "_" + str(idx)
for k, v in all_shapes.items():
if k in dynamic_axes:
dynamic_axes[k].update({batch_size_dim: "batch_size_" + str(batch_size_dim)})
else:
dynamic_axes[k] = {batch_size_dim: "batch_size_" + str(batch_size_dim)}
return dynamic_axes
def get_input_shapes(dataloader, max_batch_size=1) -> Dict[str, ShapeSpec]:
def init_counters_and_shapes(x, counters, min_shapes, max_shapes):
for k, v in x.items():
counters[k] = Counter()
min_shapes[k] = [float("inf")] * v.ndim
max_shapes[k] = [float("-inf")] * v.ndim
counters = {}
min_shapes: Dict[str, tuple] = {}
max_shapes: Dict[str, tuple] = {}
for idx, batch in enumerate(dataloader):
ids, x, y = batch
if idx == 0:
init_counters_and_shapes(x, counters, min_shapes, max_shapes)
for k, v in x.items():
shape = v.shape
counters[k][shape] += 1
min_shapes[k] = tuple([min(a, b) for a, b in zip(min_shapes[k], shape)])
max_shapes[k] = tuple([max(a, b) for a, b in zip(max_shapes[k], shape)])
opt_shapes: Dict[str, tuple] = {}
for k, v in counters.items():
opt_shapes[k] = v.most_common(1)[0][0]
shapes = {}
for k in opt_shapes.keys(): # same keys in min_shapes and max_shapes
shapes[k] = ShapeSpec(
min=(1,) + min_shapes[k][1:],
max=(max_batch_size,) + max_shapes[k][1:],
opt=(max_batch_size,) + opt_shapes[k][1:],
)
return shapes
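# Usage sketch (illustrative only): derive min/opt/max input shapes from a toy
# in-memory "dataloader" that yields (ids, x, y) batches of numpy arrays.
if __name__ == "__main__":
    import numpy as np

    def _toy_dataloader():
        for batch_size in [2, 4, 4]:
            ids = list(range(batch_size))
            x = {"INPUT__0": np.zeros((batch_size, 3, 224, 224), dtype=np.float32)}
            y = {"OUTPUT__0": np.zeros((batch_size, 1000), dtype=np.float32)}
            yield ids, x, y

    # expected: {'INPUT__0': ShapeSpec(min=(1, 3, 224, 224), max=(8, 3, 224, 224), opt=(8, 3, 224, 224))}
    print(get_input_shapes(_toy_dataloader(), max_batch_size=8))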
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/deployment_toolkit/library/utils.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from pathlib import Path
from typing import Dict, NamedTuple, Optional, Union
import numpy as np
# pytype: disable=import-error
try:
import pycuda.autoinit
import pycuda.driver as cuda
except (ImportError, Exception) as e:
logging.getLogger(__name__).warning(f"Problems with importing pycuda package; {e}")
# pytype: enable=import-error
import tensorrt as trt # pytype: disable=import-error
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
LOGGER = logging.getLogger(__name__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
"""
documentation:
https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/index.html
https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#python_samples_section
"""
class TensorRTLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
model_path = Path(model_path)
LOGGER.debug(f"Loading TensorRT engine from {model_path}")
with model_path.open("rb") as fh, trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(fh.read())
if engine is None:
raise RuntimeError(f"Could not load ICudaEngine from {model_path}")
inputs = {}
outputs = {}
for binding_idx in range(engine.num_bindings):
name = engine.get_binding_name(binding_idx)
is_input = engine.binding_is_input(binding_idx)
dtype = engine.get_binding_dtype(binding_idx)
shape = engine.get_binding_shape(binding_idx)
if is_input:
inputs[name] = TensorSpec(name, dtype, shape)
else:
outputs[name] = TensorSpec(name, dtype, shape)
return Model(engine, None, inputs, outputs)
class TensorRTSaver(BaseSaver):
def __init__(self):
pass
def save(self, model: Model, model_path: Union[str, Path]) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving TensorRT engine to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
engine: "trt.ICudaEngine" = model.handle
with model_path.open("wb") as fh:
fh.write(engine.serialize())
class TRTBuffers(NamedTuple):
x_host: Optional[Dict[str, object]]
x_dev: Dict[str, object]
y_pred_host: Dict[str, object]
y_pred_dev: Dict[str, object]
class TensorRTRunner(BaseRunner):
def __init__(self):
pass
def init_inference(self, model: Model):
return TensorRTRunnerSession(model=model)
class TensorRTRunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, trt.ICudaEngine)
self._model = model
self._has_dynamic_shapes = None
self._context = None
self._engine: trt.ICudaEngine = self._model.handle
self._cuda_context = pycuda.autoinit.context
self._input_names = None
self._output_names = None
self._buffers = None
def __enter__(self):
self._context = self._engine.create_execution_context()
self._context.__enter__()
self._input_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if self._engine.binding_is_input(idx)
]
self._output_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if not self._engine.binding_is_input(idx)
]
# all_binding_shapes_specified is True for models without dynamic shapes
# so initially this variable is False for models with dynamic shapes
self._has_dynamic_shapes = not self._context.all_binding_shapes_specified
return self
def __exit__(self, exc_type, exc_value, traceback):
self._context.__exit__(exc_type, exc_value, traceback)
self._input_names = None
self._output_names = None
# TODO: are cuda buffers dealloc automatically?
self._buffers = None
def __call__(self, x):
buffers = self._prepare_buffers_if_needed(x)
bindings = self._update_bindings(buffers)
for name in self._input_names:
cuda.memcpy_htod(buffers.x_dev[name], buffers.x_host[name])
self._cuda_context.push()
self._context.execute_v2(bindings=bindings)
self._cuda_context.pop()
for name in self._output_names:
cuda.memcpy_dtoh(buffers.y_pred_host[name], buffers.y_pred_dev[name])
return buffers.y_pred_host
def _update_bindings(self, buffers: TRTBuffers):
bindings = [None] * self._engine.num_bindings
for name in buffers.y_pred_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.y_pred_dev[name]
for name in buffers.x_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.x_dev[name]
return bindings
def _set_dynamic_input_shapes(self, x_host):
def _is_shape_dynamic(input_shape):
return any([dim is None or dim == -1 for dim in input_shape])
for name in self._input_names:
bindings_idx = self._engine[name]
data_shape = x_host[name].shape # pytype: disable=attribute-error
if self._engine.is_shape_binding(bindings_idx):
input_shape = self._context.get_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_shape_input(bindings_idx, data_shape)
else:
input_shape = self._engine.get_binding_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_binding_shape(bindings_idx, data_shape)
assert self._context.all_binding_shapes_specified and self._context.all_shape_inputs_specified
def _prepare_buffers_if_needed(self, x_host: Dict[str, object]):
# pytype: disable=attribute-error
new_batch_size = list(x_host.values())[0].shape[0]
current_batch_size = list(self._buffers.y_pred_host.values())[0].shape[0] if self._buffers else 0
# pytype: enable=attribute-error
if self._has_dynamic_shapes or new_batch_size != current_batch_size:
# TODO: are CUDA buffers dealloc automatically?
self._set_dynamic_input_shapes(x_host)
y_pred_host = {}
for name in self._output_names:
shape = self._context.get_binding_shape(self._engine[name])
y_pred_host[name] = np.zeros(shape, dtype=trt.nptype(self._model.outputs[name].dtype))
y_pred_dev = {name: cuda.mem_alloc(data.nbytes) for name, data in y_pred_host.items()}
x_dev = {
name: cuda.mem_alloc(host_input.nbytes)
for name, host_input in x_host.items()
if name in self._input_names # pytype: disable=attribute-error
}
self._buffers = TRTBuffers(None, x_dev, y_pred_host, y_pred_dev)
return self._buffers._replace(x_host=x_host)
if "pycuda.driver" in sys.modules:
loaders.register_extension(Format.TRT.value, TensorRTLoader)
runners.register_extension(Format.TRT.value, TensorRTRunner)
savers.register_extension(Format.TRT.value, TensorRTSaver)
else:
LOGGER.warning("Do not register TensorRT extension due problems with importing pycuda.driver package.")
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/deployment_toolkit/library/tensorrt.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from collections import Counter
from pathlib import Path
from typing import Dict, Iterable, NamedTuple, Optional, Union
import torch # pytype: disable=import-error
import yaml
from ..core import (
GET_MODEL_FN_NAME,
BaseConverter,
BaseLoader,
BaseRunner,
BaseRunnerSession,
BaseSaver,
Format,
Model,
Precision,
TensorSpec,
load_from_file,
)
from ..extensions import converters, loaders, runners, savers
from .utils import get_dynamic_axes, get_input_shapes, get_shapes_with_dynamic_axes
LOGGER = logging.getLogger(__name__)
class InputOutputSpec(NamedTuple):
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def get_sample_input(dataloader, device):
for batch in dataloader:
_, x, _ = batch
break
if isinstance(x, dict):
sample_input = list(x.values())
elif isinstance(x, list):
sample_input = x
else:
raise TypeError("The first element (x) of batch returned by dataloader must be a list or a dict")
for idx, s in enumerate(sample_input):
sample_input[idx] = torch.from_numpy(s).to(device)
return tuple(sample_input)
def get_model_device(torch_model):
if next(torch_model.parameters()).is_cuda:
return "cuda"
else:
return "cpu"
def infer_model_precision(model):
counter = Counter()
for param in model.parameters():
counter[param.dtype] += 1
if counter[torch.float16] > 0:
return Precision.FP16
else:
return Precision.FP32
def _get_tensor_dtypes(dataloader, precision):
def _get_dtypes(t):
dtypes = {}
for k, v in t.items():
dtype = str(v.dtype)
if dtype == "float64":
dtype = "float32"
if precision == Precision.FP16 and dtype == "float32":
dtype = "float16"
dtypes[k] = dtype
return dtypes
input_dtypes = {}
output_dtypes = {}
for batch in dataloader:
_, x, y = batch
input_dtypes = _get_dtypes(x)
output_dtypes = _get_dtypes(y)
break
return input_dtypes, output_dtypes
### TODO assumption: floating point input
### type has same precision as the model
def _get_io_spec(model, dataloader_fn):
precision = model.precision
dataloader = dataloader_fn()
input_dtypes, output_dtypes = _get_tensor_dtypes(dataloader, precision)
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader)
inputs = {
name: TensorSpec(name=name, dtype=input_dtypes[name], shape=tuple(input_shapes[name])) for name in model.inputs
}
outputs = {
name: TensorSpec(name=name, dtype=output_dtypes[name], shape=tuple(output_shapes[name]))
for name in model.outputs
}
return InputOutputSpec(inputs, outputs)
class PyTorchModelLoader(BaseLoader):
required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME
def __init__(self, **kwargs):
self._model_args = kwargs
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME)
model, tensor_infos = get_model(**self._model_args)
io_spec = InputOutputSpec(tensor_infos["inputs"], tensor_infos["outputs"])
precision = infer_model_precision(model)
return Model(handle=model, precision=precision, inputs=io_spec.inputs, outputs=io_spec.outputs)
class TorchScriptLoader(BaseLoader):
def __init__(self, tensor_names_path: str = None, **kwargs):
self._model_args = kwargs
self._io_spec = None
if tensor_names_path is not None:
with Path(tensor_names_path).open("r") as fh:
tensor_infos = yaml.load(fh, Loader=yaml.SafeLoader)
self._io_spec = InputOutputSpec(tensor_infos["inputs"], tensor_infos["outputs"])
def load(self, model_path: Union[str, Path], **_) -> Model:
if not isinstance(model_path, Path):
model_path = Path(model_path)
model = torch.jit.load(model_path.as_posix())
precision = infer_model_precision(model)
io_spec = self._io_spec
if not io_spec:
yaml_path = model_path.parent / f"{model_path.stem}.yaml"
if not yaml_path.is_file():
raise ValueError(
f"If `--tensor-names-path is not provided, "
f"TorchScript model loader expects file {yaml_path} with tensor information."
)
with yaml_path.open("r") as fh:
tensor_info = yaml.load(fh, Loader=yaml.SafeLoader)
io_spec = InputOutputSpec(tensor_info["inputs"], tensor_info["outputs"])
return Model(handle=model, precision=precision, inputs=io_spec.inputs, outputs=io_spec.outputs)
class TorchScriptTraceConverter(BaseConverter):
def __init__(self):
pass
def convert(self, model: Model, dataloader_fn) -> Model:
device = get_model_device(model.handle)
dummy_input = get_sample_input(dataloader_fn(), device)
converted_model = torch.jit.trace_module(model.handle, {"forward": dummy_input})
io_spec = _get_io_spec(model, dataloader_fn)
return Model(converted_model, precision=model.precision, inputs=io_spec.inputs, outputs=io_spec.outputs)
class TorchScriptScriptConverter(BaseConverter):
def __init__(self):
pass
def convert(self, model: Model, dataloader_fn) -> Model:
converted_model = torch.jit.script(model.handle)
io_spec = _get_io_spec(model, dataloader_fn)
return Model(converted_model, precision=model.precision, inputs=io_spec.inputs, outputs=io_spec.outputs)
class PYT2ONNXConverter(BaseConverter):
def __init__(self, onnx_opset: int = None):
self._onnx_opset = onnx_opset
def convert(self, model: Model, dataloader_fn) -> Model:
import tempfile
import onnx # pytype: disable=import-error
assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance(
model.handle, torch.nn.Module
), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Converter aborted."
dynamic_axes = get_dynamic_axes(dataloader_fn())
device = get_model_device(model.handle)
dummy_input = get_sample_input(dataloader_fn(), device)
with tempfile.TemporaryDirectory() as tmpdirname:
export_path = os.path.join(tmpdirname, "model.onnx")
with torch.no_grad():
torch.onnx.export(
model.handle,
dummy_input,
export_path,
do_constant_folding=True,
input_names=list(model.inputs),
output_names=list(model.outputs),
dynamic_axes=dynamic_axes,
opset_version=self._onnx_opset,
enable_onnx_checker=True,
)
onnx_model = onnx.load(export_path)
onnx.checker.check_model(onnx_model)
onnx.helper.strip_doc_string(onnx_model)
onnx_model = onnx.shape_inference.infer_shapes(onnx_model)
return Model(
handle=onnx_model,
precision=model.precision,
inputs=model.inputs,
outputs=model.outputs,
)
class PYT2TensorRTConverter(BaseConverter):
def __init__(self, max_batch_size: int, max_workspace_size: int, onnx_opset: int, precision: str):
self._max_batch_size = max_batch_size
self._max_workspace_size = max_workspace_size
self._onnx_opset = onnx_opset
self._precision = Precision(precision)
def convert(self, model: Model, dataloader_fn) -> Model:
from .onnx import _infer_graph_precision
from .onnx2trt_conv import onnx2trt
pyt2onnx_converter = PYT2ONNXConverter(self._onnx_opset)
onnx_model = pyt2onnx_converter.convert(model, dataloader_fn).handle
precision = _infer_graph_precision(onnx_model.graph)
input_shapes = get_input_shapes(dataloader_fn(), self._max_batch_size)
cuda_engine = onnx2trt(
onnx_model,
shapes=input_shapes,
max_workspace_size=self._max_workspace_size,
max_batch_size=self._max_batch_size,
model_precision=self._precision.value,
)
return Model(
handle=cuda_engine,
precision=model.precision,
inputs=model.inputs,
outputs=model.outputs,
)
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
# TensorRT requires source models to be in FP32 precision
return Precision.FP32
class TorchScriptSaver(BaseSaver):
def save(self, model: Model, model_path: Union[str, Path]) -> None:
if not isinstance(model_path, Path):
model_path = Path(model_path)
if isinstance(model.handle, torch.jit.ScriptModule):
torch.jit.save(model.handle, model_path.as_posix())
else:
print("The model must be of type 'torch.jit.ScriptModule'. Saving aborted.")
assert False # temporary error handling
def _format_tensor_spec(tensor_spec):
# wrapping shape with list and whole tensor_spec with dict() is required for correct yaml dump
tensor_spec = tensor_spec._replace(shape=list(tensor_spec.shape))
tensor_spec = dict(tensor_spec._asdict())
return tensor_spec
# store TensorSpecs from inputs and outputs in a yaml file
tensor_specs = {
"inputs": {k: _format_tensor_spec(v) for k, v in model.inputs.items()},
"outputs": {k: _format_tensor_spec(v) for k, v in model.outputs.items()},
}
yaml_path = model_path.parent / f"{model_path.stem}.yaml"
with Path(yaml_path).open("w") as fh:
yaml.dump(tensor_specs, fh, indent=4)
class PyTorchRunner(BaseRunner):
def __init__(self):
pass
def init_inference(self, model: Model):
return PyTorchRunnerSession(model=model)
class PyTorchRunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance(
model.handle, torch.nn.Module
), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Runner aborted."
self._model = model
self._output_names = None
def __enter__(self):
self._output_names = list(self._model.outputs)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._output_names = None
self._model = None
def __call__(self, x: Dict[str, object]):
with torch.no_grad():
feed_list = [torch.from_numpy(v).cuda() for k, v in x.items()]
y_pred = self._model.handle(*feed_list)
if isinstance(y_pred, torch.Tensor):
y_pred = (y_pred,)
y_pred = [t.cpu().numpy() for t in y_pred]
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
loaders.register_extension(Format.PYT.value, PyTorchModelLoader)
loaders.register_extension(Format.TS_TRACE.value, TorchScriptLoader)
loaders.register_extension(Format.TS_SCRIPT.value, TorchScriptLoader)
converters.register_extension(f"{Format.PYT.value}--{Format.TS_SCRIPT.value}", TorchScriptScriptConverter)
converters.register_extension(f"{Format.PYT.value}--{Format.TS_TRACE.value}", TorchScriptTraceConverter)
converters.register_extension(f"{Format.PYT.value}--{Format.ONNX.value}", PYT2ONNXConverter)
converters.register_extension(f"{Format.PYT.value}--{Format.TRT.value}", PYT2TensorRTConverter)
savers.register_extension(Format.TS_SCRIPT.value, TorchScriptSaver)
savers.register_extension(Format.TS_TRACE.value, TorchScriptSaver)
runners.register_extension(Format.PYT.value, PyTorchRunner)
runners.register_extension(Format.TS_SCRIPT.value, PyTorchRunner)
runners.register_extension(Format.TS_TRACE.value, PyTorchRunner)
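# Usage sketch (illustrative only; `model` stands for a Model returned by PyTorchModelLoader.load
# and `dataloader_fn` for a hypothetical zero-argument dataloader factory):
#
#   onnx_model = PYT2ONNXConverter(onnx_opset=13).convert(model, dataloader_fn)
#   ts_model = TorchScriptTraceConverter().convert(model, dataloader_fn)
#   TorchScriptSaver().save(ts_model, "model.pt")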
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/triton/deployment_toolkit/library/pyt.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import numpy as np
def mixup(alpha, data, target):
with torch.no_grad():
bs = data.size(0)
c = np.random.beta(alpha, alpha)
perm = torch.randperm(bs).cuda()
md = c * data + (1 - c) * data[perm, :]
mt = c * target + (1 - c) * target[perm, :]
return md, mt
class MixUpWrapper(object):
def __init__(self, alpha, dataloader):
self.alpha = alpha
self.dataloader = dataloader
def mixup_loader(self, loader):
for input, target in loader:
i, t = mixup(self.alpha, input, target)
yield i, t
def __iter__(self):
return self.mixup_loader(self.dataloader)
def __len__(self):
return len(self.dataloader)
class NLLMultiLabelSmooth(nn.Module):
def __init__(self, smoothing=0.0):
super(NLLMultiLabelSmooth, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
if self.training:
x = x.float()
target = target.float()
logprobs = torch.nn.functional.log_softmax(x, dim=-1)
nll_loss = -logprobs * target
nll_loss = nll_loss.sum(-1)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
else:
return torch.nn.functional.cross_entropy(x, target)
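# Usage sketch (illustrative only): mixup expects one-hot (soft) targets, so wrap a
# loader that already yields one-hot labels and train with NLLMultiLabelSmooth.
if __name__ == "__main__" and torch.cuda.is_available():
    data = torch.randn(8, 3, 32, 32).cuda()
    target = torch.nn.functional.one_hot(
        torch.randint(0, 10, (8,)), num_classes=10
    ).float().cuda()
    loader = [(data, target)]  # stands in for a real (input, one-hot target) loader
    mixed_loader = MixUpWrapper(alpha=0.2, dataloader=loader)
    criterion = NLLMultiLabelSmooth(smoothing=0.1)
    for x, y in mixed_loader:
        loss = criterion(torch.randn(8, 10).cuda(), y)  # dummy logits in place of a model output
        print(loss.item())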
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/mixup.py |
import collections
import itertools
import os
import pathlib
import re
import pynvml
from typing import Union
class Device:
# assume nvml returns list of 64 bit ints
_nvml_bit_affinity = 64
_nvml_affinity_elements = (
os.cpu_count() + _nvml_bit_affinity - 1
) // _nvml_bit_affinity
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def get_name(self):
return pynvml.nvmlDeviceGetName(self.handle)
def get_uuid(self):
return pynvml.nvmlDeviceGetUUID(self.handle)
def get_cpu_affinity(self):
affinity_string = ""
for j in pynvml.nvmlDeviceGetCpuAffinity(
self.handle, Device._nvml_affinity_elements
):
# assume nvml returns list of 64 bit ints
affinity_string = "{:064b}".format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
ret = [i for i, e in enumerate(affinity_list) if e != 0]
return ret
def get_thread_siblings_list():
"""
Returns a list of 2-element integer tuples representing pairs of
hyperthreading cores.
"""
path = "/sys/devices/system/cpu/cpu*/topology/thread_siblings_list"
thread_siblings_list = []
pattern = re.compile(r"(\d+)\D(\d+)")
for fname in pathlib.Path(path[0]).glob(path[1:]):
with open(fname) as f:
content = f.read().strip()
res = pattern.findall(content)
if res:
pair = tuple(sorted(map(int, res[0])))
thread_siblings_list.append(pair)
thread_siblings_list = list(set(thread_siblings_list))
return thread_siblings_list
def build_thread_siblings_dict(siblings_list):
siblings_dict = {}
for siblings_tuple in siblings_list:
for core in siblings_tuple:
siblings_dict[core] = siblings_tuple
return siblings_dict
def group_list_by_dict(affinity, siblings_dict):
sorted_affinity = sorted(affinity, key=lambda x: siblings_dict.get(x, (x,)))
grouped = itertools.groupby(
sorted_affinity, key=lambda x: siblings_dict.get(x, (x,))
)
grouped_affinity = []
for key, group in grouped:
grouped_affinity.append(tuple(group))
return grouped_affinity
def group_affinity_by_siblings(socket_affinities):
siblings_list = get_thread_siblings_list()
siblings_dict = build_thread_siblings_dict(siblings_list)
grouped_socket_affinities = []
for socket_affinity in socket_affinities:
grouped_socket_affinities.append(
group_list_by_dict(socket_affinity, siblings_dict)
)
return grouped_socket_affinities
def ungroup_affinities(affinities, cores):
ungrouped_affinities = []
for affinity in affinities:
if cores == "all_logical":
ungrouped_affinities.append(list(itertools.chain(*affinity)))
elif cores == "single_logical":
ungrouped_affinities.append([group[0] for group in affinity])
else:
raise RuntimeError("Unknown cores mode")
return ungrouped_affinities
def check_socket_affinities(socket_affinities):
# sets of cores should be either identical or disjoint
for i, j in itertools.product(socket_affinities, socket_affinities):
if not set(i) == set(j) and not set(i).isdisjoint(set(j)):
raise RuntimeError(
f"Sets of cores should be either identical or disjoint, "
f"but got {i} and {j}."
)
def get_socket_affinities(nproc_per_node, exclude_unavailable_cores=True):
devices = [Device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.get_cpu_affinity() for dev in devices]
if exclude_unavailable_cores:
available_cores = os.sched_getaffinity(0)
socket_affinities = [
list(set(affinity) & available_cores) for affinity in socket_affinities
]
check_socket_affinities(socket_affinities)
return socket_affinities
def get_grouped_socket_affinities(nproc_per_node, exclude_unavailable_cores=True):
socket_affinities = get_socket_affinities(nproc_per_node, exclude_unavailable_cores)
grouped_socket_affinities = group_affinity_by_siblings(socket_affinities)
return grouped_socket_affinities
def set_socket_affinity(gpu_id, nproc_per_node, cores):
"""
The process is assigned with all available physical CPU cores from the CPU
socket connected to the GPU with a given id.
Args:
gpu_id: index of a GPU
nproc_per_node: number of processes per node
cores: 'all_logical' or 'single_logical'
"""
grouped_socket_affinities = get_grouped_socket_affinities(nproc_per_node)
ungrouped_affinities = ungroup_affinities(grouped_socket_affinities, cores)
os.sched_setaffinity(0, ungrouped_affinities[gpu_id])
def set_socket_single_affinity(gpu_id, nproc_per_node, cores):
"""
The process is assigned with the first available physical CPU core from the
list of all CPU physical cores from the CPU socket connected to the GPU with
a given id.
Args:
gpu_id: index of a GPU
nproc_per_node: number of processes per node
cores: 'all_logical' or 'single_logical'
"""
grouped_socket_affinities = get_grouped_socket_affinities(nproc_per_node)
single_grouped_socket_affinities = [
group[:1] for group in grouped_socket_affinities
]
ungrouped_affinities = ungroup_affinities(single_grouped_socket_affinities, cores)
os.sched_setaffinity(0, ungrouped_affinities[gpu_id])
def set_socket_single_unique_affinity(gpu_id, nproc_per_node, cores):
"""
The process is assigned with a single unique available physical CPU core
from the list of all CPU cores from the CPU socket connected to the GPU with
a given id.
Args:
gpu_id: index of a GPU
nproc_per_node: number of processes per node
cores: 'all_logical' or 'single_logical'
"""
grouped_socket_affinities = get_grouped_socket_affinities(nproc_per_node)
affinities = []
assigned_groups = set()
for grouped_socket_affinity in grouped_socket_affinities:
for group in grouped_socket_affinity:
if group not in assigned_groups:
affinities.append([group])
assigned_groups.add(group)
break
ungrouped_affinities = ungroup_affinities(affinities, cores)
os.sched_setaffinity(0, ungrouped_affinities[gpu_id])
def set_socket_unique_affinity(gpu_id, nproc_per_node, cores, mode, balanced=True):
"""
The process is assigned with a unique subset of available physical CPU
cores from the CPU socket connected to a GPU with a given id.
Assignment automatically includes hyperthreading siblings (if siblings are
available).
Args:
gpu_id: index of a GPU
nproc_per_node: number of processes per node
cores: 'all_logical' or 'single_logical'
mode: 'contiguous' or 'interleaved'
        balanced: assign an equal number of physical cores to each process
"""
grouped_socket_affinities = get_grouped_socket_affinities(nproc_per_node)
grouped_socket_affinities_to_device_ids = collections.defaultdict(list)
for idx, grouped_socket_affinity in enumerate(grouped_socket_affinities):
grouped_socket_affinities_to_device_ids[tuple(grouped_socket_affinity)].append(
idx
)
# compute minimal number of physical cores per GPU across all GPUs and
# sockets, code assigns this number of cores per GPU if balanced == True
min_physical_cores_per_gpu = min(
[
len(cores) // len(gpus)
for cores, gpus in grouped_socket_affinities_to_device_ids.items()
]
)
grouped_unique_affinities = [None] * nproc_per_node
for (
grouped_socket_affinity,
device_ids,
) in grouped_socket_affinities_to_device_ids.items():
devices_per_group = len(device_ids)
if balanced:
cores_per_device = min_physical_cores_per_gpu
grouped_socket_affinity = grouped_socket_affinity[
: devices_per_group * min_physical_cores_per_gpu
]
else:
cores_per_device = len(grouped_socket_affinity) // devices_per_group
for socket_subgroup_id, device_id in enumerate(device_ids):
# In theory there should be no difference in performance between
# 'interleaved' and 'contiguous' pattern on Intel-based DGX-1,
# but 'contiguous' should be better for DGX A100 because on AMD
# Rome 4 consecutive cores are sharing L3 cache.
# TODO: code doesn't attempt to automatically detect layout of
# L3 cache, also external environment may already exclude some
# cores, this code makes no attempt to detect it and to align
# mapping to multiples of 4.
if mode == "interleaved":
unique_grouped_affinity = list(
grouped_socket_affinity[socket_subgroup_id::devices_per_group]
)
elif mode == "contiguous":
unique_grouped_affinity = list(
grouped_socket_affinity[
socket_subgroup_id
* cores_per_device : (socket_subgroup_id + 1)
* cores_per_device
]
)
else:
raise RuntimeError("Unknown set_socket_unique_affinity mode")
grouped_unique_affinities[device_id] = unique_grouped_affinity
ungrouped_affinities = ungroup_affinities(grouped_unique_affinities, cores)
os.sched_setaffinity(0, ungrouped_affinities[gpu_id])
from enum import Enum, auto
class AffinityMode(Enum):
none = auto()
socket = auto()
socket_single = auto()
socket_single_unique = auto()
socket_unique_interleaved = auto()
socket_unique_contiguous = auto()
def set_affinity(
gpu_id,
nproc_per_node=None,
*,
mode: Union[str, AffinityMode] = AffinityMode.socket_unique_contiguous,
cores="all_logical",
balanced=True,
):
"""
The process is assigned with a proper CPU affinity that matches CPU-GPU
hardware architecture on a given platform. Usually, it improves and
stabilizes the performance of deep learning training workloads.
This function assumes that the workload runs in multi-process single-device
mode (there are multiple training processes, and each process is running on
a single GPU). This is typical for multi-GPU data-parallel training
workloads (e.g., using `torch.nn.parallel.DistributedDataParallel`).
Available affinity modes:
* 'socket' - the process is assigned with all available physical CPU cores
from the CPU socket connected to the GPU with a given id.
* 'socket_single' - the process is assigned with the first available
physical CPU core from the list of all CPU cores from the CPU socket
connected to the GPU with a given id (multiple GPUs could be assigned with
the same CPU core).
* 'socket_single_unique' - the process is assigned with a single unique
available physical CPU core from the list of all CPU cores from the CPU
socket connected to the GPU with a given id.
* 'socket_unique_interleaved' - the process is assigned with a unique
subset of available physical CPU cores from the CPU socket connected to a
GPU with a given id, cores are assigned with interleaved indexing pattern
* 'socket_unique_contiguous' - (the default) the process is assigned with a
unique subset of available physical CPU cores from the CPU socket connected
to a GPU with a given id, cores are assigned with contiguous indexing
pattern
Available "cores" modes:
* 'all_logical' - assigns the process with all logical cores associated with
a given corresponding physical core (i.e., automatically includes all
available hyperthreading siblings)
* 'single_logical' - assigns the process with only one logical core
associated with a given corresponding physical core (i.e., excludes
hyperthreading siblings)
'socket_unique_contiguous' is the recommended mode for deep learning
training workloads on NVIDIA DGX machines.
Args:
gpu_id: integer index of a GPU, value from 0 to 'nproc_per_node' - 1
nproc_per_node: number of processes per node
mode: affinity mode
balanced: assign an equal number of physical cores to each process,
affects only 'socket_unique_interleaved' and
'socket_unique_contiguous' affinity modes
cores: 'all_logical' or 'single_logical'
Returns a set of logical CPU cores on which the process is eligible to run.
Example:
import argparse
import os
import gpu_affinity
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--local_rank',
type=int,
default=os.getenv('LOCAL_RANK', 0),
)
args = parser.parse_args()
nproc_per_node = torch.cuda.device_count()
affinity = gpu_affinity.set_affinity(args.local_rank, nproc_per_node)
print(f'{args.local_rank}: core affinity: {affinity}')
if __name__ == "__main__":
main()
Launch the example with:
python -m torch.distributed.launch --nproc_per_node <#GPUs> example.py
WARNING: On DGX A100, only half of the CPU cores have direct access to GPUs.
This function restricts execution only to the CPU cores directly connected
to GPUs, so on DGX A100, it will limit the code to half of the CPU cores and
half of CPU memory bandwidth (which may be fine for many DL models).
WARNING: Intel's OpenMP implementation resets affinity on the first call to
an OpenMP function after a fork. It's recommended to run with env variable:
`KMP_AFFINITY=disabled` if the affinity set by gpu_affinity should be
preserved after a fork (e.g. in PyTorch DataLoader workers).
"""
if not isinstance(mode, AffinityMode):
mode = AffinityMode[mode]
pynvml.nvmlInit()
if nproc_per_node is None:
nproc_per_node = pynvml.nvmlDeviceGetCount()
if mode == AffinityMode.none:
pass
elif mode == AffinityMode.socket:
set_socket_affinity(gpu_id, nproc_per_node, cores)
elif mode == AffinityMode.socket_single:
set_socket_single_affinity(gpu_id, nproc_per_node, cores)
elif mode == AffinityMode.socket_single_unique:
set_socket_single_unique_affinity(gpu_id, nproc_per_node, cores)
elif mode == AffinityMode.socket_unique_interleaved:
set_socket_unique_affinity(
gpu_id, nproc_per_node, cores, "interleaved", balanced
)
elif mode == AffinityMode.socket_unique_contiguous:
set_socket_unique_affinity(
gpu_id, nproc_per_node, cores, "contiguous", balanced
)
else:
raise RuntimeError("Unknown affinity mode")
affinity = os.sched_getaffinity(0)
return affinity
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/gpu_affinity.py |
from tqdm import tqdm
import torch
import contextlib
import time
import logging
from pytorch_quantization import quant_modules
from pytorch_quantization import nn as quant_nn
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
from . import logger as log
from .utils import calc_ips
import dllogger
initialize = quant_modules.initialize
deactivate = quant_modules.deactivate
IPS_METADATA = {"unit": "img/s", "format": ":.2f"}
TIME_METADATA = {"unit": "s", "format": ":.5f"}
def select_default_calib_method(calib_method='histogram'):
"""Set up selected calibration method in whole network"""
quant_desc_input = QuantDescriptor(calib_method=calib_method)
quant_nn.QuantConv1d.set_default_quant_desc_input(quant_desc_input)
quant_nn.QuantConv2d.set_default_quant_desc_input(quant_desc_input)
quant_nn.QuantLinear.set_default_quant_desc_input(quant_desc_input)
quant_nn.QuantAdaptiveAvgPool2d.set_default_quant_desc_input(quant_desc_input)
def quantization_setup(calib_method='histogram'):
"""Change network into quantized version "automatically" and selects histogram as default quantization method"""
select_default_calib_method(calib_method)
initialize()
def disable_calibration(model):
"""Disables calibration in whole network. Should be run always before running interference."""
for name, module in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if module._calibrator is not None:
module.enable_quant()
module.disable_calib()
else:
module.enable()
def collect_stats(model, data_loader, logger, num_batches):
"""Feed data to the network and collect statistic"""
if logger is not None:
logger.register_metric(
f"calib.total_ips",
log.PERF_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=IPS_METADATA,
)
logger.register_metric(
f"calib.data_time",
log.PERF_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=TIME_METADATA,
)
logger.register_metric(
f"calib.compute_latency",
log.PERF_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=TIME_METADATA,
)
# Enable calibrators
data_iter = enumerate(data_loader)
if logger is not None:
data_iter = logger.iteration_generator_wrapper(data_iter, mode='calib')
for name, module in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
end = time.time()
if logger is not None:
logger.start_calibration()
for i, (image, _) in data_iter:
bs = image.size(0)
data_time = time.time() - end
model(image.cuda())
it_time = time.time() - end
if logger is not None:
logger.log_metric(f"calib.total_ips", calc_ips(bs, it_time))
logger.log_metric(f"calib.data_time", data_time)
logger.log_metric(f"calib.compute_latency", it_time - data_time)
if i >= num_batches:
time.sleep(5)
break
end = time.time()
if logger is not None:
logger.end_calibration()
logging.disable(logging.WARNING)
disable_calibration(model)
def compute_amax(model, **kwargs):
"""Loads statistics of data and calculates quantization parameters in whole network"""
for name, module in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer) and module._calibrator is not None:
if isinstance(module._calibrator, calib.MaxCalibrator):
module.load_calib_amax()
else:
module.load_calib_amax(**kwargs)
model.cuda()
def calibrate(model, train_loader, logger, calib_iter=1, percentile=99.99):
"""Calibrates whole network i.e. gathers data for quantization and calculates quantization parameters"""
model.eval()
with torch.no_grad():
collect_stats(model, train_loader, logger, num_batches=calib_iter)
compute_amax(model, method="percentile", percentile=percentile)
logging.disable(logging.NOTSET)
@contextlib.contextmanager
def switch_on_quantization(do_quantization=True):
"""Context manager for quantization activation"""
if do_quantization:
initialize()
try:
yield
finally:
if do_quantization:
deactivate()
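# Usage sketch (illustrative only; `build_model` and `train_loader` are hypothetical
# stand-ins for the real model factory and data loader):
#
#   quantization_setup(calib_method="histogram")  # must run before the model is built
#   model = build_model().cuda()                  # conv/linear layers are created as quantized variants
#   calibrate(model, train_loader, logger=None, calib_iter=10, percentile=99.99)
#   # the calibrated model can now be fine-tuned (QAT) or exported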
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/quantization.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION
# Copyright (c) 2017- Facebook, Inc
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import torch
import numpy as np
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from PIL import Image
from functools import partial
from torchvision.transforms.functional import InterpolationMode
from image_classification.autoaugment import AutoaugmentImageNetPolicy
DATA_BACKEND_CHOICES = ["pytorch", "synthetic"]
try:
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
DATA_BACKEND_CHOICES.append("dali-gpu")
DATA_BACKEND_CHOICES.append("dali-cpu")
except ImportError:
print(
"Please install DALI from https://www.github.com/NVIDIA/DALI to run this example."
)
def load_jpeg_from_file(path, cuda=True):
img_transforms = transforms.Compose(
[transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()]
)
img = img_transforms(Image.open(path))
with torch.no_grad():
        # mean and std are not multiplied by 255 as they are in the training script:
        # the torch dataloader reads data as uint8 bytes, whereas loading directly
        # through PIL (with ToTensor) creates a tensor with floats in the [0, 1] range
mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
if cuda:
mean = mean.cuda()
std = std.cuda()
img = img.cuda()
img = img.float()
input = img.unsqueeze(0).sub_(mean).div_(std)
return input
class HybridTrainPipe(Pipeline):
def __init__(
self,
batch_size,
num_threads,
device_id,
data_dir,
interpolation,
crop,
dali_cpu=False,
):
super(HybridTrainPipe, self).__init__(
batch_size, num_threads, device_id, seed=12 + device_id
)
interpolation = {
"bicubic": types.INTERP_CUBIC,
"bilinear": types.INTERP_LINEAR,
"triangular": types.INTERP_TRIANGULAR,
}[interpolation]
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
rank = 0
world_size = 1
self.input = ops.FileReader(
file_root=data_dir,
shard_id=rank,
num_shards=world_size,
random_shuffle=True,
pad_last_batch=True,
)
if dali_cpu:
dali_device = "cpu"
self.decode = ops.ImageDecoder(device=dali_device, output_type=types.RGB)
else:
dali_device = "gpu"
# This padding sets the size of the internal nvJPEG buffers to be able to handle all images from full-sized ImageNet
# without additional reallocations
self.decode = ops.ImageDecoder(
device="mixed",
output_type=types.RGB,
device_memory_padding=211025920,
host_memory_padding=140544512,
)
self.res = ops.RandomResizedCrop(
device=dali_device,
size=[crop, crop],
interp_type=interpolation,
random_aspect_ratio=[0.75, 4.0 / 3.0],
random_area=[0.08, 1.0],
num_attempts=100,
antialias=False,
)
self.cmnp = ops.CropMirrorNormalize(
device="gpu",
dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop, crop),
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
std=[0.229 * 255, 0.224 * 255, 0.225 * 255],
)
self.coin = ops.CoinFlip(probability=0.5)
def define_graph(self):
rng = self.coin()
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images.gpu(), mirror=rng)
return [output, self.labels]
class HybridValPipe(Pipeline):
def __init__(
self, batch_size, num_threads, device_id, data_dir, interpolation, crop, size
):
super(HybridValPipe, self).__init__(
batch_size, num_threads, device_id, seed=12 + device_id
)
interpolation = {
"bicubic": types.INTERP_CUBIC,
"bilinear": types.INTERP_LINEAR,
"triangular": types.INTERP_TRIANGULAR,
}[interpolation]
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
rank = 0
world_size = 1
self.input = ops.FileReader(
file_root=data_dir,
shard_id=rank,
num_shards=world_size,
random_shuffle=False,
pad_last_batch=True,
)
self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
self.res = ops.Resize(
device="gpu",
resize_shorter=size,
interp_type=interpolation,
antialias=False,
)
self.cmnp = ops.CropMirrorNormalize(
device="gpu",
dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop, crop),
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
std=[0.229 * 255, 0.224 * 255, 0.225 * 255],
)
def define_graph(self):
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images)
return [output, self.labels]
class DALIWrapper(object):
def gen_wrapper(dalipipeline, num_classes, one_hot, memory_format):
for data in dalipipeline:
input = data[0]["data"].contiguous(memory_format=memory_format)
target = torch.reshape(data[0]["label"], [-1]).cuda().long()
if one_hot:
target = expand(num_classes, torch.float, target)
yield input, target
dalipipeline.reset()
def __init__(self, dalipipeline, num_classes, one_hot, memory_format):
self.dalipipeline = dalipipeline
self.num_classes = num_classes
self.one_hot = one_hot
self.memory_format = memory_format
def __iter__(self):
return DALIWrapper.gen_wrapper(
self.dalipipeline, self.num_classes, self.one_hot, self.memory_format
)
def get_dali_train_loader(dali_cpu=False):
def gdtl(
data_path,
image_size,
batch_size,
num_classes,
one_hot,
interpolation="bilinear",
augmentation=None,
start_epoch=0,
workers=5,
_worker_init_fn=None,
memory_format=torch.contiguous_format,
**kwargs,
):
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
rank = 0
world_size = 1
traindir = os.path.join(data_path, "train")
if augmentation is not None:
raise NotImplementedError(
f"Augmentation {augmentation} for dali loader is not supported"
)
pipe = HybridTrainPipe(
batch_size=batch_size,
num_threads=workers,
device_id=rank % torch.cuda.device_count(),
data_dir=traindir,
interpolation=interpolation,
crop=image_size,
dali_cpu=dali_cpu,
)
pipe.build()
train_loader = DALIClassificationIterator(
pipe, reader_name="Reader", fill_last_batch=False
)
return (
DALIWrapper(train_loader, num_classes, one_hot, memory_format),
int(pipe.epoch_size("Reader") / (world_size * batch_size)),
)
return gdtl
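# Illustrative usage sketch (added commentary; the path and sizes below are hypothetical):
# the factory returns a builder whose signature mirrors get_pytorch_train_loader, e.g.
#   get_train_loader = get_dali_train_loader(dali_cpu=False)
#   train_loader, steps_per_epoch = get_train_loader(
#       "/data/imagenet", image_size=224, batch_size=256, num_classes=1000, one_hot=False
#   )
#   for input, target in train_loader:  # GPU tensors, already decoded and normalized by DALI
#       ...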
def get_dali_val_loader():
def gdvl(
data_path,
image_size,
batch_size,
num_classes,
one_hot,
interpolation="bilinear",
crop_padding=32,
workers=5,
_worker_init_fn=None,
memory_format=torch.contiguous_format,
**kwargs,
):
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
rank = 0
world_size = 1
valdir = os.path.join(data_path, "val")
pipe = HybridValPipe(
batch_size=batch_size,
num_threads=workers,
device_id=rank % torch.cuda.device_count(),
data_dir=valdir,
interpolation=interpolation,
crop=image_size,
size=image_size + crop_padding,
)
pipe.build()
val_loader = DALIClassificationIterator(
pipe, reader_name="Reader", fill_last_batch=False
)
return (
DALIWrapper(val_loader, num_classes, one_hot, memory_format),
int(pipe.epoch_size("Reader") / (world_size * batch_size)),
)
return gdvl
def fast_collate(memory_format, batch):
imgs = [img[0] for img in batch]
targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
w = imgs[0].size[0]
h = imgs[0].size[1]
tensor = torch.zeros((len(imgs), 3, h, w), dtype=torch.uint8).contiguous(
memory_format=memory_format
)
for i, img in enumerate(imgs):
nump_array = np.asarray(img, dtype=np.uint8)
if nump_array.ndim < 3:
nump_array = np.expand_dims(nump_array, axis=-1)
nump_array = np.rollaxis(nump_array, 2)
tensor[i] += torch.from_numpy(nump_array.copy())
return tensor, targets
def expand(num_classes, dtype, tensor):
e = torch.zeros(
tensor.size(0), num_classes, dtype=dtype, device=torch.device("cuda")
)
e = e.scatter(1, tensor.unsqueeze(1), 1.0)
return e
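# Worked example for expand() (added commentary; assumes a CUDA device is available):
# with num_classes=5, dtype=torch.float and target=[2, 0], the result is
#   [[0., 0., 1., 0., 0.],
#    [1., 0., 0., 0., 0.]]
# i.e. a one-hot matrix built by scattering 1.0 at (i, target[i]) along dim=1.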
class PrefetchedWrapper(object):
    @staticmethod
    def prefetched_loader(loader, num_classes, one_hot):
mean = (
torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255])
.cuda()
.view(1, 3, 1, 1)
)
std = (
torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255])
.cuda()
.view(1, 3, 1, 1)
)
stream = torch.cuda.Stream()
first = True
for next_input, next_target in loader:
with torch.cuda.stream(stream):
next_input = next_input.cuda(non_blocking=True)
next_target = next_target.cuda(non_blocking=True)
next_input = next_input.float()
if one_hot:
next_target = expand(num_classes, torch.float, next_target)
next_input = next_input.sub_(mean).div_(std)
if not first:
yield input, target
else:
first = False
torch.cuda.current_stream().wait_stream(stream)
input = next_input
target = next_target
yield input, target
def __init__(self, dataloader, start_epoch, num_classes, one_hot):
self.dataloader = dataloader
self.epoch = start_epoch
self.one_hot = one_hot
self.num_classes = num_classes
def __iter__(self):
if self.dataloader.sampler is not None and isinstance(
self.dataloader.sampler, torch.utils.data.distributed.DistributedSampler
):
self.dataloader.sampler.set_epoch(self.epoch)
self.epoch += 1
return PrefetchedWrapper.prefetched_loader(
self.dataloader, self.num_classes, self.one_hot
)
def __len__(self):
return len(self.dataloader)
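# Added note: prefetched_loader overlaps work across two CUDA streams: while the
# current batch is being consumed by the training step, the next batch is copied to
# the GPU, cast to float and mean/std-normalized on a side stream, and it is yielded
# only after the main stream has waited on that copy. A hypothetical end-to-end use:
#   loader, steps = get_pytorch_train_loader("/data/imagenet", 224, 256, 1000, one_hot=False)
#   for input, target in loader:  # float32, normalized, already on the GPU
#       ...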
def get_pytorch_train_loader(
data_path,
image_size,
batch_size,
num_classes,
one_hot,
interpolation="bilinear",
augmentation=None,
start_epoch=0,
workers=5,
_worker_init_fn=None,
prefetch_factor=2,
memory_format=torch.contiguous_format,
):
interpolation = {
"bicubic": InterpolationMode.BICUBIC,
"bilinear": InterpolationMode.BILINEAR,
}[interpolation]
traindir = os.path.join(data_path, "train")
transforms_list = [
transforms.RandomResizedCrop(image_size, interpolation=interpolation),
transforms.RandomHorizontalFlip(),
]
if augmentation == "autoaugment":
transforms_list.append(AutoaugmentImageNetPolicy())
train_dataset = datasets.ImageFolder(traindir, transforms.Compose(transforms_list))
if torch.distributed.is_initialized():
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, shuffle=True
)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=batch_size,
shuffle=(train_sampler is None),
num_workers=workers,
worker_init_fn=_worker_init_fn,
pin_memory=True,
collate_fn=partial(fast_collate, memory_format),
drop_last=True,
persistent_workers=True,
prefetch_factor=prefetch_factor,
)
return (
PrefetchedWrapper(train_loader, start_epoch, num_classes, one_hot),
len(train_loader),
)
def get_pytorch_val_loader(
data_path,
image_size,
batch_size,
num_classes,
one_hot,
interpolation="bilinear",
workers=5,
_worker_init_fn=None,
crop_padding=32,
memory_format=torch.contiguous_format,
prefetch_factor=2,
):
interpolation = {
"bicubic": InterpolationMode.BICUBIC,
"bilinear": InterpolationMode.BILINEAR,
}[interpolation]
valdir = os.path.join(data_path, "val")
val_dataset = datasets.ImageFolder(
valdir,
transforms.Compose(
[
transforms.Resize(
image_size + crop_padding, interpolation=interpolation
),
transforms.CenterCrop(image_size),
]
),
)
if torch.distributed.is_initialized():
val_sampler = torch.utils.data.distributed.DistributedSampler(
val_dataset, shuffle=False
)
else:
val_sampler = None
val_loader = torch.utils.data.DataLoader(
val_dataset,
sampler=val_sampler,
batch_size=batch_size,
shuffle=(val_sampler is None),
num_workers=workers,
worker_init_fn=_worker_init_fn,
pin_memory=True,
collate_fn=partial(fast_collate, memory_format),
drop_last=False,
persistent_workers=True,
prefetch_factor=prefetch_factor,
)
return PrefetchedWrapper(val_loader, 0, num_classes, one_hot), len(val_loader)
class SynteticDataLoader(object):
def __init__(
self,
batch_size,
num_classes,
num_channels,
height,
width,
one_hot,
memory_format=torch.contiguous_format,
):
input_data = (
torch.randn(batch_size, num_channels, height, width)
.contiguous(memory_format=memory_format)
.cuda()
.normal_(0, 1.0)
)
if one_hot:
input_target = torch.empty(batch_size, num_classes).cuda()
input_target[:, 0] = 1.0
else:
input_target = torch.randint(0, num_classes, (batch_size,))
input_target = input_target.cuda()
self.input_data = input_data
self.input_target = input_target
def __iter__(self):
while True:
yield self.input_data, self.input_target
def get_synthetic_loader(
data_path,
image_size,
batch_size,
num_classes,
one_hot,
interpolation=None,
augmentation=None,
start_epoch=0,
workers=None,
_worker_init_fn=None,
memory_format=torch.contiguous_format,
**kwargs,
):
return (
SynteticDataLoader(
batch_size,
num_classes,
3,
image_size,
image_size,
one_hot,
memory_format=memory_format,
),
-1,
)
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/dataloaders.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#from . import logger
#from . import dataloaders
#from . import training
#from . import utils
#from . import mixup
#from . import smoothing
from . import models
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/__init__.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION
# Copyright (c) 2017- Facebook, Inc
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import OrderedDict
from numbers import Number
import dllogger
import numpy as np
def format_step(step):
if isinstance(step, str):
return step
s = ""
if len(step) > 0:
if isinstance(step[0], Number):
s += "Epoch: {} ".format(step[0])
else:
s += "{} ".format(step[0])
if len(step) > 1:
s += "Iteration: {} ".format(step[1])
if len(step) > 2:
s += "Validation Iteration: {} ".format(step[2])
if len(step) == 0:
s = "Summary:"
return s
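# Examples of format_step output (added commentary): a plain string such as "PARAMETER"
# passes through unchanged, the tuple (3, 120) renders as "Epoch: 3 Iteration: 120 ",
# and an empty tuple renders as "Summary:".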
PERF_METER = lambda: Meter(AverageMeter(), AverageMeter(), AverageMeter())
LOSS_METER = lambda: Meter(AverageMeter(), AverageMeter(), MinMeter())
ACC_METER = lambda: Meter(AverageMeter(), AverageMeter(), MaxMeter())
LR_METER = lambda: Meter(LastMeter(), LastMeter(), LastMeter())
LAT_100 = lambda: Meter(QuantileMeter(1), QuantileMeter(1), QuantileMeter(1))
LAT_99 = lambda: Meter(QuantileMeter(0.99), QuantileMeter(0.99), QuantileMeter(0.99))
LAT_95 = lambda: Meter(QuantileMeter(0.95), QuantileMeter(0.95), QuantileMeter(0.95))
class Meter(object):
def __init__(self, iteration_aggregator, epoch_aggregator, run_aggregator):
self.run_aggregator = run_aggregator
self.epoch_aggregator = epoch_aggregator
self.iteration_aggregator = iteration_aggregator
def record(self, val, n=1):
self.iteration_aggregator.record(val, n=n)
def get_iteration(self):
v, n = self.iteration_aggregator.get_val()
return v
def reset_iteration(self):
v, n = self.iteration_aggregator.get_data()
self.iteration_aggregator.reset()
if v is not None:
self.epoch_aggregator.record(v, n=n)
def get_epoch(self):
v, n = self.epoch_aggregator.get_val()
return v
def reset_epoch(self):
v, n = self.epoch_aggregator.get_data()
self.epoch_aggregator.reset()
if v is not None:
self.run_aggregator.record(v, n=n)
def get_run(self):
v, n = self.run_aggregator.get_val()
return v
def reset_run(self):
self.run_aggregator.reset()
class QuantileMeter(object):
def __init__(self, q):
self.q = q
self.reset()
def reset(self):
self.vals = []
self.n = 0
def record(self, val, n=1):
if isinstance(val, list):
self.vals += val
self.n += len(val)
else:
self.vals += [val] * n
self.n += n
def get_val(self):
if not self.vals:
return None, self.n
return np.quantile(self.vals, self.q, interpolation="nearest"), self.n
def get_data(self):
return self.vals, self.n
class MaxMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.max = None
self.n = 0
def record(self, val, n=1):
if self.max is None:
self.max = val
else:
self.max = max(self.max, val)
self.n = n
def get_val(self):
return self.max, self.n
def get_data(self):
return self.max, self.n
class MinMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.min = None
self.n = 0
def record(self, val, n=1):
if self.min is None:
self.min = val
else:
            self.min = min(self.min, val)
self.n = n
def get_val(self):
return self.min, self.n
def get_data(self):
return self.min, self.n
class LastMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.last = None
self.n = 0
def record(self, val, n=1):
self.last = val
self.n = n
def get_val(self):
return self.last, self.n
def get_data(self):
return self.last, self.n
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.n = 0
self.val = 0
def record(self, val, n=1):
self.n += n
self.val += val * n
def get_val(self):
if self.n == 0:
return None, 0
return self.val / self.n, self.n
def get_data(self):
if self.n == 0:
return None, 0
return self.val / self.n, self.n
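# Added note: Meter chains the three aggregators above. record() feeds the
# iteration-level aggregator, reset_iteration() folds its value into the epoch
# aggregator, and reset_epoch() folds the epoch value into the run aggregator.
# For example, PERF_METER() averages at every level, while ACC_METER() averages
# within an iteration and an epoch but keeps the maximum across the whole run.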
class Logger(object):
def __init__(self, print_interval, backends, start_epoch=-1, verbose=False):
self.epoch = start_epoch
self.iteration = -1
self.val_iteration = -1
self.calib_iteration = -1
self.metrics = OrderedDict()
self.backends = backends
self.print_interval = print_interval
self.verbose = verbose
dllogger.init(backends)
def log_parameter(self, data, verbosity=0):
dllogger.log(step="PARAMETER", data=data, verbosity=verbosity)
def register_metric(self, metric_name, meter, verbosity=0, metadata={}):
if self.verbose:
print("Registering metric: {}".format(metric_name))
self.metrics[metric_name] = {"meter": meter, "level": verbosity}
dllogger.metadata(metric_name, metadata)
def log_metric(self, metric_name, val, n=1):
self.metrics[metric_name]["meter"].record(val, n=n)
def start_iteration(self, mode="train"):
if mode == "val":
self.val_iteration += 1
elif mode == "train":
self.iteration += 1
elif mode == "calib":
self.calib_iteration += 1
def end_iteration(self, mode="train"):
if mode == "val":
it = self.val_iteration
elif mode == "train":
it = self.iteration
elif mode == "calib":
it = self.calib_iteration
if it % self.print_interval == 0 or mode == "calib":
metrics = {n: m for n, m in self.metrics.items() if n.startswith(mode)}
if mode == "train":
step = (self.epoch, self.iteration)
elif mode == "val":
step = (self.epoch, self.iteration, self.val_iteration)
elif mode == "calib":
step = ("Calibration", self.calib_iteration)
verbositys = {m["level"] for _, m in metrics.items()}
for ll in verbositys:
llm = {n: m for n, m in metrics.items() if m["level"] == ll}
dllogger.log(
step=step,
data={n: m["meter"].get_iteration() for n, m in llm.items()},
verbosity=ll,
)
for n, m in metrics.items():
m["meter"].reset_iteration()
dllogger.flush()
def start_epoch(self):
self.epoch += 1
self.iteration = 0
self.val_iteration = 0
for n, m in self.metrics.items():
if not n.startswith("calib"):
m["meter"].reset_epoch()
def end_epoch(self):
for n, m in self.metrics.items():
if not n.startswith("calib"):
m["meter"].reset_iteration()
verbositys = {m["level"] for _, m in self.metrics.items()}
for ll in verbositys:
llm = {n: m for n, m in self.metrics.items() if m["level"] == ll}
dllogger.log(
step=(self.epoch,),
data={n: m["meter"].get_epoch() for n, m in llm.items()},
)
def start_calibration(self):
self.calib_iteration = 0
for n, m in self.metrics.items():
if n.startswith("calib"):
m["meter"].reset_epoch()
def end_calibration(self):
for n, m in self.metrics.items():
if n.startswith("calib"):
m["meter"].reset_iteration()
def end(self):
for n, m in self.metrics.items():
m["meter"].reset_epoch()
verbositys = {m["level"] for _, m in self.metrics.items()}
for ll in verbositys:
llm = {n: m for n, m in self.metrics.items() if m["level"] == ll}
dllogger.log(
step=tuple(), data={n: m["meter"].get_run() for n, m in llm.items()}
)
for n, m in self.metrics.items():
m["meter"].reset_epoch()
dllogger.flush()
def iteration_generator_wrapper(self, gen, mode="train"):
for g in gen:
self.start_iteration(mode=mode)
yield g
self.end_iteration(mode=mode)
def epoch_generator_wrapper(self, gen):
for g in gen:
self.start_epoch()
yield g
self.end_epoch()
class Metrics:
ACC_METADATA = {"unit": "%", "format": ":.2f"}
IPS_METADATA = {"unit": "images/s", "format": ":.2f"}
TIME_METADATA = {"unit": "s", "format": ":.5f"}
LOSS_METADATA = {"unit": None, "format": ":.5f"}
LR_METADATA = {"unit": None, "format": ":.5f"}
def __init__(self, logger):
self.logger = logger
self.map = {}
def log(self, **kwargs):
if self.logger is None:
return
for k, v in kwargs.items():
tks = self.map.get(k, [k])
for tk in tks:
if isinstance(v, tuple):
self.logger.log_metric(tk, v[0], v[1])
else:
self.logger.log_metric(tk, v)
class TrainingMetrics(Metrics):
def __init__(self, logger):
super().__init__(logger)
if self.logger is not None:
self.map = {
"loss": ["train.loss"],
"compute_ips": ["train.compute_ips"],
"total_ips": ["train.total_ips"],
"data_time": ["train.data_time"],
"compute_time": ["train.compute_time"],
"lr": ["train.lr"],
"grad_scale": ["train.grad_scale"],
}
logger.register_metric(
"train.loss",
LOSS_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.LOSS_METADATA,
)
logger.register_metric(
"train.compute_ips",
PERF_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.IPS_METADATA,
)
logger.register_metric(
"train.total_ips",
PERF_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.IPS_METADATA,
)
logger.register_metric(
"train.data_time",
PERF_METER(),
verbosity=dllogger.Verbosity.VERBOSE,
metadata=Metrics.TIME_METADATA,
)
logger.register_metric(
"train.compute_time",
PERF_METER(),
verbosity=dllogger.Verbosity.VERBOSE,
metadata=Metrics.TIME_METADATA,
)
logger.register_metric(
"train.lr",
LR_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
)
logger.register_metric(
"train.grad_scale",
PERF_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.LOSS_METADATA,
)
class ValidationMetrics(Metrics):
def __init__(self, logger, prefix, topk):
super().__init__(logger)
if self.logger is not None:
self.map = {
"loss": [f"{prefix}.loss"],
"top1": [f"{prefix}.top1"],
f"top{topk}": [f"{prefix}.top{topk}"],
"compute_ips": [f"{prefix}.compute_ips"],
"total_ips": [f"{prefix}.total_ips"],
"data_time": [f"{prefix}.data_time"],
"compute_time": [
f"{prefix}.compute_latency",
f"{prefix}.compute_latency_at100",
f"{prefix}.compute_latency_at99",
f"{prefix}.compute_latency_at95",
],
}
logger.register_metric(
f"{prefix}.top1",
ACC_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.ACC_METADATA,
)
logger.register_metric(
f"{prefix}.top{topk}",
ACC_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.ACC_METADATA,
)
logger.register_metric(
f"{prefix}.loss",
LOSS_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.LOSS_METADATA,
)
logger.register_metric(
f"{prefix}.compute_ips",
PERF_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.IPS_METADATA,
)
logger.register_metric(
f"{prefix}.total_ips",
PERF_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.IPS_METADATA,
)
logger.register_metric(
f"{prefix}.data_time",
PERF_METER(),
verbosity=dllogger.Verbosity.VERBOSE,
metadata=Metrics.TIME_METADATA,
)
logger.register_metric(
f"{prefix}.compute_latency",
PERF_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.TIME_METADATA,
)
logger.register_metric(
f"{prefix}.compute_latency_at100",
LAT_100(),
verbosity=dllogger.Verbosity.VERBOSE,
metadata=Metrics.TIME_METADATA,
)
logger.register_metric(
f"{prefix}.compute_latency_at99",
LAT_99(),
verbosity=dllogger.Verbosity.VERBOSE,
metadata=Metrics.TIME_METADATA,
)
logger.register_metric(
f"{prefix}.compute_latency_at95",
LAT_95(),
verbosity=dllogger.Verbosity.VERBOSE,
metadata=Metrics.TIME_METADATA,
)
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/logger.py |
import math
import numpy as np
import torch
from torch import optim
def get_optimizer(parameters, lr, args, state=None):
if args.optimizer == "sgd":
optimizer = get_sgd_optimizer(
parameters,
lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
nesterov=args.nesterov,
bn_weight_decay=args.bn_weight_decay,
)
elif args.optimizer == "rmsprop":
optimizer = get_rmsprop_optimizer(
parameters,
lr,
alpha=args.rmsprop_alpha,
momentum=args.momentum,
weight_decay=args.weight_decay,
eps=args.rmsprop_eps,
bn_weight_decay=args.bn_weight_decay,
)
    if state is not None:
optimizer.load_state_dict(state)
return optimizer
def get_sgd_optimizer(
parameters, lr, momentum, weight_decay, nesterov=False, bn_weight_decay=False
):
if bn_weight_decay:
print(" ! Weight decay applied to BN parameters ")
params = [v for n, v in parameters]
else:
print(" ! Weight decay NOT applied to BN parameters ")
bn_params = [v for n, v in parameters if "bn" in n]
        rest_params = [v for n, v in parameters if "bn" not in n]
print(len(bn_params))
print(len(rest_params))
params = [
{"params": bn_params, "weight_decay": 0},
{"params": rest_params, "weight_decay": weight_decay},
]
optimizer = torch.optim.SGD(
params, lr, momentum=momentum, weight_decay=weight_decay, nesterov=nesterov
)
return optimizer
def get_rmsprop_optimizer(
parameters, lr, alpha, weight_decay, momentum, eps, bn_weight_decay=False
):
bn_params = [v for n, v in parameters if "bn" in n]
    rest_params = [v for n, v in parameters if "bn" not in n]
params = [
{"params": bn_params, "weight_decay": weight_decay if bn_weight_decay else 0},
{"params": rest_params, "weight_decay": weight_decay},
]
optimizer = torch.optim.RMSprop(
params,
lr=lr,
alpha=alpha,
weight_decay=weight_decay,
momentum=momentum,
eps=eps,
)
return optimizer
def lr_policy(lr_fn):
def _alr(optimizer, iteration, epoch):
lr = lr_fn(iteration, epoch)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
return lr
return _alr
def lr_step_policy(base_lr, steps, decay_factor, warmup_length):
def _lr_fn(iteration, epoch):
if epoch < warmup_length:
lr = base_lr * (epoch + 1) / warmup_length
else:
lr = base_lr
for s in steps:
if epoch >= s:
lr *= decay_factor
return lr
return lr_policy(_lr_fn)
def lr_linear_policy(base_lr, warmup_length, epochs):
def _lr_fn(iteration, epoch):
if epoch < warmup_length:
lr = base_lr * (epoch + 1) / warmup_length
else:
e = epoch - warmup_length
es = epochs - warmup_length
lr = base_lr * (1 - (e / es))
return lr
return lr_policy(_lr_fn)
def lr_cosine_policy(base_lr, warmup_length, epochs, end_lr=0):
def _lr_fn(iteration, epoch):
if epoch < warmup_length:
lr = base_lr * (epoch + 1) / warmup_length
else:
e = epoch - warmup_length
es = epochs - warmup_length
lr = end_lr + (0.5 * (1 + np.cos(np.pi * e / es)) * (base_lr - end_lr))
return lr
return lr_policy(_lr_fn)
def lr_exponential_policy(
base_lr,
warmup_length,
epochs,
final_multiplier=0.001,
decay_factor=None,
decay_step=1,
logger=None,
):
"""Exponential lr policy. Setting decay factor parameter overrides final_multiplier"""
es = epochs - warmup_length
if decay_factor is not None:
epoch_decay = decay_factor
else:
epoch_decay = np.power(
2, np.log2(final_multiplier) / math.floor(es / decay_step)
)
def _lr_fn(iteration, epoch):
if epoch < warmup_length:
lr = base_lr * (epoch + 1) / warmup_length
else:
e = epoch - warmup_length
lr = base_lr * (epoch_decay ** math.floor(e / decay_step))
return lr
    return lr_policy(_lr_fn)
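# Illustrative check of the cosine policy (added commentary; values are approximate):
#   sched = lr_cosine_policy(base_lr=1.0, warmup_length=5, epochs=90)
#   sched(optimizer, iteration=0, epoch=0)    # -> 0.2, linear warmup (0 + 1) / 5
#   sched(optimizer, iteration=0, epoch=4)    # -> 1.0, end of warmup
#   sched(optimizer, iteration=0, epoch=89)   # -> ~3.4e-4, cosine tail near zero
# `optimizer` is any torch.optim.Optimizer; the returned value is also written into
# every param_group["lr"].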
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/optimizers.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION
# Copyright (c) 2017- Facebook, Inc
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import os
import numpy as np
import torch
import shutil
import signal
import time
import torch.distributed as dist
class Checkpointer:
def __init__(self, last_filename, checkpoint_dir="./", keep_last_n=0):
self.last_filename = last_filename
self.checkpoints = []
self.checkpoint_dir = checkpoint_dir
self.keep_last_n = keep_last_n
def cleanup(self):
to_delete = self.checkpoints[: -self.keep_last_n]
self.checkpoints = self.checkpoints[-self.keep_last_n :]
for f in to_delete:
full_path = os.path.join(self.checkpoint_dir, f)
os.remove(full_path)
def get_full_path(self, filename):
return os.path.join(self.checkpoint_dir, filename)
def save_checkpoint(
self,
state,
is_best,
filename,
):
if torch.distributed.is_initialized() and torch.distributed.get_rank() != 0:
assert False
full_path = self.get_full_path(filename)
print("SAVING {}".format(full_path))
torch.save(state, full_path)
self.checkpoints.append(filename)
shutil.copyfile(
full_path, self.get_full_path(self.last_filename)
)
if is_best:
shutil.copyfile(
full_path, self.get_full_path("model_best.pth.tar")
)
self.cleanup()
def timed_generator(gen):
start = time.time()
for g in gen:
end = time.time()
t = end - start
yield g, t
start = time.time()
def timed_function(f):
def _timed_function(*args, **kwargs):
start = time.time()
ret = f(*args, **kwargs)
return ret, time.time() - start
return _timed_function
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].float().sum()
res.append(correct_k.mul_(100.0 / batch_size))
return res
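# Worked example for accuracy() (added commentary): for a batch of 4 samples and
# topk=(1, 5), output of shape [4, num_classes] and integer targets of shape [4],
# the function returns [top1, top5] as percentages of this batch, e.g.
# [tensor(75.), tensor(100.)] when 3 of 4 top-1 predictions are correct and every
# true label appears somewhere in the top 5.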
def reduce_tensor(tensor):
rt = tensor.clone().detach()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= (
torch.distributed.get_world_size() if torch.distributed.is_initialized() else 1
)
return rt
def first_n(n, generator):
for i, d in zip(range(n), generator):
yield d
class TimeoutHandler:
def __init__(self, sig=signal.SIGTERM):
self.sig = sig
self.device = torch.device("cuda")
@property
def interrupted(self):
if not dist.is_initialized():
return self._interrupted
interrupted = torch.tensor(self._interrupted).int().to(self.device)
dist.broadcast(interrupted, 0)
interrupted = bool(interrupted.item())
return interrupted
def __enter__(self):
self._interrupted = False
self.released = False
self.original_handler = signal.getsignal(self.sig)
def master_handler(signum, frame):
self.release()
self._interrupted = True
            print("Received SIGTERM")
def ignoring_handler(signum, frame):
self.release()
print("Received SIGTERM, ignoring")
rank = dist.get_rank() if dist.is_initialized() else 0
if rank == 0:
signal.signal(self.sig, master_handler)
else:
signal.signal(self.sig, ignoring_handler)
return self
def __exit__(self, type, value, tb):
self.release()
def release(self):
if self.released:
return False
signal.signal(self.sig, self.original_handler)
self.released = True
return True
def calc_ips(batch_size, time):
world_size = (
torch.distributed.get_world_size() if torch.distributed.is_initialized() else 1
)
tbs = world_size * batch_size
return tbs / time
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/utils.py |
from PIL import Image, ImageEnhance, ImageOps
import numpy as np
import random
class AutoaugmentImageNetPolicy(object):
"""
    Randomly choose one of the best 25 Sub-policies on ImageNet.
Reference: https://arxiv.org/abs/1805.09501
"""
def __init__(self):
self.policies = [
SubPolicy(0.8, "equalize", 1, 0.8, "shearY", 4),
SubPolicy(0.4, "color", 9, 0.6, "equalize", 3),
SubPolicy(0.4, "color", 1, 0.6, "rotate", 8),
SubPolicy(0.8, "solarize", 3, 0.4, "equalize", 7),
SubPolicy(0.4, "solarize", 2, 0.6, "solarize", 2),
SubPolicy(0.2, "color", 0, 0.8, "equalize", 8),
SubPolicy(0.4, "equalize", 8, 0.8, "solarizeadd", 3),
SubPolicy(0.2, "shearX", 9, 0.6, "rotate", 8),
SubPolicy(0.6, "color", 1, 1.0, "equalize", 2),
SubPolicy(0.4, "invert", 9, 0.6, "rotate", 0),
SubPolicy(1.0, "equalize", 9, 0.6, "shearY", 3),
SubPolicy(0.4, "color", 7, 0.6, "equalize", 0),
SubPolicy(0.4, "posterize", 6, 0.4, "autocontrast", 7),
SubPolicy(0.6, "solarize", 8, 0.6, "color", 9),
SubPolicy(0.2, "solarize", 4, 0.8, "rotate", 9),
SubPolicy(1.0, "rotate", 7, 0.8, "translateY", 9),
SubPolicy(0.0, "shearX", 0, 0.8, "solarize", 4),
SubPolicy(0.8, "shearY", 0, 0.6, "color", 4),
SubPolicy(1.0, "color", 0, 0.6, "rotate", 2),
SubPolicy(0.8, "equalize", 4, 0.0, "equalize", 8),
SubPolicy(1.0, "equalize", 4, 0.6, "autocontrast", 2),
SubPolicy(0.4, "shearY", 7, 0.6, "solarizeadd", 7),
SubPolicy(0.8, "posterize", 2, 0.6, "solarize", 10),
SubPolicy(0.6, "solarize", 8, 0.6, "equalize", 1),
SubPolicy(0.8, "color", 6, 0.4, "rotate", 5),
]
def __call__(self, img):
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
return "AutoAugment ImageNet Policy"
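# Illustrative usage (added commentary): the policy maps a PIL image to a PIL image,
# so it can be appended to a torchvision pipeline, e.g.
#   transforms.Compose([
#       transforms.RandomResizedCrop(224),
#       transforms.RandomHorizontalFlip(),
#       AutoaugmentImageNetPolicy(),
#       transforms.ToTensor(),
#   ])
# which matches how get_pytorch_train_loader appends it when augmentation == "autoaugment".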
class SubPolicy(object):
def __init__(self, p1, method1, magnitude_idx1, p2, method2, magnitude_idx2):
operation_factory = OperationFactory()
self.p1 = p1
self.p2 = p2
self.operation1 = operation_factory.get_operation(method1, magnitude_idx1)
self.operation2 = operation_factory.get_operation(method2, magnitude_idx2)
def __call__(self, img):
if random.random() < self.p1:
img = self.operation1(img)
if random.random() < self.p2:
img = self.operation2(img)
return img
class OperationFactory:
def __init__(self):
fillcolor = (128, 128, 128)
        self.ranges = {
            "shearX": np.linspace(0, 0.3, 11),
            "shearY": np.linspace(0, 0.3, 11),
            "translateX": np.linspace(0, 250, 11),
            "translateY": np.linspace(0, 250, 11),
            "rotate": np.linspace(0, 30, 11),
            "color": np.linspace(0.1, 1.9, 11),
            "posterize": np.round(np.linspace(0, 4, 11), 0).astype(int),
            "solarize": np.linspace(0, 256, 11),
            "solarizeadd": np.linspace(0, 110, 11),
            "contrast": np.linspace(0.1, 1.9, 11),
            "sharpness": np.linspace(0.1, 1.9, 11),
            "brightness": np.linspace(0.1, 1.9, 11),
            "autocontrast": [0] * 10,
            "equalize": [0] * 10,
            "invert": [0] * 10
        }
def rotate_with_fill(img, magnitude):
magnitude *= random.choice([-1, 1])
rot = img.convert("RGBA").rotate(magnitude)
return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(img.mode)
def solarize_add(image, addition=0, threshold=128):
lut = []
for i in range(256):
if i < threshold:
res = i + addition if i + addition <= 255 else 255
res = res if res >= 0 else 0
lut.append(res)
else:
lut.append(i)
from PIL.ImageOps import _lut
return _lut(image, lut)
self.operations = {
"shearX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"shearY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"translateX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, magnitude * random.choice([-1, 1]), 0, 1, 0),
fillcolor=fillcolor),
"translateY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * random.choice([-1, 1])),
fillcolor=fillcolor),
"rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
"color": lambda img, magnitude: ImageEnhance.Color(img).enhance(magnitude),
"posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
"solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
"solarizeadd": lambda img, magnitude: solarize_add(img, magnitude),
"contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(magnitude),
"sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(magnitude),
"brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(magnitude),
"autocontrast": lambda img, _: ImageOps.autocontrast(img),
"equalize": lambda img, _: ImageOps.equalize(img),
"invert": lambda img, _: ImageOps.invert(img)
}
def get_operation(self, method, magnitude_idx):
magnitude = self.ranges[method][magnitude_idx]
return lambda img: self.operations[method](img, magnitude)
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/autoaugment.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
class LabelSmoothing(nn.Module):
"""
NLL loss with label smoothing.
"""
def __init__(self, smoothing=0.0):
"""
Constructor for the LabelSmoothing module.
:param smoothing: label smoothing factor
"""
super(LabelSmoothing, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
logprobs = torch.nn.functional.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
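# Added note: with smoothing factor eps, the per-sample loss computed above is
#   (1 - eps) * NLL(target) + eps * mean_over_classes(-log p)
# which is exactly the cross-entropy against smoothed targets that place probability
# (1 - eps) + eps / num_classes on the true class and eps / num_classes on every other class.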
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/smoothing.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION
# Copyright (c) 2017- Facebook, Inc
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
from copy import deepcopy
from functools import wraps
from typing import Callable, Dict, Optional, Tuple
import torch
import torch.nn as nn
from torch.cuda.amp import autocast
from torch.nn.parallel import DistributedDataParallel as DDP
from . import logger as log
from . import utils
from .logger import TrainingMetrics, ValidationMetrics
from .models.common import EMA
class Executor:
def __init__(
self,
model: nn.Module,
loss: Optional[nn.Module],
cuda: bool = True,
memory_format: torch.memory_format = torch.contiguous_format,
amp: bool = False,
scaler: Optional[torch.cuda.amp.GradScaler] = None,
divide_loss: int = 1,
ts_script: bool = False,
):
assert not (amp and scaler is None), "Gradient Scaler is needed for AMP"
def xform(m: nn.Module) -> nn.Module:
if cuda:
m = m.cuda()
m.to(memory_format=memory_format)
return m
self.model = xform(model)
if ts_script:
self.model = torch.jit.script(self.model)
self.ts_script = ts_script
self.loss = xform(loss) if loss is not None else None
self.amp = amp
self.scaler = scaler
self.is_distributed = False
self.divide_loss = divide_loss
self._fwd_bwd = None
self._forward = None
def distributed(self, gpu_id):
self.is_distributed = True
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
self.model = DDP(self.model, device_ids=[gpu_id], output_device=gpu_id)
torch.cuda.current_stream().wait_stream(s)
def _fwd_bwd_fn(
self,
input: torch.Tensor,
target: torch.Tensor,
) -> torch.Tensor:
with autocast(enabled=self.amp):
loss = self.loss(self.model(input), target)
loss /= self.divide_loss
self.scaler.scale(loss).backward()
return loss
def _forward_fn(
self, input: torch.Tensor, target: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
with torch.no_grad(), autocast(enabled=self.amp):
output = self.model(input)
loss = None if self.loss is None else self.loss(output, target)
return output if loss is None else loss, output
def optimize(self, fn):
return fn
@property
def forward_backward(self):
if self._fwd_bwd is None:
if self.loss is None:
raise NotImplementedError(
"Loss must not be None for forward+backward step"
)
self._fwd_bwd = self.optimize(self._fwd_bwd_fn)
return self._fwd_bwd
@property
def forward(self):
if self._forward is None:
self._forward = self.optimize(self._forward_fn)
return self._forward
def train(self):
self.model.train()
if self.loss is not None:
self.loss.train()
def eval(self):
self.model.eval()
if self.loss is not None:
self.loss.eval()
class Trainer:
def __init__(
self,
executor: Executor,
optimizer: torch.optim.Optimizer,
grad_acc_steps: int,
ema: Optional[float] = None,
):
self.executor = executor
self.optimizer = optimizer
self.grad_acc_steps = grad_acc_steps
self.use_ema = False
if ema is not None:
self.ema_executor = deepcopy(self.executor)
self.ema = EMA(ema, self.ema_executor.model)
self.use_ema = True
self.optimizer.zero_grad(set_to_none=True)
self.steps_since_update = 0
def train(self):
self.executor.train()
if self.use_ema:
self.ema_executor.train()
def eval(self):
self.executor.eval()
if self.use_ema:
self.ema_executor.eval()
def train_step(self, input, target, step=None):
loss = self.executor.forward_backward(input, target)
self.steps_since_update += 1
if self.steps_since_update == self.grad_acc_steps:
if self.executor.scaler is not None:
self.executor.scaler.step(self.optimizer)
self.executor.scaler.update()
else:
self.optimizer.step()
self.optimizer.zero_grad()
self.steps_since_update = 0
torch.cuda.synchronize()
if self.use_ema:
self.ema(self.executor.model, step=step)
return loss
def validation_steps(self) -> Dict[str, Callable]:
vsd: Dict[str, Callable] = {"val": self.executor.forward}
if self.use_ema:
vsd["val_ema"] = self.ema_executor.forward
return vsd
def state_dict(self) -> dict:
res = {
"state_dict": self.executor.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
}
if self.use_ema:
res["state_dict_ema"] = self.ema_executor.model.state_dict()
return res
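# Illustrative wiring of Executor and Trainer (added commentary; the optimizer and the
# input/target tensors are hypothetical, this is a sketch rather than the project entry point):
#   executor = Executor(model, loss=nn.CrossEntropyLoss(), amp=True,
#                       scaler=torch.cuda.amp.GradScaler())
#   trainer = Trainer(executor, optimizer, grad_acc_steps=1, ema=0.999)
#   trainer.train()
#   loss = trainer.train_step(input, target, step=0)   # forward + backward, then optimizer step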
def train(
train_step,
train_loader,
lr_scheduler,
grad_scale_fn,
log_fn,
timeout_handler,
prof=-1,
step=0,
):
interrupted = False
end = time.time()
data_iter = enumerate(train_loader)
for i, (input, target) in data_iter:
bs = input.size(0)
lr = lr_scheduler(i)
data_time = time.time() - end
loss = train_step(input, target, step=step + i)
it_time = time.time() - end
with torch.no_grad():
if torch.distributed.is_initialized():
reduced_loss = utils.reduce_tensor(loss.detach())
else:
reduced_loss = loss.detach()
log_fn(
compute_ips=utils.calc_ips(bs, it_time - data_time),
total_ips=utils.calc_ips(bs, it_time),
data_time=data_time,
compute_time=it_time - data_time,
lr=lr,
loss=reduced_loss.item(),
grad_scale=grad_scale_fn(),
)
end = time.time()
if prof > 0 and (i + 1 >= prof):
time.sleep(5)
break
if ((i + 1) % 20 == 0) and timeout_handler.interrupted:
time.sleep(5)
interrupted = True
break
return interrupted
def validate(infer_fn, val_loader, log_fn, prof=-1, with_loss=True, topk=5):
top1 = log.AverageMeter()
# switch to evaluate mode
end = time.time()
data_iter = enumerate(val_loader)
for i, (input, target) in data_iter:
bs = input.size(0)
data_time = time.time() - end
if with_loss:
loss, output = infer_fn(input, target)
else:
output = infer_fn(input)
with torch.no_grad():
precs = utils.accuracy(output.data, target, topk=(1, topk))
if torch.distributed.is_initialized():
if with_loss:
reduced_loss = utils.reduce_tensor(loss.detach())
precs = map(utils.reduce_tensor, precs)
else:
if with_loss:
reduced_loss = loss.detach()
precs = map(lambda t: t.item(), precs)
infer_result = {f"top{k}": (p, bs) for k, p in zip((1, topk), precs)}
if with_loss:
infer_result["loss"] = (reduced_loss.item(), bs)
torch.cuda.synchronize()
it_time = time.time() - end
top1.record(infer_result["top1"][0], bs)
log_fn(
compute_ips=utils.calc_ips(bs, it_time - data_time),
total_ips=utils.calc_ips(bs, it_time),
data_time=data_time,
compute_time=it_time - data_time,
**infer_result,
)
end = time.time()
if (prof > 0) and (i + 1 >= prof):
time.sleep(5)
break
return top1.get_val()
# Train loop {{{
def train_loop(
trainer: Trainer,
lr_scheduler,
train_loader,
train_loader_len,
val_loader,
logger,
best_prec1=0,
start_epoch=0,
end_epoch=0,
early_stopping_patience=-1,
prof=-1,
skip_training=False,
skip_validation=False,
save_checkpoints=True,
checkpoint_dir="./",
checkpoint_filename="checkpoint.pth.tar",
keep_last_n_checkpoints=0,
topk=5,
):
checkpointer = utils.Checkpointer(
last_filename=checkpoint_filename,
checkpoint_dir=checkpoint_dir,
keep_last_n=keep_last_n_checkpoints,
)
train_metrics = TrainingMetrics(logger)
val_metrics = {
k: ValidationMetrics(logger, k, topk) for k in trainer.validation_steps().keys()
}
training_step = trainer.train_step
prec1 = -1
if early_stopping_patience > 0:
epochs_since_improvement = 0
print(f"RUNNING EPOCHS FROM {start_epoch} TO {end_epoch}")
with utils.TimeoutHandler() as timeout_handler:
interrupted = False
for epoch in range(start_epoch, end_epoch):
if logger is not None:
logger.start_epoch()
if not skip_training:
if logger is not None:
data_iter = logger.iteration_generator_wrapper(
train_loader, mode="train"
)
else:
data_iter = train_loader
trainer.train()
interrupted = train(
training_step,
data_iter,
lambda i: lr_scheduler(trainer.optimizer, i, epoch),
trainer.executor.scaler.get_scale,
train_metrics.log,
timeout_handler,
prof=prof,
step=epoch * train_loader_len,
)
if not skip_validation:
trainer.eval()
for k, infer_fn in trainer.validation_steps().items():
if logger is not None:
data_iter = logger.iteration_generator_wrapper(
val_loader, mode="val"
)
else:
data_iter = val_loader
step_prec1, _ = validate(
infer_fn,
data_iter,
val_metrics[k].log,
prof=prof,
topk=topk,
)
if k == "val":
prec1 = step_prec1
if prec1 > best_prec1:
is_best = True
best_prec1 = prec1
else:
is_best = False
else:
is_best = False
best_prec1 = 0
if logger is not None:
logger.end_epoch()
if save_checkpoints and (
not torch.distributed.is_initialized()
or torch.distributed.get_rank() == 0
):
checkpoint_state = {
"epoch": epoch + 1,
"best_prec1": best_prec1,
**trainer.state_dict(),
}
checkpointer.save_checkpoint(
checkpoint_state,
is_best,
filename=f"checkpoint_{epoch:04}.pth.tar",
)
if early_stopping_patience > 0:
if not is_best:
epochs_since_improvement += 1
else:
epochs_since_improvement = 0
if epochs_since_improvement >= early_stopping_patience:
break
if interrupted:
break
# }}}
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/training.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .entrypoints import nvidia_convnets_processing_utils, nvidia_efficientnet
from .resnet import resnet50, resnext101_32x4d, se_resnext101_32x4d
from .efficientnet import (
efficientnet_b0,
efficientnet_b4,
efficientnet_widese_b0,
efficientnet_widese_b4,
efficientnet_quant_b0,
efficientnet_quant_b4,
)
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/models/__init__.py |
import argparse
import random
import math
import warnings
from typing import List, Any, Optional
from collections import namedtuple, OrderedDict
from dataclasses import dataclass, replace
import torch
from torch import nn
from functools import partial
try:
    from pytorch_quantization import nn as quant_nn
    from ..quantization import switch_on_quantization
except ImportError as e:
    warnings.warn(
        "pytorch_quantization module not found, quantization will not be available"
    )
    quant_nn = None
    import contextlib
    # Fallback defined only when the import fails, so it does not shadow the real implementation.
    @contextlib.contextmanager
    def switch_on_quantization(do_quantization=False):
        assert not do_quantization, "quantization is not available"
        try:
            yield
        finally:
            pass
from .common import (
SequentialSqueezeAndExcitation,
SequentialSqueezeAndExcitationTRT,
LayerBuilder,
StochasticDepthResidual,
Flatten,
)
from .model import (
Model,
ModelParams,
ModelArch,
OptimizerParams,
create_entrypoint,
EntryPoint,
)
# EffNetArch {{{
@dataclass
class EffNetArch(ModelArch):
block: Any
stem_channels: int
feature_channels: int
kernel: List[int]
stride: List[int]
num_repeat: List[int]
expansion: List[int]
channels: List[int]
default_image_size: int
squeeze_excitation_ratio: float = 0.25
def enumerate(self):
return enumerate(
zip(
self.kernel, self.stride, self.num_repeat, self.expansion, self.channels
)
)
def num_layers(self):
_f = lambda l: len(set(map(len, l)))
l = [self.kernel, self.stride, self.num_repeat, self.expansion, self.channels]
assert _f(l) == 1
return len(self.kernel)
@staticmethod
def _scale_width(width_coeff, divisor=8):
def _sw(num_channels):
num_channels *= width_coeff
# Rounding should not go down by more than 10%
rounded_num_channels = max(
divisor, int(num_channels + divisor / 2) // divisor * divisor
)
if rounded_num_channels < 0.9 * num_channels:
rounded_num_channels += divisor
return rounded_num_channels
return _sw
@staticmethod
def _scale_depth(depth_coeff):
def _sd(num_repeat):
return int(math.ceil(num_repeat * depth_coeff))
return _sd
def scale(self, wc, dc, dis, divisor=8) -> "EffNetArch":
sw = EffNetArch._scale_width(wc, divisor=divisor)
sd = EffNetArch._scale_depth(dc)
return EffNetArch(
block=self.block,
stem_channels=sw(self.stem_channels),
feature_channels=sw(self.feature_channels),
kernel=self.kernel,
stride=self.stride,
num_repeat=list(map(sd, self.num_repeat)),
expansion=self.expansion,
channels=list(map(sw, self.channels)),
default_image_size=dis,
squeeze_excitation_ratio=self.squeeze_excitation_ratio,
)
# }}}
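# Added note: scale() implements compound scaling. For instance, effnet_b4_layers
# below is effnet_b0_layers.scale(wc=1.4, dc=1.8, dis=380): every channel count is
# multiplied by 1.4 and rounded to a multiple of 8 (never rounding down by more than
# 10%), every num_repeat is multiplied by 1.8 and rounded up, while kernels, strides
# and expansion factors stay unchanged.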
# EffNetParams {{{
@dataclass
class EffNetParams(ModelParams):
dropout: float
num_classes: int = 1000
activation: str = "silu"
conv_init: str = "fan_in"
bn_momentum: float = 1 - 0.99
bn_epsilon: float = 1e-3
survival_prob: float = 1
quantized: bool = False
trt: bool = False
def parser(self, name):
p = super().parser(name)
p.add_argument(
"--num_classes",
metavar="N",
default=self.num_classes,
type=int,
help="number of classes",
)
p.add_argument(
"--conv_init",
default=self.conv_init,
choices=["fan_in", "fan_out"],
type=str,
help="initialization mode for convolutional layers, see https://pytorch.org/docs/stable/nn.init.html#torch.nn.init.kaiming_normal_",
)
p.add_argument(
"--bn_momentum",
default=self.bn_momentum,
type=float,
help="Batch Norm momentum",
)
p.add_argument(
"--bn_epsilon",
default=self.bn_epsilon,
type=float,
help="Batch Norm epsilon",
)
p.add_argument(
"--survival_prob",
default=self.survival_prob,
type=float,
help="Survival probability for stochastic depth",
)
p.add_argument(
"--dropout", default=self.dropout, type=float, help="Dropout drop prob"
)
p.add_argument("--trt", metavar="True|False", default=self.trt, type=bool)
return p
# }}}
class EfficientNet(nn.Module):
def __init__(
self,
arch: EffNetArch,
dropout: float,
num_classes: int = 1000,
activation: str = "silu",
conv_init: str = "fan_in",
bn_momentum: float = 1 - 0.99,
bn_epsilon: float = 1e-3,
survival_prob: float = 1,
quantized: bool = False,
trt: bool = False,
):
self.quantized = quantized
with switch_on_quantization(self.quantized):
super(EfficientNet, self).__init__()
self.arch = arch
self.num_layers = arch.num_layers()
self.num_blocks = sum(arch.num_repeat)
self.survival_prob = survival_prob
self.builder = LayerBuilder(
LayerBuilder.Config(
activation=activation,
conv_init=conv_init,
bn_momentum=bn_momentum,
bn_epsilon=bn_epsilon,
)
)
self.stem = self._make_stem(arch.stem_channels)
out_channels = arch.stem_channels
plc = 0
layers = []
for i, (k, s, r, e, c) in arch.enumerate():
layer, out_channels = self._make_layer(
block=arch.block,
kernel_size=k,
stride=s,
num_repeat=r,
expansion=e,
in_channels=out_channels,
out_channels=c,
squeeze_excitation_ratio=arch.squeeze_excitation_ratio,
prev_layer_count=plc,
trt=trt,
)
plc = plc + r
layers.append(layer)
self.layers = nn.Sequential(*layers)
self.features = self._make_features(out_channels, arch.feature_channels)
self.classifier = self._make_classifier(
arch.feature_channels, num_classes, dropout
)
def forward(self, x):
x = self.stem(x)
x = self.layers(x)
x = self.features(x)
x = self.classifier(x)
return x
def extract_features(self, x, layers=None):
if layers is None:
layers = [f"layer{i+1}" for i in range(self.num_layers)] + [
"features",
"classifier",
]
run = [
i
for i in range(self.num_layers)
if "classifier" in layers
or "features" in layers
or any([f"layer{j+1}" in layers for j in range(i, self.num_layers)])
]
output = {}
x = self.stem(x)
for l in run:
fn = self.layers[l]
x = fn(x)
if f"layer{l+1}" in layers:
output[f"layer{l+1}"] = x
if "features" in layers or "classifier" in layers:
x = self.features(x)
if "features" in layers:
output["features"] = x
if "classifier" in layers:
output["classifier"] = self.classifier(x)
return output
# helper functions {{{
def _make_stem(self, stem_width):
return nn.Sequential(
OrderedDict(
[
("conv", self.builder.conv3x3(3, stem_width, stride=2)),
("bn", self.builder.batchnorm(stem_width)),
("activation", self.builder.activation()),
]
)
)
def _get_survival_prob(self, block_id):
drop_rate = 1.0 - self.survival_prob
sp = 1.0 - drop_rate * float(block_id) / self.num_blocks
return sp
def _make_features(self, in_channels, num_features):
return nn.Sequential(
OrderedDict(
[
("conv", self.builder.conv1x1(in_channels, num_features)),
("bn", self.builder.batchnorm(num_features)),
("activation", self.builder.activation()),
]
)
)
def _make_classifier(self, num_features, num_classes, dropout):
return nn.Sequential(
OrderedDict(
[
("pooling", nn.AdaptiveAvgPool2d(1)),
("squeeze", Flatten()),
("dropout", nn.Dropout(dropout)),
("fc", nn.Linear(num_features, num_classes)),
]
)
)
def _make_layer(
self,
block,
kernel_size,
stride,
num_repeat,
expansion,
in_channels,
out_channels,
squeeze_excitation_ratio,
prev_layer_count,
trt,
):
layers = []
idx = 0
survival_prob = self._get_survival_prob(idx + prev_layer_count)
blk = block(
self.builder,
kernel_size,
in_channels,
out_channels,
expansion,
stride,
self.arch.squeeze_excitation_ratio,
survival_prob if stride == 1 and in_channels == out_channels else 1.0,
self.quantized,
trt=trt,
)
layers.append((f"block{idx}", blk))
for idx in range(1, num_repeat):
survival_prob = self._get_survival_prob(idx + prev_layer_count)
blk = block(
self.builder,
kernel_size,
out_channels,
out_channels,
expansion,
1, # stride
squeeze_excitation_ratio,
survival_prob,
self.quantized,
trt=trt,
)
layers.append((f"block{idx}", blk))
return nn.Sequential(OrderedDict(layers)), out_channels
def ngc_checkpoint_remap(self, url=None, version=None):
if version is None:
version = url.split("/")[8]
def to_sequential_remap(s):
splited = s.split(".")
if splited[0].startswith("layer"):
return ".".join(
["layers." + str(int(splited[0][len("layer") :]) - 1)] + splited[1:]
)
else:
return s
def no_remap(s):
return s
return {"20.12.0": to_sequential_remap, "21.03.0": to_sequential_remap}.get(
version, no_remap
)
# }}}
# MBConvBlock {{{
class MBConvBlock(nn.Module):
__constants__ = ["quantized"]
def __init__(
self,
builder: LayerBuilder,
depsep_kernel_size: int,
in_channels: int,
out_channels: int,
expand_ratio: int,
stride: int,
squeeze_excitation_ratio: float,
squeeze_hidden=False,
survival_prob: float = 1.0,
quantized: bool = False,
trt: bool = False,
):
super().__init__()
self.quantized = quantized
self.residual = stride == 1 and in_channels == out_channels
hidden_dim = in_channels * expand_ratio
squeeze_base = hidden_dim if squeeze_hidden else in_channels
squeeze_dim = max(1, int(squeeze_base * squeeze_excitation_ratio))
self.expand = (
None
if in_channels == hidden_dim
else builder.conv1x1(in_channels, hidden_dim, bn=True, act=True)
)
self.depsep = builder.convDepSep(
depsep_kernel_size, hidden_dim, hidden_dim, stride, bn=True, act=True
)
if trt or self.quantized:
# Need TRT mode for quantized in order to automatically insert quantization before pooling
self.se: nn.Module = SequentialSqueezeAndExcitationTRT(
hidden_dim, squeeze_dim, builder.activation(), self.quantized
)
else:
self.se: nn.Module = SequentialSqueezeAndExcitation(
hidden_dim, squeeze_dim, builder.activation(), self.quantized
)
self.proj = builder.conv1x1(hidden_dim, out_channels, bn=True)
if survival_prob == 1.0:
self.residual_add = torch.add
else:
self.residual_add = StochasticDepthResidual(survival_prob=survival_prob)
if self.quantized and self.residual:
assert quant_nn is not None, "pytorch_quantization is not available"
self.residual_quantizer = quant_nn.TensorQuantizer(
quant_nn.QuantConv2d.default_quant_desc_input
) # TODO QuantConv2d ?!?
else:
self.residual_quantizer = nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
if not self.residual:
return self.proj(
self.se(self.depsep(x if self.expand is None else self.expand(x)))
)
b = self.proj(
self.se(self.depsep(x if self.expand is None else self.expand(x)))
)
if self.quantized:
x = self.residual_quantizer(x)
return self.residual_add(x, b)
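# Added note: the block follows the usual MBConv layout: an optional 1x1 expansion
# (skipped when the expand ratio is 1), a depthwise convolution with the given kernel
# and stride, squeeze-and-excitation, and a linear 1x1 projection. The skip connection
# (optionally with stochastic depth) is applied only when stride == 1 and the input and
# output channel counts match; its input is additionally quantized when quantized=True.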
def original_mbconv(
builder: LayerBuilder,
depsep_kernel_size: int,
in_channels: int,
out_channels: int,
expand_ratio: int,
stride: int,
squeeze_excitation_ratio: int,
survival_prob: float,
quantized: bool,
trt: bool,
):
return MBConvBlock(
builder,
depsep_kernel_size,
in_channels,
out_channels,
expand_ratio,
stride,
squeeze_excitation_ratio,
squeeze_hidden=False,
survival_prob=survival_prob,
quantized=quantized,
trt=trt,
)
def widese_mbconv(
builder: LayerBuilder,
depsep_kernel_size: int,
in_channels: int,
out_channels: int,
expand_ratio: int,
stride: int,
squeeze_excitation_ratio: int,
survival_prob: float,
quantized: bool,
trt: bool,
):
return MBConvBlock(
builder,
depsep_kernel_size,
in_channels,
out_channels,
expand_ratio,
stride,
squeeze_excitation_ratio,
squeeze_hidden=True,
survival_prob=survival_prob,
quantized=quantized,
trt=trt,
)
# }}}
# EffNet configs {{{
# fmt: off
effnet_b0_layers = EffNetArch(
block = original_mbconv,
stem_channels = 32,
feature_channels=1280,
kernel = [ 3, 3, 5, 3, 5, 5, 3],
stride = [ 1, 2, 2, 2, 1, 2, 1],
num_repeat = [ 1, 2, 2, 3, 3, 4, 1],
expansion = [ 1, 6, 6, 6, 6, 6, 6],
channels = [16, 24, 40, 80, 112, 192, 320],
default_image_size=224,
)
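# The B1-B7 variants below are derived from B0 by compound scaling; here `wc`
# scales channel widths, `dc` scales the number of block repeats, and `dis`
# is the default input image size (interpretation of `scale()` assumed from usage).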
effnet_b1_layers=effnet_b0_layers.scale(wc=1, dc=1.1, dis=240)
effnet_b2_layers=effnet_b0_layers.scale(wc=1.1, dc=1.2, dis=260)
effnet_b3_layers=effnet_b0_layers.scale(wc=1.2, dc=1.4, dis=300)
effnet_b4_layers=effnet_b0_layers.scale(wc=1.4, dc=1.8, dis=380)
effnet_b5_layers=effnet_b0_layers.scale(wc=1.6, dc=2.2, dis=456)
effnet_b6_layers=effnet_b0_layers.scale(wc=1.8, dc=2.6, dis=528)
effnet_b7_layers=effnet_b0_layers.scale(wc=2.0, dc=3.1, dis=600)
urls = {
"efficientnet-b0": "https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_b0_pyt_amp/versions/20.12.0/files/nvidia_efficientnet-b0_210412.pth",
"efficientnet-b4": "https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_b4_pyt_amp/versions/20.12.0/files/nvidia_efficientnet-b4_210412.pth",
"efficientnet-widese-b0": "https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_widese_b0_pyt_amp/versions/20.12.0/files/nvidia_efficientnet-widese-b0_210412.pth",
"efficientnet-widese-b4": "https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_widese_b4_pyt_amp/versions/20.12.0/files/nvidia_efficientnet-widese-b4_210412.pth",
"efficientnet-quant-b0": "https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_b0_pyt_qat_ckpt_fp32/versions/21.03.0/files/nvidia-efficientnet-quant-b0-130421.pth",
"efficientnet-quant-b4": "https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_b4_pyt_qat_ckpt_fp32/versions/21.03.0/files/nvidia-efficientnet-quant-b4-130421.pth",
}
def _m(*args, **kwargs):
return Model(constructor=EfficientNet, *args, **kwargs)
architectures = {
"efficientnet-b0": _m(arch=effnet_b0_layers, params=EffNetParams(dropout=0.2), checkpoint_url=urls["efficientnet-b0"]),
"efficientnet-b1": _m(arch=effnet_b1_layers, params=EffNetParams(dropout=0.2)),
"efficientnet-b2": _m(arch=effnet_b2_layers, params=EffNetParams(dropout=0.3)),
"efficientnet-b3": _m(arch=effnet_b3_layers, params=EffNetParams(dropout=0.3)),
"efficientnet-b4": _m(arch=effnet_b4_layers, params=EffNetParams(dropout=0.4, survival_prob=0.8), checkpoint_url=urls["efficientnet-b4"]),
"efficientnet-b5": _m(arch=effnet_b5_layers, params=EffNetParams(dropout=0.4)),
"efficientnet-b6": _m(arch=effnet_b6_layers, params=EffNetParams(dropout=0.5)),
"efficientnet-b7": _m(arch=effnet_b7_layers, params=EffNetParams(dropout=0.5)),
"efficientnet-widese-b0": _m(arch=replace(effnet_b0_layers, block=widese_mbconv), params=EffNetParams(dropout=0.2), checkpoint_url=urls["efficientnet-widese-b0"]),
"efficientnet-widese-b1": _m(arch=replace(effnet_b1_layers, block=widese_mbconv), params=EffNetParams(dropout=0.2)),
"efficientnet-widese-b2": _m(arch=replace(effnet_b2_layers, block=widese_mbconv), params=EffNetParams(dropout=0.3)),
"efficientnet-widese-b3": _m(arch=replace(effnet_b3_layers, block=widese_mbconv), params=EffNetParams(dropout=0.3)),
"efficientnet-widese-b4": _m(arch=replace(effnet_b4_layers, block=widese_mbconv), params=EffNetParams(dropout=0.4, survival_prob=0.8), checkpoint_url=urls["efficientnet-widese-b4"]),
"efficientnet-widese-b5": _m(arch=replace(effnet_b5_layers, block=widese_mbconv), params=EffNetParams(dropout=0.4)),
"efficientnet-widese-b6": _m(arch=replace(effnet_b6_layers, block=widese_mbconv), params=EffNetParams(dropout=0.5)),
"efficientnet-widese-b7": _m(arch=replace(effnet_b7_layers, block=widese_mbconv), params=EffNetParams(dropout=0.5)),
"efficientnet-quant-b0": _m(arch=effnet_b0_layers, params=EffNetParams(dropout=0.2, quantized=True), checkpoint_url=urls["efficientnet-quant-b0"]),
"efficientnet-quant-b1": _m(arch=effnet_b1_layers, params=EffNetParams(dropout=0.2, quantized=True)),
"efficientnet-quant-b2": _m(arch=effnet_b2_layers, params=EffNetParams(dropout=0.3, quantized=True)),
"efficientnet-quant-b3": _m(arch=effnet_b3_layers, params=EffNetParams(dropout=0.3, quantized=True)),
"efficientnet-quant-b4": _m(arch=effnet_b4_layers, params=EffNetParams(dropout=0.4, survival_prob=0.8, quantized=True), checkpoint_url=urls["efficientnet-quant-b4"]),
"efficientnet-quant-b5": _m(arch=effnet_b5_layers, params=EffNetParams(dropout=0.4, quantized=True)),
"efficientnet-quant-b6": _m(arch=effnet_b6_layers, params=EffNetParams(dropout=0.5, quantized=True)),
"efficientnet-quant-b7": _m(arch=effnet_b7_layers, params=EffNetParams(dropout=0.5, quantized=True)),
}
# fmt: on
# }}}
_ce = lambda n: EntryPoint.create(n, architectures[n])
efficientnet_b0 = _ce("efficientnet-b0")
efficientnet_b4 = _ce("efficientnet-b4")
efficientnet_widese_b0 = _ce("efficientnet-widese-b0")
efficientnet_widese_b4 = _ce("efficientnet-widese-b4")
efficientnet_quant_b0 = _ce("efficientnet-quant-b0")
efficientnet_quant_b4 = _ce("efficientnet-quant-b4")
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/models/efficientnet.py |
from dataclasses import dataclass, asdict, replace
from .common import (
SequentialSqueezeAndExcitationTRT,
SequentialSqueezeAndExcitation,
SqueezeAndExcitation,
SqueezeAndExcitationTRT,
)
from typing import Optional, Callable
import os
import torch
import argparse
from functools import partial
@dataclass
class ModelArch:
pass
@dataclass
class ModelParams:
def parser(self, name):
return argparse.ArgumentParser(
description=f"{name} arguments", add_help=False, usage=""
)
@dataclass
class OptimizerParams:
pass
@dataclass
class Model:
constructor: Callable
arch: ModelArch
params: Optional[ModelParams]
optimizer_params: Optional[OptimizerParams] = None
checkpoint_url: Optional[str] = None
def torchhub_docstring(name: str):
return f"""Constructs a {name} model.
For detailed information on model input and output, training recipes, inference and performance
visit: github.com/NVIDIA/DeepLearningExamples and/or ngc.nvidia.com
Args:
pretrained (bool, True): If True, returns a model pretrained on the IMAGENET dataset.
"""
class EntryPoint:
@staticmethod
def create(name: str, model: Model):
ep = EntryPoint(name, model)
ep.__doc__ = torchhub_docstring(name)
return ep
def __init__(self, name: str, model: Model):
self.name = name
self.model = model
def __call__(
self,
pretrained=True,
pretrained_from_file=None,
state_dict_key_map_fn=None,
**kwargs,
):
assert not (pretrained and (pretrained_from_file is not None))
params = replace(self.model.params, **kwargs)
model = self.model.constructor(arch=self.model.arch, **asdict(params))
state_dict = None
if pretrained:
assert self.model.checkpoint_url is not None
state_dict = torch.hub.load_state_dict_from_url(
self.model.checkpoint_url,
map_location=torch.device("cpu"),
progress=True,
)
if pretrained_from_file is not None:
if os.path.isfile(pretrained_from_file):
print(
"=> loading pretrained weights from '{}'".format(
pretrained_from_file
)
)
state_dict = torch.load(
pretrained_from_file, map_location=torch.device("cpu")
)
else:
print(
"=> no pretrained weights found at '{}'".format(
pretrained_from_file
)
)
if state_dict is not None:
state_dict = {
k[len("module.") :] if k.startswith("module.") else k: v
for k, v in state_dict.items()
}
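# Checkpoints may store squeeze-and-excitation weights either as nn.Linear (2D)
# or as 1x1 nn.Conv2d (4D); `reshape` below converts between the two layouts
# depending on which SE implementation the instantiated model uses.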
def reshape(t, conv):
if conv:
if len(t.shape) == 4:
return t
else:
return t.view(t.shape[0], -1, 1, 1)
else:
if len(t.shape) == 4:
return t.view(t.shape[0], t.shape[1])
else:
return t
if state_dict_key_map_fn is not None:
state_dict = {
state_dict_key_map_fn(k): v for k, v in state_dict.items()
}
if pretrained and hasattr(model, "ngc_checkpoint_remap"):
remap_fn = model.ngc_checkpoint_remap(url=self.model.checkpoint_url)
state_dict = {remap_fn(k): v for k, v in state_dict.items()}
def _se_layer_uses_conv(m):
return any(
map(
partial(isinstance, m),
[
SqueezeAndExcitationTRT,
SequentialSqueezeAndExcitationTRT,
],
)
)
state_dict = {
k: reshape(
v,
conv=_se_layer_uses_conv(
dict(model.named_modules())[".".join(k.split(".")[:-2])]
),
)
if is_se_weight(k, v)
else v
for k, v in state_dict.items()
}
model.load_state_dict(state_dict)
return model
def parser(self):
if self.model.params is None:
return None
parser = self.model.params.parser(self.name)
parser.add_argument(
"--pretrained-from-file",
default=None,
type=str,
metavar="PATH",
help="load weights from local file",
)
if self.model.checkpoint_url is not None:
parser.add_argument(
"--pretrained",
default=False,
action="store_true",
help="load pretrained weights from NGC",
)
return parser
def is_se_weight(key, value):
return key.endswith("squeeze.weight") or key.endswith("expand.weight")
def create_entrypoint(m: Model):
def _ep(**kwargs):
params = replace(m.params, **kwargs)
return m.constructor(arch=m.arch, **asdict(params))
return _ep
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/models/model.py |
import copy
from collections import OrderedDict
from dataclasses import dataclass
from typing import Optional
import torch
import warnings
from torch import nn
import torch.nn.functional as F
try:
from pytorch_quantization import nn as quant_nn
except ImportError as e:
warnings.warn(
"pytorch_quantization module not found, quantization will not be available"
)
quant_nn = None
# LayerBuilder {{{
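# LayerBuilder is a small factory for convolution / batch-norm / activation layers
# that applies a consistent weight initialization and activation choice, configured
# once via LayerBuilder.Config and reused by the architectures defined in this package.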
class LayerBuilder(object):
@dataclass
class Config:
activation: str = "relu"
conv_init: str = "fan_in"
bn_momentum: Optional[float] = None
bn_epsilon: Optional[float] = None
def __init__(self, config: "LayerBuilder.Config"):
self.config = config
def conv(
self,
kernel_size,
in_planes,
out_planes,
groups=1,
stride=1,
bn=False,
zero_init_bn=False,
act=False,
):
conv = nn.Conv2d(
in_planes,
out_planes,
kernel_size=kernel_size,
groups=groups,
stride=stride,
padding=int((kernel_size - 1) / 2),
bias=False,
)
nn.init.kaiming_normal_(
conv.weight, mode=self.config.conv_init, nonlinearity="relu"
)
layers = [("conv", conv)]
if bn:
layers.append(("bn", self.batchnorm(out_planes, zero_init_bn)))
if act:
layers.append(("act", self.activation()))
if bn or act:
return nn.Sequential(OrderedDict(layers))
else:
return conv
def convDepSep(
self, kernel_size, in_planes, out_planes, stride=1, bn=False, act=False
):
"""3x3 depthwise separable convolution with padding"""
c = self.conv(
kernel_size,
in_planes,
out_planes,
groups=in_planes,
stride=stride,
bn=bn,
act=act,
)
return c
def conv3x3(self, in_planes, out_planes, stride=1, groups=1, bn=False, act=False):
"""3x3 convolution with padding"""
c = self.conv(
3, in_planes, out_planes, groups=groups, stride=stride, bn=bn, act=act
)
return c
def conv1x1(self, in_planes, out_planes, stride=1, groups=1, bn=False, act=False):
"""1x1 convolution with padding"""
c = self.conv(
1, in_planes, out_planes, groups=groups, stride=stride, bn=bn, act=act
)
return c
def conv7x7(self, in_planes, out_planes, stride=1, groups=1, bn=False, act=False):
"""7x7 convolution with padding"""
c = self.conv(
7, in_planes, out_planes, groups=groups, stride=stride, bn=bn, act=act
)
return c
def conv5x5(self, in_planes, out_planes, stride=1, groups=1, bn=False, act=False):
"""5x5 convolution with padding"""
c = self.conv(
5, in_planes, out_planes, groups=groups, stride=stride, bn=bn, act=act
)
return c
def batchnorm(self, planes, zero_init=False):
bn_cfg = {}
if self.config.bn_momentum is not None:
bn_cfg["momentum"] = self.config.bn_momentum
if self.config.bn_epsilon is not None:
bn_cfg["eps"] = self.config.bn_epsilon
bn = nn.BatchNorm2d(planes, **bn_cfg)
gamma_init_val = 0 if zero_init else 1
nn.init.constant_(bn.weight, gamma_init_val)
nn.init.constant_(bn.bias, 0)
return bn
def activation(self):
return {
"silu": lambda: nn.SiLU(inplace=True),
"relu": lambda: nn.ReLU(inplace=True),
"onnx-silu": ONNXSiLU,
}[self.config.activation]()
# LayerBuilder }}}
# LambdaLayer {{{
class LambdaLayer(nn.Module):
def __init__(self, lmbd):
super().__init__()
self.lmbd = lmbd
def forward(self, x):
return self.lmbd(x)
# }}}
# SqueezeAndExcitation {{{
class SqueezeAndExcitation(nn.Module):
def __init__(self, in_channels, squeeze, activation):
super(SqueezeAndExcitation, self).__init__()
self.squeeze = nn.Linear(in_channels, squeeze)
self.expand = nn.Linear(squeeze, in_channels)
self.activation = activation
self.sigmoid = nn.Sigmoid()
def forward(self, x):
return self._attention(x)
def _attention(self, x):
out = torch.mean(x, [2, 3])
out = self.squeeze(out)
out = self.activation(out)
out = self.expand(out)
out = self.sigmoid(out)
out = out.unsqueeze(2).unsqueeze(3)
return out
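# SqueezeAndExcitationTRT is functionally equivalent to SqueezeAndExcitation but uses
# AdaptiveAvgPool2d + 1x1 convolutions instead of a spatial mean + Linear layers,
# a form that is friendlier to TensorRT / ONNX export (see the `trt` flags elsewhere here).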
class SqueezeAndExcitationTRT(nn.Module):
def __init__(self, in_channels, squeeze, activation):
super(SqueezeAndExcitationTRT, self).__init__()
self.pooling = nn.AdaptiveAvgPool2d(1)
self.squeeze = nn.Conv2d(in_channels, squeeze, 1)
self.expand = nn.Conv2d(squeeze, in_channels, 1)
self.activation = activation
self.sigmoid = nn.Sigmoid()
def forward(self, x):
return self._attention(x)
def _attention(self, x):
out = self.pooling(x)
out = self.squeeze(out)
out = self.activation(out)
out = self.expand(out)
out = self.sigmoid(out)
return out
# }}}
# EMA {{{
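# EMA keeps an exponential moving average of a module's parameters:
# ema_param <- mu * ema_param + (1 - mu) * param, with an optional warm-up
# schedule for mu when a step number is provided. A hypothetical usage sketch:
#   model_ema = copy.deepcopy(model)
#   ema = EMA(mu=0.999, module_ema=model_ema)
#   ... after each optimizer step: ema(model, step=global_step)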
class EMA:
def __init__(self, mu, module_ema):
self.mu = mu
self.module_ema = module_ema
def __call__(self, module, step=None):
if step is None:
mu = self.mu
else:
mu = min(self.mu, (1.0 + step) / (10 + step))
def strip_module(s: str) -> str:
return s
mesd = self.module_ema.state_dict()
with torch.no_grad():
for name, x in module.state_dict().items():
if name.endswith("num_batches_tracked"):
continue
n = strip_module(name)
mesd[n].mul_(mu)
mesd[n].add_((1.0 - mu) * x)
# }}}
# ONNXSiLU {{{
# Since torch.nn.SiLU is not supported in ONNX,
# this implementation must be used in the exported model (it needs 15-20% more GPU memory)
class ONNXSiLU(nn.Module):
def __init__(self, *args, **kwargs):
super(ONNXSiLU, self).__init__()
def forward(self, x):
return x * torch.sigmoid(x)
# }}}
class SequentialSqueezeAndExcitation(SqueezeAndExcitation):
def __init__(self, in_channels, squeeze, activation, quantized=False):
super().__init__(in_channels, squeeze, activation)
self.quantized = quantized
if quantized:
assert quant_nn is not None, "pytorch_quantization is not available"
self.mul_a_quantizer = quant_nn.TensorQuantizer(
quant_nn.QuantConv2d.default_quant_desc_input
)
self.mul_b_quantizer = quant_nn.TensorQuantizer(
quant_nn.QuantConv2d.default_quant_desc_input
)
else:
self.mul_a_quantizer = nn.Identity()
self.mul_b_quantizer = nn.Identity()
def forward(self, x):
out = self._attention(x)
if not self.quantized:
return out * x
else:
x_quant = self.mul_a_quantizer(out)
return x_quant * self.mul_b_quantizer(x)
class SequentialSqueezeAndExcitationTRT(SqueezeAndExcitationTRT):
def __init__(self, in_channels, squeeze, activation, quantized=False):
super().__init__(in_channels, squeeze, activation)
self.quantized = quantized
if quantized:
assert quant_nn is not None, "pytorch_quantization is not available"
self.mul_a_quantizer = quant_nn.TensorQuantizer(
quant_nn.QuantConv2d.default_quant_desc_input
)
self.mul_b_quantizer = quant_nn.TensorQuantizer(
quant_nn.QuantConv2d.default_quant_desc_input
)
else:
self.mul_a_quantizer = nn.Identity()
self.mul_b_quantizer = nn.Identity()
def forward(self, x):
out = self._attention(x)
if not self.quantized:
return out * x
else:
x_quant = self.mul_a_quantizer(out)
return x_quant * self.mul_b_quantizer(x)
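# StochasticDepthResidual implements stochastic depth for residual branches:
# during training the branch output `x` is randomly zeroed (and rescaled by
# F.dropout) with probability 1 - survival_prob before being added to `residual`;
# at inference time it reduces to a plain residual add.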
class StochasticDepthResidual(nn.Module):
def __init__(self, survival_prob: float):
super().__init__()
self.survival_prob = survival_prob
self.register_buffer("mask", torch.ones(()), persistent=False)
def forward(self, residual: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
if not self.training:
return torch.add(residual, other=x)
else:
with torch.no_grad():
mask = F.dropout(
self.mask,
p=1 - self.survival_prob,
training=self.training,
inplace=False,
)
return torch.addcmul(residual, mask, x)
class Flatten(nn.Module):
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x.squeeze(-1).squeeze(-1)
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/models/common.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION
# Copyright (c) 2017- Facebook, Inc
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
from collections import OrderedDict
from dataclasses import dataclass
from typing import List, Dict, Callable, Any, Type
import torch
import torch.nn as nn
from .common import (
SqueezeAndExcitation,
LayerBuilder,
SqueezeAndExcitationTRT,
)
from .model import (
Model,
ModelParams,
ModelArch,
EntryPoint,
)
__all__ = ["ResNet", "resnet_configs"]
# BasicBlock {{{
class BasicBlock(nn.Module):
def __init__(
self,
builder,
inplanes,
planes,
expansion,
stride=1,
cardinality=1,
downsample=None,
fused_se=True,
last_bn_0_init=False,
trt=False,
):
super(BasicBlock, self).__init__()
self.conv1 = builder.conv3x3(inplanes, planes, stride, groups=cardinality)
self.bn1 = builder.batchnorm(planes)
self.relu = builder.activation()
self.conv2 = builder.conv3x3(
planes, planes * expansion, groups=cardinality
)
self.bn2 = builder.batchnorm(planes * expansion, zero_init=last_bn_0_init)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
if self.bn1 is not None:
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if self.bn2 is not None:
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# BasicBlock }}}
# Bottleneck {{{
class Bottleneck(nn.Module):
def __init__(
self,
builder,
inplanes,
planes,
expansion,
stride=1,
cardinality=1,
se=False,
se_squeeze=16,
downsample=None,
fused_se=True,
last_bn_0_init=False,
trt=False,
):
super(Bottleneck, self).__init__()
self.conv1 = builder.conv1x1(inplanes, planes)
self.bn1 = builder.batchnorm(planes)
self.conv2 = builder.conv3x3(planes, planes, groups=cardinality, stride=stride)
self.bn2 = builder.batchnorm(planes)
self.conv3 = builder.conv1x1(planes, planes * expansion)
self.bn3 = builder.batchnorm(planes * expansion, zero_init=last_bn_0_init)
self.relu = builder.activation()
self.downsample = downsample
self.stride = stride
self.fused_se = fused_se
if se:
self.squeeze = (
SqueezeAndExcitation(
planes * expansion, se_squeeze, builder.activation()
)
if not trt
else SqueezeAndExcitationTRT(
planes * expansion, se_squeeze, builder.activation()
)
)
else:
self.squeeze = None
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
if self.squeeze is None:
out += residual
else:
if self.fused_se:
out = torch.addcmul(residual, out, self.squeeze(out), value=1)
else:
out = residual + out * self.squeeze(out)
out = self.relu(out)
return out
class SEBottleneck(Bottleneck):
def __init__(
self,
builder,
inplanes,
planes,
expansion,
stride=1,
cardinality=1,
downsample=None,
fused_se=True,
last_bn_0_init=False,
trt=False,
):
super(SEBottleneck, self).__init__(
builder,
inplanes,
planes,
expansion,
stride=stride,
cardinality=cardinality,
se=True,
se_squeeze=16,
downsample=downsample,
fused_se=fused_se,
last_bn_0_init=last_bn_0_init,
trt=trt,
)
# Bottleneck }}}
class ResNet(nn.Module):
@dataclass
class Arch(ModelArch):
block: Type[Bottleneck]
layers: List[int] # arch
widths: List[int] # arch
expansion: int
cardinality: int = 1
stem_width: int = 64
activation: str = "relu"
default_image_size: int = 224
@dataclass
class Params(ModelParams):
num_classes: int = 1000
last_bn_0_init: bool = False
conv_init: str = "fan_in"
trt: bool = False
fused_se: bool = True
def parser(self, name):
p = super().parser(name)
p.add_argument(
"--num_classes",
metavar="N",
default=self.num_classes,
type=int,
help="number of classes",
)
p.add_argument(
"--last_bn_0_init",
metavar="True|False",
default=self.last_bn_0_init,
type=bool,
)
p.add_argument(
"--conv_init",
default=self.conv_init,
choices=["fan_in", "fan_out"],
type=str,
help="initialization mode for convolutional layers, see https://pytorch.org/docs/stable/nn.init.html#torch.nn.init.kaiming_normal_",
)
p.add_argument("--trt", metavar="True|False", default=self.trt, type=bool)
p.add_argument(
"--fused_se", metavar="True|False", default=self.fused_se, type=bool
)
return p
def __init__(
self,
arch: Arch,
num_classes: int = 1000,
last_bn_0_init: bool = False,
conv_init: str = "fan_in",
trt: bool = False,
fused_se: bool = True,
):
super(ResNet, self).__init__()
self.arch = arch
self.builder = LayerBuilder(
LayerBuilder.Config(activation=arch.activation, conv_init=conv_init)
)
self.last_bn_0_init = last_bn_0_init
self.conv1 = self.builder.conv7x7(3, arch.stem_width, stride=2)
self.bn1 = self.builder.batchnorm(arch.stem_width)
self.relu = self.builder.activation()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
inplanes = arch.stem_width
assert len(arch.widths) == len(arch.layers)
self.num_layers = len(arch.widths)
layers = []
for i, (w, l) in enumerate(zip(arch.widths, arch.layers)):
layer, inplanes = self._make_layer(
arch.block,
arch.expansion,
inplanes,
w,
l,
cardinality=arch.cardinality,
stride=1 if i == 0 else 2,
trt=trt,
fused_se=fused_se,
)
layers.append(layer)
self.layers = nn.Sequential(*layers)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(arch.widths[-1] * arch.expansion, num_classes)
def stem(self, x):
x = self.conv1(x)
if self.bn1 is not None:
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
return x
def classifier(self, x):
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def forward(self, x):
x = self.stem(x)
x = self.layers(x)
x = self.classifier(x)
return x
def extract_features(self, x, layers=None):
if layers is None:
layers = [f"layer{i+1}" for i in range(self.num_layers)] + ["classifier"]
run = [
i
for i in range(self.num_layers)
if "classifier" in layers
or any([f"layer{j+1}" in layers for j in range(i, self.num_layers)])
]
output = {}
x = self.stem(x)
for l in run:
fn = self.layers[l]
x = fn(x)
if f"layer{l+1}" in layers:
output[f"layer{l+1}"] = x
if "classifier" in layers:
output["classifier"] = self.classifier(x)
return output
# helper functions {{{
def _make_layer(
self,
block,
expansion,
inplanes,
planes,
blocks,
stride=1,
cardinality=1,
trt=False,
fused_se=True,
):
downsample = None
if stride != 1 or inplanes != planes * expansion:
dconv = self.builder.conv1x1(inplanes, planes * expansion, stride=stride)
dbn = self.builder.batchnorm(planes * expansion)
if dbn is not None:
downsample = nn.Sequential(dconv, dbn)
else:
downsample = dconv
layers = []
for i in range(blocks):
layers.append(
block(
self.builder,
inplanes,
planes,
expansion,
stride=stride if i == 0 else 1,
cardinality=cardinality,
downsample=downsample if i == 0 else None,
fused_se=fused_se,
last_bn_0_init=self.last_bn_0_init,
trt=trt,
)
)
inplanes = planes * expansion
return nn.Sequential(*layers), inplanes
def ngc_checkpoint_remap(self, url=None, version=None):
if version is None:
version = url.split("/")[8]
def to_sequential_remap(s):
splited = s.split(".")
if splited[0].startswith("layer"):
return ".".join(
["layers." + str(int(splited[0][len("layer") :]) - 1)] + splited[1:]
)
else:
return s
def no_remap(s):
return s
return {"20.06.0": to_sequential_remap}.get(version, no_remap)
# }}}
__models: Dict[str, Model] = {
"resnet50": Model(
constructor=ResNet,
arch=ResNet.Arch(
stem_width=64,
block=Bottleneck,
layers=[3, 4, 6, 3],
widths=[64, 128, 256, 512],
expansion=4,
default_image_size=224,
),
params=ResNet.Params(),
checkpoint_url="https://api.ngc.nvidia.com/v2/models/nvidia/resnet50_pyt_amp/versions/20.06.0/files/nvidia_resnet50_200821.pth.tar",
),
"resnext101-32x4d": Model(
constructor=ResNet,
arch=ResNet.Arch(
stem_width=64,
block=Bottleneck,
layers=[3, 4, 23, 3],
widths=[128, 256, 512, 1024],
expansion=2,
cardinality=32,
default_image_size=224,
),
params=ResNet.Params(),
checkpoint_url="https://api.ngc.nvidia.com/v2/models/nvidia/resnext101_32x4d_pyt_amp/versions/20.06.0/files/nvidia_resnext101-32x4d_200821.pth.tar",
),
"se-resnext101-32x4d": Model(
constructor=ResNet,
arch=ResNet.Arch(
stem_width=64,
block=SEBottleneck,
layers=[3, 4, 23, 3],
widths=[128, 256, 512, 1024],
expansion=2,
cardinality=32,
default_image_size=224,
),
params=ResNet.Params(),
checkpoint_url="https://api.ngc.nvidia.com/v2/models/nvidia/seresnext101_32x4d_pyt_amp/versions/20.06.0/files/nvidia_se-resnext101-32x4d_200821.pth.tar",
),
}
_ce = lambda n: EntryPoint.create(n, __models[n])
resnet50 = _ce("resnet50")
resnext101_32x4d = _ce("resnext101-32x4d")
se_resnext101_32x4d = _ce("se-resnext101-32x4d")
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/models/resnet.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def nvidia_efficientnet(type='efficientnet-b0', pretrained=True, **kwargs):
"""Constructs a EfficientNet model.
For detailed information on model input and output, training recipes, inference and performance
visit: github.com/NVIDIA/DeepLearningExamples and/or ngc.nvidia.com
Args:
pretrained (bool, True): If True, returns a model pretrained on the IMAGENET dataset.
"""
from .efficientnet import _ce
return _ce(type)(pretrained=pretrained, **kwargs)
def nvidia_convnets_processing_utils():
import numpy as np
import torch
from PIL import Image
import torchvision.transforms as transforms
import json
import requests
import validators
class Processing:
@staticmethod
def prepare_input_from_uri(uri, cuda=False):
img_transforms = transforms.Compose(
[transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()]
)
if validators.url(uri):
img = Image.open(requests.get(uri, stream=True).raw)
else:
img = Image.open(uri)
img = img_transforms(img)
with torch.no_grad():
# mean and std are not multiplied by 255 as they are in the training script:
# the torch dataloader there reads data as bytes, whereas loading directly
# through PIL (and ToTensor) yields a tensor of floats in the [0, 1] range
mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
img = img.float()
if cuda:
mean = mean.cuda()
std = std.cuda()
img = img.cuda()
input = img.unsqueeze(0).sub_(mean).div_(std)
return input
@staticmethod
def pick_n_best(predictions, n=5):
predictions = predictions.float().cpu().numpy()
topN = np.argsort(-1*predictions, axis=-1)[:,:n]
imgnet_classes = Processing.get_imgnet_classes()
results=[]
for idx,case in enumerate(topN):
r = []
for c, v in zip(imgnet_classes[case], predictions[idx, case]):
r.append((f"{c}", f"{100*v:.1f}%"))
print(f"sample {idx}: {r}")
results.append(r)
return results
@staticmethod
def get_imgnet_classes():
import os
import json
imgnet_classes_json = "LOC_synset_mapping.json"
if not os.path.exists(imgnet_classes_json):
print("Downloading Imagenet Classes names.")
import urllib
urllib.request.urlretrieve(
"https://raw.githubusercontent.com/NVIDIA/DeepLearningExamples/master/PyTorch/Classification/ConvNets/LOC_synset_mapping.json",
filename=imgnet_classes_json)
print("Downloading finished.")
imgnet_classes = np.array(json.load(open(imgnet_classes_json, "r")))
return imgnet_classes
return Processing()
| DeepLearningExamples-master | PyTorch/Classification/ConvNets/image_classification/models/entrypoints.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
abspath = os.path.dirname(os.path.realpath(__file__))
print(find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]))
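# The setup below builds four CUDA extensions used by DLRM: a fused embedding gather,
# dot-based interaction kernels for Volta (sm_70) and Ampere (sm_80), and a sparse
# gather op; the -gencode flags control which GPU architectures each one is compiled for.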
setup(name="dlrm",
package_dir={'dlrm': 'dlrm'},
version="1.0.0",
description="Reimplementation of Facebook's DLRM",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
zip_safe=False,
ext_modules=[
CUDAExtension(name="dlrm.cuda_ext.fused_embedding",
sources=[
os.path.join(abspath, "dlrm/cuda_src/pytorch_embedding_ops.cpp"),
os.path.join(abspath, "dlrm/cuda_src/gather_gpu_fused_pytorch_impl.cu")
],
extra_compile_args={
'cxx': [],
'nvcc': ["-arch=sm_70",
'-gencode', 'arch=compute_80,code=sm_80']
}),
CUDAExtension(name="dlrm.cuda_ext.interaction_volta",
sources=[
os.path.join(abspath, "dlrm/cuda_src/dot_based_interact_volta/pytorch_ops.cpp"),
os.path.join(abspath, "dlrm/cuda_src/dot_based_interact_volta/dot_based_interact_pytorch_types.cu")
],
extra_compile_args={
'cxx': [],
'nvcc': [
'-DCUDA_HAS_FP16=1',
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
'-gencode', 'arch=compute_70,code=sm_70']
}),
CUDAExtension(name="dlrm.cuda_ext.interaction_ampere",
sources=[
os.path.join(abspath, "dlrm/cuda_src/dot_based_interact_ampere/pytorch_ops.cpp"),
os.path.join(abspath, "dlrm/cuda_src/dot_based_interact_ampere/dot_based_interact_pytorch_types.cu")
],
extra_compile_args={
'cxx': [],
'nvcc': [
'-DCUDA_HAS_FP16=1',
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
'-gencode', 'arch=compute_80,code=sm_80']
}),
CUDAExtension(name="dlrm.cuda_ext.sparse_gather",
sources=[
os.path.join(abspath, "dlrm/cuda_src/sparse_gather/sparse_pytorch_ops.cpp"),
os.path.join(abspath, "dlrm/cuda_src/sparse_gather/gather_gpu.cu")
],
extra_compile_args={
'cxx': [],
'nvcc': ["-arch=sm_70",
'-gencode', 'arch=compute_80,code=sm_80']
})
],
cmdclass={"build_ext": BuildExtension})
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/setup.py |
#!/usr/bin/python
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import sys
import torch
import numpy as np
from dlrm.data.datasets import SyntheticDataset
from dlrm.model.distributed import DistributedDlrm
from dlrm.utils.checkpointing.distributed import make_distributed_checkpoint_loader
from dlrm.utils.distributed import get_gpu_batch_sizes, get_device_mapping, is_main_process
from triton import deployer_lib
sys.path.append('../')
def get_model_args(model_args):
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=1, type=int)
parser.add_argument("--fp16", action="store_true", default=False)
parser.add_argument("--dump_perf_data", type=str, default=None)
parser.add_argument("--model_checkpoint", type=str, default=None)
parser.add_argument("--num_numerical_features", type=int, default=13)
parser.add_argument("--embedding_dim", type=int, default=128)
parser.add_argument("--embedding_type", type=str, default="joint", choices=["joint", "multi_table"])
parser.add_argument("--top_mlp_sizes", type=int, nargs="+",
default=[1024, 1024, 512, 256, 1])
parser.add_argument("--bottom_mlp_sizes", type=int, nargs="+",
default=[512, 256, 128])
parser.add_argument("--interaction_op", type=str, default="dot",
choices=["dot", "cat"])
parser.add_argument("--cpu", default=False, action="store_true")
parser.add_argument("--dataset", type=str, required=True)
return parser.parse_args(model_args)
def initialize_model(args, categorical_sizes, device_mapping):
''' return model, ready to trace '''
device = "cuda:0" if not args.cpu else "cpu"
model_config = {
'top_mlp_sizes': args.top_mlp_sizes,
'bottom_mlp_sizes': args.bottom_mlp_sizes,
'embedding_dim': args.embedding_dim,
'interaction_op': args.interaction_op,
'categorical_feature_sizes': categorical_sizes,
'num_numerical_features': args.num_numerical_features,
'embedding_type': args.embedding_type,
'hash_indices': False,
'use_cpp_mlp': False,
'fp16': args.fp16,
'device': device,
}
model = DistributedDlrm.from_dict(model_config)
model.to(device)
if args.model_checkpoint:
checkpoint_loader = make_distributed_checkpoint_loader(device_mapping=device_mapping, rank=0)
checkpoint_loader.load_checkpoint(model, args.model_checkpoint)
model.to(device)
if args.fp16:
model = model.half()
return model
def get_dataloader(args, categorical_sizes):
dataset_test = SyntheticDataset(num_entries=2000,
batch_size=args.batch_size,
numerical_features=args.num_numerical_features,
categorical_feature_sizes=categorical_sizes,
device="cpu" if args.cpu else "cuda:0")
class RemoveOutput:
def __init__(self, dataset):
self.dataset = dataset
def __getitem__(self, idx):
value = self.dataset[idx]
if args.fp16:
value = (value[0].half(), value[1].long(), value[2])
else:
value = (value[0], value[1].long(), value[2])
return value[:-1]
def __len__(self):
return len(self.dataset)
test_loader = torch.utils.data.DataLoader(RemoveOutput(dataset_test),
batch_size=None,
num_workers=0,
pin_memory=False)
return test_loader
def main():
# deploys and returns removed deployer arguments
deployer, model_args = deployer_lib.create_deployer(sys.argv[1:],
get_model_args)
with open(os.path.join(model_args.dataset, "model_size.json")) as f:
categorical_sizes = list(json.load(f).values())
categorical_sizes = [s + 1 for s in categorical_sizes]
categorical_sizes = np.array(categorical_sizes)
device_mapping = get_device_mapping(categorical_sizes, num_gpus=1)
categorical_sizes = categorical_sizes[device_mapping['embedding'][0]].tolist()
model = initialize_model(model_args, categorical_sizes, device_mapping)
dataloader = get_dataloader(model_args, categorical_sizes)
if model_args.dump_perf_data:
input_0, input_1 = next(iter(dataloader))
if model_args.fp16:
input_0 = input_0.half()
os.makedirs(model_args.dump_perf_data, exist_ok=True)
input_0.detach().cpu().numpy()[0].tofile(os.path.join(model_args.dump_perf_data, "input__0"))
input_1.detach().cpu().numpy()[0].tofile(os.path.join(model_args.dump_perf_data, "input__1"))
deployer.deploy(dataloader, model)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/triton/deployer.py |
#!/usr/bin/python
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import shutil
import time
import json
import onnx
import torch
import argparse
import statistics
import onnxruntime
from collections import Counter
torch_type_to_triton_type = {
torch.bool: 'TYPE_BOOL',
torch.int8: 'TYPE_INT8',
torch.int16: 'TYPE_INT16',
torch.int32: 'TYPE_INT32',
torch.int64: 'TYPE_INT64',
torch.uint8: 'TYPE_UINT8',
torch.float16: 'TYPE_FP16',
torch.float32: 'TYPE_FP32',
torch.float64: 'TYPE_FP64'
}
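# The templates below are rendered into a Triton `config.pbtxt`: one input/output
# stanza per tensor, optional dynamic batching, and an optional TensorRT accelerator
# section that is only emitted for the onnxruntime_onnx platform.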
CONFIG_TEMPLATE = r"""
name: "{model_name}"
platform: "{platform}"
max_batch_size: {max_batch_size}
input [
{spec_inputs}
]
output [
{spec_outputs}
]
{dynamic_batching}
{model_optimizations}
instance_group [
{{
count: {engine_count}
kind: {kind}
gpus: [ {gpu_list} ]
}}
]
"""
INPUT_TEMPLATE = r"""
{{
name: "input__{num}"
data_type: {type}
dims: {dims}
{reshape}
}},"""
OUTPUT_TEMPLATE = r"""
{{
name: "output__{num}"
data_type: {type}
dims: {dims}
{reshape}
}},"""
MODEL_OPTIMIZATION_TEMPLATE = r"""
optimization {{
execution_accelerators {{
gpu_execution_accelerator: [
{{
name: "tensorrt"
}}
]
}}
}}
"""
def remove_empty_lines(text):
''' removes empty lines from text, returns the result '''
ret = "".join([s for s in text.strip().splitlines(True) if s.strip()])
return ret
def create_deployer(argv, model_args_parser):
''' takes a list of arguments, returns a deployer object and the list of unused arguments '''
parser = argparse.ArgumentParser()
# required args
method = parser.add_mutually_exclusive_group(required=True)
method.add_argument('--ts-script',
action='store_true',
help='convert to torchscript using torch.jit.script')
method.add_argument('--ts-trace',
action='store_true',
help='convert to torchscript using torch.jit.trace')
method.add_argument('--onnx',
action='store_true',
help='convert to onnx using torch.onnx.export')
# triton related args
arguments = parser.add_argument_group('triton related flags')
arguments.add_argument('--triton-no-cuda',
action='store_true',
help='Use the CPU for tracing.')
arguments.add_argument(
'--triton-model-name',
type=str,
default="model",
help="exports to appropriate directory structure for triton")
arguments.add_argument(
"--triton-model-version",
type=int,
default=1,
help="exports to appropriate directory structure for triton")
arguments.add_argument(
"--triton-max-batch-size",
type=int,
default=8,
help="Specifies the 'max_batch_size' in the triton model config.\
See the triton documentation for more info.")
arguments.add_argument(
"--triton-dyn-batching-delay",
type=float,
default=0,
help=
"Determines the dynamic_batching queue delay in milliseconds(ms) for\
the triton model config. Use '0' or '-1' to specify static batching.\
See the triton documentation for more info.")
arguments.add_argument(
"--triton-engine-count",
type=int,
default=1,
help=
"Specifies the 'instance_group' count value in the triton model config.\
See the triton documentation for more info.")
arguments.add_argument('--save-dir',
type=str,
default='./triton_models',
help='Saved model directory')
parser.add_argument("--deploy_cpu", default=False, action="store_true")
# other args
arguments = parser.add_argument_group('other flags')
# remainder args
arguments.add_argument(
'model_arguments',
nargs=argparse.REMAINDER,
help=
'arguments that will be ignored by deployer lib and will be forwarded to your deployer script'
)
#
args = parser.parse_args(argv)
model_args = model_args_parser(args.model_arguments[1:])
model_args_no_def = {
k: v
for k, v in vars(model_args).items()
if k in [arg[2:] for arg in args.model_arguments[1:]]
}
deployer = Deployer(args, model_args_no_def)
#
return deployer, model_args
class DeployerLibrary:
def __init__(self, args, model_args):
self.args = args
self.model_args = model_args
self.platform = None
def set_platform(self, platform):
''' sets the platform
:: platform :: "pytorch_libtorch" or "onnxruntime_onnx"
'''
self.platform = platform
def prepare_inputs(self, dataloader, device):
''' load sample inputs to device '''
inputs = []
for batch in dataloader:
if type(batch) is torch.Tensor:
batch_d = batch.to(device)
batch_d = (batch_d, )
inputs.append(batch_d)
else:
batch_d = []
for x in batch:
assert type(x) is torch.Tensor, "input is not a tensor"
batch_d.append(x.to(device) if device else x)
batch_d = tuple(batch_d)
inputs.append(batch_d)
return inputs
def get_list_of_shapes(self, l, fun):
''' returns the list of min/max shapes, depending on fun
:: l :: list of tuples of tensors
:: fun :: min or max
'''
tensor_tuple = l[0]
shapes = [list(x.shape) for x in tensor_tuple]
for tensor_tuple in l:
assert len(tensor_tuple) == len(
shapes), "tensors with varying shape lengths are not supported"
for i, x in enumerate(tensor_tuple):
for j in range(len(x.shape)):
shapes[i][j] = fun(shapes[i][j], x.shape[j])
return shapes # a list of shapes
def get_tuple_of_min_shapes(self, l):
''' returns the tuple of min shapes
:: l :: list of tuples of tensors '''
shapes = self.get_list_of_shapes(l, min)
min_batch = 1
shapes = [[min_batch, *shape[1:]] for shape in shapes]
shapes = tuple(shapes)
return shapes # tuple of min shapes
def get_tuple_of_max_shapes(self, l):
''' returns the tuple of max shapes
:: l :: list of tuples of tensors '''
shapes = self.get_list_of_shapes(l, max)
max_batch = max(2, shapes[0][0])
shapes = [[max_batch, *shape[1:]] for shape in shapes]
shapes = tuple(shapes)
return shapes # tuple of max shapes
def get_tuple_of_opt_shapes(self, l):
''' returns the tuple of opt shapes
:: l :: list of tuples of tensors '''
counter = Counter()
for tensor_tuple in l:
shapes = [x.shape for x in tensor_tuple]
shapes = tuple(shapes)
counter[shapes] += 1
shapes = counter.most_common(1)[0][0]
return shapes # tuple of the most commonly occurring shapes
def get_tuple_of_dynamic_shapes(self, l):
''' returns a tuple of dynamic shapes: variable tensor dimensions
(for ex. batch size) occur as -1 in the tuple
:: l :: list of tuples of tensors '''
tensor_tuple = l[0]
shapes = [list(x.shape) for x in tensor_tuple]
for tensor_tuple in l:
err_msg = "tensors with varying shape lengths are not supported"
assert len(tensor_tuple) == len(shapes), err_msg
for i, x in enumerate(tensor_tuple):
for j in range(len(x.shape)):
if shapes[i][j] != x.shape[j] or j == 0:
shapes[i][j] = -1
shapes = tuple(shapes)
return shapes # tuple of dynamic shapes
def run_models(self, models, inputs):
''' run the models on inputs, return the outputs and execution times '''
ret = []
for model in models:
torch.cuda.synchronize()
time_start = time.time()
outputs = []
for input in inputs:
with torch.no_grad():
output = model(*input)
if type(output) is torch.Tensor:
output = [output]
outputs.append(output)
torch.cuda.synchronize()
time_end = time.time()
t = time_end - time_start
ret.append(outputs)
ret.append(t)
return ret
def compute_errors(self, outputs_A, outputs_B):
''' returns the list of L_inf errors computed over every single output tensor '''
Linf_errors = []
for output_A, output_B in zip(outputs_A, outputs_B):
for x, y in zip(output_A, output_B):
error = (x - y).norm(float('inf')).item()
Linf_errors.append(error)
return Linf_errors
def print_errors(self, Linf_errors):
''' print various statistics of the L_inf errors '''
print()
print("conversion correctness test results")
print("-----------------------------------")
print("maximal absolute error over dataset (L_inf): ",
max(Linf_errors))
print()
print("average L_inf error over output tensors: ",
statistics.mean(Linf_errors))
print("variance of L_inf error over output tensors: ",
statistics.variance(Linf_errors))
print("stddev of L_inf error over output tensors: ",
statistics.stdev(Linf_errors))
print()
def write_config(self,
config_filename,
input_shapes,
input_types,
output_shapes,
output_types):
''' writes triton config file
:: config_filename :: the file to write the config file into
:: input_shapes :: tuple of dynamic shapes of the input tensors
:: input_types :: tuple of torch types of the input tensors
:: output_shapes :: tuple of dynamic shapes of the output tensors
:: output_types :: tuple of torch types of the output tensors
'''
assert self.platform is not None, "error - platform is not set"
config_template = CONFIG_TEMPLATE
accelerator_template = MODEL_OPTIMIZATION_TEMPLATE
input_template = INPUT_TEMPLATE
spec_inputs = r""""""
for i,(shape,typ) in enumerate(zip(input_shapes,input_types)):
d = {
'num' : str(i),
'type': torch_type_to_triton_type[typ],
'dims': str([1]) if len(shape) == 1 else str(list(shape)[1:]) # first dimension is the batch size
}
d['reshape'] = 'reshape: { shape: [ ] }' if len(shape) == 1 else ''
spec_inputs += input_template.format_map(d)
spec_inputs = spec_inputs[:-1]
output_template = OUTPUT_TEMPLATE
spec_outputs = r""""""
for i,(shape,typ) in enumerate(zip(output_shapes,output_types)):
d = {
'num' : str(i),
'type': torch_type_to_triton_type[typ],
'dims': str([1]) if len(shape) == 1 else str(list(shape)[1:]) # first dimension is the batch size
}
d['reshape'] = 'reshape: { shape: [ ] }' if len(shape) == 1 else ''
spec_outputs += output_template.format_map(d)
spec_outputs = spec_outputs[:-1]
batching_str = ""
parameters_str = ""
max_batch_size = self.args.triton_max_batch_size
accelerator_str = ""
if (self.args.triton_dyn_batching_delay > 0):
# Use only full and half full batches
pref_batch_size = [int(max_batch_size / 2.0), max_batch_size]
batching_str = r"""
dynamic_batching {{
preferred_batch_size: [{0}]
max_queue_delay_microseconds: {1}
}}""".format(", ".join([str(x) for x in pref_batch_size]),
int(self.args.triton_dyn_batching_delay * 1000.0))
if self.platform == 'onnxruntime_onnx':
accelerator_str = accelerator_template.format_map({})
config_values = {
"model_name": self.args.triton_model_name,
"platform": self.platform,
"max_batch_size": max_batch_size,
"spec_inputs": spec_inputs,
"spec_outputs": spec_outputs,
"dynamic_batching": batching_str,
"model_parameters": parameters_str,
"model_optimizations": accelerator_str,
"gpu_list": "" if self.args.deploy_cpu else ", ".join([str(x) for x in range(torch.cuda.device_count())]),
"engine_count": self.args.triton_engine_count,
"kind": "KIND_CPU" if self.args.deploy_cpu else "KIND_GPU"
}
# write config
with open(config_filename, "w") as file:
final_config_str = config_template.format_map(config_values)
final_config_str = remove_empty_lines(final_config_str)
file.write(final_config_str)
class Deployer:
def __init__(self, args, model_args):
self.args = args
self.lib = DeployerLibrary(args, model_args)
def deploy(self, dataloader, model):
''' deploy the model and test for correctness with dataloader '''
if self.args.ts_script or self.args.ts_trace:
self.lib.set_platform("pytorch_libtorch")
print("deploying model " + self.args.triton_model_name +
" in format " + self.lib.platform)
self.to_triton_torchscript(dataloader, model)
elif self.args.onnx:
self.lib.set_platform("onnxruntime_onnx")
print("deploying model " + self.args.triton_model_name +
" in format " + self.lib.platform)
self.to_triton_onnx(dataloader, model)
else:
assert False, "error"
print("done")
def to_triton_onnx(self, dataloader, model):
''' export the model to onnx and test correctness on dataloader '''
model.eval()
assert not model.training, "internal error - model should be in eval() mode! "
# prepare inputs
inputs = self.lib.prepare_inputs(dataloader, device=None)
# generate outputs
outputs = []
for input in inputs:
with torch.no_grad():
output = model(*input)
if type(output) is torch.Tensor:
output = [output]
outputs.append(output)
# generate input shapes - dynamic tensor shape support
input_shapes = self.lib.get_tuple_of_dynamic_shapes(inputs)
# generate output shapes - dynamic tensor shape support
output_shapes = self.lib.get_tuple_of_dynamic_shapes(outputs)
# generate input types
input_types = [x.dtype for x in inputs[0]]
# generate output types
output_types = [x.dtype for x in outputs[0]]
# get input names
rng = range(len(input_types))
input_names = ["input__" + str(num) for num in rng]
# get output names
rng = range(len(output_types))
output_names = ["output__" + str(num) for num in rng]
# prepare save path
model_folder = os.path.join(self.args.save_dir, self.args.triton_model_name)
version_folder = os.path.join(model_folder, str(self.args.triton_model_version))
if not os.path.exists(version_folder):
os.makedirs(version_folder)
final_model_path = os.path.join(version_folder, 'model.onnx')
if not os.path.exists(final_model_path):
os.makedirs(final_model_path)
final_model_path = os.path.join(final_model_path, 'model.onnx')
# get indices of dynamic input and output shapes
dynamic_axes = {}
for input_name,input_shape in zip(input_names,input_shapes):
dynamic_axes[input_name] = [i for i,x in enumerate(input_shape) if x == -1]
for output_name,output_shape in zip(output_names,output_shapes):
dynamic_axes[output_name] = [i for i,x in enumerate(output_shape) if x == -1]
# export the model
assert not model.training, "internal error - model should be in eval() mode! "
with torch.no_grad():
torch.onnx.export(model, inputs[0], final_model_path, verbose=False,
input_names=input_names, output_names=output_names,
dynamic_axes=dynamic_axes, opset_version=11,
use_external_data_format=True)
config_filename = os.path.join(model_folder, "config.pbtxt")
self.lib.write_config(config_filename,
input_shapes, input_types,
output_shapes, output_types)
def to_triton_torchscript(self, dataloader, model):
''' export the model to torchscript and test correctness on dataloader '''
model.eval()
assert not model.training, "internal error - model should be in eval() mode! "
# prepare inputs
inputs = self.lib.prepare_inputs(dataloader, device=None)
# generate input shapes - dynamic tensor shape support
input_shapes = self.lib.get_tuple_of_dynamic_shapes(inputs)
# generate input types
input_types = [x.dtype for x in inputs[0]]
# prepare save path
model_folder = os.path.join(self.args.save_dir, self.args.triton_model_name)
version_folder = os.path.join(model_folder, str(self.args.triton_model_version))
if not os.path.exists(version_folder):
os.makedirs(version_folder)
final_model_path = os.path.join(version_folder, 'model.pt')
# convert the model
with torch.no_grad():
if self.args.ts_trace: # trace it
model_ts = torch.jit.trace(model, inputs[0])
if self.args.ts_script: # script it
model_ts = torch.jit.script(model)
# generate outputs
outputs = []
for input in inputs:
with torch.no_grad():
output = model(*input)
if type(output) is torch.Tensor:
output = [output]
outputs.append(output)
# save the model
torch.jit.save(model_ts, final_model_path)
# generate output shapes - dynamic tensor shape support
output_shapes = self.lib.get_tuple_of_dynamic_shapes(outputs)
# generate output types
output_types = [x.dtype for x in outputs[0]]
# now we build the config for triton
config_filename = os.path.join(model_folder, "config.pbtxt")
self.lib.write_config(config_filename,
input_shapes, input_types,
output_shapes, output_types)
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/triton/deployer_lib.py |
#!/usr/bin/env python
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import json
import sys
import numpy as np
import torch
import tritonclient.http as http_client
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
from dlrm.data.datasets import SyntheticDataset, SplitCriteoDataset
from dlrm.utils.distributed import get_device_mapping
def get_data_loader(batch_size, *, data_path, model_config):
with open(model_config.dataset_config) as f:
categorical_sizes = list(json.load(f).values())
categorical_sizes = [s + 1 for s in categorical_sizes]
device_mapping = get_device_mapping(categorical_sizes, num_gpus=1)
if data_path:
data = SplitCriteoDataset(
data_path=data_path,
batch_size=batch_size,
numerical_features=True,
categorical_features=device_mapping['embedding'][0],
categorical_feature_sizes=categorical_sizes,
prefetch_depth=1,
drop_last_batch=model_config.drop_last_batch
)
else:
data = SyntheticDataset(
num_entries=batch_size * 1024,
batch_size=batch_size,
numerical_features=model_config.num_numerical_features,
categorical_feature_sizes=categorical_sizes,
device="cpu"
)
if model_config.test_batches > 0:
data = torch.utils.data.Subset(data, list(range(model_config.test_batches)))
return torch.utils.data.DataLoader(data,
batch_size=None,
num_workers=0,
pin_memory=False)
def run_infer(model_name, model_version, numerical_features, categorical_features, headers=None):
inputs = []
outputs = []
num_type = "FP16" if numerical_features.dtype == np.float16 else "FP32"
inputs.append(http_client.InferInput('input__0', numerical_features.shape, num_type))
inputs.append(http_client.InferInput('input__1', categorical_features.shape, "INT64"))
# Initialize the data
inputs[0].set_data_from_numpy(numerical_features, binary_data=True)
inputs[1].set_data_from_numpy(categorical_features, binary_data=False)
outputs.append(http_client.InferRequestedOutput('output__0', binary_data=True))
results = triton_client.infer(model_name,
inputs,
model_version=str(model_version) if model_version != -1 else '',
outputs=outputs,
headers=headers)
return results
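# Example invocation (hypothetical paths, host and model name):
#   python triton/client.py --triton-server-url localhost:8000 \
#       --triton-model-name dlrm --dataset_config /data/model_size.json \
#       --inference_data /data/test_data.bin --batch_size 4096 --fp16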
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--triton-server-url',
type=str,
required=True,
help='URL address of the Triton server (with port)')
parser.add_argument('--triton-model-name', type=str, required=True,
help='Triton deployed model name')
parser.add_argument('--triton-model-version', type=int, default=-1,
help='Triton model version')
parser.add_argument('-v',
'--verbose',
action="store_true",
required=False,
default=False,
help='Enable verbose output')
parser.add_argument('-H', dest='http_headers', metavar="HTTP_HEADER",
required=False, action='append',
help='HTTP headers to add to inference server requests. ' +
'Format is -H"Header:Value".')
parser.add_argument("--dataset_config", type=str, required=True)
parser.add_argument("--inference_data", type=str,
help="Path to file with inference data.")
parser.add_argument("--batch_size", type=int, default=1,
help="Inference request batch size")
parser.add_argument("--drop_last_batch", type=bool, default=True,
help="Drops the last batch size if it's not full")
parser.add_argument("--fp16", action="store_true", default=False,
help="Use 16bit for numerical input")
parser.add_argument("--test_batches", type=int, default=0,
help="Specifies number of batches used in the inference")
FLAGS = parser.parse_args()
try:
triton_client = http_client.InferenceServerClient(url=FLAGS.triton_server_url, verbose=FLAGS.verbose)
except Exception as e:
print("channel creation failed: " + str(e))
sys.exit(1)
if FLAGS.http_headers is not None:
headers_dict = {l.split(':')[0]: l.split(':')[1]
for l in FLAGS.http_headers}
else:
headers_dict = None
triton_client.load_model(FLAGS.triton_model_name)
if not triton_client.is_model_ready(FLAGS.triton_model_name):
sys.exit(1)
dataloader = get_data_loader(FLAGS.batch_size,
data_path=FLAGS.inference_data,
model_config=FLAGS)
results = []
tgt_list = []
for numerical_features, categorical_features, target in tqdm(dataloader):
numerical_features = numerical_features.cpu().numpy()
numerical_features = numerical_features.astype(np.float16 if FLAGS.fp16 else np.float32)
categorical_features = categorical_features.long().cpu().numpy()
output = run_infer(FLAGS.triton_model_name, FLAGS.triton_model_version,
numerical_features, categorical_features, headers_dict)
results.append(output.as_numpy('output__0'))
tgt_list.append(target.cpu().numpy())
results = np.concatenate(results).squeeze()
tgt_list = np.concatenate(tgt_list)
score = roc_auc_score(tgt_list, results)
print(f"Model score: {score}")
statistics = triton_client.get_inference_statistics(model_name=FLAGS.triton_model_name, headers=headers_dict)
print(statistics)
if len(statistics['model_stats']) != 1:
print("FAILED: Inference Statistics")
sys.exit(1)
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/triton/client.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import os
from joblib import Parallel, delayed
import glob
import argparse
import tqdm
import subprocess
def process_file(f, dst):
label = '_c0'
dense_columns = [f'_c{i}' for i in range(1, 14)]
categorical_columns = [f'_c{i}' for i in range(14, 40)]
all_columns_sorted = [f'_c{i}' for i in range(0, 40)]
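# Each output record is written as: 1 int32 label + 13 float32 dense features + 26 int32
# categorical features = 160 bytes per record.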
data = pd.read_parquet(f)
data = data[all_columns_sorted]
data[label] = data[label].astype(np.int32)
data[dense_columns] = data[dense_columns].astype(np.float32)
data[categorical_columns] = data[categorical_columns].astype(np.int32)
data = data.to_records(index=False)
data = data.tobytes()
dst_file = dst + '/' + f.split('/')[-1] + '.bin'
with open(dst_file, 'wb') as dst_fd:
dst_fd.write(data)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--src_dir', type=str)
parser.add_argument('--intermediate_dir', type=str)
parser.add_argument('--dst_dir', type=str)
parser.add_argument('--parallel_jobs', default=40, type=int)
args = parser.parse_args()
print('Processing train files...')
train_src_files = glob.glob(args.src_dir + '/train/*.parquet')
train_intermediate_dir = os.path.join(args.intermediate_dir, 'train')
os.makedirs(train_intermediate_dir, exist_ok=True)
Parallel(n_jobs=args.parallel_jobs)(delayed(process_file)(f, train_intermediate_dir) for f in tqdm.tqdm(train_src_files))
print('Train files conversion done')
print('Processing test files...')
test_src_files = glob.glob(args.src_dir + '/test/*.parquet')
test_intermediate_dir = os.path.join(args.intermediate_dir, 'test')
os.makedirs(test_intermediate_dir, exist_ok=True)
Parallel(n_jobs=args.parallel_jobs)(delayed(process_file)(f, test_intermediate_dir) for f in tqdm.tqdm(test_src_files))
print('Test files conversion done')
print('Processing validation files...')
valid_src_files = glob.glob(args.src_dir + '/validation/*.parquet')
valid_intermediate_dir = os.path.join(args.intermediate_dir, 'validation')
os.makedirs(valid_intermediate_dir, exist_ok=True)
Parallel(n_jobs=args.parallel_jobs)(delayed(process_file)(f, valid_intermediate_dir) for f in tqdm.tqdm(valid_src_files))
print('Validation files conversion done')
os.makedirs(args.dst_dir, exist_ok=True)
print('Concatenating train files')
os.system(f'cat {train_intermediate_dir}/*.bin > {args.dst_dir}/train_data.bin')
print('Concatenating test files')
os.system(f'cat {test_intermediate_dir}/*.bin > {args.dst_dir}/test_data.bin')
print('Concatenating validation files')
os.system(f'cat {valid_intermediate_dir}/*.bin > {args.dst_dir}/validation_data.bin')
print('Done')
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/preproc/parquet_to_binary.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from pyspark.sql import Row, SparkSession, Window
from pyspark.sql.functions import *
from pyspark.sql.types import *
LABEL_COL = 0
INT_COLS = list(range(1, 14))
CAT_COLS = list(range(14, 40))
def col_of_rand_long():
return (rand() * (1 << 52)).cast(LongType())
def rand_ordinal(df):
return df.withColumn('ordinal', col_of_rand_long())
def _parse_args():
parser = ArgumentParser()
parser.add_argument('--input_path', required=True)
parser.add_argument('--output_path')
args = parser.parse_args()
return args
def _main():
args = _parse_args()
spark = SparkSession.builder.getOrCreate()
df = rand_ordinal(spark.read.load(args.input_path + "/*"))
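# Repartitioning on the random ordinal and sorting within partitions gives a cheap global
# shuffle without paying for a full sort.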
df = df.repartition('ordinal').sortWithinPartitions('ordinal')
df = df.drop('ordinal')
df.write.parquet(
args.output_path,
mode='overwrite'
)
if __name__ == '__main__':
_main()
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/preproc/NVT_shuffle_spark.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import math
from tqdm import tqdm
import numpy as np
from typing import Sequence
# Workaround to avoid duplicating code from the main module, without building it outright.
import sys
sys.path.append('/workspace/dlrm')
from dlrm.data.defaults import get_categorical_feature_type
from dlrm.data.feature_spec import FeatureSpec
def split_binary_file(
binary_file_path: str,
output_dir: str,
categorical_feature_sizes: Sequence[int],
num_numerical_features: int,
batch_size: int,
source_data_type: str = 'int32',
):
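# Splits a monolithic DLRM binary file into per-feature files: numerical.bin (stored as float16),
# label.bin (stored as bool) and one cat_<i>.bin per categorical feature, using the dtype
# returned by get_categorical_feature_type for each table.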
record_width = 1 + num_numerical_features + len(categorical_feature_sizes) # label + numerical + categorical
bytes_per_feature = np.__dict__[source_data_type]().nbytes
bytes_per_entry = record_width * bytes_per_feature
total_size = os.path.getsize(binary_file_path)
batches_num = int(math.ceil((total_size // bytes_per_entry) / batch_size))
cat_feature_types = [get_categorical_feature_type(cat_size) for cat_size in categorical_feature_sizes]
file_streams = []
try:
input_data_f = open(binary_file_path, "rb")
file_streams.append(input_data_f)
numerical_f = open(os.path.join(output_dir, "numerical.bin"), "wb+")
file_streams.append(numerical_f)
label_f = open(os.path.join(output_dir, 'label.bin'), 'wb+')
file_streams.append(label_f)
categorical_fs = []
for i in range(len(categorical_feature_sizes)):
fs = open(os.path.join(output_dir, f'cat_{i}.bin'), 'wb+')
categorical_fs.append(fs)
file_streams.append(fs)
for _ in tqdm(range(batches_num)):
raw_data = np.frombuffer(input_data_f.read(bytes_per_entry * batch_size), dtype=np.int32)
batch_data = raw_data.reshape(-1, record_width)
numerical_features = batch_data[:, 1:1 + num_numerical_features].view(dtype=np.float32)
numerical_f.write(numerical_features.astype(np.float16).tobytes())
label = batch_data[:, 0]
label_f.write(label.astype(np.bool_).tobytes())  # np.bool_ instead of the removed np.bool alias
cat_offset = num_numerical_features + 1
for cat_idx, cat_feature_type in enumerate(cat_feature_types):
cat_data = batch_data[:, (cat_idx + cat_offset):(cat_idx + cat_offset + 1)].astype(cat_feature_type)
categorical_fs[cat_idx].write(cat_data.tobytes())
finally:
for stream in file_streams:
stream.close()
def split_dataset(dataset_dir: str, output_dir: str, batch_size: int, numerical_features: int):
categorical_sizes_file = os.path.join(dataset_dir, "model_size.json")
with open(categorical_sizes_file) as f:
# model_size.json contains the max value of each feature instead of the cardinality.
# For feature spec this is changed for consistency and clarity.
categorical_cardinalities = [int(v)+1 for v in json.load(f).values()]
train_file = os.path.join(dataset_dir, "train_data.bin")
test_file = os.path.join(dataset_dir, "test_data.bin")
val_file = os.path.join(dataset_dir, "validation_data.bin")
target_train = os.path.join(output_dir, "train")
target_test = os.path.join(output_dir, "test")
target_val = os.path.join(output_dir, "validation")
os.makedirs(output_dir, exist_ok=True)
os.makedirs(target_train, exist_ok=True)
os.makedirs(target_test, exist_ok=True)
os.makedirs(target_val, exist_ok=True)
# VALIDATION chunk is ignored in feature spec on purpose
feature_spec = FeatureSpec.get_default_feature_spec(number_of_numerical_features=numerical_features,
categorical_feature_cardinalities=categorical_cardinalities)
feature_spec.to_yaml(os.path.join(output_dir, 'feature_spec.yaml'))
split_binary_file(test_file, target_test, categorical_cardinalities, numerical_features, batch_size)
split_binary_file(train_file, target_train, categorical_cardinalities, numerical_features, batch_size)
split_binary_file(val_file, target_val, categorical_cardinalities, numerical_features, batch_size)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, required=True)
parser.add_argument('--output', type=str, required=True)
parser.add_argument('--batch_size', type=int, default=32768)
parser.add_argument('--numerical_features', type=int, default=13)
args = parser.parse_args()
split_dataset(
dataset_dir=args.dataset,
output_dir=args.output,
batch_size=args.batch_size,
numerical_features=args.numerical_features
)
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/preproc/split_dataset.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
from argparse import ArgumentParser
from collections import OrderedDict
from contextlib import contextmanager
from operator import itemgetter
from time import time
from pyspark import broadcast
from pyspark.sql import Row, SparkSession, Window
from pyspark.sql.functions import *
from pyspark.sql.types import *
LABEL_COL = 0
INT_COLS = list(range(1, 14))
CAT_COLS = list(range(14, 40))
def get_column_counts_with_frequency_limit(df, frequency_limit = None):
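# frequency_limit is a comma-separated list of "limit" and/or "column:limit" entries, e.g.
# "15" (one default limit for all categorical columns) or "14:100,15:50,8" (per-column limits
# for original columns 14 and 15, default limit 8 for the remaining ones).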
cols = ['_c%d' % i for i in CAT_COLS]
df = (df
.select(posexplode(array(*cols)))
.withColumnRenamed('pos', 'column_id')
.withColumnRenamed('col', 'data')
.filter('data is not null')
.groupBy('column_id', 'data')
.count())
if frequency_limit:
frequency_limit = frequency_limit.split(",")
exclude = []
default_limit = None
for fl in frequency_limit:
frequency_pair = fl.split(":")
if len(frequency_pair) == 1:
default_limit = int(frequency_pair[0])
elif len(frequency_pair) == 2:
df = df.filter((col('column_id') != int(frequency_pair[0]) - CAT_COLS[0]) | (col('count') >= int(frequency_pair[1])))
exclude.append(int(frequency_pair[0]))
if default_limit:
remain = [x - CAT_COLS[0] for x in CAT_COLS if x not in exclude]
df = df.filter((~col('column_id').isin(remain)) | (col('count') >= default_limit))
# for comparing isin and separate filter
# for i in remain:
# df = df.filter((col('column_id') != i - CAT_COLS[0]) | (col('count') >= default_limit))
return df
def assign_id_with_window(df):
windowed = Window.partitionBy('column_id').orderBy(desc('count'))
return (df
.withColumn('id', row_number().over(windowed))
.withColumnRenamed('count', 'model_count'))
def assign_low_mem_partial_ids(df):
# To avoid some scaling issues with a simple window operation, we use a more complex method
# to compute the same thing, but in a more distributed, Spark-specific way
df = df.orderBy(asc('column_id'), desc('count'))
# The monotonically_increasing_id is the partition id in the top 31 bits and the rest
# is an increasing count of the rows within that partition. So we split it into two parts,
# the partition id part_id and the count mono_id
df = df.withColumn('part_id', spark_partition_id())
return df.withColumn('mono_id', monotonically_increasing_id() - shiftLeft(col('part_id'), 33))
def assign_low_mem_final_ids(df):
# Now we can find the minimum and maximum mono_ids within a given column/partition pair
sub_model = df.groupBy('column_id', 'part_id').agg(max('mono_id').alias('top'), min('mono_id').alias('bottom'))
sub_model = sub_model.withColumn('diff', col('top') - col('bottom') + 1)
sub_model = sub_model.drop('top')
# This window function is over aggregated column/partition pair table. It will do a running sum of the rows
# within that column
windowed = Window.partitionBy('column_id').orderBy('part_id').rowsBetween(Window.unboundedPreceding, -1)
sub_model = sub_model.withColumn('running_sum', sum('diff').over(windowed)).na.fill(0, ["running_sum"])
joined = df.withColumnRenamed('column_id', 'i_column_id')
joined = joined.withColumnRenamed('part_id', 'i_part_id')
joined = joined.withColumnRenamed('count', 'model_count')
# Then we can join the original input with the pair it is a part of
joined = joined.join(sub_model, (col('i_column_id') == col('column_id')) & (col('part_id') == col('i_part_id')))
# So with all that we can subtract bottom from mono_id, making it start at 0 for each partition
# and then add in the running_sum so the id is contiguous and unique for the entire column. + 1 to make it match the 1-based indexing
# for row_number
ret = joined.select(col('column_id'),
col('data'),
(col('mono_id') - col('bottom') + col('running_sum') + 1).cast(IntegerType()).alias('id'),
col('model_count'))
return ret
def get_column_models(combined_model):
for i in CAT_COLS:
model = (combined_model
.filter('column_id == %d' % (i - CAT_COLS[0]))
.drop('column_id'))
yield i, model
def col_of_rand_long():
return (rand() * (1 << 52)).cast(LongType())
def skewed_join(df, model, col_name, cutoff):
# Most versions of spark don't have a good way
# to deal with a skewed join out of the box.
# Some do and if you want to replace this with
# one of those that would be great.
# Because we have statistics about the skewness
# that we can use, we divide the model up into two parts:
# one part is the highly skewed part and we do a
# broadcast join for that part, but keep the result in
# a separate column
b_model = broadcast(model.filter(col('model_count') >= cutoff)
.withColumnRenamed('data', col_name)
.drop('model_count'))
df = (df
.join(b_model, col_name, how='left')
.withColumnRenamed('id', 'id_tmp'))
# We also need to spread the skewed data that matched
# evenly. We will use a source of randomness for this
# but use a -1 for anything that still needs to be matched
if 'ordinal' in df.columns:
rand_column = col('ordinal')
else:
rand_column = col_of_rand_long()
df = df.withColumn('join_rand',
# null values are not in the model, they are filtered out
# but can be a source of skewedness so include them in
# the even distribution
when(col('id_tmp').isNotNull() | col(col_name).isNull(), rand_column)
.otherwise(lit(-1)))
# Null out the string data that already matched to save memory
df = df.withColumn(col_name,
when(col('id_tmp').isNotNull(), None)
.otherwise(col(col_name)))
# Now do the second join, which will be a non broadcast join.
# Sadly spark is too smart for its own good and will optimize out
# joining on a column it knows will always be a constant value.
# So we have to make a convoluted version of assigning a -1 to the
# randomness column for the model itself to work around that.
nb_model = (model
.withColumn('join_rand', when(col('model_count') < cutoff, lit(-1)).otherwise(lit(-2)))
.filter(col('model_count') < cutoff)
.withColumnRenamed('data', col_name)
.drop('model_count'))
df = (df
.join(nb_model, ['join_rand', col_name], how='left')
.drop(col_name, 'join_rand')
# Pick either join result as an answer
.withColumn(col_name, coalesce(col('id'), col('id_tmp')))
.drop('id', 'id_tmp'))
return df
def apply_models(df, models, broadcast_model = False, skew_broadcast_pct = 1.0):
# sort the models so broadcast joins come first. This is
# so we reduce the amount of shuffle data sooner than later
# If we parsed the string hex values to ints early on this would
# not make a difference.
models = sorted(models, key=itemgetter(3), reverse=True)
for i, model, original_rows, would_broadcast in models:
col_name = '_c%d' % i
if not (would_broadcast or broadcast_model):
# The data is highly skewed so we need to offset that
cutoff = int(original_rows * skew_broadcast_pct/100.0)
df = skewed_join(df, model, col_name, cutoff)
else:
# broadcast joins can handle skewed data so no need to
# do anything special
model = (model.drop('model_count')
.withColumnRenamed('data', col_name))
model = broadcast(model) if broadcast_model else model
df = (df
.join(model, col_name, how='left')
.drop(col_name)
.withColumnRenamed('id', col_name))
return df.fillna(0, ['_c%d' % i for i in CAT_COLS])
def transform_log(df, transform_log = False):
cols = ['_c%d' % i for i in INT_COLS]
if transform_log:
for col_name in cols:
df = df.withColumn(col_name, log(df[col_name] + 3))
return df.fillna(0, cols)
def would_broadcast(spark, str_path):
sc = spark.sparkContext
config = sc._jsc.hadoopConfiguration()
path = sc._jvm.org.apache.hadoop.fs.Path(str_path)
fs = sc._jvm.org.apache.hadoop.fs.FileSystem.get(config)
stat = fs.listFiles(path, True)
sum = 0
while stat.hasNext():
sum = sum + stat.next().getLen()
sql_conf = sc._jvm.org.apache.spark.sql.internal.SQLConf()
cutoff = sql_conf.autoBroadcastJoinThreshold() * sql_conf.fileCompressionFactor()
return sum <= cutoff
def delete_data_source(spark, path):
sc = spark.sparkContext
config = sc._jsc.hadoopConfiguration()
path = sc._jvm.org.apache.hadoop.fs.Path(path)
sc._jvm.org.apache.hadoop.fs.FileSystem.get(config).delete(path, True)
def load_raw(spark, folder, day_range):
label_fields = [StructField('_c%d' % LABEL_COL, IntegerType())]
int_fields = [StructField('_c%d' % i, IntegerType()) for i in INT_COLS]
str_fields = [StructField('_c%d' % i, StringType()) for i in CAT_COLS]
schema = StructType(label_fields + int_fields + str_fields)
paths = [os.path.join(folder, 'day_%d' % i) for i in day_range]
return (spark
.read
.schema(schema)
.option('sep', '\t')
.csv(paths))
def rand_ordinal(df):
# create a random long from the double precision float.
# The fraction part of a double is 52 bits, so we try to capture as much
# of that as possible
return df.withColumn('ordinal', col_of_rand_long())
def day_from_ordinal(df, num_days):
return df.withColumn('day', (col('ordinal') % num_days).cast(IntegerType()))
def day_from_input_file(df):
return df.withColumn('day', substring_index(input_file_name(), '_', -1).cast(IntegerType()))
def psudo_sort_by_day_plus(spark, df, num_days):
# Sort is very expensive because it needs to calculate the partitions
# which in our case may involve rereading all of the data. In some cases
# we can avoid this by repartitioning the data and sorting within a single partition
shuffle_parts = int(spark.conf.get('spark.sql.shuffle.partitions'))
extra_parts = int(shuffle_parts/num_days)
if extra_parts <= 0:
df = df.repartition('day')
else:
# We want to spread out the computation to about the same amount as shuffle_parts
divided = (col('ordinal') / num_days).cast(LongType())
extra_ident = divided % extra_parts
df = df.repartition(col('day'), extra_ident)
return df.sortWithinPartitions('day', 'ordinal')
def load_combined_model(spark, model_folder):
path = os.path.join(model_folder, 'combined.parquet')
return spark.read.parquet(path)
def save_combined_model(df, model_folder, mode=None):
path = os.path.join(model_folder, 'combined.parquet')
df.write.parquet(path, mode=mode)
def delete_combined_model(spark, model_folder):
path = os.path.join(model_folder, 'combined.parquet')
delete_data_source(spark, path)
def load_low_mem_partial_ids(spark, model_folder):
path = os.path.join(model_folder, 'partial_ids.parquet')
return spark.read.parquet(path)
def save_low_mem_partial_ids(df, model_folder, mode=None):
path = os.path.join(model_folder, 'partial_ids.parquet')
df.write.parquet(path, mode=mode)
def delete_low_mem_partial_ids(spark, model_folder):
path = os.path.join(model_folder, 'partial_ids.parquet')
delete_data_source(spark, path)
def load_column_models(spark, model_folder, count_required):
for i in CAT_COLS:
path = os.path.join(model_folder, '%d.parquet' % i)
df = spark.read.parquet(path)
if count_required:
values = df.agg(sum('model_count').alias('sum'), count('*').alias('size')).collect()
else:
values = df.agg(sum('model_count').alias('sum')).collect()
yield i, df, values[0], would_broadcast(spark, path)
def save_column_models(column_models, model_folder, mode=None):
for i, model in column_models:
path = os.path.join(model_folder, '%d.parquet' % i)
model.write.parquet(path, mode=mode)
def save_model_size(model_size, path, write_mode):
if os.path.exists(path) and write_mode == 'errorifexists':
print('Error: model size file %s exists' % path)
sys.exit(1)
os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True)
with open(path, 'w') as fp:
json.dump(model_size, fp, indent=4)
_benchmark = {}
@contextmanager
def _timed(step):
start = time()
yield
end = time()
_benchmark[step] = end - start
def _parse_args():
parser = ArgumentParser()
parser.add_argument(
'--mode',
required=True,
choices=['generate_models', 'transform'])
parser.add_argument('--days', required=True)
parser.add_argument('--input_folder', required=True)
parser.add_argument('--output_folder')
parser.add_argument('--model_size_file')
parser.add_argument('--model_folder', required=True)
parser.add_argument(
'--write_mode',
choices=['overwrite', 'errorifexists'],
default='errorifexists')
parser.add_argument('--frequency_limit')
parser.add_argument('--no_numeric_log_col', action='store_true')
#Support for running in a lower memory environment
parser.add_argument('--low_mem', action='store_true')
parser.add_argument(
'--output_ordering',
choices=['total_random', 'day_random', 'any', 'input'],
default='total_random')
parser.add_argument(
'--output_partitioning',
choices=['day', 'none'],
default='none')
parser.add_argument('--dict_build_shuffle_parallel_per_day', type=int, default=2)
parser.add_argument('--apply_shuffle_parallel_per_day', type=int, default=25)
parser.add_argument('--skew_broadcast_pct', type=float, default=1.0)
parser.add_argument('--debug_mode', action='store_true')
args = parser.parse_args()
start, end = args.days.split('-')
args.day_range = list(range(int(start), int(end) + 1))
args.days = len(args.day_range)
return args
def _main():
args = _parse_args()
spark = SparkSession.builder.getOrCreate()
df = load_raw(spark, args.input_folder, args.day_range)
if args.mode == 'generate_models':
spark.conf.set('spark.sql.shuffle.partitions', args.days * args.dict_build_shuffle_parallel_per_day)
with _timed('generate models'):
col_counts = get_column_counts_with_frequency_limit(df, args.frequency_limit)
if args.low_mem:
# in low memory mode we have to save an intermediate result
# because if we try to do it in one query spark ends up assigning the
# partial ids in two different locations that are not guaranteed to line up
# this prevents that from happening by assigning the partial ids
# and then writing them out.
save_low_mem_partial_ids(
assign_low_mem_partial_ids(col_counts),
args.model_folder,
args.write_mode)
save_combined_model(
assign_low_mem_final_ids(load_low_mem_partial_ids(spark, args.model_folder)),
args.model_folder,
args.write_mode)
if not args.debug_mode:
delete_low_mem_partial_ids(spark, args.model_folder)
else:
save_combined_model(
assign_id_with_window(col_counts),
args.model_folder,
args.write_mode)
save_column_models(
get_column_models(load_combined_model(spark, args.model_folder)),
args.model_folder,
args.write_mode)
if not args.debug_mode:
delete_combined_model(spark, args.model_folder)
if args.mode == 'transform':
with _timed('transform'):
if args.output_ordering == 'total_random':
df = rand_ordinal(df)
if args.output_partitioning == 'day':
df = day_from_ordinal(df, args.days)
elif args.output_ordering == 'day_random':
df = rand_ordinal(df)
df = day_from_input_file(df)
elif args.output_ordering == 'input':
df = df.withColumn('ordinal', monotonically_increasing_id())
if args.output_partitioning == 'day':
df = day_from_input_file(df)
else: # any ordering
if args.output_partitioning == 'day':
df = day_from_input_file(df)
models = list(load_column_models(spark, args.model_folder, bool(args.model_size_file)))
if args.model_size_file:
save_model_size(
OrderedDict(('_c%d' % i, agg.size) for i, _, agg, _ in models),
args.model_size_file,
args.write_mode)
models = [(i, df, agg.sum, flag) for i, df, agg, flag in models]
df = apply_models(
df,
models,
not args.low_mem,
args.skew_broadcast_pct)
df = transform_log(df, not args.no_numeric_log_col)
if args.output_partitioning == 'day':
partitionBy = 'day'
else:
partitionBy = None
if args.output_ordering == 'total_random':
if args.output_partitioning == 'day':
df = psudo_sort_by_day_plus(spark, df, args.days)
else: # none
# Don't do a full sort it is expensive. Order is random so
# just make it random
df = df.repartition('ordinal').sortWithinPartitions('ordinal')
df = df.drop('ordinal')
elif args.output_ordering == 'day_random':
df = psudo_sort_by_day_plus(spark, df, args.days)
df = df.drop('ordinal')
if args.output_partitioning != 'day':
df = df.drop('day')
elif args.output_ordering == 'input':
if args.low_mem:
# This is the slowest option. We totally messed up the order so we have to put
# it back in the correct order
df = df.orderBy('ordinal')
else:
# Applying the dictionary happened within a single task so we are already really
# close to the correct order, just need to sort within the partition
df = df.sortWithinPartitions('ordinal')
df = df.drop('ordinal')
if args.output_partitioning != 'day':
df = df.drop('day')
# else: any ordering so do nothing the ordering does not matter
df.write.parquet(
args.output_folder,
mode=args.write_mode,
partitionBy=partitionBy)
print('=' * 100)
print(_benchmark)
if __name__ == '__main__':
_main()
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/preproc/spark_data_utils.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocess Criteo 1TB Click Logs dataset with frequency thresholding and filling missing values.
This script accepts input in either tsv or parquet format.
"""
import argparse
from collections import OrderedDict
import json
import os
import subprocess
from time import time
from typing import List, Optional
import numpy as np
import nvtabular as nvt
import rmm
import cudf
from dask.base import tokenize
from dask.dataframe.io.parquet.utils import _analyze_paths
from dask.delayed import Delayed
from dask.distributed import Client
from dask.highlevelgraph import HighLevelGraph
from dask.utils import natural_sort_key
from dask_cuda import LocalCUDACluster
from fsspec.core import get_fs_token_paths
from nvtabular import Workflow
from nvtabular.io import Dataset, Shuffle
from nvtabular.utils import device_mem_size
from nvtabular.ops import Normalize, Categorify, LogOp, FillMissing, Clip, get_embedding_sizes, \
LambdaOp
from cudf.io.parquet import ParquetWriter
CRITEO_CONTINUOUS_COLUMNS = [f'_c{x}' for x in range(1, 14)]
CRITEO_CATEGORICAL_COLUMNS = [f'_c{x}' for x in range(14, 40)]
CRITEO_CLICK_COLUMNS = ['_c0']
COLUMNS = CRITEO_CONTINUOUS_COLUMNS + CRITEO_CATEGORICAL_COLUMNS + CRITEO_CLICK_COLUMNS
CRITEO_TRAIN_DAYS = list(range(0, 23))
ALL_DS_MEM_FRAC = 0.04
TRAIN_DS_MEM_FRAC = 0.045
TEST_DS_MEM_FRAC = 0.3
VALID_DS_MEM_FRAC = 0.3
def _pool(frac=0.8):
initial_pool_size = frac * device_mem_size()
if initial_pool_size % 256 != 0:
new_initial_pool_size = initial_pool_size // 256 * 256
print(
f"Initial pool size for rmm has to be a multiply of 256. Got {initial_pool_size}, reducing to {new_initial_pool_size}")
initial_pool_size = new_initial_pool_size
rmm.reinitialize(
pool_allocator=True,
initial_pool_size=initial_pool_size,
)
def _convert_file(path, name, out_dir, gpu_mem_frac, fs, cols, dtypes):
fn = f"{name}.parquet"
out_path = fs.sep.join([out_dir, f"{name}.parquet"])
writer = ParquetWriter(out_path, compression=None)
for gdf in nvt.Dataset(
path,
engine="csv",
names=cols,
part_memory_fraction=gpu_mem_frac,
sep='\t',
dtypes=dtypes,
).to_iter():
writer.write_table(gdf)
del gdf
md = writer.close(metadata_file_path=fn)
return md
def _write_metadata(md_list, fs, path):
if md_list:
metadata_path = fs.sep.join([path, "_metadata"])
_meta = (
cudf.io.merge_parquet_filemetadata(md_list)
if len(md_list) > 1
else md_list[0]
)
with fs.open(metadata_path, "wb") as f:
_meta.tofile(f)
return True
def convert_criteo_to_parquet(
input_path: str,
output_path: str,
client,
gpu_mem_frac: float = 0.05,
):
print("Converting tsv to parquet files")
if not output_path:
raise RuntimeError("Intermediate directory must be defined, if the dataset is tsv.")
os.makedirs(output_path, exist_ok=True)
# split last day into two parts
number_of_lines = int(
subprocess.check_output((f'wc -l {os.path.join(input_path, "day_23")}').split()).split()[0])
valid_set_size = number_of_lines // 2
test_set_size = number_of_lines - valid_set_size
with open(os.path.join(input_path, "day_23.part1"), "w") as f:
subprocess.run(['head', '-n', str(test_set_size), str(os.path.join(input_path, "day_23"))], stdout=f)
with open(os.path.join(input_path, "day_23.part2"), "w") as f:
subprocess.run(['tail', '-n', str(valid_set_size), str(os.path.join(input_path, "day_23"))], stdout=f)
fs = get_fs_token_paths(input_path, mode="rb")[0]
file_list = [
x for x in fs.glob(fs.sep.join([input_path, "day_*"]))
if not x.endswith("parquet")
]
file_list = sorted(file_list, key=natural_sort_key)
name_list = _analyze_paths(file_list, fs)[1]
cols = CRITEO_CLICK_COLUMNS + CRITEO_CONTINUOUS_COLUMNS + CRITEO_CATEGORICAL_COLUMNS
dtypes = {}
dtypes[CRITEO_CLICK_COLUMNS[0]] = np.int64
for x in CRITEO_CONTINUOUS_COLUMNS:
dtypes[x] = np.int64
for x in CRITEO_CATEGORICAL_COLUMNS:
dtypes[x] = "hex"
dsk = {}
token = tokenize(file_list, name_list, output_path, gpu_mem_frac, fs, cols, dtypes)
convert_file_name = "convert_file-" + token
for i, (path, name) in enumerate(zip(file_list, name_list)):
key = (convert_file_name, i)
dsk[key] = (_convert_file, path, name, output_path, gpu_mem_frac, fs, cols, dtypes)
write_meta_name = "write-metadata-" + token
dsk[write_meta_name] = (
_write_metadata,
[(convert_file_name, i) for i in range(len(file_list))],
fs,
output_path,
)
graph = HighLevelGraph.from_collections(write_meta_name, dsk, dependencies=[])
conversion_delayed = Delayed(write_meta_name, graph)
if client:
conversion_delayed.compute()
else:
conversion_delayed.compute(scheduler="synchronous")
print("Converted")
def save_model_size_config(workflow: Workflow, output_path: str):
embeddings = {}
for k, v in get_embedding_sizes(workflow).items():
embeddings[k] = v[0] - 1 # we have to subtract one, as the model expects to get a maximal id for each category
ordered_dict = OrderedDict()
for k, v in sorted(list(embeddings.items()), key=lambda x: x[0]):
ordered_dict[k] = v
with open(os.path.join(output_path, "model_size.json"), 'w') as file:
file.write(json.dumps(ordered_dict))
def preprocess_criteo_parquet(
input_path: str,
output_path: str,
client,
frequency_threshold: int,
):
train_days = [str(x) for x in CRITEO_TRAIN_DAYS]
train_files = [
os.path.join(input_path, x)
for x in os.listdir(input_path)
if x.startswith("day") and x.split(".")[0].split("_")[-1] in train_days
]
valid_file = os.path.join(input_path, "day_23.part2.parquet")
test_file = os.path.join(input_path, "day_23.part1.parquet")
all_set = train_files + [valid_file] + [test_file]
print(all_set, train_files, valid_file, test_file)
print("Creating Workflow Object")
workflow = Workflow(
cat_names=CRITEO_CATEGORICAL_COLUMNS,
cont_names=CRITEO_CONTINUOUS_COLUMNS,
label_name=CRITEO_CLICK_COLUMNS
)
# We want to assign 0 to all missing values, and calculate log(x+3) for present values
# so if we set missing values to -2, then the result of log(1+2+(-2)) would be 0
workflow.add_cont_feature([
FillMissing(fill_val=-2.0),
LambdaOp(op_name='Add3ButMinusOneCauseLogAddsOne', f=lambda col, _: col.add(2.0)),
LogOp(), # Log(1+x)
])
workflow.add_cat_preprocess(
Categorify(freq_threshold=frequency_threshold, out_path=output_path)
)
workflow.finalize()
print("Creating Dataset Iterator")
all_ds = Dataset(all_set, engine="parquet", part_mem_fraction=ALL_DS_MEM_FRAC)
trains_ds = Dataset(train_files, engine="parquet", part_mem_fraction=TRAIN_DS_MEM_FRAC)
valid_ds = Dataset(valid_file, engine="parquet", part_mem_fraction=TEST_DS_MEM_FRAC)
test_ds = Dataset(test_file, engine="parquet", part_mem_fraction=VALID_DS_MEM_FRAC)
print("Running apply")
out_train = os.path.join(output_path, "train")
out_valid = os.path.join(output_path, "validation")
out_test = os.path.join(output_path, "test")
start = time()
workflow.update_stats(all_ds)
print(f"Gathering statistics time: {time() - start}")
start = time()
workflow.apply(
trains_ds,
record_stats=False,
output_path=out_train
)
print(f"train preprocess time: {time() - start}")
start = time()
workflow.apply(
valid_ds,
record_stats=False,
output_path=out_valid
)
print(f"valid preprocess time: {time() - start}")
start = time()
workflow.apply(
test_ds,
record_stats=False,
output_path=out_test
)
print(f"test preprocess time: {time() - start}")
save_model_size_config(workflow, output_path)
def parse_args():
parser = argparse.ArgumentParser(description="Process some integers.")
parser.add_argument(
"input_dir",
help="directory with either csv or parquet dataset files inside"
)
parser.add_argument(
"output_dir",
help="directory to save preprocessed dataset files"
)
parser.add_argument(
"--intermediate_dir",
required=False,
default=None,
help="directory for converted to parquet dataset files inside"
)
parser.add_argument(
"--devices",
required=True,
help="available gpus, separated with commas; e.g 0,1,2,3"
)
parser.add_argument(
"--freq_threshold",
required=False,
default=15,
help="frequency threshold for categorical can be int or dict {column_name: threshold}"
)
parser.add_argument(
"--pool",
required=False,
default=False,
help="bool value to use a RMM pooled allocator"
)
args = parser.parse_args()
args.devices = args.devices.split(",")
return args
def is_input_parquet(input_dir: str):
for f in os.listdir(input_dir):
if 'parquet' in f:
return True
return False
def start_local_CUDA_cluster(devices, pool):
client = None  # ensure a defined return value when running on a single device
if len(devices) > 1:
cluster = LocalCUDACluster(
n_workers=len(devices),
CUDA_VISIBLE_DEVICES=",".join(str(x) for x in devices),
)
client = Client(cluster)
if pool:
client.run(_pool)
elif pool:
_pool()
return client
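# Example invocation (hypothetical paths):
#   python preproc_NVTabular.py /data/criteo_raw /data/criteo_preprocessed \
#       --intermediate_dir /data/criteo_parquet --devices 0,1,2,3 --freq_threshold 15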
def main():
args = parse_args()
client = start_local_CUDA_cluster(args.devices, args.pool)
if not is_input_parquet(args.input_dir):
convert_criteo_to_parquet(
input_path=args.input_dir,
output_path=args.intermediate_dir,
client=client,
)
args.input_dir = args.intermediate_dir
print("Preprocessing data")
preprocess_criteo_parquet(
input_path=args.input_dir,
output_path=args.output_dir,
client=client,
frequency_threshold=int(args.freq_threshold),
)
print("Done")
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/preproc/preproc_NVTabular.py |
DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/__init__.py |
|
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Sequence, List, Iterable
import apex.mlp
import torch
from torch import nn
class AmpMlpFunction(torch.autograd.Function):
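# Thin autograd wrapper around apex's fused MLP so it cooperates with torch.cuda.amp autocast:
# inputs are cast to half precision in the forward pass.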
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(*args, **kwargs):
return apex.mlp.MlpFunction.forward(*args, **kwargs)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(*args, **kwargs):
return apex.mlp.MlpFunction.backward(*args, **kwargs)
mlp_function = AmpMlpFunction.apply
class AmpMlp(apex.mlp.MLP):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
return mlp_function(self.bias, self.activation, input, *self.weights, *self.biases)
class AbstractMlp(nn.Module):
"""
MLP interface used for configuration-agnostic checkpointing (`dlrm.utils.checkpointing`)
and easily swappable MLP implementation
"""
@property
def weights(self) -> List[torch.Tensor]:
"""
Getter for all MLP layers weights (without biases)
"""
raise NotImplementedError()
@property
def biases(self) -> List[torch.Tensor]:
"""
Getter for all MLP layers biases
"""
raise NotImplementedError()
def forward(self, mlp_input: torch.Tensor) -> torch.Tensor:
raise NotImplementedError()
def load_state(self, weights: Iterable[torch.Tensor], biases: Iterable[torch.Tensor]):
for new_weight, weight, new_bias, bias in zip(weights, self.weights, biases, self.biases):
weight.data = new_weight.data
weight.data.requires_grad_()
bias.data = new_bias.data
bias.data.requires_grad_()
class TorchMlp(AbstractMlp):
def __init__(self, input_dim: int, sizes: Sequence[int]):
super().__init__()
layers = []
for output_dims in sizes:
layers.append(nn.Linear(input_dim, output_dims))
layers.append(nn.ReLU(inplace=True))
input_dim = output_dims
self.layers = nn.Sequential(*layers)
self._initialize_weights()
def _initialize_weights(self):
for module in self.modules():
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight.data, 0., math.sqrt(2. / (module.in_features + module.out_features)))
nn.init.normal_(module.bias.data, 0., math.sqrt(1. / module.out_features))
@property
def weights(self):
return [layer.weight for layer in self.layers if isinstance(layer, nn.Linear)]
@property
def biases(self):
return [layer.bias for layer in self.layers if isinstance(layer, nn.Linear)]
def forward(self, mlp_input: torch.Tensor) -> torch.Tensor:
"""
Args:
mlp_input (Tensor): with shape [batch_size, num_features]
Returns:
Tensor: Mlp output in shape [batch_size, num_output_features]
"""
return self.layers(mlp_input)
class CppMlp(AbstractMlp):
def __init__(self, input_dim: int, sizes: Sequence[int]):
super().__init__()
self.mlp = AmpMlp([input_dim] + list(sizes))
@property
def weights(self):
return self.mlp.weights
@property
def biases(self):
return self.mlp.biases
def forward(self, mlp_input: torch.Tensor) -> torch.Tensor:
"""
Args:
mlp_input (Tensor): with shape [batch_size, num_features]
Returns:
Tensor: Mlp output in shape [batch_size, num_output_features]
"""
return self.mlp(mlp_input)
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/nn/mlps.py |
DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/nn/__init__.py |
|
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence
from dlrm.nn.embeddings import (
JointEmbedding, MultiTableEmbeddings, FusedJointEmbedding, JointSparseEmbedding,
Embeddings
)
from dlrm.nn.interactions import Interaction, CudaDotInteraction, DotInteraction, CatInteraction
from dlrm.nn.mlps import AbstractMlp, CppMlp, TorchMlp
from dlrm.utils.distributed import is_distributed
def create_mlp(input_dim: int, sizes: Sequence[int], use_cpp_mlp: bool) -> AbstractMlp:
return CppMlp(input_dim, sizes) if use_cpp_mlp else TorchMlp(input_dim, sizes)
def create_embeddings(
embedding_type: str,
categorical_feature_sizes: Sequence[int],
embedding_dim: int,
device: str = "cuda",
hash_indices: bool = False,
fp16: bool = False
) -> Embeddings:
if embedding_type == "joint":
return JointEmbedding(categorical_feature_sizes, embedding_dim, device=device, hash_indices=hash_indices)
elif embedding_type == "joint_fused":
assert not is_distributed(), "Joint fused embedding is not supported in the distributed mode. " \
"You may want to use 'joint_sparse' option instead."
return FusedJointEmbedding(categorical_feature_sizes, embedding_dim, device=device, hash_indices=hash_indices,
amp_train=fp16)
elif embedding_type == "joint_sparse":
return JointSparseEmbedding(categorical_feature_sizes, embedding_dim, device=device, hash_indices=hash_indices)
elif embedding_type == "multi_table":
return MultiTableEmbeddings(categorical_feature_sizes, embedding_dim,
hash_indices=hash_indices, device=device)
else:
raise NotImplementedError(f"unknown embedding type: {embedding_type}")
def create_interaction(interaction_op: str, embedding_num: int, embedding_dim: int) -> Interaction:
if interaction_op == "dot":
return DotInteraction(embedding_num, embedding_dim)
elif interaction_op == "cuda_dot":
return CudaDotInteraction(
DotInteraction(embedding_num, embedding_dim)
)
elif interaction_op == "cat":
return CatInteraction(embedding_num, embedding_dim)
else:
raise NotImplementedError(f"unknown interaction op: {interaction_op}")
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/nn/factories.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from typing import Sequence, List, Iterable
import torch
from absl import logging
from torch import nn
from dlrm import cuda_ext
from dlrm.cuda_ext.fused_gather_embedding import BuckleEmbeddingFusedGatherFunction
class Embeddings(nn.Module):
def forward(self, categorical_inputs) -> List[torch.Tensor]:
raise NotImplementedError()
@property
def weights(self) -> List[torch.Tensor]:
"""
Note: output list size should match number of handled categorical features
"""
raise NotImplementedError()
def load_weights(self, weights: Iterable[torch.Tensor]):
raise NotImplementedError()
class MultiTableEmbeddings(Embeddings):
def __init__(
self,
categorical_feature_sizes: Sequence[int],
embedding_dim: int,
hash_indices: bool = False,
device: str = "cuda"
):
super().__init__()
self._categorical_feature_sizes = copy.copy(categorical_feature_sizes)
self._base_device = device
self._embedding_device_map = [device for _ in range(len(categorical_feature_sizes))]
embeddings = []
# Each embedding table has size [num_features, embedding_dim]
for i, num_features in enumerate(categorical_feature_sizes):
# Allocating directly on the GPU is much faster than allocating on the CPU and then copying over
embedding_weight = torch.empty((num_features, embedding_dim), device=self._embedding_device_map[i])
embedding = nn.Embedding.from_pretrained(embedding_weight, freeze=False, sparse=True)
embeddings.append(embedding)
self.embeddings = nn.ModuleList(embeddings)
self.hash_indices = hash_indices
self.embedding_dim = embedding_dim
def forward(self, categorical_inputs) -> List[torch.Tensor]:
"""
Args:
categorical_inputs (Tensor): with shape [batch_size, num_categorical_features]
Returns:
Tensor: embedding outputs in shape [batch, embedding_num, embedding_dim]
"""
# Put indices on the same device as corresponding embedding
device_indices = []
for embedding_id, _ in enumerate(self.embeddings):
device_indices.append(categorical_inputs[:, embedding_id].to(self._embedding_device_map[embedding_id]))
# embedding_outputs will be a list of (26 in the case of Criteo) fetched embeddings with shape
# [batch_size, embedding_size]
embedding_outputs = []
for embedding_id, embedding in enumerate(self.embeddings):
if self.hash_indices:
device_indices[embedding_id] %= embedding.num_embeddings
embedding_outputs.append(embedding(device_indices[embedding_id]).to(self._base_device).unsqueeze(1))
return embedding_outputs
@property
def weights(self):
return [embedding.weight.data for embedding in self.embeddings]
def load_weights(self, weights: Iterable[torch.Tensor]):
for embedding, weight in zip(self.embeddings, weights):
embedding.weight.data = weight
embedding.weight.data.requires_grad_()
class JointEmbedding(Embeddings):
"""Buckle multiple one hot embedding together
Multiple one hot embedding can be done as one embedding (indexing). Use nn.Embedding to deal with sparse wgrad
before I fully customizing it.
Args:
categorical_feature_sizes (list): A list of integer indicating number of features of each embedding table
embedding_dim (int): the size of each embedding vector
device (torch.device): where to create the embedding. Default "cuda"
"""
def __init__(
self,
categorical_feature_sizes: Sequence[int],
embedding_dim: int,
device: str = "cuda",
hash_indices: bool = False
):
super().__init__()
self._categorical_feature_sizes = copy.copy(categorical_feature_sizes)
self.register_buffer("offsets", torch.tensor([0] + list(categorical_feature_sizes), device=device).cumsum(0))
embedding_weight = torch.empty((self.offsets[-1].item(), embedding_dim), device=device)
self.embedding = nn.Embedding.from_pretrained(embedding_weight, freeze=False, sparse=True)
self.hash_indices = hash_indices
# pylint:disable=missing-docstring
def forward(self, categorical_inputs) -> List[torch.Tensor]:
if self.hash_indices:
for cat, size in enumerate(self._categorical_feature_sizes):
categorical_inputs[:, cat] %= size
logging.log_first_n(logging.WARNING, f"Hashed indices out of range.", 1)
return [self.embedding(categorical_inputs + self.offsets[:-1])]
def extra_repr(self):
s = f"offsets={self.offsets.cpu().numpy()}"
return s
# pylint:enable=missing-docstring
@property
def weights(self):
return [self.embedding.weight.data[self.offsets[cat]:self.offsets[cat + 1]]
for cat in range(len(self._categorical_feature_sizes))]
def load_weights(self, weights: Iterable[torch.Tensor]):
data = self.embedding.weight.data
offsets = self.offsets
for cat, weight in zip(range(len(self._categorical_feature_sizes)), weights):
data[offsets[cat]:offsets[cat + 1]] = weight
# If you want to use a fused joint embedding for a different number of variables, first change
# the custom cuda kernel code to accommodate the new number, then change this value accordingly
FUSED_JOINT_EMBEDDING_NUMBER_OF_CATEGORICAL_VARIABLES = 26
class FusedJointEmbedding(Embeddings):
"""
Buckle multiple one-hot embeddings together
Multiple one-hot embeddings can be done as one embedding (indexing).
Args:
categorical_feature_sizes (list): A list of integer indicating number of features of each embedding table
embedding_dim (int): the size of each embedding vector
device (torch.device): where to create the embedding. Default "cuda"
"""
def __init__(
self,
categorical_feature_sizes: Sequence[int],
embedding_dim: int,
device: str = "cuda",
hash_indices: bool = False,
amp_train: bool = False
):
super().__init__()
self._categorical_feature_sizes = copy.copy(categorical_feature_sizes)
self.embedding_dim = embedding_dim
self.amp_train = amp_train
self.hash_indices = hash_indices
self.register_buffer("offsets", torch.tensor([0] + categorical_feature_sizes).cumsum(0).to(device))
self.register_parameter("weight", torch.nn.Parameter(
torch.empty((self.offsets[-1].item(), embedding_dim), device=device), requires_grad=True))
if len(categorical_feature_sizes) != FUSED_JOINT_EMBEDDING_NUMBER_OF_CATEGORICAL_VARIABLES:
raise ValueError(
f"Number of categorical features must be equal to"
f" {FUSED_JOINT_EMBEDDING_NUMBER_OF_CATEGORICAL_VARIABLES}, got {len(categorical_feature_sizes)}\n"
f"If you want to train on a different number, you need to recompile cuda kernels to support it or "
f"use different embedding type.")
def forward(self, categorical_inputs) -> List[torch.Tensor]:
# Check input has the right shape
if self.hash_indices:
for cat, size in enumerate(self._categorical_feature_sizes):
categorical_inputs[:, cat] %= size
logging.log_first_n(logging.WARNING, f"Hashed indices out of range.", 1)
return [BuckleEmbeddingFusedGatherFunction.apply(self.weight, categorical_inputs, self.offsets, self.amp_train)]
def extra_repr(self):
return 'embedding_dim={}, categorical_feature_sizes={}, offsets={}'.format(
self.embedding_dim, self._categorical_feature_sizes, self.offsets)
@property
def weights(self) -> List[torch.Tensor]:
return [self.weight.data[self.offsets[cat]:self.offsets[cat + 1]]
for cat in range(len(self._categorical_feature_sizes))]
def load_weights(self, weights: Iterable[torch.Tensor]):
data = self.weight.data
offsets = self.offsets
for cat, weight in zip(range(len(self._categorical_feature_sizes)), weights):
data[offsets[cat]:offsets[cat + 1]] = weight
class JointSparseEmbedding(Embeddings):
def __init__(
self,
categorical_feature_sizes: List[int],
embedding_dim: int,
device: str = "cuda",
hash_indices: bool = False
):
super().__init__()
self._categorical_feature_sizes = categorical_feature_sizes
self.embedding = cuda_ext.JointSparseEmbedding(categorical_feature_sizes, embedding_dim, device)
self.hash_indices = hash_indices
def forward(self, categorical_inputs) -> List[torch.Tensor]:
if self.hash_indices:
for cat, size in enumerate(self._categorical_feature_sizes):
categorical_inputs[:, cat] %= size
logging.log_first_n(logging.WARNING, f"Hashed indices out of range.", 1)
return [
self.embedding(categorical_inputs)
]
@property
def weights(self):
data = self.embedding.weights.data
offsets = self.embedding.offsets
return [data[offsets[cat]:offsets[cat + 1]] for cat in range(len(self._categorical_feature_sizes))]
def load_weights(self, weights: Iterable[torch.Tensor]):
data = self.embedding.weights.data
offsets = self.embedding.offsets
for cat, weight in zip(range(len(self._categorical_feature_sizes)), weights):
data[offsets[cat]:offsets[cat + 1]] = weight
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/nn/embeddings.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
from typing import Sequence, Optional, Tuple
import torch
from torch import nn
from dlrm.nn.embeddings import Embeddings
from dlrm.nn.factories import create_embeddings, create_mlp
from dlrm.nn.interactions import Interaction
class DlrmBottom(nn.Module):
def __init__(
self,
num_numerical_features: int,
categorical_feature_sizes: Sequence[int],
bottom_mlp_sizes: Optional[Sequence[int]] = None,
embedding_type: str = "multi_table",
embedding_dim: int = 128,
hash_indices: bool = False,
use_cpp_mlp: bool = False,
fp16: bool = False,
device: str = "cuda"
):
super().__init__()
assert bottom_mlp_sizes is None or embedding_dim == bottom_mlp_sizes[-1], "The last bottom MLP layer must" \
" have same size as embedding."
self._embedding_dim = embedding_dim
self._categorical_feature_sizes = copy.copy(categorical_feature_sizes)
self._fp16 = fp16
self.embeddings = create_embeddings(
embedding_type,
categorical_feature_sizes,
embedding_dim,
device,
hash_indices,
fp16
)
self.mlp = (create_mlp(num_numerical_features, bottom_mlp_sizes, use_cpp_mlp).to(device)
if bottom_mlp_sizes else torch.nn.ModuleList())
self._initialize_embeddings_weights(self.embeddings, categorical_feature_sizes)
def _initialize_embeddings_weights(self, embeddings: Embeddings, categorical_feature_sizes: Sequence[int]):
assert len(embeddings.weights) == len(categorical_feature_sizes)
for size, weight in zip(categorical_feature_sizes, embeddings.weights):
nn.init.uniform_(
weight,
-math.sqrt(1. / size),
math.sqrt(1. / size)
)
@property
def num_categorical_features(self) -> int:
return len(self._categorical_feature_sizes)
@property
def num_feature_vectors(self) -> int:
return self.num_categorical_features + int(self.mlp is not None)
def forward(self, numerical_input, categorical_inputs) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
"""
Args:
numerical_input (Tensor): with shape [batch_size, num_numerical_features]
categorical_inputs (Tensor): with shape [batch_size, num_categorical_features]
Returns:
Tensor: Concatenated bottom mlp and embedding output in shape [batch, 1 + #embedding, embedding_dim]
"""
bottom_output = []
bottom_mlp_output = None
if self.mlp:
bottom_mlp_output = self.mlp(numerical_input)
if self._fp16:
bottom_mlp_output = bottom_mlp_output.half()
# reshape bottom mlp to concatenate with embeddings
# this order with bottom_mlp at the front is assumed by custom kernels
bottom_output.append(bottom_mlp_output.view(-1, 1, self._embedding_dim))
if self.num_categorical_features > 0:
bottom_output += self.embeddings(categorical_inputs)
if self._fp16:
bottom_output = [x.half() if x.dtype != torch.half else x for x in bottom_output]
if len(bottom_output) == 1:
return bottom_output[0], bottom_mlp_output
return torch.cat(bottom_output, dim=1), bottom_mlp_output
class DlrmTop(nn.Module):
def __init__(self, top_mlp_sizes: Sequence[int], interaction: Interaction, use_cpp_mlp: bool = False):
super().__init__()
self.interaction = interaction
self.mlp = create_mlp(interaction.num_interactions, top_mlp_sizes[:-1], use_cpp_mlp)
self.out = nn.Linear(top_mlp_sizes[-2], top_mlp_sizes[-1])
self._initialize_weights()
def _initialize_weights(self):
# Explicitly set weight corresponding to zero padded interaction output. They will
# stay 0 throughout the entire training. An assert can be added to the end of the training
# to prove it doesn't increase model capacity but just 0 paddings.
nn.init.zeros_(self.mlp.weights[0][:, -1].data)
def forward(self, bottom_output, bottom_mlp_output):
"""
Args:
bottom_output (Tensor): with shape [batch_size, 1 + #embeddings, embedding_dim]
bottom_mlp_output (Tensor): with shape [batch_size, embedding_dim]
"""
interaction_output = self.interaction.interact(bottom_output, bottom_mlp_output)
return self.out(self.mlp(interaction_output))
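# Illustrative shape check for the bottom-output layout documented in DlrmBottom.forward:
# the bottom-MLP vector comes first, followed by one vector per embedding table, giving a
# tensor of shape [batch_size, 1 + num_embeddings, embedding_dim]. Plain random CPU tensors
# are used here; the real modules above additionally require the dlrm factories (and, for
# some embedding types, a GPU).
if __name__ == "__main__":
    batch_size, num_embeddings, embedding_dim = 4, 3, 8
    bottom_mlp_vector = torch.rand(batch_size, 1, embedding_dim)
    embedding_vectors = torch.rand(batch_size, num_embeddings, embedding_dim)
    bottom_output = torch.cat([bottom_mlp_vector, embedding_vectors], dim=1)
    assert bottom_output.shape == (batch_size, 1 + num_embeddings, embedding_dim)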
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/nn/parts.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from dlrm.cuda_ext import dotBasedInteract
def padding_size(n: int) -> int:
nearest_multiple = ((n - 1) // 8 + 1) * 8
return nearest_multiple - n
class Interaction:
@property
def num_interactions(self) -> int:
raise NotImplementedError()
def interact(self, bottom_output, bottom_mlp_output):
"""
:param bottom_output: [batch_size, 1 + #embeddings, embedding_dim]
:param bottom_mlp_output: [batch_size, embedding_dim]
:return: interaction output of shape [batch_size, num_interactions]
"""
raise NotImplementedError()
class DotInteraction(Interaction):
def __init__(self, embedding_num: int, embedding_dim: int):
"""
Interactions are computed among the outputs of all the embedding tables and the bottom MLP, i.e. among
(num_embedding_tables + 1) vectors of size embedding_dim. The ``dot`` interaction computes the dot product
between every pair of distinct vectors; the flattened products are concatenated with the bottom MLP output,
so the interaction output has shape [batch_size, num_interactions] (zero-padded to a multiple of 8).
"""
self._num_interaction_inputs = embedding_num + 1
self._embedding_dim = embedding_dim
self._tril_indices = torch.tensor([[i for i in range(self._num_interaction_inputs)
for _ in range(i)],
[j for i in range(self._num_interaction_inputs)
for j in range(i)]]).cuda()
# THIS IS NOT A REGULAR TRIANGULAR LOWER MATRIX! THE MAIN DIAGONAL IS NOT INCLUDED
@property
def _raw_num_interactions(self) -> int:
return (self._num_interaction_inputs * (self._num_interaction_inputs - 1)) // 2 + self._embedding_dim
@property
def num_interactions(self) -> int:
n = self._raw_num_interactions
return n + padding_size(n)
def interact(self, bottom_output, bottom_mlp_output):
"""
:param bottom_output: [batch_size, 1 + #embeddings, embedding_dim]
:param bottom_mlp_output: [batch_size, embedding_dim]
:return: interaction output of shape [batch_size, num_interactions]
"""
batch_size = bottom_output.size()[0]
interaction = torch.bmm(bottom_output, torch.transpose(bottom_output, 1, 2))
interaction_flat = interaction[:, self._tril_indices[0], self._tril_indices[1]]
# concatenate dense features and interactions
padding_dim = padding_size(self._raw_num_interactions)
zeros_padding = torch.zeros(batch_size, padding_dim, dtype=bottom_output.dtype, device=bottom_output.device)
interaction_output = torch.cat(
(bottom_mlp_output, interaction_flat, zeros_padding), dim=1)
return interaction_output
class CudaDotInteraction(Interaction):
def __init__(self, dot_interaction: DotInteraction):
self._dot_interaction = dot_interaction
@property
def num_interactions(self):
return self._dot_interaction.num_interactions
def interact(self, bottom_output, bottom_mlp_output):
"""
:param bottom_output: [batch_size, 1 + #embeddings, embedding_dim]
:param bottom_mlp_output: [batch_size, embedding_dim]
:return: interaction output of shape [batch_size, num_interactions]
"""
return dotBasedInteract(bottom_output, bottom_mlp_output)
class CatInteraction(Interaction):
def __init__(self, embedding_num: int, embedding_dim: int):
"""
Interactions are computed among the outputs of all the embedding tables and the bottom MLP, i.e. among
(num_embedding_tables + 1) vectors of size embedding_dim. The ``cat`` interaction concatenates all the
vectors together, so the interaction output has shape [batch_size, num_interactions].
"""
self._num_interaction_inputs = embedding_num + 1
self._embedding_dim = embedding_dim
@property
def num_interactions(self) -> int:
return self._num_interaction_inputs * self._embedding_dim
def interact(self, bottom_output, bottom_mlp_output):
"""
:param bottom_output: [batch_size, 1 + #embeddings, embedding_dim]
:param bottom_mlp_output: [batch_size, embedding_dim]
:return: interaction output of shape [batch_size, num_interactions]
"""
return bottom_output.view(-1, self.num_interactions)
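# Illustrative sanity check of the interaction width for the Criteo-style setup assumed
# elsewhere in this repository (26 embedding tables, embedding_dim=128): 27 input vectors
# give 27 * 26 / 2 = 351 pairwise dot products, plus the 128 dense features = 479, which
# padding_size() pads to the next multiple of 8, i.e. 480. Pure Python arithmetic only;
# it does not exercise the CUDA extension imported above.
if __name__ == "__main__":
    num_inputs = 26 + 1
    embedding_dim = 128
    raw = num_inputs * (num_inputs - 1) // 2 + embedding_dim
    print(raw, raw + padding_size(raw))  # 479 480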
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/nn/interactions.py |
DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/utils/__init__.py |
|
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import warnings
from collections import deque
from functools import reduce
from itertools import combinations_with_replacement
from typing import Sequence
import torch
import torch.distributed as dist
def setup_distributed_print(enable):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if enable or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def is_distributed() -> bool:
return get_world_size() > 1
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def get_local_rank():
if not is_dist_avail_and_initialized():
return 0
return int(os.environ['LOCAL_RANK'])
def is_main_process():
return get_rank() == 0
def init_distributed_mode(backend="nccl", use_gpu=True):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
rank = int(os.environ["RANK"])
world_size = int(os.environ['WORLD_SIZE'])
gpu = int(os.environ['LOCAL_RANK'])
elif 'OMPI_COMM_WORLD_RANK' in os.environ and 'OMPI_COMM_WORLD_SIZE' in os.environ:
rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
else:
print('Not using distributed mode')
return 0, 1, 0
if use_gpu:
torch.cuda.set_device(gpu)
if rank != 0:
warnings.filterwarnings("ignore")
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
torch.distributed.init_process_group(backend=backend, world_size=world_size, rank=rank, init_method='env://')
return rank, world_size, gpu
def get_gpu_batch_sizes(global_batch_size: int, num_gpus: int = 4, batch_std: int = 64, divisible_by: int = 64):
batch_avg = global_batch_size // num_gpus
start, end = batch_avg - batch_std, batch_avg + batch_std
sizes_range = (x for x in range(start, end + 1) if x % divisible_by == 0)
solutions = [
sizes for sizes in combinations_with_replacement(sizes_range, num_gpus) if sum(sizes) == global_batch_size
]
if not solutions:
raise RuntimeError("Could not find GPU batch sizes for a given configuration. "
"Please adjust global batch size or number of used GPUs.")
return max(solutions, key=lambda sizes: reduce(lambda x, y: x * y, sizes))
def argsort(sequence, reverse: bool = False):
idx_pairs = [(x, i) for i, x in enumerate(sequence)]
sorted_pairs = sorted(idx_pairs, key=lambda pair: pair[0], reverse=reverse)
return [i for _, i in sorted_pairs]
def distribute_to_buckets(sizes: Sequence[int], buckets_num: int):
def sum_sizes(indices):
return sum(sizes[i] for i in indices)
max_bucket_size = math.ceil(len(sizes) / buckets_num)
idx_sorted = deque(argsort(sizes, reverse=True))
buckets = [[] for _ in range(buckets_num)]
final_buckets = []
while idx_sorted:
bucket = buckets[0]
bucket.append(idx_sorted.popleft())
if len(bucket) == max_bucket_size:
final_buckets.append(buckets.pop(0))
buckets.sort(key=sum_sizes)
final_buckets += buckets
return final_buckets
def get_device_mapping(embedding_sizes: Sequence[int], num_gpus: int = 8):
"""Get device mappings for hybrid parallelism
The bottom MLP runs on device 0. Embeddings are distributed across all the devices.
Finding the optimal partition of N embedding tables into K devices that minimizes the maximal subset sum
is an NP-hard problem. Additionally, the distribution of embedding tables should be nearly uniform due to
performance constraints. Therefore, a suboptimal greedy approach with a maximum bucket size is used.
Args:
embedding_sizes (Sequence[int]): embedding tables sizes
num_gpus (int): Default 8.
Returns:
device_mapping (dict):
"""
if num_gpus > 4:
# for higher no. of GPUs, make sure the one with bottom mlp has no embeddings
gpu_buckets = distribute_to_buckets(embedding_sizes, num_gpus - 1) # leave one device out for the bottom MLP
gpu_buckets.insert(0, [])
else:
gpu_buckets = distribute_to_buckets(embedding_sizes, num_gpus)
vectors_per_gpu = [len(bucket) for bucket in gpu_buckets]
vectors_per_gpu[0] += 1 # count bottom mlp
return {
'bottom_mlp': 0,
'embedding': gpu_buckets,
'vectors_per_gpu': vectors_per_gpu,
}
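# Illustrative call on dummy table sizes: distributing 5 embedding tables over 2 GPUs with
# the greedy bucketing above. Device 0 additionally hosts the bottom MLP, so its entry in
# vectors_per_gpu is incremented by one. get_gpu_batch_sizes splits a global batch into
# near-equal per-GPU batches that are divisible by 64.
if __name__ == "__main__":
    mapping = get_device_mapping([100, 10, 50, 5, 200], num_gpus=2)
    print(mapping["embedding"])         # greedy buckets of table indices
    print(mapping["vectors_per_gpu"])   # e.g. [4, 2]
    print(get_gpu_batch_sizes(65536, num_gpus=2))  # e.g. (32768, 32768)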
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/utils/distributed.py |
DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/utils/checkpointing/__init__.py |
|
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from os.path import join
from typing import Sequence, Any, Dict
import torch
_BOTTOM_MLP_FILE = "bottom_model.mlp.pt"
_TOP_MLP_FILE = "top_model.mlp.pt"
_TOP_OUT_FILE = "top_model.out.pt"
_EMBEDDING_METADATA_FILE = "embeddings.metadata.pt"
_METADATA_FILE = "metadata.pt"
def _get_embedding_file(embedding_index: int) -> str:
return f"bottom_model.embeddings.{embedding_index}.bin"
def _get_embedding_meta_file(embedding_index: int) -> str:
return f"embeddings.{embedding_index}.meta.pt"
class DlrmCheckpointWriter:
"""
Class responsible for saving checkpoints of DLRM model parts.
Depends on `dlrm.nn.embeddings.Embeddings` and `dlrm.nn.mlps.AbstractMlp` interfaces
(for handling multiple model configurations)
"""
def __init__(self, embedding_indices: Sequence[int], config: Dict[str, Any]):
self._embedding_indices = embedding_indices
self._config = config
def save_embeddings(self, checkpoint_path: str, model):
self._ensure_directory(checkpoint_path)
for embedding_index, weight in zip(self._embedding_indices, model.bottom_model.embeddings.weights):
self._save_as_bytes(weight.data, join(checkpoint_path, _get_embedding_file(embedding_index)))
torch.save({"shape": weight.shape}, join(checkpoint_path, _get_embedding_meta_file(embedding_index)))
def save_bottom_mlp(self, checkpoint_path: str, model):
self._ensure_directory(checkpoint_path)
torch.save(self._mlp_state(model.bottom_model.mlp), join(checkpoint_path, _BOTTOM_MLP_FILE))
def save_top_model(self, checkpoint_path: str, model):
self._ensure_directory(checkpoint_path)
# DistributedDataParallel wraps top_model under "module" attribute
top_model = model.top_model.module if hasattr(model.top_model, 'module') else model.top_model
torch.save(self._mlp_state(top_model.mlp), join(checkpoint_path, _TOP_MLP_FILE))
torch.save(top_model.out.state_dict(), join(checkpoint_path, _TOP_OUT_FILE))
def save_metadata(self, checkpoint_path: str, data: Dict[str, Any]):
self._ensure_directory(checkpoint_path)
torch.save({"data": data, "config": self._config}, join(checkpoint_path, _METADATA_FILE))
def _ensure_directory(self, checkpoint_path: str):
os.makedirs(checkpoint_path, exist_ok=True)
def _mlp_state(self, mlp):
return {
"weights": [x.to(torch.float32) for x in mlp.weights],
"biases": [x.to(torch.float32) for x in mlp.biases]
}
def _save_as_bytes(self, tensor: torch.Tensor, path: str):
with open(path, "wb+") as file:
file.write(tensor.cpu().numpy().astype(np.float32).tobytes())
class DlrmCheckpointLoader:
"""
Class responsible for loading checkpoints of DLRM model parts.
Depends on `dlrm.nn.embeddings.Embeddings` and `dlrm.nn.mlps.AbstractMlp` interfaces
(for handling multiple model configurations)
"""
def __init__(self, embedding_indices: Sequence[int], device: str = "cpu"):
self._embedding_indices = embedding_indices
self._device = device
def load_embeddings(self, checkpoint_path: str, model):
embedding_weights = (self._load_from_bytes(join(checkpoint_path, _get_embedding_file(index)),
self._get_embedding_shape(checkpoint_path, index))
for index in self._embedding_indices)
model.bottom_model.embeddings.load_weights(embedding_weights)
def load_bottom_mlp(self, checkpoint_path: str, model):
bottom_mlp_state = self._load(checkpoint_path, _BOTTOM_MLP_FILE)
model.bottom_model.mlp.load_state(bottom_mlp_state["weights"], bottom_mlp_state["biases"])
def load_top_model(self, checkpoint_path: str, model):
# DistributedDataParallel wraps top_model under "module" attribute
top_model = model.top_model.module if hasattr(model.top_model, 'module') else model.top_model
top_mlp_state = self._load(checkpoint_path, _TOP_MLP_FILE)
top_model.mlp.load_state(top_mlp_state["weights"], top_mlp_state["biases"])
top_out_state = self._load(checkpoint_path, _TOP_OUT_FILE)
top_model.out.load_state_dict(top_out_state)
def _load(self, checkpoint_path: str, state_path: str):
data = torch.load(join(checkpoint_path, state_path), map_location=self._device)
return {self._strip_key(key): value for key, value in data.items()}
def _strip_key(self, key: str):
# DistributedDataParallel wraps top_model under "module" attribute
prefix = "module."
if key.startswith(prefix):
return key[len(prefix):]
return key
def _load_from_bytes(self, path: str, shape) -> torch.Tensor:
with open(path, "rb") as file:
array = np.frombuffer(file.read(), dtype=np.float32).reshape(*shape)
return torch.from_numpy(array).to(self._device)
def _get_embedding_shape(self, checkpoint_path: str, index: int) -> tuple:
embedding_meta = torch.load(join(checkpoint_path, _get_embedding_meta_file(index)))
return embedding_meta["shape"]
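# Illustrative construction of a writer (saving additionally requires a DLRM model exposing
# bottom_model.embeddings, bottom_model.mlp and top_model, as assumed by the methods above).
# The file-name helpers show the on-disk layout a checkpoint directory would use.
if __name__ == "__main__":
    writer = DlrmCheckpointWriter(embedding_indices=[0, 1], config={"embedding_dim": 128})
    print([_get_embedding_file(i) for i in [0, 1]])  # bottom_model.embeddings.<i>.bin
    print(_BOTTOM_MLP_FILE, _TOP_MLP_FILE, _TOP_OUT_FILE, _METADATA_FILE)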
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/utils/checkpointing/model.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Any, Optional
import torch
from dlrm.utils.checkpointing.model import DlrmCheckpointWriter, DlrmCheckpointLoader
class DistributedCheckpointWriter:
def __init__(
self,
writer: DlrmCheckpointWriter,
device_mapping: Dict[str, Any],
rank: int,
main_process: bool
):
self._device_mapping = device_mapping
self._main_process = main_process
self._has_bottom_mlp = rank == device_mapping["bottom_mlp"]
self._writer = writer
self._distributed = len(device_mapping['embedding']) > 1
def save_checkpoint(
self,
model,
checkpoint_path: str,
epoch: Optional[int] = None,
step: Optional[int] = None
):
self._writer.save_embeddings(checkpoint_path, model)
if self._has_bottom_mlp:
self._writer.save_bottom_mlp(checkpoint_path, model)
if self._main_process:
self._writer.save_top_model(checkpoint_path, model)
self._save_metadata(checkpoint_path, epoch, step)
if self._distributed:
torch.distributed.barrier()
def _save_metadata(self, checkpoint_path, epoch, step):
self._writer.save_metadata(checkpoint_path, {
"device_mapping": self._device_mapping,
"epoch": epoch,
"step": step
})
class DistributedCheckpointLoader:
def __init__(self, loader: DlrmCheckpointLoader, device_mapping: Dict[str, Any], rank: int):
self._has_bottom_mlp = rank == device_mapping["bottom_mlp"]
self._loader = loader
self.distributed = len(device_mapping['embedding']) > 1
def load_checkpoint(self, model, checkpoint_path: str):
self._loader.load_top_model(checkpoint_path, model)
if self._has_bottom_mlp:
self._loader.load_bottom_mlp(checkpoint_path, model)
self._loader.load_embeddings(checkpoint_path, model)
if self.distributed:
torch.distributed.barrier()
def make_distributed_checkpoint_loader(device_mapping, rank: int, device: str = "cpu") -> DistributedCheckpointLoader:
embedding_indices = device_mapping["embedding"][rank]
return DistributedCheckpointLoader(
loader=DlrmCheckpointLoader(
embedding_indices=embedding_indices,
device=device,
),
device_mapping=device_mapping,
rank=rank
)
def make_distributed_checkpoint_writer(
device_mapping,
rank: int,
is_main_process: bool,
config: Dict[str, Any],
) -> DistributedCheckpointWriter:
embedding_indices = device_mapping["embedding"][rank]
return DistributedCheckpointWriter(
writer=DlrmCheckpointWriter(
embedding_indices=embedding_indices,
config=config
),
device_mapping=device_mapping,
rank=rank,
main_process=is_main_process
)
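# Illustrative single-process setup: building a writer for rank 0 from a device mapping of
# the shape produced by dlrm.utils.distributed.get_device_mapping. Actually saving a
# checkpoint additionally requires a constructed DLRM model, so that call is left commented.
if __name__ == "__main__":
    device_mapping = {"bottom_mlp": 0, "embedding": [[0, 1, 2]], "vectors_per_gpu": [4]}
    writer = make_distributed_checkpoint_writer(
        device_mapping=device_mapping,
        rank=0,
        is_main_process=True,
        config={"embedding_dim": 128},
    )
    # writer.save_checkpoint(model, "/tmp/dlrm_checkpoint", epoch=0, step=0)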
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/utils/checkpointing/distributed.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fused Buckle Embedding
"""
from absl import logging
import torch
from torch.autograd import Function
from dlrm.cuda_ext import fused_embedding
class BuckleEmbeddingFusedGatherFunction(Function):
"""Customized embedding gather """
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
def forward(ctx, embedding, indices, offsets, amp_train):
output = fused_embedding.gather_gpu_fused_fwd(embedding, indices, offsets, amp_train)
ctx.save_for_backward(embedding, indices, offsets)
return output
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
embedding, indices, offsets = ctx.saved_tensors
logging.log_first_n(logging.WARNING, "Highly specialized embedding for embedding_dim 128", 1)
grad_weights = fused_embedding.gather_gpu_fused_bwd(embedding, indices, offsets, grad_output)
return grad_weights, None, None, None
buckle_embedding_fused_gather = BuckleEmbeddingFusedGatherFunction.apply
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/cuda_ext/fused_gather_embedding.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import torch
from torch.cuda import amp
from dlrm.cuda_ext import sparse_gather
from torch import nn
from torch.autograd import Function
class EmbeddingGatherFunction(Function):
"""Customized embedding gather with fused plain SGD"""
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
def forward(ctx, embedding, indices):
output = sparse_gather.gather_gpu_fwd(embedding, indices)
ctx.save_for_backward(indices)
ctx.num_features = embedding.size(0)
return output
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
indices = ctx.saved_tensors[0]
grad_embedding = sparse_gather.gather_gpu_bwd(grad_output, indices, ctx.num_features)
return grad_embedding, None
class JointSparseEmbedding(nn.Module):
"""Joint multiple one hot embedding together
Multiple one hot embedding can be done as one embedding (indexing).
Args:
categorical_feature_sizes (list): A list of integer indicating number of features of each embedding table
embedding_dim (int): the size of each embedding vector
device (torch.device): where to create the embedding. Default "cuda"
"""
def __init__(self, categorical_feature_sizes, embedding_dim, device="cuda"):
super(JointSparseEmbedding, self).__init__()
self.embedding_dim = embedding_dim
self.categorical_feature_sizes = copy.copy(categorical_feature_sizes)
self.register_buffer("offsets", torch.tensor([0] + categorical_feature_sizes).cumsum(0).to(device))
self.weights = torch.nn.Parameter(torch.rand((self.offsets[-1].item(), embedding_dim), device=device))
def forward(self, categorical_inputs):
# Check input has the right shape
assert categorical_inputs.shape[1] == len(self.categorical_feature_sizes)
embedding_out = embedding_gather(self.weights, categorical_inputs + self.offsets[:-1])
return embedding_out
embedding_gather = EmbeddingGatherFunction.apply
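# Illustrative offset arithmetic with dummy sizes and plain tensors (the class above
# additionally needs the compiled sparse_gather extension): with table sizes [3, 5, 2]
# the joint table has 10 rows and offsets [0, 3, 8], so index i of table t maps to
# joint row i + offsets[t].
if __name__ == "__main__":
    sizes = [3, 5, 2]
    offsets = torch.tensor([0] + sizes).cumsum(0)
    categorical_inputs = torch.tensor([[2, 4, 1]])  # one sample, one index per table
    print(categorical_inputs + offsets[:-1])        # tensor([[2, 7, 9]])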
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/cuda_ext/sparse_embedding.py |
from .dot_based_interact import dotBasedInteract
from .fused_gather_embedding import buckle_embedding_fused_gather
from .sparse_embedding import JointSparseEmbedding
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/cuda_ext/__init__.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.autograd import Function
if torch.cuda.get_device_capability()[0] >= 8:
from dlrm.cuda_ext import interaction_ampere as interaction
else:
from dlrm.cuda_ext import interaction_volta as interaction
class DotBasedInteract(Function):
""" Forward and Backward paths of cuda extension for dot-based feature interact."""
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, input, bottom_mlp_output):
output = interaction.dotBasedInteractFwd(input, bottom_mlp_output)
ctx.save_for_backward(input)
return output
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
input, = ctx.saved_tensors
grad, mlp_grad = interaction.dotBasedInteractBwd(input, grad_output)
return grad, mlp_grad
dotBasedInteract = DotBasedInteract.apply
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/cuda_ext/dot_based_interact.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import os
from collections import defaultdict
import torch
import pandas as pd
from dlrm.data.feature_spec import FeatureSpec
from dlrm.data.defaults import CATEGORICAL_CHANNEL, NUMERICAL_CHANNEL, LABEL_CHANNEL, CARDINALITY_SELECTOR
from dlrm.data.defaults import get_categorical_feature_type
def parse_args():
parser = ArgumentParser()
parser.add_argument('--input', type=str, default='',
help='Path to input data directory')
parser.add_argument('--feature_spec_in', type=str, default='feature_spec.yaml',
help='Name of the input feature specification file')
parser.add_argument('--output', type=str, default='/data',
help='Path to output data directory')
parser.add_argument('--feature_spec_out', type=str, default='feature_spec.yaml',
help='Name of the output feature specification file')
parser.add_argument('--chunk_size', type=int, default=65536)
return parser.parse_args()
def main():
args = parse_args()
args_output = args.output
args_input = args.input
args_feature_spec_in = args.feature_spec_in
args_feature_spec_out = args.feature_spec_out
batch_size = args.chunk_size
fspec_in_path = os.path.join(args_input, args_feature_spec_in)
fspec_in = FeatureSpec.from_yaml(fspec_in_path)
input_label_feature_name = fspec_in.channel_spec[LABEL_CHANNEL][0]
input_numerical_features_list = fspec_in.channel_spec[NUMERICAL_CHANNEL]
input_categorical_features_list = fspec_in.channel_spec[CATEGORICAL_CHANNEL]
# Do a pass to establish the cardinalities: they influence the type we save the dataset as
found_cardinalities = defaultdict(lambda: 0)
for mapping_name, mapping in fspec_in.source_spec.items():
df_iterators = []
for chunk in mapping:
assert chunk['type'] == 'csv', "Only csv files supported in this transcoder"
assert len(chunk['files']) == 1, "Only one file per chunk supported in this transcoder"
path_to_load = os.path.join(fspec_in.base_directory, chunk['files'][0])
chunk_iterator = pd.read_csv(path_to_load, header=None, chunksize=batch_size, names=chunk['features'])
df_iterators.append(chunk_iterator)
zipped = zip(*df_iterators)
for chunks in zipped:
mapping_df = pd.concat(chunks, axis=1)
for feature in input_categorical_features_list:
mapping_cardinality = mapping_df[feature].max() + 1
previous_cardinality = found_cardinalities[feature]
found_cardinalities[feature] = max(previous_cardinality, mapping_cardinality)
for feature in input_categorical_features_list:
declared_cardinality = fspec_in.feature_spec[feature][CARDINALITY_SELECTOR]
if declared_cardinality == 'auto':
pass
else:
assert int(declared_cardinality) >= found_cardinalities[feature]
found_cardinalities[feature] = int(declared_cardinality)
categorical_cardinalities = [found_cardinalities[f] for f in input_categorical_features_list]
number_of_numerical_features = fspec_in.get_number_of_numerical_features()
fspec_out = FeatureSpec.get_default_feature_spec(number_of_numerical_features=number_of_numerical_features,
categorical_feature_cardinalities=categorical_cardinalities)
fspec_out.base_directory = args.output
for mapping_name, mapping in fspec_in.source_spec.items():
# open files for outputting
label_path, numerical_path, categorical_paths = fspec_out.get_mapping_paths(mapping_name)
for path in [label_path, numerical_path, *categorical_paths.values()]:
os.makedirs(os.path.dirname(path), exist_ok=True)
output_categorical_features_list = fspec_out.get_categorical_feature_names()
numerical_f = open(numerical_path, "ab+")
label_f = open(label_path, "ab+")
categorical_fs = [open(categorical_paths[name], "ab+") for name in output_categorical_features_list]
categorical_feature_types = [get_categorical_feature_type(card) for card in categorical_cardinalities]
df_iterators = []
for chunk in mapping:
# We checked earlier it's a single file chunk
path_to_load = os.path.join(fspec_in.base_directory, chunk['files'][0])
chunk_iterator = pd.read_csv(path_to_load, header=None, chunksize=batch_size, names=chunk['features'])
df_iterators.append(chunk_iterator)
zipped = zip(*df_iterators)
for chunks in zipped:
mapping_df = pd.concat(chunks, axis=1) # This takes care of making sure feature names are unique
# Choose the right columns
numerical_df = mapping_df[input_numerical_features_list]
categorical_df = mapping_df[input_categorical_features_list]
label_df = mapping_df[[input_label_feature_name]]
numerical = torch.tensor(numerical_df.values)
label = torch.tensor(label_df.values)
categorical = torch.tensor(categorical_df.values)
# Append them to the binary files
numerical_f.write(numerical.to(torch.float16).cpu().numpy().tobytes())
label_f.write(label.to(torch.bool).cpu().numpy().tobytes())
for cat_idx, cat_feature_type in enumerate(categorical_feature_types):
categorical_fs[cat_idx].write(
categorical[:, cat_idx].cpu().numpy().astype(cat_feature_type).tobytes())
feature_spec_save_path = os.path.join(args_output, args_feature_spec_out)
fspec_out.to_yaml(output_path=feature_spec_save_path)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/scripts/transcode.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from dlrm.data.datasets import SyntheticDataset
from dlrm.data.utils import write_dataset_to_disk
from dlrm.data.feature_spec import FeatureSpec
from absl import app, flags
FLAGS = flags.FLAGS
flags.DEFINE_integer("synthetic_dataset_num_entries",
default=int(32768 * 1024), # 1024 batches for single-GPU training by default
help="Number of samples per epoch for the synthetic dataset")
flags.DEFINE_integer("num_numerical_features", default=13,
help="Number of numerical features in the dataset. Defaults to 13 for the Criteo Terabyte Dataset")
flags.DEFINE_list("synthetic_dataset_table_sizes", default=','.join(26 * [str(10 ** 5)]),
help="Cardinality of each categorical feature")
flags.DEFINE_string("feature_spec", default=None,
help="Feature specification file describing the desired dataset."
"Only feature_spec and channel_spec sections are required and used."
"Overrides num_numerical_features and synthetic_dataset_table_sizes")
flags.DEFINE_string("synthetic_dataset_dir", default="/tmp/dlrm_synthetic_data",
help="Destination of the saved synthetic dataset")
flags.DEFINE_integer("seed", default=12345, help="Set a seed for generating synthetic data")
def main(argv):
torch.manual_seed(FLAGS.seed)
number_of_entries = FLAGS.synthetic_dataset_num_entries
if FLAGS.feature_spec is not None:
fspec = FeatureSpec.from_yaml(FLAGS.feature_spec)
else:
cardinalities = [int(s) for s in FLAGS.synthetic_dataset_table_sizes]
fspec = FeatureSpec.get_default_feature_spec(number_of_numerical_features=FLAGS.num_numerical_features,
categorical_feature_cardinalities=cardinalities)
fspec.base_directory = FLAGS.synthetic_dataset_dir
fspec.check_feature_spec()
number_of_numerical_features = fspec.get_number_of_numerical_features()
categorical_feature_sizes = fspec.get_categorical_sizes()
train_dataset = SyntheticDataset(
num_entries=number_of_entries,
numerical_features=number_of_numerical_features,
categorical_feature_sizes=categorical_feature_sizes
)
test_dataset = SyntheticDataset(
num_entries=number_of_entries,
numerical_features=number_of_numerical_features,
categorical_feature_sizes=categorical_feature_sizes
)
write_dataset_to_disk(
dataset_train=train_dataset,
dataset_test=test_dataset,
feature_spec=fspec
)
if __name__ == '__main__':
app.run(main)
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/scripts/prepare_synthetic_dataset.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import time
from collections import defaultdict, deque
import dllogger
import torch
import torch.distributed as dist
from dlrm.utils.distributed import is_dist_avail_and_initialized
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item() if len(self.deque) else 0
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count if self.count else 0
@property
def max(self):
return max(self.deque) if len(self.deque) else 0
@property
def value(self):
return self.deque[-1] if len(self.deque) else None
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def print(self, header=None):
if not header:
header = ''
print_str = header
for name, meter in self.meters.items():
print_str += f" {name}: {meter}"
print(print_str)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target[None])
res = []
for k in topk:
correct_k = correct[:k].flatten().sum(dtype=torch.float32)
res.append(correct_k * (100.0 / batch_size))
return res
def lr_step(optim, num_warmup_iter, current_step, base_lr, warmup_factor, decay_steps=0, decay_start_step=None):
if decay_start_step is None:
decay_start_step = num_warmup_iter
new_lr = base_lr
if decay_start_step < num_warmup_iter:
raise ValueError('Learning rate warmup must finish before decay starts')
if current_step <= num_warmup_iter:
warmup_step = base_lr / (num_warmup_iter * (2 ** warmup_factor))
new_lr = base_lr - (num_warmup_iter - current_step) * warmup_step
steps_since_decay_start = current_step - decay_start_step
if decay_steps != 0 and steps_since_decay_start > 0:
already_decayed_steps = min(steps_since_decay_start, decay_steps)
new_lr = base_lr * ((decay_steps - already_decayed_steps) / decay_steps) ** 2
min_lr = 0.0000001
new_lr = max(min_lr, new_lr)
for param_group in optim.param_groups:
param_group['lr'] = new_lr
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def init_logging(log_path):
json_backend = dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=log_path)
stdout_backend = dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)
stdout_backend._metadata['best_auc'].update({'format': '0:.5f'})
stdout_backend._metadata['best_epoch'].update({'format': '0:.2f'})
stdout_backend._metadata['average_train_throughput'].update({'format': ':.2e'})
stdout_backend._metadata['average_test_throughput'].update({'format': ':.2e'})
stdout_backend._metadata['training_loss'].update({'format': '0:.5f'})
stdout_backend._metadata['best_validation_loss'].update({'format': '0:.5f'})
dllogger.init(backends=[json_backend, stdout_backend])
dllogger.metadata("best_auc", {"unit": None})
dllogger.metadata("mean_inference_latency_batch_1", {"unit": "s"})
dllogger.metadata("mean_inference_latency_batch_64", {"unit": "s"})
dllogger.metadata("mean_inference_latency_batch_4096", {"unit": "s"})
dllogger.metadata("average_train_throughput", {"unit": "samples/s"})
dllogger.metadata("mean_inference_throughput_batch_1", {"unit": "samples/s"})
dllogger.metadata("mean_inference_throughput_batch_64", {"unit": "samples/s"})
dllogger.metadata("mean_inference_throughput_batch_4096", {"unit": "samples/s"})
class StepTimer():
def __init__(self):
self._previous = None
self._new = None
self.measured = None
def click(self, synchronize=False):
self._previous = self._new
if synchronize:
torch.cuda.synchronize()
self._new = time.time()
if self._previous is not None:
self.measured = self._new - self._previous
class LearningRateScheduler:
"""Polynomial learning rate decay for multiple optimizers and multiple param groups
Args:
optimizers (list): optimizers for which to apply the learning rate changes
base_lrs (list): a nested list of base_lrs to use for each param_group of each optimizer
warmup_steps (int): number of linear warmup steps to perform at the beginning of training
warmup_factor (int)
decay_steps (int): number of steps over which to apply poly LR decay from base_lr to 0
decay_start_step (int): the optimization step at which to start decaying the learning rate
if None will start the decay immediately after
decay_power (float): polynomial learning rate decay power
end_lr_factor (float): for each optimizer and param group:
lr = max(current_lr_factor, end_lr_factor) * base_lr
Example:
lr_scheduler = LearningRateScheduler(optimizers=[optimizer], base_lrs=[[lr]],
warmup_steps=100, warmup_factor=0,
decay_start_step=1000, decay_steps=2000,
decay_power=2, end_lr_factor=1e-6)
for batch in data_loader:
lr_scheduler.step()
# forward, backward, weight update
"""
def __init__(self, optimizers, base_lrs, warmup_steps, warmup_factor,
decay_steps, decay_start_step, decay_power=2, end_lr_factor=0):
self.current_step = 0
self.optimizers = optimizers
self.base_lrs = base_lrs
self.warmup_steps = warmup_steps
self.warmup_factor = warmup_factor
self.decay_steps = decay_steps
self.decay_start_step = decay_start_step
self.decay_power = decay_power
self.end_lr_factor = end_lr_factor
self.decay_end_step = self.decay_start_step + self.decay_steps
if self.decay_start_step < self.warmup_steps:
raise ValueError('Learning rate warmup must finish before decay starts')
def _compute_lr_factor(self):
lr_factor = 1
if self.current_step <= self.warmup_steps:
warmup_step = 1 / (self.warmup_steps * (2 ** self.warmup_factor))
lr_factor = 1 - (self.warmup_steps - self.current_step) * warmup_step
elif self.decay_start_step < self.current_step <= self.decay_end_step:
lr_factor = ((self.decay_end_step - self.current_step) / self.decay_steps) ** self.decay_power
lr_factor = max(lr_factor, self.end_lr_factor)
elif self.current_step > self.decay_end_step:
lr_factor = self.end_lr_factor
return lr_factor
def step(self):
self.current_step += 1
lr_factor = self._compute_lr_factor()
for optim, base_lrs in zip(self.optimizers, self.base_lrs):
for group_id, base_lr in enumerate(base_lrs):
optim.param_groups[group_id]['lr'] = base_lr * lr_factor
def roc_auc_score(y_true, y_score):
"""ROC AUC score in PyTorch
Args:
y_true (Tensor): binary ground-truth labels
y_score (Tensor): predicted scores (higher means more likely positive)
"""
device = y_true.device
y_true.squeeze_()
y_score.squeeze_()
if y_true.shape != y_score.shape:
raise TypeError(f"Shape of y_true and y_score must match. Got {y_true.shape()} and {y_score.shape()}.")
desc_score_indices = torch.argsort(y_score, descending=True)
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
distinct_value_indices = torch.nonzero(y_score[1:] - y_score[:-1], as_tuple=False).squeeze()
threshold_idxs = torch.cat([distinct_value_indices, torch.tensor([y_true.numel() - 1], device=device)])
tps = torch.cumsum(y_true, dim=0)[threshold_idxs]
fps = 1 + threshold_idxs - tps
tps = torch.cat([torch.zeros(1, device=device), tps])
fps = torch.cat([torch.zeros(1, device=device), fps])
fpr = fps / fps[-1]
tpr = tps / tps[-1]
area = torch.trapz(tpr, fpr).item()
return area
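# Illustrative CPU sanity check of the pure-PyTorch ROC AUC above: a perfectly separating
# score should yield an AUC of 1.0.
if __name__ == "__main__":
    labels = torch.tensor([0., 0., 1., 1.])
    scores = torch.tensor([0.1, 0.2, 0.8, 0.9])
    print(roc_auc_score(labels, scores))  # 1.0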
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/scripts/utils.py |
from dlrm.data.defaults import NUMERICAL_CHANNEL, LABEL_CHANNEL
from dlrm.data.feature_spec import FeatureSpec
from argparse import ArgumentParser
import pandas as pd
import os
import numpy as np
def parse_args():
parser = ArgumentParser()
parser.add_argument('--feature_spec_in', type=str, default='feature_spec.yaml',
help='Name of the input feature specification file')
parser.add_argument('--output', type=str, default='/data')
parser.add_argument('--size', type=int, default=1000)
return parser.parse_args()
def main():
args = parse_args()
dataset_size = args.size
fspec_in = FeatureSpec.from_yaml(args.feature_spec_in)
fspec_in.base_directory = args.output
cat_cardinalities = fspec_in.get_categorical_sizes()
cat_names = fspec_in.get_categorical_feature_names()
cardinalities = {name: cardinality for name, cardinality in zip(cat_names, cat_cardinalities)}
input_label_feature_name = fspec_in.channel_spec[LABEL_CHANNEL][0]
numerical_names_set = set(fspec_in.channel_spec[NUMERICAL_CHANNEL])
for mapping_name, mapping in fspec_in.source_spec.items():
for chunk in mapping:
assert chunk['type'] == 'csv', "Only csv files supported in this generator"
assert len(chunk['files']) == 1, "Only one file per chunk supported in this transcoder"
path_to_save = os.path.join(fspec_in.base_directory, chunk['files'][0])
data = []
for name in chunk['features']:
if name == input_label_feature_name:
data.append(np.random.randint(0, 2, size=dataset_size))  # random binary click labels
elif name in numerical_names_set:
data.append(np.random.rand(dataset_size))
else:
local_cardinality = cardinalities[name]
data.append(np.random.randint(0, local_cardinality, size=dataset_size))
values = np.stack(data).T
to_save = pd.DataFrame(values, columns=chunk['features'])
os.makedirs(os.path.dirname(path_to_save), exist_ok=True)
to_save.to_csv(path_to_save, index=False, header=False)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/scripts/gen_csv.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import sys
from absl import app, flags, logging
from apex import optimizers as apex_optim
from dlrm.data.feature_spec import FeatureSpec
from dlrm.model.distributed import DistributedDlrm
from dlrm.utils import distributed as dist
from dlrm.utils.checkpointing.distributed import make_distributed_checkpoint_writer, make_distributed_checkpoint_loader
from dlrm.utils.distributed import get_gpu_batch_sizes, get_device_mapping, is_main_process, is_distributed
import datetime
from time import time
import dllogger
import numpy as np
import torch
import dlrm.scripts.utils as utils
from dlrm.data.data_loader import get_data_loaders
from dlrm.data.utils import prefetcher, get_embedding_sizes
FLAGS = flags.FLAGS
# Basic run settings
flags.DEFINE_enum("mode", default='train', enum_values=['train', 'test', 'inference_benchmark'],
help="Select task to be performed")
flags.DEFINE_integer("seed", 12345, "Random seed")
# Training flags
flags.DEFINE_integer("batch_size", 65536, "Batch size used for training")
flags.DEFINE_integer("test_batch_size", 65536, "Batch size used for testing/validation")
flags.DEFINE_float("lr", 24, "Base learning rate")
flags.DEFINE_integer("epochs", 1, "Number of epochs to train for")
flags.DEFINE_integer("max_steps", None, "Stop training after doing this many optimization steps")
# Learning rate schedule flags
flags.DEFINE_integer("warmup_factor", 0, "Learning rate warmup factor. Must be a non-negative integer")
flags.DEFINE_integer("warmup_steps", 8000, "Number of warmup optimization steps")
flags.DEFINE_integer("decay_steps", 24000,
"Polynomial learning rate decay steps. If equal to 0 will not do any decaying")
flags.DEFINE_integer("decay_start_step", 48000,
"Optimization step after which to start decaying the learning rate, "
"if None will start decaying right after the warmup phase is completed")
flags.DEFINE_integer("decay_power", 2, "Polynomial learning rate decay power")
flags.DEFINE_float("decay_end_lr", 0, "LR after the decay ends")
# Model configuration
flags.DEFINE_enum("embedding_type", "custom_cuda",
["joint", "custom_cuda", "multi_table", "joint_sparse", "joint_fused"],
help="The type of the embedding operation to use")
flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of embedding space for categorical features")
flags.DEFINE_list("top_mlp_sizes", [1024, 1024, 512, 256, 1], "Linear layer sizes for the top MLP")
flags.DEFINE_list("bottom_mlp_sizes", [512, 256, 128], "Linear layer sizes for the bottom MLP")
flags.DEFINE_enum("interaction_op", default="cuda_dot", enum_values=["cuda_dot", "dot", "cat"],
help="Type of interaction operation to perform.")
# Data configuration
flags.DEFINE_string("dataset", None, "Path to dataset directory")
flags.DEFINE_string("feature_spec", default="feature_spec.yaml",
help="Name of the feature spec file in the dataset directory")
flags.DEFINE_enum("dataset_type", default="parametric", enum_values=['synthetic_gpu', 'parametric'],
help='The type of the dataset to use')
flags.DEFINE_boolean("shuffle_batch_order", False, "Read batch in train dataset by random order", short_name="shuffle")
flags.DEFINE_integer("max_table_size", None,
"Maximum number of rows per embedding table, "
"by default equal to the number of unique values for each categorical variable")
flags.DEFINE_boolean("hash_indices", False,
"If True the model will compute `index := index % table size` "
"to ensure that the indices match table sizes")
# Synthetic data configuration
flags.DEFINE_integer("synthetic_dataset_num_entries", default=int(2 ** 15 * 1024),
help="Number of samples per epoch for the synthetic dataset")
flags.DEFINE_list("synthetic_dataset_table_sizes", default=','.join(26 * [str(10 ** 5)]),
help="Cardinalities of variables to use with the synthetic dataset.")
flags.DEFINE_integer("synthetic_dataset_numerical_features", default='13',
help="Number of numerical features to use with the synthetic dataset")
flags.DEFINE_boolean("synthetic_dataset_use_feature_spec", default=False,
help="Create a temporary synthetic dataset based on a real one. "
"Uses --dataset and --feature_spec"
"Overrides synthetic_dataset_table_sizes and synthetic_dataset_numerical_features."
"--synthetic_dataset_num_entries is still required")
# Checkpointing
flags.DEFINE_string("load_checkpoint_path", None, "Path from which to load a checkpoint")
flags.DEFINE_string("save_checkpoint_path", None, "Path to which to save the training checkpoints")
# Saving and logging flags
flags.DEFINE_string("log_path", "./log.json", "Destination for the log file with various results and statistics")
flags.DEFINE_integer("test_freq", None,
"Number of optimization steps between validations. If None will test after each epoch")
flags.DEFINE_float("test_after", 0, "Don't test the model unless this many epochs has been completed")
flags.DEFINE_integer("print_freq", 200, "Number of optimizations steps between printing training status to stdout")
flags.DEFINE_integer("benchmark_warmup_steps", 0,
"Number of initial iterations to exclude from throughput measurements")
# Machine setting flags
flags.DEFINE_string("base_device", "cuda", "Device to run the majority of the model operations")
flags.DEFINE_boolean("amp", False, "If True the script will use Automatic Mixed Precision")
flags.DEFINE_boolean("cuda_graphs", False, "Use CUDA Graphs")
# inference benchmark
flags.DEFINE_list("inference_benchmark_batch_sizes", default=[1, 64, 4096],
help="Batch sizes for inference throughput and latency measurements")
flags.DEFINE_integer("inference_benchmark_steps", 200,
"Number of steps for measuring inference latency and throughput")
# Miscellaneous
flags.DEFINE_float("auc_threshold", None, "Stop the training after achieving this AUC")
flags.DEFINE_boolean("optimized_mlp", True, "Use an optimized implementation of MLP from apex")
flags.DEFINE_enum("auc_device", default="GPU", enum_values=['GPU', 'CPU'],
help="Specifies where ROC AUC metric is calculated")
flags.DEFINE_string("backend", "nccl", "Backend to use for distributed training. Default nccl")
flags.DEFINE_boolean("bottom_features_ordered", False,
"Sort features from the bottom model, useful when using saved "
"checkpoint in different device configurations")
flags.DEFINE_boolean("freeze_mlps", False,
"For debug and benchmarking. Don't perform the weight update for MLPs.")
flags.DEFINE_boolean("freeze_embeddings", False,
"For debug and benchmarking. Don't perform the weight update for the embeddings.")
flags.DEFINE_boolean("Adam_embedding_optimizer", False, "Swaps embedding optimizer to Adam")
flags.DEFINE_boolean("Adam_MLP_optimizer", False, "Swaps MLP optimizer to Adam")
def validate_flags(cat_feature_count):
if FLAGS.max_table_size is not None and not FLAGS.hash_indices:
raise ValueError('Hash indices must be True when setting a max_table_size')
if FLAGS.base_device == 'cpu':
if FLAGS.embedding_type in ('joint_fused', 'joint_sparse'):
print('WARNING: CUDA joint embeddings are not supported on CPU')
FLAGS.embedding_type = 'joint'
if FLAGS.amp:
print('WARNING: Automatic mixed precision not supported on CPU')
FLAGS.amp = False
if FLAGS.optimized_mlp:
print('WARNING: Optimized MLP is not supported on CPU')
FLAGS.optimized_mlp = False
if FLAGS.embedding_type == 'custom_cuda':
if (not is_distributed()) and FLAGS.embedding_dim == 128 and cat_feature_count == 26:
FLAGS.embedding_type = 'joint_fused'
else:
FLAGS.embedding_type = 'joint_sparse'
if FLAGS.embedding_type == 'joint_fused' and FLAGS.embedding_dim != 128:
print('WARNING: Joint fused can be used only with embedding_dim=128. Changed embedding type to joint_sparse.')
FLAGS.embedding_type = 'joint_sparse'
if FLAGS.dataset is None and (FLAGS.dataset_type != 'synthetic_gpu' or
FLAGS.synthetic_dataset_use_feature_spec):
raise ValueError('Dataset argument has to specify a path to the dataset')
FLAGS.inference_benchmark_batch_sizes = [int(x) for x in FLAGS.inference_benchmark_batch_sizes]
FLAGS.top_mlp_sizes = [int(x) for x in FLAGS.top_mlp_sizes]
FLAGS.bottom_mlp_sizes = [int(x) for x in FLAGS.bottom_mlp_sizes]
# TODO check that bottom_mlp ends in embedding_dim size
def load_feature_spec(flags):
if flags.dataset_type == 'synthetic_gpu' and not flags.synthetic_dataset_use_feature_spec:
num_numerical = flags.synthetic_dataset_numerical_features
categorical_sizes = [int(s) for s in FLAGS.synthetic_dataset_table_sizes]
return FeatureSpec.get_default_feature_spec(number_of_numerical_features=num_numerical,
categorical_feature_cardinalities=categorical_sizes)
fspec_path = os.path.join(flags.dataset, flags.feature_spec)
return FeatureSpec.from_yaml(fspec_path)
class CudaGraphWrapper:
def __init__(self, model, train_step, parallelize,
zero_grad, cuda_graphs=False, warmup_steps=20):
self.cuda_graphs = cuda_graphs
self.warmup_iters = warmup_steps
self.graph = None
self.stream = None
self.static_args = None
self.model = model
self._parallelize = parallelize
self._train_step = train_step
self._zero_grad = zero_grad
self.loss = None
self.step = -1
if cuda_graphs:
self.stream = torch.cuda.Stream()
else:
# if not using graphs, parallelize the model immediately
# otherwise do this in the warmup phase under the graph stream
self.model = self._parallelize(self.model)
self.stream = torch.cuda.default_stream()
def _copy_input_data(self, *train_step_args):
if len(train_step_args) != len(self.static_args):
raise ValueError(f'Expected {len(self.static_args)} arguments to train step. '
f'Got: {len(train_step_args)}')
for data, placeholder in zip(train_step_args, self.static_args):
if placeholder is None:
continue
placeholder.copy_(data)
def _cuda_graph_capture(self, *train_step_args):
self._copy_input_data(*train_step_args)
self.graph = torch.cuda.CUDAGraph()
self._zero_grad(self.model)
with torch.cuda.graph(self.graph, stream=self.stream):
self.loss = self._train_step(self.model, *self.static_args)
return self.loss
def _cuda_graph_replay(self, *train_step_args):
self._copy_input_data(*train_step_args)
self.graph.replay()
def _warmup_step(self, *train_step_args):
with torch.cuda.stream(self.stream):
if self.step == 0:
self.model = self._parallelize(self.model)
self.static_args = list(train_step_args)
else:
self._copy_input_data(*train_step_args)
self._zero_grad(self.model)
self.loss = self._train_step(self.model, *self.static_args)
return self.loss
def train_step(self, *train_step_args):
self.step += 1
if not self.cuda_graphs:
self._zero_grad(self.model)
self.loss = self._train_step(self.model, *train_step_args)
return self.loss
if self.step == 0:
self.stream.wait_stream(torch.cuda.current_stream())
if self.step < self.warmup_iters:
return self._warmup_step(*train_step_args)
if self.graph is None:
torch.cuda.synchronize()
self._cuda_graph_capture(*train_step_args)
self._cuda_graph_replay(*train_step_args)
return self.loss
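# inference_benchmark dispatches between an eager path and a CUDA-graph path depending on the
# cuda_graphs flag; both variants return per-step latencies and print the measured AUC.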
def inference_benchmark(*args, cuda_graphs=False, **kwargs):
if cuda_graphs:
return inference_benchmark_graphed(*args, **kwargs)
else:
return inference_benchmark_nongraphed(*args, **kwargs)
def inference_benchmark_nongraphed(model, data_loader, num_batches=100):
model.eval()
base_device = FLAGS.base_device
latencies = []
y_true = []
y_score = []
with torch.no_grad():
for step, (numerical_features, categorical_features, click) in enumerate(data_loader):
if step > num_batches:
break
step_start_time = time()
numerical_features = numerical_features.to(base_device)
if FLAGS.amp:
numerical_features = numerical_features.half()
categorical_features = categorical_features.to(device=base_device, dtype=torch.int64)
inference_result = model(numerical_features, categorical_features).squeeze()
torch.cuda.synchronize()
step_time = time() - step_start_time
if step >= FLAGS.benchmark_warmup_steps:
latencies.append(step_time)
y_true.append(click)
y_score.append(inference_result.reshape([-1]).clone())
y_true = torch.cat(y_true)
y_score = torch.sigmoid(torch.cat(y_score)).float()
auc = utils.roc_auc_score(y_true, y_score)
print('auc: ', auc)
return latencies
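# The graphed variant below allocates static input tensors once, warms up on a side stream,
# captures a single forward pass into a CUDA graph, and then times each batch as a copy into the
# static tensors followed by a graph replay.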
def inference_benchmark_graphed(model, data_loader, num_batches=100):
model.eval()
base_device = FLAGS.base_device
latencies = []
data_iter = iter(data_loader)
numerical, categorical, _ = next(data_iter)
# Warmup before capture
s = torch.cuda.Stream()
static_numerical = numerical.to(base_device)
static_categorical = categorical.to(device=base_device, dtype=torch.int64)
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
for i in range(10):
if FLAGS.amp:
numerical = static_numerical.half()
else:
numerical = static_numerical
inference_result = model(numerical, static_categorical).squeeze()
torch.cuda.synchronize()
# Graph capture
graph = torch.cuda.CUDAGraph()
with torch.cuda.graph(graph):
if FLAGS.amp:
numerical = static_numerical.half()
else:
numerical = static_numerical
inference_result = model(numerical, static_categorical).squeeze()
torch.cuda.synchronize()
# Inference
y_true = []
y_score = []
with torch.no_grad():
for step, (numerical_features, categorical_features, click) in enumerate(data_loader):
if step > num_batches:
break
torch.cuda.synchronize()
step_start_time = time()
numerical_features = numerical_features.to(base_device)
categorical_features = categorical_features.to(device=base_device, dtype=torch.int64)
static_categorical.copy_(categorical_features)
static_numerical.copy_(numerical_features)
graph.replay()
torch.cuda.synchronize()
step_time = time() - step_start_time
if step >= FLAGS.benchmark_warmup_steps:
latencies.append(step_time)
y_true.append(click)
y_score.append(inference_result.reshape([-1]).clone())
y_true = torch.cat(y_true)
y_score = torch.sigmoid(torch.cat(y_score)).float()
auc = utils.roc_auc_score(y_true, y_score)
print('auc: ', auc)
return latencies
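# main() wires up hybrid-parallel DLRM training: embedding tables are model-parallel (distributed
# across GPUs according to device_mapping) while the top MLP is data-parallel via
# DistributedDataParallel, with separate optimizers for MLP and embedding parameters.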
def main(argv):
torch.manual_seed(FLAGS.seed)
use_gpu = "cpu" not in FLAGS.base_device.lower()
rank, world_size, gpu = dist.init_distributed_mode(backend=FLAGS.backend, use_gpu=use_gpu)
device = FLAGS.base_device
feature_spec = load_feature_spec(FLAGS)
cat_feature_count = len(get_embedding_sizes(feature_spec, None))
validate_flags(cat_feature_count)
if is_main_process():
utils.init_logging(log_path=FLAGS.log_path)
dllogger.log(data=FLAGS.flag_values_dict(), step='PARAMETER')
FLAGS.set_default("test_batch_size", FLAGS.test_batch_size // world_size * world_size)
feature_spec = load_feature_spec(FLAGS)
world_embedding_sizes = get_embedding_sizes(feature_spec, max_table_size=FLAGS.max_table_size)
world_categorical_feature_sizes = np.asarray(world_embedding_sizes)
device_mapping = get_device_mapping(world_embedding_sizes, num_gpus=world_size)
batch_sizes_per_gpu = get_gpu_batch_sizes(FLAGS.batch_size, num_gpus=world_size)
batch_indices = tuple(np.cumsum([0] + list(batch_sizes_per_gpu)))  # prefix sums of per-GPU batch sizes, used to slice the global batch for this rank
# Embedding sizes for each GPU
categorical_feature_sizes = world_categorical_feature_sizes[device_mapping['embedding'][rank]].tolist()
num_numerical_features = feature_spec.get_number_of_numerical_features()
bottom_mlp_sizes = FLAGS.bottom_mlp_sizes if rank == device_mapping['bottom_mlp'] else None
data_loader_train, data_loader_test = get_data_loaders(FLAGS, device_mapping=device_mapping,
feature_spec=feature_spec)
model = DistributedDlrm(
vectors_per_gpu=device_mapping['vectors_per_gpu'],
embedding_device_mapping=device_mapping['embedding'],
embedding_type=FLAGS.embedding_type,
embedding_dim=FLAGS.embedding_dim,
world_num_categorical_features=len(world_categorical_feature_sizes),
categorical_feature_sizes=categorical_feature_sizes,
num_numerical_features=num_numerical_features,
hash_indices=FLAGS.hash_indices,
bottom_mlp_sizes=bottom_mlp_sizes,
top_mlp_sizes=FLAGS.top_mlp_sizes,
interaction_op=FLAGS.interaction_op,
fp16=FLAGS.amp,
use_cpp_mlp=FLAGS.optimized_mlp,
bottom_features_ordered=FLAGS.bottom_features_ordered,
device=device
)
dist.setup_distributed_print(is_main_process())
# DDP averages gradients via allreduce(mean), which does not apply to the model-parallel bottom model.
# Compensate by scaling its learning rate accordingly.
if FLAGS.Adam_embedding_optimizer:
embedding_model_parallel_lr = FLAGS.lr
else:
embedding_model_parallel_lr = FLAGS.lr / world_size
if FLAGS.Adam_MLP_optimizer:
MLP_model_parallel_lr = FLAGS.lr
else:
MLP_model_parallel_lr = FLAGS.lr / world_size
data_parallel_lr = FLAGS.lr
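# Illustrative example (numbers are not from the original code): with lr=24, 8 GPUs and the default
# SGD optimizers, the data-parallel top MLP keeps lr=24 while the model-parallel bottom MLP and
# embeddings use lr=24/8=3; with the Adam optimizers no learning-rate scaling is applied.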
if is_main_process():
mlp_params = [
{'params': list(model.top_model.parameters()), 'lr': data_parallel_lr},
{'params': list(model.bottom_model.mlp.parameters()), 'lr': MLP_model_parallel_lr}
]
mlp_lrs = [data_parallel_lr, MLP_model_parallel_lr]
else:
mlp_params = [
{'params': list(model.top_model.parameters()), 'lr': data_parallel_lr}
]
mlp_lrs = [data_parallel_lr]
if FLAGS.Adam_MLP_optimizer:
mlp_optimizer = apex_optim.FusedAdam(mlp_params)
else:
mlp_optimizer = apex_optim.FusedSGD(mlp_params)
embedding_params = [{
'params': list(model.bottom_model.embeddings.parameters()),
'lr': embedding_model_parallel_lr
}]
embedding_lrs = [embedding_model_parallel_lr]
if FLAGS.Adam_embedding_optimizer:
embedding_optimizer = torch.optim.SparseAdam(embedding_params)
else:
embedding_optimizer = torch.optim.SGD(embedding_params)
checkpoint_writer = make_distributed_checkpoint_writer(
device_mapping=device_mapping,
rank=rank,
is_main_process=is_main_process(),
config=FLAGS.flag_values_dict()
)
checkpoint_loader = make_distributed_checkpoint_loader(device_mapping=device_mapping, rank=rank)
if FLAGS.load_checkpoint_path:
checkpoint_loader.load_checkpoint(model, FLAGS.load_checkpoint_path)
model.to(device)
scaler = torch.cuda.amp.GradScaler(enabled=FLAGS.amp, growth_interval=int(1e9))
def parallelize(model):
if world_size <= 1:
return model
model.top_model = torch.nn.parallel.DistributedDataParallel(model.top_model)
return model
if FLAGS.mode == 'test':
model = parallelize(model)
auc, valid_loss = dist_evaluate(model, data_loader_test)
results = {'best_auc': auc, 'best_validation_loss': valid_loss}
if is_main_process():
dllogger.log(data=results, step=tuple())
return
elif FLAGS.mode == 'inference_benchmark':
if world_size > 1:
raise ValueError('Inference benchmark only supports single-GPU mode.')
results = {}
if FLAGS.amp:
# can use pure FP16 for inference
model = model.half()
for batch_size in FLAGS.inference_benchmark_batch_sizes:
FLAGS.test_batch_size = batch_size
_, data_loader_test = get_data_loaders(FLAGS, device_mapping=device_mapping, feature_spec=feature_spec)
latencies = inference_benchmark(model=model, data_loader=data_loader_test,
num_batches=FLAGS.inference_benchmark_steps,
cuda_graphs=FLAGS.cuda_graphs)
# drop the first 10 as a warmup
latencies = latencies[10:]
mean_latency = np.mean(latencies)
mean_inference_throughput = batch_size / mean_latency
subresult = {f'mean_inference_latency_batch_{batch_size}': mean_latency,
f'mean_inference_throughput_batch_{batch_size}': mean_inference_throughput}
results.update(subresult)
if is_main_process():
dllogger.log(data=results, step=tuple())
return
if FLAGS.save_checkpoint_path and not FLAGS.bottom_features_ordered and is_main_process():
logging.warning("Saving checkpoint without --bottom_features_ordered flag will result in "
"a device-order dependent model. Consider using --bottom_features_ordered "
"if you plan to load the checkpoint in different device configurations.")
loss_fn = torch.nn.BCEWithLogitsLoss(reduction="mean")
# By default, print progress once every 16384 * 2000 samples
default_print_freq = 16384 * 2000 // FLAGS.batch_size
print_freq = default_print_freq if FLAGS.print_freq is None else FLAGS.print_freq
# last one will be dropped in the training loop
steps_per_epoch = len(data_loader_train) - 1
test_freq = FLAGS.test_freq if FLAGS.test_freq is not None else steps_per_epoch - 2
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('loss', utils.SmoothedValue(window_size=1, fmt='{avg:.8f}'))
metric_logger.add_meter('step_time', utils.SmoothedValue(window_size=1, fmt='{avg:.6f}'))
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
# Accumulating loss on GPU to avoid memcpyD2H every step
moving_loss = torch.zeros(1, device=device)
lr_scheduler = utils.LearningRateScheduler(optimizers=[mlp_optimizer, embedding_optimizer],
base_lrs=[mlp_lrs, embedding_lrs],
warmup_steps=FLAGS.warmup_steps,
warmup_factor=FLAGS.warmup_factor,
decay_start_step=FLAGS.decay_start_step,
decay_steps=FLAGS.decay_steps,
decay_power=FLAGS.decay_power,
end_lr_factor=FLAGS.decay_end_lr / FLAGS.lr)
def zero_grad(model):
if FLAGS.Adam_embedding_optimizer or FLAGS.Adam_MLP_optimizer:
model.zero_grad()
else:
# We don't need to accumulate gradients. Setting grads to None is faster than optimizer.zero_grad()
for param_group in itertools.chain(embedding_optimizer.param_groups, mlp_optimizer.param_groups):
for param in param_group['params']:
param.grad = None
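# forward_backward runs the model under torch.cuda.amp.autocast, computes BCE-with-logits loss on
# this rank's slice of the labels (selected via batch_indices), and backpropagates through the
# GradScaler.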
def forward_backward(model, *args):
numerical_features, categorical_features, click = args
with torch.cuda.amp.autocast(enabled=FLAGS.amp):
output = model(numerical_features, categorical_features, batch_sizes_per_gpu).squeeze()
loss = loss_fn(output, click[batch_indices[rank]: batch_indices[rank + 1]])
scaler.scale(loss).backward()
return loss
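# weight_update steps the MLP optimizer through the GradScaler, whereas the embedding optimizer is
# unscaled manually and stepped directly; with the Adam variants, the model-parallel gradients are
# divided by world_size instead of dividing the learning rate.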
def weight_update():
if not FLAGS.freeze_mlps:
if FLAGS.Adam_MLP_optimizer:
scale_MLP_gradients(mlp_optimizer, world_size)
scaler.step(mlp_optimizer)
if not FLAGS.freeze_embeddings:
if FLAGS.Adam_embedding_optimizer:
scale_embeddings_gradients(embedding_optimizer, world_size)
scaler.unscale_(embedding_optimizer)
embedding_optimizer.step()
scaler.update()
trainer = CudaGraphWrapper(model, forward_backward, parallelize, zero_grad,
cuda_graphs=FLAGS.cuda_graphs)
data_stream = torch.cuda.Stream()
timer = utils.StepTimer()
best_validation_loss = 1e6
best_auc = 0
best_epoch = 0
start_time = time()
for epoch in range(FLAGS.epochs):
epoch_start_time = time()
batch_iter = prefetcher(iter(data_loader_train), data_stream)
for step in range(len(data_loader_train)):
numerical_features, categorical_features, click = next(batch_iter)
timer.click(synchronize=(device == 'cuda'))
global_step = steps_per_epoch * epoch + step
if FLAGS.max_steps and global_step > FLAGS.max_steps:
print(f"Reached max global steps of {FLAGS.max_steps}. Stopping.")
break
# One of the batches will be smaller because the dataset size
# isn't necessarily a multiple of the batch size.
# TODO: verify that dropping this partial batch does not change training behavior.
if click.shape[0] != FLAGS.batch_size:
continue
lr_scheduler.step()
loss = trainer.train_step(numerical_features, categorical_features, click)
# need to wait for the gradients before the weight update
torch.cuda.current_stream().wait_stream(trainer.stream)
weight_update()
moving_loss += loss
if timer.measured is None:
# first iteration, no step time etc. to print
continue
if step == 0:
print(f"Started epoch {epoch}...")
elif step % print_freq == 0:
# Average across a print_freq period to reduce timing error; fully accurate timing would require
# a synchronize, which would slow training down.
# Only check for NaN every print_freq steps.
if torch.any(torch.isnan(loss)):
print('NaN loss encountered.')
break
if global_step < FLAGS.benchmark_warmup_steps:
metric_logger.update(
loss=moving_loss.item() / print_freq,
lr=mlp_optimizer.param_groups[0]["lr"])
else:
metric_logger.update(
step_time=timer.measured,
loss=moving_loss.item() / print_freq,
lr=mlp_optimizer.param_groups[0]["lr"])
eta_str = datetime.timedelta(seconds=int(metric_logger.step_time.global_avg * (steps_per_epoch - step)))
metric_logger.print(header=f"Epoch:[{epoch}/{FLAGS.epochs}] [{step}/{steps_per_epoch}] eta: {eta_str}")
moving_loss = 0.
if global_step % test_freq == 0 and global_step > 0 and global_step / steps_per_epoch >= FLAGS.test_after:
auc, validation_loss = dist_evaluate(trainer.model, data_loader_test)
if auc is None:
continue
print(f"Epoch {epoch} step {step}. auc {auc:.6f}")
stop_time = time()
if auc > best_auc:
best_auc = auc
best_epoch = epoch + ((step + 1) / steps_per_epoch)
if validation_loss < best_validation_loss:
best_validation_loss = validation_loss
if FLAGS.auc_threshold and auc >= FLAGS.auc_threshold:
run_time_s = int(stop_time - start_time)
print(f"Hit target accuracy AUC {FLAGS.auc_threshold} at epoch "
f"{global_step / steps_per_epoch:.2f} in {run_time_s}s. ")
sys.exit()
epoch_stop_time = time()
epoch_time_s = epoch_stop_time - epoch_start_time
print(f"Finished epoch {epoch} in {datetime.timedelta(seconds=int(epoch_time_s))}. ")
avg_throughput = FLAGS.batch_size / metric_logger.step_time.avg
if FLAGS.save_checkpoint_path:
checkpoint_writer.save_checkpoint(model, FLAGS.save_checkpoint_path, epoch, step)
results = {'best_auc': best_auc,
'best_validation_loss': best_validation_loss,
'training_loss' : metric_logger.meters['loss'].avg,
'best_epoch': best_epoch,
'average_train_throughput': avg_throughput}
if is_main_process():
dllogger.log(data=results, step=tuple())
def scale_MLP_gradients(mlp_optimizer: torch.optim.Optimizer, world_size: int):
for param_group in mlp_optimizer.param_groups[1:]: # Omitting top MLP
for param in param_group['params']:
param.grad.div_(world_size)
def scale_embeddings_gradients(embedding_optimizer: torch.optim.Optimizer, world_size: int):
for param_group in embedding_optimizer.param_groups:
for param in param_group['params']:
if param.grad is not None:
param.grad.div_(world_size)
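# dist_evaluate pads the last (smaller) test batch up to the full test batch size, all-gathers the
# per-rank outputs so every rank sees the whole batch, and computes AUC and BCE loss on the main
# process only.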
def dist_evaluate(model, data_loader):
"""Test distributed DLRM model
Args:
model (DistDLRM):
data_loader (torch.utils.data.DataLoader):
"""
model.eval()
device = FLAGS.base_device
world_size = dist.get_world_size()
batch_sizes_per_gpu = [FLAGS.test_batch_size // world_size for _ in range(world_size)]
test_batch_size = sum(batch_sizes_per_gpu)
if FLAGS.test_batch_size != test_batch_size:
print(f"Rounded test_batch_size to {test_batch_size}")
# The test batch size can be large, so make sure progress still gets printed
default_print_freq = max(524288 * 100 // test_batch_size, 1)
print_freq = default_print_freq if FLAGS.print_freq is None else FLAGS.print_freq
steps_per_epoch = len(data_loader)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('step_time', utils.SmoothedValue(window_size=1, fmt='{avg:.4f}'))
with torch.no_grad():
timer = utils.StepTimer()
# AUC could be computed incrementally per batch, but that is not implemented here, so gather all
# outputs and labels and compute AUC once. The y_true and y_score naming follows sklearn.
y_true = []
y_score = []
data_stream = torch.cuda.Stream()
batch_iter = prefetcher(iter(data_loader), data_stream)
loss_fn = torch.nn.BCELoss(reduction="mean")
timer.click(synchronize=(device=='cuda'))
for step in range(len(data_loader)):
numerical_features, categorical_features, click = next(batch_iter)
torch.cuda.synchronize()
last_batch_size = None
if click.shape[0] != test_batch_size: # last batch
last_batch_size = click.shape[0]
padding_size = test_batch_size - last_batch_size
if numerical_features is not None:
padding_numerical = torch.empty(
padding_size, numerical_features.shape[1],
device=numerical_features.device, dtype=numerical_features.dtype)
numerical_features = torch.cat((numerical_features, padding_numerical), dim=0)
if categorical_features is not None:
padding_categorical = torch.ones(
padding_size, categorical_features.shape[1],
device=categorical_features.device, dtype=categorical_features.dtype)
categorical_features = torch.cat((categorical_features, padding_categorical), dim=0)
with torch.cuda.amp.autocast(enabled=FLAGS.amp):
output = model(numerical_features, categorical_features, batch_sizes_per_gpu)
output = output.squeeze()
output = output.float()
if world_size > 1:
output_receive_buffer = torch.empty(test_batch_size, device=device)
torch.distributed.all_gather(list(output_receive_buffer.split(batch_sizes_per_gpu)), output)
output = output_receive_buffer
if last_batch_size is not None:
output = output[:last_batch_size]
if FLAGS.auc_device == "CPU":
click = click.cpu()
output = output.cpu()
y_true.append(click)
y_score.append(output)
timer.click(synchronize=(device == 'cuda'))
if timer.measured is not None:
metric_logger.update(step_time=timer.measured)
if step % print_freq == 0 and step > 0:
metric_logger.print(header=f"Test: [{step}/{steps_per_epoch}]")
if is_main_process():
y_true = torch.cat(y_true)
y_score = torch.sigmoid(torch.cat(y_score)).float()
auc = utils.roc_auc_score(y_true, y_score)
loss = loss_fn(y_score, y_true).item()
print(f'test loss: {loss:.8f}')
else:
auc = None
loss = None
if world_size > 1:
torch.distributed.barrier()
model.train()
return auc, loss
if __name__ == '__main__':
app.run(main)
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/scripts/main.py |
DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/model/__init__.py |
|
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence, Optional
import torch
from torch import nn
from dlrm.nn.factories import create_interaction
from dlrm.nn.parts import DlrmBottom, DlrmTop
from dlrm.utils import distributed as dist
class BottomToTop(torch.autograd.Function):
"""Switch from model parallel to data parallel
Wrap the communication of doing from bottom model in model parallel fashion to top model in data parallel
"""
@staticmethod
def forward(
ctx,
local_bottom_outputs: torch.Tensor,
batch_sizes_per_gpu: Sequence[int],
vector_dim: int,
vectors_per_gpu: Sequence[int],
feature_order: Optional[torch.Tensor] = None,
device_feature_order: Optional[torch.Tensor] = None
):
"""
Args:
ctx : Pytorch convention
local_bottom_outputs (Tensor): Concatenated output of bottom model
batch_sizes_per_gpu (Sequence[int]):
vector_dim (int):
vectors_per_gpu (Sequence[int]): Note, bottom MLP is considered as 1 vector
device_feature_order:
feature_order:
Returns:
slice_bottom_outputs (Tensor): Partial output from the bottom model to feed into the data-parallel top model
"""
rank = dist.get_rank()
ctx.world_size = torch.distributed.get_world_size()
ctx.batch_sizes_per_gpu = batch_sizes_per_gpu
ctx.vector_dim = vector_dim
ctx.vectors_per_gpu = vectors_per_gpu
ctx.feature_order = feature_order
ctx.device_feature_order = device_feature_order
# The buffer should not need to be zeroed out; if an uninitialized buffer affects accuracy, there is a bug elsewhere.
bottom_output_buffer = [torch.empty(
batch_sizes_per_gpu[rank], n * vector_dim,
device=local_bottom_outputs.device, dtype=local_bottom_outputs.dtype) for n in vectors_per_gpu]
torch.distributed.all_to_all(bottom_output_buffer, list(local_bottom_outputs.split(batch_sizes_per_gpu, dim=0)))
slice_bottom_outputs = torch.cat(bottom_output_buffer, dim=1).view(batch_sizes_per_gpu[rank], -1, vector_dim)
# feature reordering is just for consistency across different device mapping configurations
if feature_order is not None and device_feature_order is not None:
return slice_bottom_outputs[:, feature_order, :]
return slice_bottom_outputs
@staticmethod
def backward(ctx, grad_slice_bottom_outputs):
rank = dist.get_rank()
if ctx.feature_order is not None and ctx.device_feature_order is not None:
grad_slice_bottom_outputs = grad_slice_bottom_outputs[:, ctx.device_feature_order, :]
grad_local_bottom_outputs = torch.empty(
sum(ctx.batch_sizes_per_gpu), ctx.vectors_per_gpu[rank] * ctx.vector_dim,
device=grad_slice_bottom_outputs.device,
dtype=grad_slice_bottom_outputs.dtype)
# All to all only takes list while split() returns tuple
grad_local_bottom_outputs_split = list(grad_local_bottom_outputs.split(ctx.batch_sizes_per_gpu, dim=0))
split_grads = [t.contiguous() for t in (grad_slice_bottom_outputs.view(ctx.batch_sizes_per_gpu[rank], -1).split(
[ctx.vector_dim * n for n in ctx.vectors_per_gpu], dim=1))]
torch.distributed.all_to_all(grad_local_bottom_outputs_split, split_grads)
return (grad_local_bottom_outputs.view(grad_local_bottom_outputs.shape[0], -1, ctx.vector_dim), None, None,
None, None, None)
bottom_to_top = BottomToTop.apply
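# bottom_to_top is the functional form of the autograd Function above; DistributedDlrm calls it
# between the model-parallel bottom part (embeddings + bottom MLP) and the data-parallel top MLP,
# making the all-to-all exchange differentiable end to end.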
class DistributedDlrm(nn.Module):
def __init__(
self,
num_numerical_features: int,
categorical_feature_sizes: Sequence[int],
bottom_mlp_sizes: Sequence[int],
top_mlp_sizes: Sequence[int],
vectors_per_gpu: Sequence[int] = None,
embedding_device_mapping: Sequence[Sequence[int]] = None,
world_num_categorical_features: int = None,
embedding_type: str = "multi_table",
embedding_dim: int = 128,
interaction_op: str = "dot",
hash_indices: bool = False,
use_cpp_mlp: bool = False,
fp16: bool = False,
bottom_features_ordered: bool = False,
device: str = "cuda"
):
super().__init__()
self.distributed = dist.get_world_size() > 1
self._vectors_per_gpu = vectors_per_gpu
self._embedding_dim = embedding_dim
self._interaction_op = interaction_op
self._hash_indices = hash_indices
if self.distributed:
# TODO: take bottom_mlp GPU from device mapping, do not assume it's always first
self._device_feature_order = torch.tensor(
[-1] + [i for bucket in embedding_device_mapping for i in bucket], dtype=torch.long, device=device
) + 1 if bottom_features_ordered else None
self._feature_order = self._device_feature_order.argsort() if bottom_features_ordered else None
else:
world_num_categorical_features = len(categorical_feature_sizes)
interaction = create_interaction(interaction_op, world_num_categorical_features, embedding_dim)
self.bottom_model = DlrmBottom(
num_numerical_features, categorical_feature_sizes, bottom_mlp_sizes,
embedding_type, embedding_dim, hash_indices=hash_indices, use_cpp_mlp=use_cpp_mlp,
fp16=fp16, device=device
)
self.top_model = DlrmTop(top_mlp_sizes, interaction, use_cpp_mlp=use_cpp_mlp).to(device)
def extra_repr(self):
return f"interaction_op={self._interaction_op}, hash_indices={self._hash_indices}"
# pylint:enable=missing-docstring
@classmethod
def from_dict(cls, obj_dict, **kwargs):
"""Create from json str"""
return cls(**obj_dict, **kwargs)
def forward(self, numerical_input, categorical_inputs, batch_sizes_per_gpu: Sequence[int] = None):
"""
Args:
numerical_input (Tensor): with shape [batch_size, num_numerical_features]
categorical_inputs (Tensor): with shape [batch_size, num_categorical_features]
batch_sizes_per_gpu (Sequence[int]):
"""
# the bottom MLP output may not be present on this rank before the all-to-all communication
from_bottom, bottom_mlp_output = self.bottom_model(numerical_input, categorical_inputs)
# only perform all_to_all in multiGPU mode
if self.distributed:
from_bottom = bottom_to_top(from_bottom, batch_sizes_per_gpu, self._embedding_dim, self._vectors_per_gpu,
self._feature_order, self._device_feature_order)
# TODO: take bottom_mlp GPU from device mapping, do not assume it's always first
bottom_mlp_output = from_bottom[:, 0, :]
return self.top_model(from_bottom, bottom_mlp_output)
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/model/distributed.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import time
from typing import Tuple, Optional
from torch.utils.data import DataLoader
from dlrm.data.datasets import ParametricDataset
from dlrm.data.factories import create_dataset_factory
from dlrm.data.feature_spec import FeatureSpec
def get_data_loaders(flags, feature_spec: FeatureSpec, device_mapping: Optional[dict] = None) -> \
Tuple[DataLoader, DataLoader]:
dataset_factory = create_dataset_factory(flags, feature_spec=feature_spec, device_mapping=device_mapping)
dataset_train, dataset_test = dataset_factory.create_datasets()
train_sampler = dataset_factory.create_sampler(dataset_train) if flags.shuffle_batch_order else None
collate_fn = dataset_factory.create_collate_fn()
data_loader_train = dataset_factory.create_data_loader(dataset_train, collate_fn=collate_fn, sampler=train_sampler)
data_loader_test = dataset_factory.create_data_loader(dataset_test, collate_fn=collate_fn)
return data_loader_train, data_loader_test
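# When executed as a script, this module benchmarks raw dataset read throughput from a feature
# spec. A hypothetical invocation (paths and sizes are illustrative):
#   python -m dlrm.data.data_loader --fspec_path /data/feature_spec.yaml --batch_size 65536 --steps 1000 --mapping train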
if __name__ == '__main__':
print('Dataloader benchmark')
parser = argparse.ArgumentParser()
parser.add_argument('--fspec_path', type=str)
parser.add_argument('--batch_size', type=int)
parser.add_argument('--steps', type=int, default=1000)
parser.add_argument('--mapping', type=str, default='train')  # mapping name from the feature spec (e.g. 'train' or 'test'); needed because args.mapping is used below
args = parser.parse_args()
fspec = FeatureSpec.from_yaml(args.fspec_path)
dataset = ParametricDataset(fspec, args.mapping, batch_size=args.batch_size, numerical_features_enabled=True,
categorical_features_to_read=fspec.get_categorical_feature_names())
begin = time.time()
for i in range(args.steps):
_ = dataset[i]
end = time.time()
step_time = (end - begin) / args.steps
throughput = args.batch_size / step_time
print(f'Mean step time: {step_time:.6f} [s]')
print(f'Mean throughput: {throughput:,.0f} [samples / s]')
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/data/data_loader.py |