Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/common.py +37 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/common.sh +126 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/debug.py +132 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/modeling/model_training_utils.py +491 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/modeling/performance.py +56 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/modeling/tf_utils.py +175 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/training/__init__.py +14 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/training/grad_utils.py +143 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/training/runnable.py +79 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/training/standard_runnable.py +184 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/training/utils.py +342 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/__init__.py +0 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/flags/README.md +97 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/flags/__init__.py +0 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/flags/_base.py +163 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/flags/_device.py +85 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/flags/_distribution.py +54 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/flags/core.py +133 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/hyperparams_flags.py +119 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/__init__.py +0 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/cloud_lib.py +34 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/guidelines.md +58 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/hooks.py +130 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/hooks_helper.py +172 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/logger.py +423 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/metric_hook.py +97 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/mlperf_helper.py +192 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/misc/__init__.py +0 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/misc/callstack_sampler.py +62 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/misc/distribution_utils.py +345 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/misc/keras_utils.py +262 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/misc/model_helpers.py +93 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/misc/tpu_lib.py +34 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/__init__.py +0 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/benchmark_wrappers.py +83 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/integration.py +71 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/mock_lib.py +36 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/perfzero_benchmark.py +90 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/pylint.rcfile +168 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/scripts/builds_common.sh +64 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/scripts/ci_sanity.sh +132 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/scripts/presubmit.sh +73 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/configs/train_01x08x08.yaml +158 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/configs/train_02x08x08.yaml +158 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/configs/train_08x08x08.yaml +158 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/configs/train_32x08x08.yaml +156 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/ldm/data/base.py +76 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/ldm/data/utils.py +52 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/ldm/lr_scheduler.py +98 -0
- docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/ldm/models/autoencoder.py +227 -0
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/common.py
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
###############################################################################
|
2 |
+
# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
|
3 |
+
###############################################################################
|
4 |
+
|
5 |
+
import os
|
6 |
+
import logging
|
7 |
+
import subprocess
|
8 |
+
import sys
|
9 |
+
_log = logging.getLogger(__file__)
|
10 |
+
|
11 |
+
|
12 |
+
def setup_jemalloc() -> None:
    """
    Set up libjemalloc.so.1 or libjemalloc.so.2 (depending on the OS version)
    by prepending it to the LD_PRELOAD env variable.

    Every directory listed in LD_LIBRARY_PATH plus the default
    /usr/lib/x86_64-linux-gnu is searched.  When both versions are present,
    version 2 wins (it is searched last, matching the original lookup order).

    Raises:
        FileExistsError: if neither library version can be found.
    """
    _log.info("libjemalloc.so has been requested")
    paths = {"LD_LIBRARY_PATH"}
    env_vals = [os.environ[x] for x in paths if os.environ.get(x) is not None]
    env_vals.extend(["/usr/lib/x86_64-linux-gnu"])
    sep = ":"
    locations = sep.join(env_vals).split(sep)

    final_path = None
    # Search .so.1 first, then .so.2: a later match overwrites final_path,
    # so version 2 takes precedence when both exist (same as the original
    # pair of duplicated loops, now deduplicated).
    for soname in ("libjemalloc.so.1", "libjemalloc.so.2"):
        for path in locations:
            if not path:
                continue
            libpath = os.path.join(path, soname)
            if os.path.isfile(libpath):
                final_path = os.path.realpath(libpath)

    if final_path:
        os.environ["LD_PRELOAD"] = f"{final_path}:{os.environ.get('LD_PRELOAD', '')}"
    else:
        # NOTE(review): FileExistsError is semantically odd for "not found"
        # (FileNotFoundError would fit better), but it is kept so existing
        # callers that catch it keep working.
        raise FileExistsError("Neither libjemalloc.so.1 nor libjemalloc.so.2 found.")
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/common.sh
ADDED
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
|
2 |
+
###############################################################################
|
3 |
+
# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
|
4 |
+
###############################################################################
|
5 |
+
|
6 |
+
# Print an error message to stderr in red.
# The message is passed to printf as an argument, not spliced into the
# format string, so '%' and backslashes in $1 are printed literally
# (the original interpolated $1 into the format, corrupting such messages).
function print_error()
{
    >&2 printf "\033[0;31mError: %s\n\033[0m" "$1"
}
10 |
+
|
11 |
+
# Print a warning message to stderr in yellow.
# $1 is passed as a printf argument (not part of the format string) so
# '%' and backslashes in the message are printed literally.
function print_warning()
{
    >&2 printf "\033[0;33mWarning: %s\n\033[0m" "$1"
}
15 |
+
|
16 |
+
# Locate a shared library by file name.
#
# $1 - library file name (e.g. "libfoo.so").
#
# Prints the resolved (symlink-free) path of the first match found in
# LD_LIBRARY_PATH, BUILD_ROOT_LATEST, TF_MODULES_RELEASE_BUILD or
# TF_MODULES_DEBUG_BUILD (searched in that order); prints nothing when
# the library cannot be found.
function find_library()
{
    local name="${1}"
    local path
    local libpath
    local LOCATIONS

    LOCATIONS="${LD_LIBRARY_PATH}:${BUILD_ROOT_LATEST}:${TF_MODULES_RELEASE_BUILD}:${TF_MODULES_DEBUG_BUILD}"

    # Temporarily set IFS to ':' so the colon-separated list splits into
    # individual directories; restored afterwards.
    OLD_IFS="${IFS}"
    IFS=":"
    for path in ${LOCATIONS}; do
        if [ ! -z "${path}" ]; then
            libpath="${path}/${name}"
            if [ -e "${libpath}" ]; then
                readlink -f "${libpath}"
                break
            fi
        fi
    done
    IFS="${OLD_IFS}"
}
38 |
+
|
39 |
+
# Generate an OpenMPI hostfile from the comma-separated host list in
# MULTI_HLS_IPS.
#
# $1 - directory in which to create the hostfile.
# $2 - number of slots per host (default: 8).
#
# Exports MPI_HOSTFILE_PATH with the location of the generated file and
# prints the resulting configuration.
function generate_mpi_hostfile()
{
    echo "Generating MPI hostfile..."
    local num_nodes=${2:-8}
    local file_name="hostfile"
    export MPI_HOSTFILE_PATH=$1/${file_name}

    # Path expansions are quoted so a directory containing spaces does not
    # split into multiple arguments (the original left them unquoted).
    rm -rf "${MPI_HOSTFILE_PATH}"
    echo "PATH: ${MPI_HOSTFILE_PATH}"
    touch "${MPI_HOSTFILE_PATH}"

    # Split MULTI_HLS_IPS on ',' and emit one "host slots=N" line per host.
    IFS=',' read -ra IPS <<< "$MULTI_HLS_IPS"
    for i in "${IPS[@]}"; do
        echo "$i slots=${num_nodes}" >> "${MPI_HOSTFILE_PATH}"
    done

    echo "Config: "
    cat "${MPI_HOSTFILE_PATH}"
}
58 |
+
|
59 |
+
# Run a command either locally or on every host listed in MULTI_HLS_IPS.
#
# With MULTI_HLS_IPS unset the command runs in the current shell; otherwise
# it is launched through mpirun on each listed host.  Must not be called
# from within an OpenMPI context (it invokes mpirun by itself).  In both
# cases the command's stderr is redirected to stdout.
function run_per_ip()
{
    if [ -n "$OMPI_COMM_WORLD_SIZE" ]; then
        print_error "Function run_per_ip is not meant to be ran from within an OpenMPI context. It is intended to invoke mpirun by itelf."
        exit 1
    fi

    _cmd="$@"

    # Due to technical difficulties with the following solution, the _cmd stderr shall be redirected to stdout.
    if [[ -z ${MULTI_HLS_IPS} ]]; then
        $_cmd 2>&1
    else
        # Restrict OpenMPI's TCP interfaces when MPI_TPC_INCLUDE is set.
        # NOTE(review): "TPC" looks like a typo for "TCP", but the variable
        # name is part of the script's external interface -- do not rename.
        if [ -n "$MPI_TPC_INCLUDE" ]; then
            _option_btl_tcp_if_include="--mca btl_tcp_if_include ${MPI_TPC_INCLUDE}"
        else
            _option_btl_tcp_if_include=""
        fi

        # `declare`/`declare -x` replay the local functions and exported
        # environment inside the remote bash before running the command.
        mpirun --allow-run-as-root \
               --mca plm_rsh_args -p3022 \
               ${_option_btl_tcp_if_include} \
               --tag-output \
               --merge-stderr-to-stdout \
               --prefix /usr/local/openmpi/ \
               -H ${MULTI_HLS_IPS} \
               bash -c "`declare`; `declare -x`; ($_cmd 2>&1)" 2>/dev/null
    fi
}
88 |
+
|
89 |
+
# Prepend libjemalloc to LD_PRELOAD, preferring version 2 when available.
#
# Availability is probed by preloading the candidate library into a trivial
# command (`head -0`) and capturing the loader's stderr: empty output means
# the library loaded successfully.
function setup_libjemalloc()
{
    local libjemalloc_1_lib="libjemalloc.so.1"
    local libjemalloc_2_lib="libjemalloc.so.2"
    # Non-empty when the dynamic loader failed to preload version 2.
    local is_v2_not_present=`LD_PRELOAD=${libjemalloc_2_lib} head -0 2>&1 > /dev/null`

    if [ -z "${is_v2_not_present}" ]; then
        export LD_PRELOAD=${libjemalloc_2_lib}:$LD_PRELOAD
    else
        # Fall back to version 1; no probe here -- assumed present.
        export LD_PRELOAD=${libjemalloc_1_lib}:$LD_PRELOAD
    fi
}
101 |
+
|
102 |
+
# Compute OpenMPI CPU binding arguments from the local CPU count (lscpu).
#
# Requires NUM_WORKERS_PER_HLS to be set.  Exports:
#   MPI_MAP_BY            - binding resource type, "socket" (default) or "slot".
#   MPI_MAP_BY_PE         - processing elements per process (kept if preset).
#   MPIRUN_ARGS_MAP_BY_PE - ready-to-use mpirun binding args (empty when
#                           MPI_MAP_BY_PE computes to 0).
function calc_optimal_cpu_resources_for_mpi()
{
    # OpenMPI process bind resource type.
    export MPI_MAP_BY=${MPI_MAP_BY:-"socket"}
    echo MPI_MAP_BY=$MPI_MAP_BY

    # Determine the optimal value of resources per process of OpenMPI binding based on local lscpu.
    # For "socket" the per-worker share is additionally halved -- presumably
    # to account for two sockets or SMT; TODO confirm against target HW.
    if [ "$MPI_MAP_BY" == "socket" ]; then
        __mpi_map_by_pe=`lscpu | grep "CPU(s):" | python3 -c "print(int(input().split()[1])//${NUM_WORKERS_PER_HLS}//2)"`
    elif [ "$MPI_MAP_BY" == "slot" ]; then
        __mpi_map_by_pe=`lscpu | grep "CPU(s):" | python3 -c "print(int(input().split()[1])//${NUM_WORKERS_PER_HLS})"`
    else
        print_error "MPI_MAP_BY must be either 'socket' or 'slot'."
        exit 1;
    fi
    # A pre-set MPI_MAP_BY_PE overrides the computed value.
    export MPI_MAP_BY_PE=${MPI_MAP_BY_PE:-$__mpi_map_by_pe}
    echo MPI_MAP_BY_PE=$MPI_MAP_BY_PE

    if [ "$MPI_MAP_BY_PE" -gt "0" ]; then
        __mpirun_args_map_by_pe=" --bind-to core --map-by $MPI_MAP_BY:PE=$MPI_MAP_BY_PE"
    else
        unset __mpirun_args_map_by_pe
    fi
    export MPIRUN_ARGS_MAP_BY_PE=$__mpirun_args_map_by_pe
}
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/debug.py
ADDED
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
###############################################################################
|
16 |
+
# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
|
17 |
+
###############################################################################
|
18 |
+
|
19 |
+
from absl import flags
|
20 |
+
from absl import logging
|
21 |
+
from tensorflow.core.protobuf import debug_event_pb2
|
22 |
+
from tensorflow.python.debug.lib import debug_events_writer
|
23 |
+
from tensorflow.python.framework import op_callbacks
|
24 |
+
from tensorflow.python.ops import gen_debug_ops
|
25 |
+
import tensorflow as tf
|
26 |
+
import re
|
27 |
+
import os
|
28 |
+
import json
|
29 |
+
|
30 |
+
try:
|
31 |
+
import horovod.tensorflow as hvd
|
32 |
+
except ImportError:
|
33 |
+
hvd = None
|
34 |
+
|
35 |
+
|
36 |
+
flags.DEFINE_string(name='dump_config', default=None,
|
37 |
+
help='Defines config for tensor dumping')
|
38 |
+
|
39 |
+
|
40 |
+
class _DumpCallback(object):
    """Context manager that dumps tensors of matching ops to disk.

    While active, registers a TensorFlow op callback that wraps the outputs
    of every op whose name matches `op_regex` (and, when `output_regex` is
    given, whose output names also match it) in `DebugIdentityV2` ops,
    recording the tensor values under `dump_root` via a DebugEventsWriter.
    """

    def __init__(self, dump_root, tensor_debug_mode, circular_buffer_size, op_regex, output_regex=None):
        self._dump_root = dump_root
        # With an initialized Horovod run, give each rank its own
        # subdirectory so ranks do not overwrite each other's event files.
        if hvd is not None and hvd.is_initialized():
            self._dump_root = os.path.join(
                self._dump_root, f"rank_{hvd.rank()}")
        self._tensor_debug_mode = debug_event_pb2.TensorDebugMode.Value(
            tensor_debug_mode)
        self._circular_buffer_size = circular_buffer_size
        # Accept both pattern strings and precompiled regex objects.
        self._op_regex = re.compile(op_regex) if isinstance(
            op_regex, str) else op_regex
        self._output_regex = re.compile(output_regex) if isinstance(
            output_regex, str) else output_regex
        self._tfdbg_run_id = ''
        # Monotonic counter used to give every inserted dump op a unique name.
        self._dump_op_counter = 0

        debug_writer_args = {
            "dump_root": self._dump_root,
            "circular_buffer_size": self._circular_buffer_size
        }

        # TF 2.2's DebugEventsWriter does not accept tfdbg_run_id.
        if not tf.__version__.startswith("2.2"):
            debug_writer_args["tfdbg_run_id"] = self._tfdbg_run_id

        self._writer = debug_events_writer.DebugEventsWriter(
            **debug_writer_args)

    def callback(self, op_type, inputs, attrs, outputs, op_name=None, graph=None):
        """Op callback invoked by TF for every instrumented op.

        Returns replacement outputs (each routed through a DebugIdentityV2
        dump op) for matching ops, or None to leave the op unchanged.
        """
        if op_name is not None and self._op_regex.match(op_name):
            graph_name = "missing-graph-name"
            if graph is not None and hasattr(graph, "name"):
                graph_name = graph.name

            logging.info("Adding dump op for '%s' of type '%s' from graph '%s'" % (
                op_name, op_type, graph_name))

            new_outputs = []

            for output_slot, output in enumerate(outputs):
                # When an output filter is configured, pass non-matching
                # outputs through untouched.
                if self._output_regex is not None and not self._output_regex.match(output.name):
                    logging.info("Skipped output: " + output.name)
                    new_outputs.append(output)
                    continue
                debug_identity_op_kwargs = {
                    "tfdbg_context_id": graph_name,
                    "op_name": op_name,
                    "output_slot": output_slot,
                    "tensor_debug_mode": self._tensor_debug_mode,
                    "debug_urls": ["file://%s" % self._dump_root],
                    "name": "dump_%d" % self._dump_op_counter
                }

                # Same TF 2.2 compatibility split as in __init__.
                if not tf.__version__.startswith("2.2"):
                    debug_identity_op_kwargs["circular_buffer_size"] = self._circular_buffer_size
                    debug_identity_op_kwargs["tfdbg_run_id"] = self._tfdbg_run_id

                self._dump_op_counter = self._dump_op_counter + 1
                new_outputs.append(gen_debug_ops.debug_identity_v2(
                    output, **debug_identity_op_kwargs))

            return new_outputs
        else:
            return None

    def __enter__(self, *args, **kwargs):
        # Registering the callback turns on dumping for subsequent ops.
        op_callbacks.add_op_callback(self.callback)
        logging.info("Enabled tensor dumping")

    def __exit__(self, *args, **kwargs):
        op_callbacks.remove_op_callback(self.callback)
        logging.info("Disabled tensor dumping")

    def __del__(self):
        # Flush and close the debug event files when the object is collected.
        self._writer.Close()
114 |
+
|
115 |
+
|
116 |
+
class _Dummy(object):
|
117 |
+
def __enter__(self, *args, **kwargs):
|
118 |
+
pass
|
119 |
+
|
120 |
+
def __exit__(self, *args, **kwargs):
|
121 |
+
pass
|
122 |
+
|
123 |
+
|
124 |
+
def dump_callback(config_file=None):
    """Create a tensor-dump context manager from a JSON config.

    Args:
        config_file: optional path to a JSON file whose contents are passed
            as keyword arguments to ``_DumpCallback``.  When None, the path
            is read from the ``--dump_config`` flag instead.

    Returns:
        A configured ``_DumpCallback``, or a no-op ``_Dummy`` context
        manager when no usable configuration is available via the flag.

    Raises:
        OSError / json.JSONDecodeError: only for an explicitly passed
            ``config_file`` -- that path is treated as mandatory.
    """
    if config_file is not None:
        # Explicit config is mandatory: let any error propagate.
        # (Original leaked the file handle via json.load(open(...)).)
        with open(config_file, 'r') as f:
            kwargs = json.load(f)
        return _DumpCallback(**kwargs)
    try:
        # Best effort: the flag may be unset (None) or point at a missing
        # or malformed file -- any of those means "dumping disabled".
        with open(flags.FLAGS.dump_config, 'r') as f:
            kwargs = json.load(f)
        return _DumpCallback(**kwargs)
    except Exception:
        # `except Exception` instead of the original bare `except:` so
        # KeyboardInterrupt/SystemExit are not swallowed.
        return _Dummy()
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/modeling/model_training_utils.py
ADDED
@@ -0,0 +1,491 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""A light weight utilities to train NLP models."""
|
16 |
+
|
17 |
+
from __future__ import absolute_import
|
18 |
+
from __future__ import division
|
19 |
+
from __future__ import print_function
|
20 |
+
|
21 |
+
import json
|
22 |
+
import os
|
23 |
+
import tempfile
|
24 |
+
|
25 |
+
from absl import logging
|
26 |
+
import tensorflow as tf
|
27 |
+
from TensorFlow.common.training import grad_utils
|
28 |
+
from TensorFlow.utils.misc import distribution_utils
|
29 |
+
|
30 |
+
_SUMMARY_TXT = 'training_summary.txt'
|
31 |
+
_MIN_SUMMARY_STEPS = 10
|
32 |
+
|
33 |
+
|
34 |
+
def _should_export_checkpoint(strategy):
|
35 |
+
return (not strategy) or strategy.extended.should_checkpoint
|
36 |
+
|
37 |
+
|
38 |
+
def _should_export_summary(strategy):
|
39 |
+
return (not strategy) or strategy.extended.should_save_summary
|
40 |
+
|
41 |
+
|
42 |
+
def _save_checkpoint(strategy, checkpoint, model_dir, checkpoint_prefix):
    """Save `checkpoint` under `model_dir` with the given filename prefix.

    In multi-worker training every worker must call save() because variable
    reads can trigger synchronization that needs all workers to participate;
    non-chief workers therefore save into a throwaway temporary directory
    that is deleted immediately afterwards.
    """
    if _should_export_checkpoint(strategy):
        saved_path = checkpoint.save(os.path.join(model_dir, checkpoint_prefix))
        logging.info('Saving model as TF checkpoint: %s', saved_path)
        return
    # Non-chief worker: join the synchronized save, discard the result so
    # workers do not clobber each other's checkpoint files.
    scratch_dir = tempfile.mkdtemp()
    checkpoint.save(os.path.join(scratch_dir, 'ckpt'))
    tf.io.gfile.rmtree(scratch_dir)
58 |
+
|
59 |
+
|
60 |
+
def _get_input_iterator(input_fn, strategy):
|
61 |
+
"""Returns distributed dataset iterator."""
|
62 |
+
# When training with TPU pods, datasets needs to be cloned across
|
63 |
+
# workers. Since Dataset instance cannot be cloned in eager mode, we instead
|
64 |
+
# pass callable that returns a dataset.
|
65 |
+
if not callable(input_fn):
|
66 |
+
raise ValueError('`input_fn` should be a closure that returns a dataset.')
|
67 |
+
iterator = iter(
|
68 |
+
strategy.experimental_distribute_datasets_from_function(input_fn))
|
69 |
+
return iterator
|
70 |
+
|
71 |
+
|
72 |
+
def _float_metric_value(metric):
|
73 |
+
"""Gets the value of a float-value keras metric."""
|
74 |
+
return metric.result().numpy().astype(float)
|
75 |
+
|
76 |
+
|
77 |
+
def steps_to_run(current_step, steps_per_epoch, steps_per_loop):
    """Return how many steps the next device loop should run.

    The loop never crosses an epoch boundary: when fewer than
    `steps_per_loop` steps remain in the current epoch, only the remaining
    steps are returned.

    Raises:
        ValueError: if `steps_per_loop` is not a positive integer.
    """
    if steps_per_loop <= 0:
        raise ValueError('steps_per_loop should be positive integer.')
    if steps_per_loop == 1:
        return 1
    left_in_epoch = steps_per_epoch - current_step % steps_per_epoch
    if left_in_epoch == steps_per_epoch:
        # Exactly at an epoch boundary: a full loop fits.
        return steps_per_loop
    return min(left_in_epoch, steps_per_loop)
88 |
+
|
89 |
+
|
90 |
+
def write_txt_summary(training_summary, summary_dir):
    """Record training stats by writing them as JSON text under `summary_dir`."""
    output_path = os.path.join(summary_dir, _SUMMARY_TXT)
    with tf.io.gfile.GFile(output_path, 'wb') as summary_file:
        logging.info('Training Summary: \n%s', str(training_summary))
        summary_file.write(json.dumps(training_summary, indent=4))
97 |
+
|
98 |
+
def run_customized_training_loop(
|
99 |
+
# pylint: disable=invalid-name
|
100 |
+
_sentinel=None,
|
101 |
+
# pylint: enable=invalid-name
|
102 |
+
strategy=None,
|
103 |
+
model_fn=None,
|
104 |
+
loss_fn=None,
|
105 |
+
scale_loss=True,
|
106 |
+
model_dir=None,
|
107 |
+
train_input_fn=None,
|
108 |
+
steps_per_epoch=None,
|
109 |
+
steps_per_loop=1,
|
110 |
+
epochs=1,
|
111 |
+
eval_input_fn=None,
|
112 |
+
eval_steps=None,
|
113 |
+
metric_fn=None,
|
114 |
+
init_checkpoint=None,
|
115 |
+
custom_callbacks=None,
|
116 |
+
run_eagerly=False,
|
117 |
+
sub_model_export_name=None,
|
118 |
+
explicit_allreduce=False,
|
119 |
+
pre_allreduce_callbacks=None,
|
120 |
+
post_allreduce_callbacks=None):
|
121 |
+
"""Run BERT pretrain model training using low-level API.
|
122 |
+
|
123 |
+
Arguments:
|
124 |
+
_sentinel: Used to prevent positional parameters. Internal, do not use.
|
125 |
+
strategy: Distribution strategy on which to run low level training loop.
|
126 |
+
model_fn: Function that returns a tuple (model, sub_model). Caller of this
|
127 |
+
function should add optimizer to the `model` via calling
|
128 |
+
`model.compile()` API or manually setting `model.optimizer` attribute.
|
129 |
+
Second element of the returned tuple(sub_model) is an optional sub model
|
130 |
+
to be used for initial checkpoint -- if provided.
|
131 |
+
loss_fn: Function with signature func(labels, logits) and returns a loss
|
132 |
+
tensor.
|
133 |
+
scale_loss: Whether to divide the raw loss by number of replicas before
|
134 |
+
gradients calculation.
|
135 |
+
model_dir: Model directory used during training for restoring/saving model
|
136 |
+
weights.
|
137 |
+
train_input_fn: Function that returns a tf.data.Dataset used for training.
|
138 |
+
steps_per_epoch: Number of steps to run per epoch. At the end of each
|
139 |
+
epoch, model checkpoint will be saved and evaluation will be conducted
|
140 |
+
if evaluation dataset is provided.
|
141 |
+
steps_per_loop: Number of steps per graph-mode loop. In order to reduce
|
142 |
+
communication in eager context, training logs are printed every
|
143 |
+
steps_per_loop.
|
144 |
+
epochs: Number of epochs to train.
|
145 |
+
eval_input_fn: Function that returns evaluation dataset. If none,
|
146 |
+
evaluation is skipped.
|
147 |
+
eval_steps: Number of steps to run evaluation. Required if `eval_input_fn`
|
148 |
+
is not none.
|
149 |
+
metric_fn: A metrics function that returns a Keras Metric object to record
|
150 |
+
evaluation result using evaluation dataset or with training dataset
|
151 |
+
after every epoch.
|
152 |
+
init_checkpoint: Optional checkpoint to load to `sub_model` returned by
|
153 |
+
`model_fn`.
|
154 |
+
custom_callbacks: A list of Keras Callbacks objects to run during
|
155 |
+
training. More specifically, `on_batch_begin()`, `on_batch_end()`,
|
156 |
+
methods are invoked during training.
|
157 |
+
run_eagerly: Whether to run model training in pure eager execution. This
|
158 |
+
should be disable for TPUStrategy.
|
159 |
+
sub_model_export_name: If not None, will export `sub_model` returned by
|
160 |
+
`model_fn` into checkpoint files. The name of intermediate checkpoint
|
161 |
+
file is {sub_model_export_name}_step_{step}.ckpt and the last
|
162 |
+
checkpint's name is {sub_model_export_name}.ckpt;
|
163 |
+
if None, `sub_model` will not be exported as checkpoint.
|
164 |
+
explicit_allreduce: Whether to explicitly perform gradient allreduce,
|
165 |
+
instead of relying on implicit allreduce in optimizer.apply_gradients().
|
166 |
+
default is False. For now, if training using FP16 mixed precision,
|
167 |
+
explicit allreduce will aggregate gradients in FP16 format. For TPU and
|
168 |
+
GPU training using FP32, explicit allreduce will aggregate gradients in
|
169 |
+
FP32 format.
|
170 |
+
pre_allreduce_callbacks: A list of callback functions that takes gradients
|
171 |
+
and model variables pairs as input, manipulate them, and returns a new
|
172 |
+
gradients and model variables paris. The callback functions will be
|
173 |
+
invoked in the list order and before gradients are allreduced.
|
174 |
+
With mixed precision training, the pre_allreduce_allbacks will be
|
175 |
+
applied on scaled_gradients. Default is no callbacks.
|
176 |
+
Only used when explicit_allreduce=True.
|
177 |
+
post_allreduce_callbacks: A list of callback functions that takes
|
178 |
+
gradients and model variables pairs as input, manipulate them, and
|
179 |
+
returns a new gradients and model variables paris. The callback
|
180 |
+
functions will be invoked in the list order and right before gradients
|
181 |
+
are applied to variables for updates. Default is no callbacks. Only used
|
182 |
+
when explicit_allreduce=True.
|
183 |
+
|
184 |
+
Returns:
|
185 |
+
Trained model.
|
186 |
+
|
187 |
+
Raises:
|
188 |
+
ValueError: (1) When model returned by `model_fn` does not have optimizer
|
189 |
+
attribute or when required parameters are set to none. (2) eval args are
|
190 |
+
not specified correctly. (3) metric_fn must be a callable if specified.
|
191 |
+
(4) sub_model_checkpoint_name is specified, but `sub_model` returned
|
192 |
+
by `model_fn` is None.
|
193 |
+
"""
|
194 |
+
|
195 |
+
if _sentinel is not None:
|
196 |
+
raise ValueError('only call `run_customized_training_loop()` '
|
197 |
+
'with named arguments.')
|
198 |
+
|
199 |
+
required_arguments = [
|
200 |
+
strategy, model_fn, loss_fn, model_dir, steps_per_epoch, train_input_fn
|
201 |
+
]
|
202 |
+
if [arg for arg in required_arguments if arg is None]:
|
203 |
+
raise ValueError('`strategy`, `model_fn`, `loss_fn`, `model_dir`, '
|
204 |
+
'`steps_per_loop` and `steps_per_epoch` are required '
|
205 |
+
'parameters.')
|
206 |
+
if steps_per_loop > steps_per_epoch:
|
207 |
+
logging.error(
|
208 |
+
'steps_per_loop: %d is specified to be greater than '
|
209 |
+
' steps_per_epoch: %d, we will use steps_per_epoch as'
|
210 |
+
' steps_per_loop.', steps_per_loop, steps_per_epoch)
|
211 |
+
steps_per_loop = steps_per_epoch
|
212 |
+
assert tf.executing_eagerly()
|
213 |
+
|
214 |
+
if run_eagerly:
|
215 |
+
if isinstance(strategy, tf.distribute.experimental.TPUStrategy):
|
216 |
+
raise ValueError(
|
217 |
+
'TPUStrategy should not run eagerly as it heavily relies on graph'
|
218 |
+
' optimization for the distributed system.')
|
219 |
+
|
220 |
+
if eval_input_fn and (eval_steps is None or metric_fn is None):
|
221 |
+
raise ValueError(
|
222 |
+
'`eval_step` and `metric_fn` are required when `eval_input_fn ` '
|
223 |
+
'is not none.')
|
224 |
+
if metric_fn and not callable(metric_fn):
|
225 |
+
raise ValueError(
|
226 |
+
'if `metric_fn` is specified, metric_fn must be a callable.')
|
227 |
+
|
228 |
+
total_training_steps = steps_per_epoch * epochs
|
229 |
+
train_iterator = _get_input_iterator(train_input_fn, strategy)
|
230 |
+
|
231 |
+
with distribution_utils.get_strategy_scope(strategy):
|
232 |
+
# To correctly place the model weights on accelerators,
|
233 |
+
# model and optimizer should be created in scope.
|
234 |
+
model, sub_model = model_fn()
|
235 |
+
if not hasattr(model, 'optimizer'):
|
236 |
+
raise ValueError('User should set optimizer attribute to model '
|
237 |
+
'inside `model_fn`.')
|
238 |
+
if sub_model_export_name and sub_model is None:
|
239 |
+
raise ValueError('sub_model_export_name is specified as %s, but '
|
240 |
+
'sub_model is None.' % sub_model_export_name)
|
241 |
+
|
242 |
+
optimizer = model.optimizer
|
243 |
+
|
244 |
+
if init_checkpoint:
|
245 |
+
logging.info(
|
246 |
+
'Checkpoint file %s found and restoring from '
|
247 |
+
'initial checkpoint for core model.', init_checkpoint)
|
248 |
+
checkpoint = tf.train.Checkpoint(model=sub_model)
|
249 |
+
checkpoint.restore(init_checkpoint).assert_existing_objects_matched()
|
250 |
+
logging.info('Loading from checkpoint file completed')
|
251 |
+
|
252 |
+
train_loss_metric = tf.keras.metrics.Mean(
|
253 |
+
'training_loss', dtype=tf.float32)
|
254 |
+
eval_metrics = [metric_fn()] if metric_fn else []
|
255 |
+
# If evaluation is required, make a copy of metric as it will be used by
|
256 |
+
# both train and evaluation.
|
257 |
+
train_metrics = [
|
258 |
+
metric.__class__.from_config(metric.get_config())
|
259 |
+
for metric in eval_metrics
|
260 |
+
]
|
261 |
+
|
262 |
+
# Create summary writers
|
263 |
+
if _should_export_summary(strategy):
|
264 |
+
summary_dir = os.path.join(model_dir, 'summaries')
|
265 |
+
else:
|
266 |
+
# In multi worker training we need every worker to write summary, because
|
267 |
+
# variables can trigger synchronization on read and synchronization needs
|
268 |
+
# all workers to participate.
|
269 |
+
summary_dir = tempfile.mkdtemp()
|
270 |
+
eval_summary_writer = tf.summary.create_file_writer(
|
271 |
+
os.path.join(summary_dir, 'eval'))
|
272 |
+
if steps_per_loop >= _MIN_SUMMARY_STEPS:
|
273 |
+
# Only writes summary when the stats are collected sufficiently over
|
274 |
+
# enough steps.
|
275 |
+
train_summary_writer = tf.summary.create_file_writer(
|
276 |
+
os.path.join(summary_dir, 'train'))
|
277 |
+
else:
|
278 |
+
train_summary_writer = None
|
279 |
+
|
280 |
+
# Collects training variables.
|
281 |
+
training_vars = model.trainable_variables
|
282 |
+
|
283 |
+
def _replicated_step(inputs):
|
284 |
+
"""Replicated training step."""
|
285 |
+
|
286 |
+
inputs, labels = inputs
|
287 |
+
with tf.GradientTape() as tape:
|
288 |
+
model_outputs = model(inputs, training=True)
|
289 |
+
loss = loss_fn(labels, model_outputs)
|
290 |
+
# Raw loss is used for reporting in metrics/logs.
|
291 |
+
raw_loss = loss
|
292 |
+
if scale_loss:
|
293 |
+
# Scales down the loss for gradients to be invariant from replicas.
|
294 |
+
loss = loss / strategy.num_replicas_in_sync
|
295 |
+
|
296 |
+
if explicit_allreduce:
|
297 |
+
grad_utils.minimize_using_explicit_allreduce(tape, optimizer, loss,
|
298 |
+
training_vars,
|
299 |
+
pre_allreduce_callbacks,
|
300 |
+
post_allreduce_callbacks)
|
301 |
+
else:
|
302 |
+
if isinstance(optimizer,
|
303 |
+
tf.keras.mixed_precision.experimental.LossScaleOptimizer):
|
304 |
+
with tape:
|
305 |
+
scaled_loss = optimizer.get_scaled_loss(loss)
|
306 |
+
scaled_grads = tape.gradient(scaled_loss, training_vars)
|
307 |
+
grads = optimizer.get_unscaled_gradients(scaled_grads)
|
308 |
+
else:
|
309 |
+
grads = tape.gradient(loss, training_vars)
|
310 |
+
optimizer.apply_gradients(zip(grads, training_vars))
|
311 |
+
# For reporting, the metric takes the mean of losses.
|
312 |
+
train_loss_metric.update_state(raw_loss)
|
313 |
+
for metric in train_metrics:
|
314 |
+
metric.update_state(labels, model_outputs)
|
315 |
+
|
316 |
+
@tf.function
|
317 |
+
def train_steps(iterator, steps):
|
318 |
+
"""Performs distributed training steps in a loop.
|
319 |
+
|
320 |
+
Args:
|
321 |
+
iterator: the distributed iterator of training datasets.
|
322 |
+
steps: an tf.int32 integer tensor to specify number of steps to run
|
323 |
+
inside host training loop.
|
324 |
+
|
325 |
+
Raises:
|
326 |
+
ValueError: Any of the arguments or tensor shapes are invalid.
|
327 |
+
"""
|
328 |
+
if not isinstance(steps, tf.Tensor):
|
329 |
+
raise ValueError('steps should be an Tensor. Python object may cause '
|
330 |
+
'retracing.')
|
331 |
+
|
332 |
+
for _ in tf.range(steps):
|
333 |
+
strategy.run(_replicated_step, args=(next(iterator),))
|
334 |
+
|
335 |
+
def train_single_step(iterator):
|
336 |
+
"""Performs a distributed training step.
|
337 |
+
|
338 |
+
Args:
|
339 |
+
iterator: the distributed iterator of training datasets.
|
340 |
+
|
341 |
+
Raises:
|
342 |
+
ValueError: Any of the arguments or tensor shapes are invalid.
|
343 |
+
"""
|
344 |
+
strategy.run(_replicated_step, args=(next(iterator),))
|
345 |
+
|
346 |
+
def test_step(iterator):
|
347 |
+
"""Calculates evaluation metrics on distributed devices."""
|
348 |
+
|
349 |
+
def _test_step_fn(inputs):
|
350 |
+
"""Replicated accuracy calculation."""
|
351 |
+
|
352 |
+
inputs, labels = inputs
|
353 |
+
model_outputs = model(inputs, training=False)
|
354 |
+
for metric in eval_metrics:
|
355 |
+
metric.update_state(labels, model_outputs)
|
356 |
+
|
357 |
+
strategy.run(_test_step_fn, args=(next(iterator),))
|
358 |
+
|
359 |
+
if not run_eagerly:
|
360 |
+
train_single_step = tf.function(train_single_step)
|
361 |
+
test_step = tf.function(test_step)
|
362 |
+
|
363 |
+
def _run_evaluation(current_training_step, test_iterator):
|
364 |
+
"""Runs validation steps and aggregate metrics."""
|
365 |
+
for _ in range(eval_steps):
|
366 |
+
test_step(test_iterator)
|
367 |
+
|
368 |
+
with eval_summary_writer.as_default():
|
369 |
+
for metric in eval_metrics + model.metrics:
|
370 |
+
metric_value = _float_metric_value(metric)
|
371 |
+
logging.info('Step: [%d] Validation %s = %f', current_training_step,
|
372 |
+
metric.name, metric_value)
|
373 |
+
tf.summary.scalar(
|
374 |
+
metric.name, metric_value, step=current_training_step)
|
375 |
+
eval_summary_writer.flush()
|
376 |
+
|
377 |
+
def _run_callbacks_on_batch_begin(batch):
|
378 |
+
"""Runs custom callbacks at the start of every step."""
|
379 |
+
if not custom_callbacks:
|
380 |
+
return
|
381 |
+
for callback in custom_callbacks:
|
382 |
+
callback.on_batch_begin(batch)
|
383 |
+
|
384 |
+
def _run_callbacks_on_batch_end(batch, logs):
|
385 |
+
"""Runs custom callbacks at the end of every step."""
|
386 |
+
if not custom_callbacks:
|
387 |
+
return
|
388 |
+
for callback in custom_callbacks:
|
389 |
+
callback.on_batch_end(batch, logs)
|
390 |
+
|
391 |
+
# Training loop starts here.
|
392 |
+
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
|
393 |
+
sub_model_checkpoint = tf.train.Checkpoint(
|
394 |
+
model=sub_model) if sub_model_export_name else None
|
395 |
+
|
396 |
+
latest_checkpoint_file = tf.train.latest_checkpoint(model_dir)
|
397 |
+
if latest_checkpoint_file:
|
398 |
+
logging.info(
|
399 |
+
'Checkpoint file %s found and restoring from '
|
400 |
+
'checkpoint', latest_checkpoint_file)
|
401 |
+
checkpoint.restore(latest_checkpoint_file)
|
402 |
+
logging.info('Loading from checkpoint file completed')
|
403 |
+
|
404 |
+
current_step = optimizer.iterations.numpy()
|
405 |
+
checkpoint_name = 'ctl_step_{step}.ckpt'
|
406 |
+
|
407 |
+
while current_step < total_training_steps:
|
408 |
+
# Training loss/metric are taking average over steps inside micro
|
409 |
+
# training loop. We reset the their values before each round.
|
410 |
+
train_loss_metric.reset_states()
|
411 |
+
for metric in train_metrics + model.metrics:
|
412 |
+
metric.reset_states()
|
413 |
+
|
414 |
+
_run_callbacks_on_batch_begin(current_step)
|
415 |
+
# Runs several steps in the host while loop.
|
416 |
+
steps = steps_to_run(current_step, steps_per_epoch, steps_per_loop)
|
417 |
+
|
418 |
+
if tf.test.is_built_with_cuda():
|
419 |
+
# TODO(zongweiz): merge with train_steps once tf.while_loop
|
420 |
+
# GPU performance bugs are fixed.
|
421 |
+
for _ in range(steps):
|
422 |
+
train_single_step(train_iterator)
|
423 |
+
else:
|
424 |
+
# Converts steps to a Tensor to avoid tf.function retracing.
|
425 |
+
train_steps(train_iterator,
|
426 |
+
tf.convert_to_tensor(steps, dtype=tf.int32))
|
427 |
+
train_loss = _float_metric_value(train_loss_metric)
|
428 |
+
current_step += steps
|
429 |
+
_run_callbacks_on_batch_end(current_step - 1, {'loss': train_loss})
|
430 |
+
|
431 |
+
# Updates training logging.
|
432 |
+
training_status = 'Train Step: %d/%d / loss = %s' % (
|
433 |
+
current_step, total_training_steps, train_loss)
|
434 |
+
|
435 |
+
if train_summary_writer:
|
436 |
+
with train_summary_writer.as_default():
|
437 |
+
tf.summary.scalar(
|
438 |
+
train_loss_metric.name, train_loss, step=current_step)
|
439 |
+
for metric in train_metrics + model.metrics:
|
440 |
+
metric_value = _float_metric_value(metric)
|
441 |
+
training_status += ' %s = %f' % (metric.name, metric_value)
|
442 |
+
tf.summary.scalar(metric.name, metric_value, step=current_step)
|
443 |
+
train_summary_writer.flush()
|
444 |
+
logging.info(training_status)
|
445 |
+
|
446 |
+
# Saves model checkpoints and run validation steps at every epoch end.
|
447 |
+
if current_step % steps_per_epoch == 0:
|
448 |
+
# To avoid repeated model saving, we do not save after the last
|
449 |
+
# step of training.
|
450 |
+
if current_step < total_training_steps:
|
451 |
+
_save_checkpoint(strategy, checkpoint, model_dir,
|
452 |
+
checkpoint_name.format(step=current_step))
|
453 |
+
if sub_model_export_name:
|
454 |
+
_save_checkpoint(
|
455 |
+
strategy, sub_model_checkpoint, model_dir,
|
456 |
+
'%s_step_%d.ckpt' % (sub_model_export_name, current_step))
|
457 |
+
if eval_input_fn:
|
458 |
+
logging.info('Running evaluation after step: %s.', current_step)
|
459 |
+
_run_evaluation(current_step,
|
460 |
+
_get_input_iterator(eval_input_fn, strategy))
|
461 |
+
# Re-initialize evaluation metric.
|
462 |
+
for metric in eval_metrics + model.metrics:
|
463 |
+
metric.reset_states()
|
464 |
+
|
465 |
+
_save_checkpoint(strategy, checkpoint, model_dir,
|
466 |
+
checkpoint_name.format(step=current_step))
|
467 |
+
if sub_model_export_name:
|
468 |
+
_save_checkpoint(strategy, sub_model_checkpoint, model_dir,
|
469 |
+
'%s.ckpt' % sub_model_export_name)
|
470 |
+
|
471 |
+
if eval_input_fn:
|
472 |
+
logging.info('Running final evaluation after training is complete.')
|
473 |
+
_run_evaluation(current_step,
|
474 |
+
_get_input_iterator(eval_input_fn, strategy))
|
475 |
+
|
476 |
+
training_summary = {
|
477 |
+
'total_training_steps': total_training_steps,
|
478 |
+
'train_loss': _float_metric_value(train_loss_metric),
|
479 |
+
}
|
480 |
+
if eval_metrics:
|
481 |
+
# TODO(hongkuny): Cleans up summary reporting in text.
|
482 |
+
training_summary['last_train_metrics'] = _float_metric_value(
|
483 |
+
train_metrics[0])
|
484 |
+
training_summary['eval_metrics'] = _float_metric_value(eval_metrics[0])
|
485 |
+
|
486 |
+
write_txt_summary(training_summary, summary_dir)
|
487 |
+
|
488 |
+
if not _should_export_summary(strategy):
|
489 |
+
tf.io.gfile.rmtree(summary_dir)
|
490 |
+
|
491 |
+
return model
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/modeling/performance.py
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Lint as: python3
|
2 |
+
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
# ==============================================================================
|
16 |
+
"""Functions and classes related to training performance."""
|
17 |
+
|
18 |
+
import tensorflow as tf
|
19 |
+
|
20 |
+
|
21 |
+
def configure_optimizer(optimizer,
                        use_float16=False,
                        use_graph_rewrite=False,
                        loss_scale="dynamic"):
  """Applies performance-related wrappers to an optimizer.

  Args:
    optimizer: a `tf.keras.optimizers.Optimizer` instance to configure.
    use_float16: whether to wrap the optimizer in a `LossScaleOptimizer`
      for float16 mixed-precision training.
    use_graph_rewrite: whether to enable the mixed-precision graph rewrite.
    loss_scale: loss scale for the `LossScaleOptimizer`; either "dynamic"
      or a fixed value.

  Returns:
    The optimizer, possibly wrapped for mixed-precision training.
  """
  if use_float16:
    # compile() would add this wrapper automatically under the
    # "mixed_float16" policy, but compile() is never called here, so the
    # optimizer must be wrapped by hand.
    optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
        optimizer, loss_scale=loss_scale)
  if use_graph_rewrite:
    # The model dtype must remain 'float32' so that
    # tf.keras.mixed_precision and the graph rewrite do not both cast the
    # computation (they would otherwise double up).
    optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(
        optimizer)
  return optimizer
|
41 |
+
|
42 |
+
|
43 |
+
def set_mixed_precision_policy(dtype, loss_scale=None):
  """Sets the global Keras mixed-precision policy matching `dtype`.

  Args:
    dtype: one of tf.float16, tf.bfloat16 or tf.float32.
    loss_scale: optional loss scale, only used for the float16 policy.

  Raises:
    ValueError: if `dtype` is not one of the three supported dtypes.
  """
  if dtype == tf.float16:
    tf.keras.mixed_precision.experimental.set_policy(
        tf.keras.mixed_precision.experimental.Policy(
            'mixed_float16', loss_scale=loss_scale))
  elif dtype == tf.bfloat16:
    # bfloat16 keeps the float32 exponent range, so no loss scaling needed.
    tf.keras.mixed_precision.experimental.set_policy(
        tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16'))
  elif dtype == tf.float32:
    tf.keras.mixed_precision.experimental.set_policy('float32')
  else:
    raise ValueError("Unexpected dtype: %s" % dtype)
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/modeling/tf_utils.py
ADDED
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""Common TF utilities."""
|
16 |
+
|
17 |
+
from __future__ import absolute_import
|
18 |
+
from __future__ import division
|
19 |
+
from __future__ import print_function
|
20 |
+
|
21 |
+
import six
|
22 |
+
import tensorflow as tf
|
23 |
+
|
24 |
+
from tensorflow.python.util import deprecation
|
25 |
+
from TensorFlow.common.modeling import activations
|
26 |
+
|
27 |
+
|
28 |
+
@deprecation.deprecated(
    None,
    "tf.keras.layers.Layer supports multiple positional args and kwargs as "
    "input tensors. pack/unpack inputs to override __call__ is no longer "
    "needed."
)
def pack_inputs(inputs):
  """Packs a list of `inputs` tensors into a tuple.

  Args:
    inputs: a (possibly nested) list of tensors.

  Returns:
    A flat tuple of tensors where every `None` entry has been replaced by a
    scalar int32 zero sentinel tensor (see `is_special_none_tensor`).
  """
  packed = []
  for tensor in tf.nest.flatten(inputs):
    if tensor is None:
      # Sentinel stand-in for None; recognized later by unpack_inputs.
      packed.append(tf.constant(0, shape=[], dtype=tf.int32))
    else:
      packed.append(tensor)
  return tuple(packed)
|
52 |
+
|
53 |
+
|
54 |
+
@deprecation.deprecated(
    None,
    "tf.keras.layers.Layer supports multiple positional args and kwargs as "
    "input tensors. pack/unpack inputs to override __call__ is no longer "
    "needed."
)
def unpack_inputs(inputs):
  """Unpacks a tuple of `inputs` tensors packed by `pack_inputs`.

  Args:
    inputs: a list of tensors.

  Returns:
    A tuple of tensors where every sentinel tensor has been replaced with
    None. If exactly one element remains, that element is returned directly
    (keeps pylint's 'unbalanced-tuple-unpacking' check quiet at call sites).
  """
  unpacked = tuple(
      None if is_special_none_tensor(tensor) else tensor
      for tensor in tf.nest.flatten(inputs))
  if len(unpacked) == 1:
    return unpacked[0]
  return unpacked
|
84 |
+
|
85 |
+
|
86 |
+
def is_special_none_tensor(tensor):
  """Returns True if `tensor` is the scalar int32 sentinel standing for None."""
  return tensor.dtype == tf.int32 and tensor.shape.ndims == 0
|
89 |
+
|
90 |
+
|
91 |
+
# TODO(hongkuny): consider moving custom string-map lookup to keras api.
|
92 |
+
def get_activation(identifier):
  """Maps a identifier to a Python function, e.g., "relu" => `tf.nn.relu`.

  It checks string first and if it is one of customized activation not in TF,
  the corresponding activation will be returned. For non-customized activation
  names and callable identifiers, always fallback to tf.keras.activations.get.

  Args:
    identifier: String name of the activation function or callable.

  Returns:
    A Python function corresponding to the activation function.
  """
  # Py3-only codebase: `six.string_types` is just `(str,)`, so test `str`
  # directly.
  if isinstance(identifier, str):
    name_to_fn = {
        "gelu": activations.gelu,
        "simple_swish": activations.simple_swish,
        "hard_swish": activations.hard_swish,
        "identity": activations.identity,
    }
    identifier = str(identifier).lower()
    if identifier in name_to_fn:
      return tf.keras.activations.get(name_to_fn[identifier])
  # Fallback covers standard Keras activation names and callables.
  return tf.keras.activations.get(identifier)
|
116 |
+
|
117 |
+
|
118 |
+
def get_shape_list(tensor, expected_rank=None, name=None):
  """Returns a list of the shape of tensor, preferring static dimensions.

  Args:
    tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
      thrown.
    name: Optional name of the tensor for the error message.

  Returns:
    A list of dimensions of the shape of tensor. All static dimensions will
    be returned as python integers, and dynamic dimensions will be returned
    as tf.Tensor scalars.
  """
  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)

  shape = tensor.shape.as_list()

  # Positions whose size is unknown at graph-construction time.
  dynamic_indexes = [index for index, dim in enumerate(shape) if dim is None]
  if not dynamic_indexes:
    return shape

  # Fill the unknown positions from the runtime shape tensor.
  runtime_shape = tf.shape(tensor)
  for index in dynamic_indexes:
    shape[index] = runtime_shape[index]
  return shape
|
150 |
+
|
151 |
+
|
152 |
+
def assert_rank(tensor, expected_rank, name=None):
  """Raises an exception if the tensor rank is not of the expected rank.

  Args:
    tensor: A tf.Tensor to check the rank of.
    expected_rank: Python integer or list of integers, expected rank.
    name: Optional name of the tensor for the error message.

  Raises:
    ValueError: If the expected shape doesn't match the actual shape.
  """
  # Py3-only codebase: `six.integer_types` reduces to plain `int`, and a set
  # expresses the admissible ranks more directly than a dict of True values.
  if isinstance(expected_rank, int):
    expected_ranks = {expected_rank}
  else:
    expected_ranks = set(expected_rank)

  actual_rank = tensor.shape.ndims
  if actual_rank not in expected_ranks:
    raise ValueError(
        "For the tensor `%s`, the actual tensor rank `%d` (shape = %s) is not "
        "equal to the expected tensor rank `%s`" %
        (name, actual_rank, str(tensor.shape), str(expected_rank)))
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/training/__init__.py
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/training/grad_utils.py
ADDED
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""Some gradient util functions to help users writing custom training loop."""
|
16 |
+
|
17 |
+
from __future__ import absolute_import
|
18 |
+
from __future__ import division
|
19 |
+
# from __future__ import google_type_annotations
|
20 |
+
from __future__ import print_function
|
21 |
+
|
22 |
+
from absl import logging
|
23 |
+
|
24 |
+
import tensorflow.compat.v2 as tf
|
25 |
+
|
26 |
+
|
27 |
+
def _filter_grads(grads_and_vars):
|
28 |
+
"""Filter out iterable with grad equal to None."""
|
29 |
+
grads_and_vars = tuple(grads_and_vars)
|
30 |
+
if not grads_and_vars:
|
31 |
+
return grads_and_vars
|
32 |
+
filtered = []
|
33 |
+
vars_with_empty_grads = []
|
34 |
+
for grad, var in grads_and_vars:
|
35 |
+
if grad is None:
|
36 |
+
vars_with_empty_grads.append(var)
|
37 |
+
else:
|
38 |
+
filtered.append((grad, var))
|
39 |
+
filtered = tuple(filtered)
|
40 |
+
if not filtered:
|
41 |
+
raise ValueError("No gradients provided for any variable: %s." %
|
42 |
+
([v.name for _, v in grads_and_vars],))
|
43 |
+
if vars_with_empty_grads:
|
44 |
+
logging.warning(
|
45 |
+
("Gradients do not exist for variables %s when minimizing the loss."),
|
46 |
+
([v.name for v in vars_with_empty_grads]))
|
47 |
+
return filtered
|
48 |
+
|
49 |
+
|
50 |
+
def _filter_and_allreduce_gradients(grads_and_vars,
                                    allreduce_precision="float32"):
  """Filter None grads and then allreduce gradients in specified precision.

  This utils function is used when users intent to explicitly allreduce
  gradients and customize gradients operations before and after allreduce.
  The allreduced gradients are then passed to optimizer.apply_gradients(
  experimental_aggregate_gradients=False).

  Arguments:
    grads_and_vars: gradients and variables pairs.
    allreduce_precision: Whether to allreduce gradients in float32 or float16.

  Returns:
    pairs of allreduced non-None gradients and variables.
  """
  (grads, variables) = zip(*_filter_grads(grads_and_vars))
  use_fp16 = allreduce_precision == "float16"
  if use_fp16:
    # Cast down before the cross-replica sum to halve allreduce bandwidth.
    grads = [tf.cast(grad, "float16") for grad in grads]
  summed_grads = tf.distribute.get_replica_context().all_reduce(
      tf.distribute.ReduceOp.SUM, grads)
  if use_fp16:
    # Restore float32 so downstream optimizer math runs in full precision.
    summed_grads = [tf.cast(grad, "float32") for grad in summed_grads]
  return summed_grads, variables
|
75 |
+
|
76 |
+
|
77 |
+
def _run_callbacks(callbacks, grads_and_vars):
|
78 |
+
for callback in callbacks:
|
79 |
+
grads_and_vars = callback(grads_and_vars)
|
80 |
+
return grads_and_vars
|
81 |
+
|
82 |
+
|
83 |
+
def minimize_using_explicit_allreduce(tape,
                                      optimizer,
                                      loss,
                                      trainable_variables,
                                      pre_allreduce_callbacks=None,
                                      post_allreduce_callbacks=None):
  """Minimizes loss for one step by updating `trainable_variables`.

  Minimizes loss for one step by updating `trainable_variables`.
  This explicitly performs gradient allreduce, instead of relying on implicit
  allreduce in optimizer.apply_gradients(). If training using FP16 mixed
  precision, explicit allreduce will aggregate gradients in FP16 format.
  For TPU and GPU training using FP32, explicit allreduce will aggregate
  gradients in FP32 format.

  Arguments:
    tape: An instance of `tf.GradientTape`.
    optimizer: An instance of `tf.keras.optimizers.Optimizer`.
    loss: the loss tensor.
    trainable_variables: A list of model Variables.
    pre_allreduce_callbacks: A list of callback functions that takes gradients
      and model variables pairs as input, manipulate them, and returns a new
      gradients and model variables pairs. The callback functions will be
      invoked in the list order and before gradients are allreduced.
      With mixed precision training, the pre_allreduce_callbacks will be
      applied on scaled_gradients. Default is no callbacks.
    post_allreduce_callbacks: A list of callback functions that takes
      gradients and model variables pairs as input, manipulate them, and
      returns a new gradients and model variables pairs. The callback
      functions will be invoked in the list order and right before gradients
      are applied to variables for updates. Default is no callbacks.
  """
  if isinstance(optimizer,
                tf.keras.mixed_precision.experimental.LossScaleOptimizer):
    # FP16 GPU code path
    with tape:
      # Re-enter the tape so the loss-scaling multiply is recorded and the
      # gradient below flows through the scaled loss.
      scaled_loss = optimizer.get_scaled_loss(loss)
    scaled_grads = tape.gradient(scaled_loss, trainable_variables)
    grads_and_vars = zip(scaled_grads, trainable_variables)
    if pre_allreduce_callbacks:
      # NOTE: in this branch the callbacks operate on *scaled* gradients.
      grads_and_vars = _run_callbacks(pre_allreduce_callbacks, grads_and_vars)
    (allreduced_scaled_grads,
     filtered_training_vars) = _filter_and_allreduce_gradients(
         grads_and_vars, allreduce_precision="float16")
    # Unscale only after the cross-replica sum, so allreduce stays in FP16.
    allreduced_unscaled_grads = optimizer.get_unscaled_gradients(
        allreduced_scaled_grads)
    grads_and_vars = zip(allreduced_unscaled_grads, filtered_training_vars)
  else:
    # TPU or FP32 GPU code path
    grads = tape.gradient(loss, trainable_variables)
    grads_and_vars = zip(grads, trainable_variables)
    if pre_allreduce_callbacks:
      grads_and_vars = _run_callbacks(pre_allreduce_callbacks, grads_and_vars)
    (allreduced_grads,
     filtered_training_vars) = _filter_and_allreduce_gradients(
         grads_and_vars, allreduce_precision="float32")
    grads_and_vars = zip(allreduced_grads, filtered_training_vars)
  if post_allreduce_callbacks:
    grads_and_vars = _run_callbacks(post_allreduce_callbacks, grads_and_vars)
  # Gradients were aggregated explicitly above, so tell apply_gradients not
  # to aggregate them a second time.
  optimizer.apply_gradients(
      grads_and_vars, experimental_aggregate_gradients=False)
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/training/runnable.py
ADDED
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""An abstraction that users can easily handle their custom training loops."""
|
16 |
+
|
17 |
+
from __future__ import absolute_import
|
18 |
+
from __future__ import division
|
19 |
+
# from __future__ import google_type_annotations
|
20 |
+
from __future__ import print_function
|
21 |
+
|
22 |
+
import abc
|
23 |
+
import six
|
24 |
+
import tensorflow.compat.v2 as tf
|
25 |
+
from typing import Dict, Optional, Text
|
26 |
+
|
27 |
+
|
28 |
+
class AbstractTrainable(tf.Module, metaclass=abc.ABCMeta):
  """An abstract class defining the APIs required for training."""

  @abc.abstractmethod
  def train(self,
            num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]:
    """Implements model training with multiple steps.

    In training, it is common to break the total training steps into several
    training loops, so users can do checkpointing, write summaries and run some
    python callbacks. This is necessary for getting good performance in TPU
    training, as the overhead for launching a multi worker tf.function may be
    large in Eager mode. It is usually encouraged to create a host training loop
    (e.g. using a `tf.range` wrapping `strategy.run` inside a
    `tf.function`) in the TPU case. For the cases that don't require host
    training loop to achieve peak performance, users can just implement a simple
    python loop to drive each step.

    Args:
      num_steps: A guideline for how many training steps to run. Note that it is
        up to the model what constitutes a "step" (this may involve more than
        one update to model parameters, e.g. if training a GAN).

    Returns:
      The function may return a dictionary of `Tensors`, which will be
      written to logs and as TensorBoard summaries.
    """
    pass
|
57 |
+
|
58 |
+
|
59 |
+
class AbstractEvaluable(tf.Module, metaclass=abc.ABCMeta):
  """An abstract class defining the APIs required for evaluation."""

  @abc.abstractmethod
  def evaluate(
      self, num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]:
    """Implements model evaluation.

    Args:
      num_steps: A guideline for how many evaluation steps to run. Note that it
        is up to the model what constitutes a "step". Generally, it may be
        desirable to support both a limited number of eval steps and iterating
        over a full dataset (however many steps are required) when `num_steps`
        is `None`.

    Returns:
      The function may return a dictionary of `Tensors`, which will be
      written to logs and as TensorBoard summaries.
    """
    pass
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/training/standard_runnable.py
ADDED
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""An abstraction that users can easily handle their custom training loops."""
|
16 |
+
|
17 |
+
from __future__ import absolute_import
|
18 |
+
from __future__ import division
|
19 |
+
# from __future__ import google_type_annotations
|
20 |
+
from __future__ import print_function
|
21 |
+
|
22 |
+
import abc
|
23 |
+
import six
|
24 |
+
import tensorflow.compat.v2 as tf
|
25 |
+
from typing import Dict, Optional, Text
|
26 |
+
|
27 |
+
from TensorFlow.common.training import runnable
|
28 |
+
from TensorFlow.common.training import utils
|
29 |
+
|
30 |
+
|
31 |
+
@six.add_metaclass(abc.ABCMeta)
class StandardTrainable(runnable.AbstractTrainable):
  """Standard implementation of the AbstractTrainable training APIs."""

  def __init__(self, use_tf_while_loop=True, use_tf_function=True):
    # A host-side tf.while_loop is only meaningful inside a tf.function, so
    # reject the contradictory combination up front.
    if use_tf_while_loop and not use_tf_function:
      raise ValueError("`use_tf_while_loop=True` and `use_tf_function=False` "
                       "is not supported")
    self.use_tf_while_loop = use_tf_while_loop
    self.use_tf_function = use_tf_function
    # All three are created lazily on the first call to train().
    self.train_dataset = None
    self.train_iter = None
    self.train_loop_fn = None

  @abc.abstractmethod
  def build_train_dataset(self):
    """Builds the training datasets.

    Returns:
      A tf.nest-compatible structure of tf.data.Dataset or DistributedDataset.
    """

  def train(self,
            num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]:
    """See base class."""
    if self.train_dataset is None:
      # Build the input pipeline once; the iterator persists across calls so
      # training resumes where the previous call left off.
      self.train_dataset = self.build_train_dataset()
      self.train_iter = tf.nest.map_structure(iter, self.train_dataset)

    if self.train_loop_fn is None:
      step_fn = self.train_step
      if self.use_tf_while_loop:
        # create_tf_while_loop_fn wraps the whole loop in a tf.function.
        self.train_loop_fn = utils.create_tf_while_loop_fn(step_fn)
      else:
        if self.use_tf_function:
          step_fn = tf.function(step_fn)
        self.train_loop_fn = utils.create_loop_fn(step_fn)

    self.train_loop_begin()
    self.train_loop_fn(self.train_iter, num_steps)
    return self.train_loop_end()

  def train_loop_begin(self):
    """Called once at the beginning of the training loop.

    A good place to reset metrics that accumulate values over multiple steps
    of training.
    """

  @abc.abstractmethod
  def train_step(self, iterator):
    """Implements one step of training.

    What a "step" consists of is up to the implementer. When using
    distribution strategies, the call to this method should take place in the
    "cross-replica context" for generality, to allow e.g. multiple iterator
    dequeues and calls to `strategy.run`.

    Args:
      iterator: A tf.nest-compatible structure of tf.data Iterator or
        DistributedIterator.
    """

  def train_loop_end(self) -> Optional[Dict[Text, tf.Tensor]]:
    """Called at the end of the training loop.

    A good place to collect metric results; the value returned here is
    passed through unchanged by the train() method.

    Returns:
      Optionally, a dictionary of `Tensors`, which will be written to logs
      and as TensorBoard summaries.
    """
|
109 |
+
|
110 |
+
|
111 |
+
@six.add_metaclass(abc.ABCMeta)
class StandardEvaluable(runnable.AbstractEvaluable):
  """Standard implementation of the AbstractEvaluable APIs."""

  def __init__(self, use_tf_function=True):
    self.eval_use_tf_function = use_tf_function
    # Created lazily on the first call to evaluate().
    self.eval_dataset = None
    self.eval_loop_fn = None

  @abc.abstractmethod
  def build_eval_dataset(self):
    """Builds the evaluation datasets.

    Returns:
      A tf.nest-compatible structure of tf.data.Dataset or DistributedDataset.
    """

  def evaluate(
      self, num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]:
    """See base class."""
    if self.eval_dataset is None:
      # Build the eval input dataset once; iterators are rebuilt per call so
      # each evaluation starts from the beginning of the data.
      self.eval_dataset = self.build_eval_dataset()

    if self.eval_loop_fn is None:
      step_fn = self.eval_step
      if self.eval_use_tf_function:
        step_fn = tf.function(step_fn)
      self.eval_loop_fn = utils.create_loop_fn(step_fn)

    # TODO(b/147718615): When async RPC is enabled in eager runtime, we keep
    # the eval iterator as a class member so it doesn't get destroyed when out
    # of the function scope.
    self.eval_iter = tf.nest.map_structure(iter, self.eval_dataset)

    self.eval_begin()
    self.eval_loop_fn(self.eval_iter, num_steps)
    return self.eval_end()

  def eval_begin(self):
    """Called once at the beginning of the evaluation.

    A good place to reset metrics that accumulate values over the entire
    evaluation.
    """

  @abc.abstractmethod
  def eval_step(self, iterator):
    """Implements one step of evaluation.

    What a "step" consists of is up to the implementer. When using
    distribution strategies, the call to this method should take place in the
    "cross-replica context" for generality, to allow e.g. multiple iterator
    dequeues and calls to `strategy.run`.

    Args:
      iterator: A tf.nest-compatible structure of tf.data Iterator or
        DistributedIterator.
    """

  def eval_end(self) -> Optional[Dict[Text, tf.Tensor]]:
    """Called at the end of the evaluation.

    A good place to collect metric results; the value returned here is
    passed through unchanged by the evaluate() method.

    Returns:
      Optionally, a dictionary of `Tensors`, which will be written to logs
      and as TensorBoard summaries.
    """
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/common/training/utils.py
ADDED
@@ -0,0 +1,342 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""Some layered modules/functions to help users writing custom training loop."""
|
16 |
+
|
17 |
+
from __future__ import absolute_import
|
18 |
+
from __future__ import division
|
19 |
+
# from __future__ import google_type_annotations
|
20 |
+
from __future__ import print_function
|
21 |
+
|
22 |
+
import abc
|
23 |
+
import inspect
|
24 |
+
import six
|
25 |
+
|
26 |
+
import tensorflow.compat.v2 as tf
|
27 |
+
|
28 |
+
|
29 |
+
def create_loop_fn(step_fn):
  """Builds a multi-step loop function driven by a plain Python while loop.

  Args:
    step_fn: A function which takes `iterator` as input.

  Returns:
    A callable with the `loop_fn` signature documented below.
  """

  def loop_fn(iterator, num_steps, state=None, reduce_fn=None):
    """Runs `step_fn` repeatedly, optionally folding outputs into `state`.

    Args:
      iterator: A nested structure of tf.data `Iterator` or
        `DistributedIterator`.
      num_steps: The number of steps to run. If `num_steps == -1`, runs until
        the iterator is exhausted.
      state: An optional initial state before running the loop.
      reduce_fn: An optional callable defined as `def reduce_fn(state, value)`,
        where `value` is the output of `step_fn`.

    Returns:
      The final state.
    """
    step = 0
    try:
      # Wrapping the loop body in `async_scope` ensures the OutOfRangeError
      # raised at end-of-data is surfaced and handled correctly when async
      # remote eager execution is enabled.
      with tf.experimental.async_scope():
        while num_steps == -1 or step < num_steps:
          step_outputs = step_fn(iterator)
          if reduce_fn is not None:
            state = reduce_fn(state, step_outputs)
          step += 1
        return state
    except (StopIteration, tf.errors.OutOfRangeError):
      # End of data is a normal way to leave the loop; clear any pending
      # async error state before returning.
      tf.experimental.async_clear_error()
      return state

  return loop_fn
|
70 |
+
|
71 |
+
|
72 |
+
def create_tf_while_loop_fn(step_fn):
  """Create a multiple steps function driven by tf.while_loop on the host.

  The returned callable is a `tf.function`; the `tf.range` loop inside it is
  compiled into a single host-side `tf.while_loop`, avoiding per-step Python
  dispatch overhead (important for TPU performance).

  Args:
    step_fn: A function which takes `iterator` as input.

  Returns:
    A callable defined as the `loop_fn` definition below.
  """

  @tf.function
  def loop_fn(iterator, num_steps):
    """A loop function with multiple steps.

    Args:
      iterator: A nested structure of tf.data `Iterator` or
        `DistributedIterator`.
      num_steps: The number of steps in the loop. Must be a `tf.Tensor`.

    Raises:
      ValueError: If `num_steps` is not a `tf.Tensor`; a Python number would
        be baked into the trace and cause a retrace for every new value.
    """
    if not isinstance(num_steps, tf.Tensor):
      raise ValueError("`num_steps` should be an `tf.Tensor`. Python object "
                       "may cause retracing.")

    # `tf.range` over a tensor bound is compiled into a tf.while_loop.
    for _ in tf.range(num_steps):
      step_fn(iterator)

  return loop_fn
|
99 |
+
|
100 |
+
|
101 |
+
def make_distributed_dataset(strategy, dataset_or_fn, *args, **kwargs):
  """Creates a distributed dataset from a dataset instance or a factory.

  Args:
    strategy: An instance of `tf.distribute.Strategy`, or `None` to use the
      current default strategy.
    dataset_or_fn: An instance of `tf.data.Dataset`, or a function that
      returns a `tf.data.Dataset`. If it is a function, it may optionally
      declare an argument named `input_context`, which will receive the
      `tf.distribute.InputContext` for the replica being fed.
    *args: Positional arguments forwarded to `dataset_or_fn`.
    **kwargs: Keyword arguments forwarded to `dataset_or_fn`.

  Returns:
    A distributed Dataset.

  Raises:
    ValueError: If `dataset_or_fn` is neither callable nor a
      `tf.data.Dataset`.
  """
  if strategy is None:
    strategy = tf.distribute.get_strategy()

  if isinstance(dataset_or_fn, tf.data.Dataset):
    return strategy.experimental_distribute_dataset(dataset_or_fn)

  if not callable(dataset_or_fn):
    raise ValueError("`dataset_or_fn` should be either callable or an instance "
                     "of `tf.data.Dataset`")

  def dataset_fn(ctx):
    """Wraps `dataset_or_fn`, forwarding the input context when accepted."""
    # Pass `ctx` through only if the factory declares an `input_context`
    # parameter; otherwise call it with the user-supplied arguments alone.
    if six.PY3:
      argspec = inspect.getfullargspec(dataset_or_fn)
    else:
      argspec = inspect.getargspec(dataset_or_fn)

    if "input_context" in argspec.args:
      kwargs["input_context"] = ctx
    return dataset_or_fn(*args, **kwargs)

  return strategy.experimental_distribute_datasets_from_function(dataset_fn)
|
145 |
+
|
146 |
+
|
147 |
+
class SummaryManager(object):
  """Manages writing summaries, with optional step-interval rate limiting."""

  def __init__(self,
               summary_writer,
               summary_fn,
               global_step=None,
               summary_interval=None):
    """Construct a summary manager object.

    Args:
      summary_writer: A `tf.summary.SummaryWriter` instance for writing
        summaries, or `None` to disable writing entirely.
      summary_fn: A callable defined as `def summary_fn(name, tensor,
        step=None)`, which describes the summary operation.
      global_step: A `tf.Variable` instance for checking the current global
        step value, in case users want to save summaries every N steps.
      summary_interval: An integer, the minimum step interval between two
        summaries.

    Raises:
      ValueError: If `summary_interval` is set but no global step is
        available.
    """
    self._enabled = summary_writer is not None
    if self._enabled:
      self._summary_writer = summary_writer
    else:
      # Keep a valid writer object so `as_default()` still works when
      # summaries are disabled.
      self._summary_writer = tf.summary.create_noop_writer()
    self._summary_fn = summary_fn

    if global_step is None:
      self._global_step = tf.summary.experimental.get_step()
    else:
      self._global_step = global_step

    if summary_interval is not None:
      if self._global_step is None:
        raise ValueError("`summary_interval` is not None, but no `global_step` "
                         "can be obtained ")
      self._last_summary_step = self._global_step.numpy()
    self._summary_interval = summary_interval

  @property
  def summary_interval(self):
    """The minimum number of steps between two summaries, or `None`."""
    return self._summary_interval

  @property
  def summary_writer(self):
    """Returns the underlying summary writer."""
    return self._summary_writer

  def flush(self):
    """Flush the underlying summary writer."""
    if self._enabled:
      tf.summary.flush(self._summary_writer)

  def write_summaries(self, items, always_write=True):
    """Write a bulk of summaries.

    Args:
      items: a dictionary of `Tensors` for writing summaries.
      always_write: An optional boolean. If `True`, the manager will always
        write summaries unless the summaries have been written for the same
        step. Otherwise the manager will only write the summaries if the
        interval between summaries is larger than `summary_interval`.

    Returns:
      A boolean indicating whether the summaries were written or not.
    """
    # TODO(rxsang): Support writing summaries with nested structure, so users
    # can split the summaries into different directories for nicer
    # visualization in Tensorboard, like train and eval metrics.
    if not self._enabled:
      return False

    if self._summary_interval is not None:
      current_step = self._global_step.numpy()
      # Never write twice for the same step.
      if current_step == self._last_summary_step:
        return False
      elapsed = current_step - self._last_summary_step
      if not always_write and elapsed < self._summary_interval:
        return False
      self._last_summary_step = current_step

    with self._summary_writer.as_default():
      for name, tensor in items.items():
        self._summary_fn(name, tensor, step=self._global_step)
    return True
|
233 |
+
|
234 |
+
|
235 |
+
@six.add_metaclass(abc.ABCMeta)
class Trigger(object):
  """An abstract class representing a "trigger" for some event.

  Concrete subclasses decide, from a monotonically increasing `value`
  (typically a step count), whether the event should fire.
  """

  @abc.abstractmethod
  def __call__(self, value: float, force_trigger=False):
    """Maybe trigger the event based on the given value.

    Args:
      value: the value for triggering.
      force_trigger: Whether the trigger is forced triggered.

    Returns:
      `True` if the trigger is triggered on the given `value`, and
      `False` otherwise.
    """

  @abc.abstractmethod
  def reset(self):
    """Reset internal states in the trigger."""
|
255 |
+
|
256 |
+
|
257 |
+
class IntervalTrigger(Trigger):
  """Triggers on every fixed interval."""

  def __init__(self, interval, start=0):
    """Constructs the IntervalTrigger.

    Args:
      interval: The triggering interval. A falsy or non-positive interval
        disables interval-based triggering.
      start: An initial value for the trigger.
    """
    self._interval = interval
    # Remember the initial value so reset() can restore it (previously
    # reset() hard-coded 0, which was wrong for a non-zero `start`).
    self._start = start
    self._last_trigger_value = start

  def __call__(self, value, force_trigger=False):
    """Maybe trigger the event based on the given value.

    Args:
      value: the value for triggering.
      force_trigger: If True, the trigger will be forced triggered unless the
        last trigger value is equal to `value`.

    Returns:
      `True` if the trigger is triggered on the given `value`, and
      `False` otherwise.
    """
    if force_trigger and value != self._last_trigger_value:
      self._last_trigger_value = value
      return True

    if self._interval and self._interval > 0:
      if value >= self._last_trigger_value + self._interval:
        self._last_trigger_value = value
        return True
    return False

  def reset(self):
    """See base class. Restores the trigger to its initial `start` value."""
    self._last_trigger_value = self._start
|
295 |
+
|
296 |
+
|
297 |
+
class EpochHelper(object):
  """Tracks epoch boundaries for a customized training loop."""

  def __init__(self, epoch_steps, global_step):
    """Constructs the EpochHelper.

    Args:
      epoch_steps: An integer, the number of steps that make up one epoch.
      global_step: A `tf.Variable` instance holding the current global step.
    """
    self._epoch_steps = epoch_steps
    self._global_step = global_step
    self._in_epoch = False
    # Populated by epoch_begin().
    self._epoch_start_step = None
    self._current_epoch = None

  def epoch_begin(self):
    """Records epoch state and returns whether a new epoch should begin."""
    if self._in_epoch:
      return False
    step_now = self._global_step.numpy()
    self._epoch_start_step = step_now
    self._current_epoch = step_now // self._epoch_steps
    self._in_epoch = True
    return True

  def epoch_end(self):
    """Returns whether the current epoch should end.

    Raises:
      ValueError: If called while not inside an epoch.
    """
    if not self._in_epoch:
      raise ValueError("`epoch_end` can only be called inside an epoch")
    epoch_now = self._global_step.numpy() // self._epoch_steps
    if epoch_now <= self._current_epoch:
      return False
    self._in_epoch = False
    return True

  @property
  def batch_index(self):
    """Index of the next batch within the current epoch."""
    return self._global_step.numpy() - self._epoch_start_step

  @property
  def current_epoch(self):
    """The epoch recorded by the most recent `epoch_begin` call."""
    return self._current_epoch
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/__init__.py
ADDED
File without changes
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/flags/README.md
ADDED
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Adding Abseil (absl) flags quickstart
|
2 |
+
## Defining a flag
|
3 |
+
absl flag definitions are similar to argparse, although they are defined on a global namespace.
|
4 |
+
|
5 |
+
For instance defining a string flag looks like:
|
6 |
+
```$xslt
|
7 |
+
from absl import flags
|
8 |
+
flags.DEFINE_string(
|
9 |
+
name="my_flag",
|
10 |
+
default="a_sensible_default",
|
11 |
+
help="Here is what this flag does."
|
12 |
+
)
|
13 |
+
```
|
14 |
+
|
15 |
+
All three arguments are required, but default may be `None`. A common optional argument is
|
16 |
+
short_name for defining abbreviations. Certain `DEFINE_*` methods will have other required arguments.
|
17 |
+
For instance `DEFINE_enum` requires the `enum_values` argument to be specified.
|
18 |
+
|
19 |
+
## Key Flags
|
20 |
+
absl has the concept of a key flag. Any flag defined in `__main__` is considered a key flag by
|
21 |
+
default. Key flags are displayed in `--help`, others only appear in `--helpfull`. In order to
|
22 |
+
handle key flags that are defined outside the module in question, absl provides the
|
23 |
+
`flags.adopt_module_key_flags()` method. This adds the key flags of a different module to one's own
|
24 |
+
key flags. For example:
|
25 |
+
```$xslt
|
26 |
+
File: flag_source.py
|
27 |
+
---------------------------------------
|
28 |
+
|
29 |
+
from absl import flags
|
30 |
+
flags.DEFINE_string(name="my_flag", default="abc", help="a flag.")
|
31 |
+
```
|
32 |
+
|
33 |
+
```$xslt
|
34 |
+
File: my_module.py
|
35 |
+
---------------------------------------
|
36 |
+
|
37 |
+
from absl import app as absl_app
|
38 |
+
from absl import flags
|
39 |
+
|
40 |
+
import flag_source
|
41 |
+
|
42 |
+
flags.adopt_module_key_flags(flag_source)
|
43 |
+
|
44 |
+
def main(_):
|
45 |
+
pass
|
46 |
+
|
47 |
+
absl_app.run(main, [__file__, "-h"])
|
48 |
+
```
|
49 |
+
|
50 |
+
when `my_module.py` is run it will show the help text for `my_flag`. Because not all flags defined
|
51 |
+
in a file are equally important, `official/utils/flags/core.py` (generally imported as flags_core)
|
52 |
+
provides an abstraction for handling key flag declaration in an easy way through the
|
53 |
+
`register_key_flags_in_core()` function, which allows a module to make a single
|
54 |
+
`adopt_key_flags(flags_core)` call when using the util flag declaration functions.
|
55 |
+
|
56 |
+
## Validators
|
57 |
+
Often the constraints on a flag are complicated. absl provides the validator decorator to allow
|
58 |
+
one to mark a function as a flag validation function. Suppose we want users to provide a flag
|
59 |
+
which is a palindrome.
|
60 |
+
|
61 |
+
```$xslt
|
62 |
+
from absl import flags
|
63 |
+
|
64 |
+
flags.DEFINE_string(name="pal_flag", short_name="pf", default="", help="Give me a palindrome")
|
65 |
+
|
66 |
+
@flags.validator("pal_flag")
|
67 |
+
def _check_pal(provided_pal_flag):
|
68 |
+
return provided_pal_flag == provided_pal_flag[::-1]
|
69 |
+
|
70 |
+
```
|
71 |
+
|
72 |
+
Validators take the form that returning True (truthy) passes, and all others
|
73 |
+
(False, None, exception) fail.
|
74 |
+
|
75 |
+
## Testing
|
76 |
+
To test using absl, simply declare flags in the setUpClass method of TensorFlow's TestCase.
|
77 |
+
|
78 |
+
```$xslt
|
79 |
+
from absl import flags
|
80 |
+
import tensorflow as tf
|
81 |
+
|
82 |
+
def define_flags():
|
83 |
+
flags.DEFINE_string(name="test_flag", default="abc", help="an example flag")
|
84 |
+
|
85 |
+
|
86 |
+
class BaseTester(unittest.TestCase):
|
87 |
+
|
88 |
+
@classmethod
|
89 |
+
def setUpClass(cls):
|
90 |
+
super(BaseTester, cls).setUpClass()
|
91 |
+
define_flags()
|
92 |
+
|
93 |
+
def test_trivial(self):
|
94 |
+
flags_core.parse_flags([__file__, "--test_flag", "def"])
|
95 |
+
self.assertEqual(flags.FLAGS.test_flag, "def")
|
96 |
+
|
97 |
+
```
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/flags/__init__.py
ADDED
File without changes
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/flags/_base.py
ADDED
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""Flags which will be nearly universal across models."""
|
16 |
+
|
17 |
+
from __future__ import absolute_import
|
18 |
+
from __future__ import division
|
19 |
+
from __future__ import print_function
|
20 |
+
|
21 |
+
from absl import flags
|
22 |
+
import tensorflow as tf
|
23 |
+
|
24 |
+
from TensorFlow.utils.flags._conventions import help_wrap
|
25 |
+
from TensorFlow.utils.logs import hooks_helper
|
26 |
+
|
27 |
+
|
28 |
+
def define_base(data_dir=True, model_dir=True, clean=False, train_epochs=False,
                epochs_between_evals=False, stop_threshold=False,
                batch_size=True, num_gpu=False, hooks=False, export_dir=False,
                distribution_strategy=False, run_eagerly=False):
  """Register base flags.

  Each boolean argument toggles registration of the correspondingly named
  flag. Most flags that are registered are also added to the returned
  key-flag list; see the NOTE comments below for the exceptions.

  Args:
    data_dir: Create a flag for specifying the input data directory.
    model_dir: Create a flag for specifying the model file directory.
    clean: Create a flag for removing the model_dir.
    train_epochs: Create a flag to specify the number of training epochs.
    epochs_between_evals: Create a flag to specify the frequency of testing.
    stop_threshold: Create a flag to specify a threshold accuracy or other
      eval metric which should trigger the end of training.
    batch_size: Create a flag to specify the batch size.
    num_gpu: Create a flag to specify the number of GPUs used.
    hooks: Create a flag to specify hooks for logging.
    export_dir: Create a flag to specify where a SavedModel should be exported.
    distribution_strategy: Create a flag to specify which Distribution Strategy
      to use.
    run_eagerly: Create a flag to specify to run eagerly op by op.

  Returns:
    A list of flag names for core.py to mark as key flags.
  """
  key_flags = []

  if data_dir:
    flags.DEFINE_string(
        name="data_dir", short_name="dd", default="/tmp",
        help=help_wrap("The location of the input data."))
    key_flags.append("data_dir")

  if model_dir:
    flags.DEFINE_string(
        name="model_dir", short_name="md", default="/tmp",
        help=help_wrap("The location of the model checkpoint files."))
    key_flags.append("model_dir")

  if clean:
    flags.DEFINE_boolean(
        name="clean", default=False,
        help=help_wrap("If set, model_dir will be removed if it exists."))
    key_flags.append("clean")

  if train_epochs:
    flags.DEFINE_integer(
        name="train_epochs", short_name="te", default=1,
        help=help_wrap("The number of epochs used to train."))
    key_flags.append("train_epochs")

  if epochs_between_evals:
    flags.DEFINE_integer(
        name="epochs_between_evals", short_name="ebe", default=1,
        help=help_wrap("The number of training epochs to run between "
                       "evaluations."))
    key_flags.append("epochs_between_evals")

  if stop_threshold:
    # NOTE(review): unlike the flags above, "stop_threshold" is not appended
    # to key_flags — confirm this omission is intentional.
    flags.DEFINE_float(
        name="stop_threshold", short_name="st",
        default=None,
        help=help_wrap("If passed, training will stop at the earlier of "
                       "train_epochs and when the evaluation metric is "
                       "greater than or equal to stop_threshold."))

  if batch_size:
    flags.DEFINE_integer(
        name="batch_size", short_name="bs", default=32,
        help=help_wrap("Batch size for training and evaluation. When using "
                       "multiple gpus, this is the global batch size for "
                       "all devices. For example, if the batch size is 32 "
                       "and there are 4 GPUs, each GPU will get 8 examples on "
                       "each step."))
    key_flags.append("batch_size")

  if num_gpu:
    # NOTE(review): "num_gpus" is not appended to key_flags — confirm.
    flags.DEFINE_integer(
        name="num_gpus", short_name="ng",
        default=1,
        help=help_wrap(
            "How many GPUs to use at each worker with the "
            "DistributionStrategies API. The default is 1."))

  if run_eagerly:
    # Plain help string (not help_wrap) as in the other define_* modules'
    # short one-liners.
    flags.DEFINE_boolean(
        name="run_eagerly", default=False,
        help="Run the model op by op without building a model function.")

  if hooks:
    # Construct a pretty summary of hooks. The U+FEFF prefixes are kept
    # verbatim — presumably to preserve the list layout when absl re-wraps
    # the help text; confirm before changing.
    hook_list_str = (
        u"\ufeff Hook:\n" + u"\n".join([u"\ufeff {}".format(key) for key
                                        in hooks_helper.HOOKS]))
    flags.DEFINE_list(
        name="hooks", short_name="hk", default="LoggingTensorHook",
        help=help_wrap(
            u"A list of (case insensitive) strings to specify the names of "
            u"training hooks.\n{}\n\ufeff Example: `--hooks ProfilerHook,"
            u"ExamplesPerSecondHook`\n See official.utils.logs.hooks_helper "
            u"for details.".format(hook_list_str))
    )
    key_flags.append("hooks")

  if export_dir:
    flags.DEFINE_string(
        name="export_dir", short_name="ed", default=None,
        help=help_wrap("If set, a SavedModel serialization of the model will "
                       "be exported to this directory at the end of training. "
                       "See the README for more details and relevant links.")
    )
    key_flags.append("export_dir")

  if distribution_strategy:
    # NOTE(review): "distribution_strategy" is not appended to key_flags —
    # confirm. Help text also mentions 'default', which is absent from the
    # accepted-values list.
    flags.DEFINE_string(
        name="distribution_strategy", short_name="ds", default="mirrored",
        help=help_wrap("The Distribution Strategy to use for training. "
                       "Accepted values are 'off', 'one_device', "
                       "'mirrored', 'parameter_server', 'collective', "
                       "case insensitive. 'off' means not to use "
                       "Distribution Strategy; 'default' means to choose "
                       "from `MirroredStrategy` or `OneDeviceStrategy` "
                       "according to the number of GPUs.")
    )

  return key_flags
|
154 |
+
|
155 |
+
|
156 |
+
def get_num_gpus(flags_obj):
  """Treat num_gpus=-1 as 'use all'.

  Args:
    flags_obj: A flags object (e.g. absl FLAGS) exposing a `num_gpus`
      attribute.

  Returns:
    `flags_obj.num_gpus` when it is not -1; otherwise the number of GPU
    devices visible to the local TensorFlow runtime.
  """
  if flags_obj.num_gpus != -1:
    return flags_obj.num_gpus

  from tensorflow.python.client import device_lib  # pylint: disable=g-import-not-at-top
  local_device_protos = device_lib.list_local_devices()
  # Generator expression: no need to materialize an intermediate list just
  # to count the GPU devices.
  return sum(1 for d in local_device_protos if d.device_type == "GPU")
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/flags/_device.py
ADDED
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""Flags for managing compute devices. Currently only contains TPU flags."""
|
16 |
+
|
17 |
+
from __future__ import absolute_import
|
18 |
+
from __future__ import division
|
19 |
+
from __future__ import print_function
|
20 |
+
|
21 |
+
from absl import flags
|
22 |
+
import tensorflow as tf
|
23 |
+
|
24 |
+
from TensorFlow.utils.flags._conventions import help_wrap
|
25 |
+
|
26 |
+
|
27 |
+
def require_cloud_storage(flag_names):
  """Register a validator that requires the given flags to be GCS paths.

  The validator only fires when the `tpu` flag is set; with `tpu` unset the
  directory flags may point anywhere.

  Args:
    flag_names: An iterable of strings containing the names of flags to be
      checked.
  """
  msg = "TPU requires GCS path for {}".format(", ".join(flag_names))

  @flags.multi_flags_validator(["tpu"] + flag_names, message=msg)
  def _path_check(flag_values):  # pylint: disable=missing-docstring
    if flag_values["tpu"] is None:
      return True

    # Log every offending flag before failing validation.
    non_gcs = [name for name in flag_names
               if not flag_values[name].startswith("gs://")]
    for name in non_gcs:
      tf.compat.v1.logging.error("{} must be a GCS path.".format(name))
    return not non_gcs
|
46 |
+
|
47 |
+
|
48 |
+
def define_device(tpu=True):
  """Register device specific flags.

  Args:
    tpu: Create flags to specify TPU operation.

  Returns:
    A list of flag names for core.py to mark as key flags.
  """

  key_flags = []

  if tpu:
    # Fixed help text: the adjacent literals previously concatenated to
    # "...will use theCPU of the local instance..." (missing space).
    flags.DEFINE_string(
        name="tpu", default=None,
        help=help_wrap(
            "The Cloud TPU to use for training. This should be either the name "
            "used when creating the Cloud TPU, or a "
            "grpc://ip.address.of.tpu:8470 url. Passing `local` will use the "
            "CPU of the local instance instead. (Good for debugging.)"))
    key_flags.append("tpu")

    flags.DEFINE_string(
        name="tpu_zone", default=None,
        help=help_wrap(
            "[Optional] GCE zone where the Cloud TPU is located in. If not "
            "specified, we will attempt to automatically detect the GCE "
            "project from metadata."))

    flags.DEFINE_string(
        name="tpu_gcp_project", default=None,
        help=help_wrap(
            "[Optional] Project name for the Cloud TPU-enabled project. If not "
            "specified, we will attempt to automatically detect the GCE "
            "project from metadata."))

    flags.DEFINE_integer(name="num_tpu_shards", default=8,
                         help=help_wrap("Number of shards (TPU chips)."))

  return key_flags
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/flags/_distribution.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""Flags related to distributed execution."""
|
16 |
+
|
17 |
+
from __future__ import absolute_import
|
18 |
+
from __future__ import division
|
19 |
+
from __future__ import print_function
|
20 |
+
|
21 |
+
from absl import flags
|
22 |
+
import tensorflow as tf
|
23 |
+
|
24 |
+
from TensorFlow.utils.flags._conventions import help_wrap
|
25 |
+
|
26 |
+
|
27 |
+
def define_distribution(worker_hosts=True, task_index=True):
  """Register flags used for multi-worker distributed execution.

  Args:
    worker_hosts: Create a flag for specifying comma-separated list of workers.
    task_index: Create a flag for specifying index of task.

  Returns:
    A list of flag names for core.py to mark as key flags.
  """
  registered_key_flags = []

  if worker_hosts:
    flags.DEFINE_string(
        name='worker_hosts',
        default=None,
        help=help_wrap('Comma-separated list of worker ip:port pairs for '
                       'running multi-worker models with '
                       'DistributionStrategy. The user would start the '
                       'program on each host with identical value for this '
                       'flag.'))

  if task_index:
    flags.DEFINE_integer(
        name='task_index',
        default=-1,
        help=help_wrap('If multi-worker training, the task_index of this '
                       'worker.'))

  return registered_key_flags
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/flags/core.py
ADDED
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""Public interface for flag definition.
|
16 |
+
|
17 |
+
See _example.py for detailed instructions on defining flags.
|
18 |
+
"""
|
19 |
+
|
20 |
+
from __future__ import absolute_import
|
21 |
+
from __future__ import division
|
22 |
+
from __future__ import print_function
|
23 |
+
|
24 |
+
import sys
|
25 |
+
from six.moves import shlex_quote
|
26 |
+
|
27 |
+
from absl import app as absl_app
|
28 |
+
from absl import flags
|
29 |
+
|
30 |
+
from TensorFlow.utils.flags import _base
|
31 |
+
from TensorFlow.utils.flags import _benchmark
|
32 |
+
from TensorFlow.utils.flags import _conventions
|
33 |
+
from TensorFlow.utils.flags import _device
|
34 |
+
from TensorFlow.utils.flags import _distribution
|
35 |
+
from TensorFlow.utils.flags import _misc
|
36 |
+
from TensorFlow.utils.flags import _performance
|
37 |
+
|
38 |
+
|
39 |
+
def set_defaults(**kwargs):
  """Overwrite the default value of each flag named in `kwargs`."""
  for flag_name, new_default in kwargs.items():
    flags.FLAGS.set_default(name=flag_name, value=new_default)
|
42 |
+
|
43 |
+
|
44 |
+
def parse_flags(argv=None):
  """Reset flags and reparse. Currently only used in testing."""
  arguments = argv or sys.argv
  flags.FLAGS.unparse_flags()
  absl_app.parse_flags_with_usage(arguments)
|
48 |
+
|
49 |
+
|
50 |
+
def register_key_flags_in_core(f):
  """Defines a function in core.py, and registers its key flags.

  absl uses the location of a flags.declare_key_flag() to determine the context
  in which a flag is key. By making all declares in core, this allows model
  main functions to call flags.adopt_module_key_flags() on core and correctly
  chain key flags.

  Args:
    f: The flag-defining function to be wrapped; it must return a list of
      flag names.

  Returns:
    The "core-defined" version of the input function.
  """

  def core_fn(*args, **kwargs):
    key_flags = f(*args, **kwargs)
    # Plain loop instead of a discarded list comprehension:
    # declare_key_flag is called purely for its side effect.
    for flag_name in key_flags:
      flags.declare_key_flag(flag_name)
  return core_fn
|
69 |
+
|
70 |
+
|
71 |
+
# Wrap each define_* helper so that its key flags are declared from this
# module (see register_key_flags_in_core above).
define_base = register_key_flags_in_core(_base.define_base)
# We have define_base_eager for compatibility, since it used to be a separate
# function from define_base.
define_base_eager = define_base
define_log_steps = register_key_flags_in_core(_benchmark.define_log_steps)
define_benchmark = register_key_flags_in_core(_benchmark.define_benchmark)
define_device = register_key_flags_in_core(_device.define_device)
define_image = register_key_flags_in_core(_misc.define_image)
define_performance = register_key_flags_in_core(_performance.define_performance)
define_distribution = register_key_flags_in_core(
    _distribution.define_distribution)


# Re-exported helpers so callers only need to import this module.
help_wrap = _conventions.help_wrap


get_num_gpus = _base.get_num_gpus
get_tf_dtype = _performance.get_tf_dtype
get_loss_scale = _performance.get_loss_scale
DTYPE_MAP = _performance.DTYPE_MAP
require_cloud_storage = _device.require_cloud_storage
|
92 |
+
|
93 |
+
def _get_nondefault_flags_as_dict():
  """Returns the nondefault flags as a dict from flag name to value."""
  result = {}
  for name in flags.FLAGS:
    flag = flags.FLAGS[name]
    value = getattr(flags.FLAGS, name)
    # Skip short-name aliases and flags still holding their default.
    if name != flag.short_name and value != flag.default:
      result[name] = value
  return result
|
102 |
+
|
103 |
+
|
104 |
+
def get_nondefault_flags_as_str():
  """Returns flags as a string that can be passed as command line arguments.

  E.g., returns: "--batch_size=256 --use_synthetic_data" for the following code
  block:

  ```
  flags.FLAGS.batch_size = 256
  flags.FLAGS.use_synthetic_data = True
  print(get_nondefault_flags_as_str())
  ```

  Only flags with nondefault values are returned, as passing default flags as
  command line arguments has no effect.

  Returns:
    A string with the flags, that can be passed as command line arguments to a
    program to use the flags.
  """
  def _render(name, value):
    # Booleans use the --flag / --noflag form; lists are comma-joined.
    if isinstance(value, bool):
      return '--{}'.format(name) if value else '--no{}'.format(name)
    if isinstance(value, list):
      return '--{}={}'.format(name, ','.join(value))
    return '--{}={}'.format(name, value)

  rendered = [_render(name, value)
              for name, value in sorted(_get_nondefault_flags_as_dict().items())]
  return ' '.join(shlex_quote(entry) for entry in rendered)
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/hyperparams_flags.py
ADDED
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""Common flags for importing hyperparameters."""
|
16 |
+
|
17 |
+
from __future__ import absolute_import
|
18 |
+
from __future__ import division
|
19 |
+
# from __future__ import google_type_annotations
|
20 |
+
from __future__ import print_function
|
21 |
+
|
22 |
+
from absl import flags
|
23 |
+
from TensorFlow.utils.flags import core as flags_core
|
24 |
+
|
25 |
+
FLAGS = flags.FLAGS
|
26 |
+
|
27 |
+
|
28 |
+
def define_common_hparams_flags():
  """Define the common hyperparameter flags shared across models.

  Registers model_dir, train/eval batch sizes, precision, config_file,
  params_override and save_checkpoint_freq. All defaults are None so that
  callers can tell whether a value was supplied.
  """

  # Fixed help text: adjacent literals previously rendered as
  # "...summariesare stored."
  flags.DEFINE_string(
      'model_dir',
      default=None,
      help=('The directory where the model and training/evaluation summaries '
            'are stored.'))

  flags.DEFINE_integer(
      'train_batch_size', default=None, help='Batch size for training.')

  flags.DEFINE_integer(
      'eval_batch_size', default=None, help='Batch size for evaluation.')

  flags.DEFINE_string(
      'precision',
      default=None,
      help=('Precision to use; one of: {bfloat16, float32}'))

  flags.DEFINE_string(
      'config_file',
      default=None,
      help=('A YAML file which specifies overrides. Note that this file can be '
            'used as an override template to override the default parameters '
            'specified in Python. If the same parameter is specified in both '
            '`--config_file` and `--params_override`, the one in '
            '`--params_override` will be used finally.'))

  # Fixed help text: adjacent literals previously rendered as
  # "...params in params_override.See also..."
  flags.DEFINE_string(
      'params_override',
      default=None,
      help=('a YAML/JSON string or a YAML file which specifies additional '
            'overrides over the default parameters and those specified in '
            '`--config_file`. Note that this is supposed to be used only to '
            'override the model parameters, but not the parameters like TPU '
            'specific flags. One canonical use case of `--config_file` and '
            '`--params_override` is users first define a template config file '
            'using `--config_file`, then use `--params_override` to adjust the '
            'minimal set of tuning parameters, for example setting up different'
            ' `train_batch_size`. '
            'The final override order of parameters: default_model_params --> '
            'params from config_file --> params in params_override. '
            'See also the help message of `--config_file`.'))
  flags.DEFINE_integer('save_checkpoint_freq', None,
                       'Number of steps to save checkpoint.')
|
74 |
+
|
75 |
+
|
76 |
+
def initialize_common_flags():
  """Define the common flags across models.

  Registers the shared hyperparameter flags plus the device, base,
  distribution and performance flag groups, then resets num_gpus to 0 and
  adds the strategy_type selector.
  """
  define_common_hparams_flags()

  flags_core.define_device(tpu=True)
  flags_core.define_base(
      num_gpu=True, model_dir=False, data_dir=False, batch_size=False)
  flags_core.define_distribution(worker_hosts=True, task_index=True)
  flags_core.define_performance(all_reduce_alg=True, num_packs=True)

  # Reset the default value of num_gpus to zero.
  FLAGS.num_gpus = 0

  # Fixed help text: adjacent literals previously rendered as
  # "...distribute strategy.One of mirrored..."
  flags.DEFINE_string(
      'strategy_type', 'mirrored', 'Type of distribute strategy. '
      'One of mirrored, tpu and multiworker.')
|
92 |
+
|
93 |
+
|
94 |
+
def strategy_flags_dict():
  """Returns TPU and/or GPU related flags in a dictionary."""
  flag_values = {}
  # TPUStrategy related flags.
  flag_values['tpu'] = FLAGS.tpu
  # MultiWorkerMirroredStrategy related flags.
  flag_values['all_reduce_alg'] = FLAGS.all_reduce_alg
  flag_values['worker_hosts'] = FLAGS.worker_hosts
  flag_values['task_index'] = FLAGS.task_index
  # MirroredStrategy and OneDeviceStrategy related flags.
  flag_values['num_gpus'] = FLAGS.num_gpus
  flag_values['num_packs'] = FLAGS.num_packs
  return flag_values
|
107 |
+
|
108 |
+
|
109 |
+
def hparam_flags_dict():
  """Returns model params related flags in a dictionary."""
  hparam_names = ('data_dir', 'model_dir', 'train_batch_size',
                  'eval_batch_size', 'precision', 'config_file',
                  'params_override')
  return {name: getattr(FLAGS, name) for name in hparam_names}
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/__init__.py
ADDED
File without changes
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/cloud_lib.py
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
|
16 |
+
"""Utilities that interact with cloud service.
|
17 |
+
"""
|
18 |
+
|
19 |
+
import requests
|
20 |
+
|
21 |
+
GCP_METADATA_URL = "http://metadata/computeMetadata/v1/instance/hostname"
|
22 |
+
GCP_METADATA_HEADER = {"Metadata-Flavor": "Google"}
|
23 |
+
|
24 |
+
|
25 |
+
def on_gcp():
  """Detect whether the current running environment is on GCP."""
  try:
    # Timeout in 5 seconds, in case the test environment has connectivity
    # issues; requests has no default timeout, so a plain get() might block
    # forever.
    probe = requests.get(
        GCP_METADATA_URL, headers=GCP_METADATA_HEADER, timeout=5)
  except requests.exceptions.RequestException:
    return False
  return probe.status_code == 200
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/guidelines.md
ADDED
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Logging in official models
|
2 |
+
|
3 |
+
This library adds logging functions that print or save tensor values. Official models should define all common hooks
|
4 |
+
(using hooks helper) and a benchmark logger.
|
5 |
+
|
6 |
+
1. **Training Hooks**
|
7 |
+
|
8 |
+
Hooks are a TensorFlow concept that define specific actions at certain points of the execution. We use them to obtain and log
|
9 |
+
tensor values during training.
|
10 |
+
|
11 |
+
hooks_helper.py provides an easy way to create common hooks. The following hooks are currently defined:
|
12 |
+
* LoggingTensorHook: Logs tensor values
|
13 |
+
* ProfilerHook: Writes a timeline json that can be loaded into chrome://tracing.
|
14 |
+
* ExamplesPerSecondHook: Logs the number of examples processed per second.
|
15 |
+
* LoggingMetricHook: Similar to LoggingTensorHook, except that the tensors are logged in a format defined by our data
|
16 |
+
analysis pipeline.
|
17 |
+
|
18 |
+
|
19 |
+
2. **Benchmarks**
|
20 |
+
|
21 |
+
The benchmark logger provides useful functions for logging environment information, and evaluation results.
|
22 |
+
The module also contains a context which is used to update the status of the run.
|
23 |
+
|
24 |
+
Example usage:
|
25 |
+
|
26 |
+
```
|
27 |
+
from absl import app as absl_app
|
28 |
+
|
29 |
+
from TensorFlow.utils.logs import hooks_helper
|
30 |
+
from TensorFlow.utils.logs import logger
|
31 |
+
|
32 |
+
def model_main(flags_obj):
|
33 |
+
estimator = ...
|
34 |
+
|
35 |
+
benchmark_logger = logger.get_benchmark_logger()
|
36 |
+
benchmark_logger.log_run_info(...)
|
37 |
+
|
38 |
+
train_hooks = hooks_helper.get_train_hooks(...)
|
39 |
+
|
40 |
+
for epoch in range(10):
|
41 |
+
estimator.train(..., hooks=train_hooks)
|
42 |
+
eval_results = estimator.evaluate(...)
|
43 |
+
|
44 |
+
# Log a dictionary of metrics
|
45 |
+
benchmark_logger.log_evaluation_result(eval_results)
|
46 |
+
|
47 |
+
# Log an individual metric
|
48 |
+
benchmark_logger.log_metric(...)
|
49 |
+
|
50 |
+
|
51 |
+
def main(_):
|
52 |
+
with logger.benchmark_context(flags.FLAGS):
|
53 |
+
model_main(flags.FLAGS)
|
54 |
+
|
55 |
+
if __name__ == "__main__":
|
56 |
+
# define flags
|
57 |
+
absl_app.run(main)
|
58 |
+
```
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/hooks.py
ADDED
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
|
16 |
+
"""Hook that counts examples per second every N steps or seconds."""
|
17 |
+
|
18 |
+
|
19 |
+
from __future__ import absolute_import
|
20 |
+
from __future__ import division
|
21 |
+
from __future__ import print_function
|
22 |
+
|
23 |
+
import tensorflow as tf # pylint: disable=g-bad-import-order
|
24 |
+
|
25 |
+
from TensorFlow.utils.logs import logger
|
26 |
+
|
27 |
+
|
class ExamplesPerSecondHook(tf.estimator.SessionRunHook):
  """Hook that reports training throughput in examples per second.

  Cumulative training time and step count are tracked so that a running
  average of examples/sec can be derived from the total batch size; the
  examples/sec over the most recent logging interval is reported as well.
  """

  def __init__(self,
               batch_size,
               every_n_steps=None,
               every_n_secs=None,
               warm_steps=0,
               metric_logger=None):
    """Creates an ExamplesPerSecondHook.

    Args:
      batch_size: Total batch size across all workers, used to convert the
        step rate into examples/second.
      every_n_steps: Log stats every N steps.
      every_n_secs: Log stats every N seconds. Exactly one of
        `every_n_steps` and `every_n_secs` must be set.
      warm_steps: Number of global steps (across all workers, not per
        worker) skipped before logging and running-average calculation.
      metric_logger: `BenchmarkLogger` instance the hook writes through;
        defaults to a `BaseBenchmarkLogger` when None.

    Raises:
      ValueError: if neither or both of `every_n_steps` and
        `every_n_secs` are provided.
    """
    if (every_n_steps is None) == (every_n_secs is None):
      raise ValueError("exactly one of every_n_steps"
                       " and every_n_secs should be provided.")

    self._logger = metric_logger or logger.BaseBenchmarkLogger()
    self._timer = tf.estimator.SecondOrStepTimer(
        every_steps=every_n_steps, every_secs=every_n_secs)

    self._step_train_time = 0
    self._total_steps = 0
    self._batch_size = batch_size
    self._warm_steps = warm_steps
    # Examples/sec measured for each completed logging interval; readable
    # by callers during or after the run.
    self.current_examples_per_sec_list = []

  def begin(self):
    """Checks the global step tensor exists before the session starts."""
    self._global_step_tensor = tf.compat.v1.train.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use StepCounterHook.")

  def before_run(self, run_context):  # pylint: disable=unused-argument
    """Requests the global step value on every run() call.

    Args:
      run_context: A SessionRunContext object.

    Returns:
      A SessionRunArgs fetching the global step tensor.
    """
    return tf.estimator.SessionRunArgs(self._global_step_tensor)

  def after_run(self, run_context, run_values):  # pylint: disable=unused-argument
    """Updates timing state and logs throughput when the timer fires.

    Args:
      run_context: A SessionRunContext object.
      run_values: A SessionRunValues object carrying the global step.
    """
    global_step = run_values.results

    # Skip warm-up steps and steps before the next timer trigger.
    if global_step <= self._warm_steps:
      return
    if not self._timer.should_trigger_for_step(global_step):
      return

    elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
        global_step)
    if elapsed_time is None:
      # First trigger: the timer has no previous reference point yet.
      return

    self._step_train_time += elapsed_time
    self._total_steps += elapsed_steps

    # Running average over all accumulated (post-warmup) steps and time.
    average_examples_per_sec = self._batch_size * (
        self._total_steps / self._step_train_time)
    # Rate over just the most recent interval.
    current_examples_per_sec = self._batch_size * (
        elapsed_steps / elapsed_time)
    self.current_examples_per_sec_list.append(current_examples_per_sec)

    self._logger.log_metric(
        "average_examples_per_sec", average_examples_per_sec,
        global_step=global_step)
    self._logger.log_metric(
        "current_examples_per_sec", current_examples_per_sec,
        global_step=global_step)
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/hooks_helper.py
ADDED
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
|
16 |
+
"""Hooks helper to return a list of TensorFlow hooks for training by name.
|
17 |
+
|
18 |
+
More hooks can be added to this set. To add a new hook, 1) add the new hook to
|
19 |
+
the registry in HOOKS, 2) add a corresponding function that parses out necessary
|
20 |
+
parameters.
|
21 |
+
"""
|
22 |
+
|
23 |
+
from __future__ import absolute_import
|
24 |
+
from __future__ import division
|
25 |
+
from __future__ import print_function
|
26 |
+
|
27 |
+
import tensorflow as tf # pylint: disable=g-bad-import-order
|
28 |
+
|
29 |
+
from TensorFlow.utils.logs import hooks
|
30 |
+
from TensorFlow.utils.logs import logger
|
31 |
+
from TensorFlow.utils.logs import metric_hook
|
32 |
+
|
33 |
+
_TENSORS_TO_LOG = dict((x, x) for x in ['learning_rate',
|
34 |
+
'cross_entropy',
|
35 |
+
'train_accuracy'])
|
36 |
+
|
37 |
+
|
def get_train_hooks(name_list, use_tpu=False, **kwargs):
  """Builds a list of training hooks from their registry names.

  Args:
    name_list: strings naming desired hook classes; each must be a key of
      HOOKS (matched case-insensitively, surrounding whitespace ignored).
    use_tpu: Whether computation occurs on a TPU. When True, hooks are
      disabled altogether and a warning is logged.
    **kwargs: arguments forwarded to every hook factory.

  Returns:
    List of instantiated hooks, ready to be used in a classifier.train call.

  Raises:
    ValueError: if an unrecognized name is passed.
  """
  if not name_list:
    return []

  if use_tpu:
    tf.compat.v1.logging.warning('hooks_helper received name_list `{}`, but a '
                                 'TPU is specified. No hooks will be used.'
                                 .format(name_list))
    return []

  train_hooks = []
  for name in name_list:
    hook_factory = HOOKS.get(name.strip().lower())
    if hook_factory is None:
      raise ValueError('Unrecognized training hook requested: {}'.format(name))
    train_hooks.append(hook_factory(**kwargs))

  return train_hooks
74 |
+
|
75 |
+
|
def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs):  # pylint: disable=unused-argument
  """Builds a LoggingTensorHook that prints tensor values to stdout.

  Args:
    every_n_iter: `int`, print the values of the tracked tensors once every
      N local steps taken on the current worker.
    tensors_to_log: list of tensor names or dictionary mapping labels to
      tensor names. Defaults to _TENSORS_TO_LOG when not set.
    **kwargs: ignored; accepted for factory-signature compatibility.

  Returns:
    A LoggingTensorHook with a standard set of tensors printed to stdout.
  """
  tensors = _TENSORS_TO_LOG if tensors_to_log is None else tensors_to_log
  return tf.estimator.LoggingTensorHook(
      tensors=tensors,
      every_n_iter=every_n_iter)
96 |
+
|
97 |
+
|
def get_profiler_hook(model_dir, save_steps=1000, **kwargs):  # pylint: disable=unused-argument
  """Builds a ProfilerHook that periodically writes timeline traces.

  Args:
    model_dir: Directory the profile traces are saved to.
    save_steps: `int`, write a profile trace every N steps.
    **kwargs: ignored; accepted for factory-signature compatibility.

  Returns:
    A ProfilerHook whose timelines can be loaded into profiling tools such
    as chrome://tracing.
  """
  return tf.estimator.ProfilerHook(save_steps=save_steps, output_dir=model_dir)
111 |
+
|
112 |
+
|
def get_examples_per_second_hook(every_n_steps=100,
                                 batch_size=128,
                                 warm_steps=5,
                                 **kwargs):  # pylint: disable=unused-argument
  """Builds an ExamplesPerSecondHook reporting training throughput.

  Args:
    every_n_steps: `int`, log current and average examples per second every
      N steps.
    batch_size: `int`, total batch size used to convert the step rate into
      examples/second.
    warm_steps: number of initial steps skipped before logging and before
      inclusion in the running average.
    **kwargs: ignored; accepted for factory-signature compatibility.

  Returns:
    An ExamplesPerSecondHook wired to the global benchmark logger.
  """
  return hooks.ExamplesPerSecondHook(
      batch_size=batch_size, every_n_steps=every_n_steps,
      warm_steps=warm_steps, metric_logger=logger.get_benchmark_logger())
134 |
+
|
135 |
+
|
def get_logging_metric_hook(tensors_to_log=None,
                            every_n_secs=600,
                            **kwargs):  # pylint: disable=unused-argument
  """Builds a LoggingMetricHook that records tensor values as JSON.

  Args:
    tensors_to_log: list of tensor names or dictionary mapping labels to
      tensor names. Defaults to _TENSORS_TO_LOG when not set.
    every_n_secs: `int`, logging period in seconds; defaults to 10 minutes.
    **kwargs: ignored; accepted for factory-signature compatibility.

  Returns:
    A LoggingMetricHook that saves tensor values via the benchmark logger.
  """
  tensors = _TENSORS_TO_LOG if tensors_to_log is None else tensors_to_log
  return metric_hook.LoggingMetricHook(
      tensors=tensors,
      metric_logger=logger.get_benchmark_logger(),
      every_n_secs=every_n_secs)
157 |
+
|
158 |
+
|
def get_step_counter_hook(**kwargs):
  """Builds a plain StepCounterHook; all keyword arguments are ignored."""
  del kwargs  # Accepted only for factory-signature compatibility.
  return tf.estimator.StepCounterHook()
163 |
+
|
164 |
+
|
# Registry mapping a lower-case hook name to the factory that builds it.
HOOKS = {
    'loggingtensorhook': get_logging_tensor_hook,
    'profilerhook': get_profiler_hook,
    'examplespersecondhook': get_examples_per_second_hook,
    'loggingmetrichook': get_logging_metric_hook,
    'stepcounterhook': get_step_counter_hook,
}
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/logger.py
ADDED
@@ -0,0 +1,423 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
|
16 |
+
"""Logging utilities for benchmark.
|
17 |
+
|
18 |
+
For collecting local environment metrics like CPU and memory, certain python
|
19 |
+
packages need be installed. See README for details.
|
20 |
+
"""
|
21 |
+
from __future__ import absolute_import
|
22 |
+
from __future__ import division
|
23 |
+
from __future__ import print_function
|
24 |
+
|
25 |
+
import contextlib
|
26 |
+
import datetime
|
27 |
+
import json
|
28 |
+
import multiprocessing
|
29 |
+
import numbers
|
30 |
+
import os
|
31 |
+
import threading
|
32 |
+
import uuid
|
33 |
+
|
34 |
+
from six.moves import _thread as thread
|
35 |
+
from absl import flags
|
36 |
+
import tensorflow as tf
|
37 |
+
from tensorflow.python.client import device_lib
|
38 |
+
|
39 |
+
from TensorFlow.utils.logs import cloud_lib
|
40 |
+
|
# File names and timestamp format used by the benchmark loggers.
METRIC_LOG_FILE_NAME = "metric.log"
BENCHMARK_RUN_LOG_FILE_NAME = "benchmark_run.log"
_DATE_TIME_FORMAT_PATTERN = "%Y-%m-%dT%H:%M:%S.%fZ"
GCP_TEST_ENV = "GCP"

# Run-status values written to the status log/table.
RUN_STATUS_SUCCESS = "success"
RUN_STATUS_FAILURE = "failure"
RUN_STATUS_RUNNING = "running"


FLAGS = flags.FLAGS

# Don't use it directly. Use get_benchmark_logger to access a logger.
_benchmark_logger = None
_logger_lock = threading.Lock()
55 |
+
|
56 |
+
|
def config_benchmark_logger(flag_obj=None):
  """Configures the global benchmark logger from the given flags.

  Args:
    flag_obj: flags object carrying `benchmark_logger_type` and the
      type-specific settings; defaults to the absl FLAGS singleton.

  Returns:
    The newly configured global benchmark logger.

  Raises:
    ValueError: if `benchmark_logger_type` names an unknown logger type.
  """
  global _benchmark_logger
  with _logger_lock:
    if not flag_obj:
      flag_obj = FLAGS

    if (not hasattr(flag_obj, "benchmark_logger_type") or
        flag_obj.benchmark_logger_type == "BaseBenchmarkLogger"):
      _benchmark_logger = BaseBenchmarkLogger()
    elif flag_obj.benchmark_logger_type == "BenchmarkFileLogger":
      _benchmark_logger = BenchmarkFileLogger(flag_obj.benchmark_log_dir)
    elif flag_obj.benchmark_logger_type == "BenchmarkBigQueryLogger":
      # Imported lazily so the BigQuery dependency is only needed when used.
      from benchmark import benchmark_uploader as bu  # pylint: disable=g-import-not-at-top
      bq_uploader = bu.BigQueryUploader(gcp_project=flag_obj.gcp_project)
      _benchmark_logger = BenchmarkBigQueryLogger(
          bigquery_uploader=bq_uploader,
          bigquery_data_set=flag_obj.bigquery_data_set,
          bigquery_run_table=flag_obj.bigquery_run_table,
          bigquery_run_status_table=flag_obj.bigquery_run_status_table,
          bigquery_metric_table=flag_obj.bigquery_metric_table,
          run_id=str(uuid.uuid4()))
    else:
      raise ValueError("Unrecognized benchmark_logger_type: %s"
                       % flag_obj.benchmark_logger_type)
  return _benchmark_logger
87 |
+
|
88 |
+
|
def get_benchmark_logger():
  """Returns the global benchmark logger, configuring it on first use."""
  return _benchmark_logger if _benchmark_logger else config_benchmark_logger()
93 |
+
|
94 |
+
|
@contextlib.contextmanager
def benchmark_context(flag_obj):
  """Context manager recording the final status of a benchmark run.

  A clean exit marks the run successful; any exception marks it as a
  failure and is re-raised unchanged.
  """
  bench_logger = config_benchmark_logger(flag_obj)
  try:
    yield
    bench_logger.on_finish(RUN_STATUS_SUCCESS)
  except Exception:  # pylint: disable=broad-except
    # Record the failure, then let the original exception propagate.
    bench_logger.on_finish(RUN_STATUS_FAILURE)
    raise
106 |
+
|
107 |
+
|
class BaseBenchmarkLogger(object):
  """Benchmark logger that writes everything to STDOUT."""

  def log_evaluation_result(self, eval_results):
    """Logs every metric found in an evaluation-result dictionary.

    The dictionary holds metrics defined in model_fn plus a global_step
    entry recording when the evaluation was performed.

    Args:
      eval_results: dict, the result of evaluate.
    """
    if not isinstance(eval_results, dict):
      tf.compat.v1.logging.warning(
          "eval_results should be dictionary for logging. Got %s",
          type(eval_results))
      return
    global_step = eval_results[tf.compat.v1.GraphKeys.GLOBAL_STEP]
    for key in sorted(eval_results):
      if key == tf.compat.v1.GraphKeys.GLOBAL_STEP:
        continue
      self.log_metric(key, eval_results[key], global_step=global_step)

  def log_metric(self, name, value, unit=None, global_step=None, extras=None):
    """Logs one benchmark metric to STDOUT.

    Currently the logging is done synchronously.

    Args:
      name: string, the name of the metric to log.
      value: number, the value of the metric; silently skipped (with a
        warning) when not numeric.
      unit: string, the unit of the metric, e.g. "image per second".
      global_step: int, the global_step when the metric is logged.
      extras: map of string:string, extra information about the metric.
    """
    metric = _process_metric_to_json(name, value, unit, global_step, extras)
    if metric:
      tf.compat.v1.logging.info("Benchmark metric: %s", metric)

  def log_run_info(self, model_name, dataset_name, run_params, test_id=None):
    """Logs the gathered benchmark-run information to STDOUT."""
    tf.compat.v1.logging.info(
        "Benchmark run: %s", _gather_run_info(model_name, dataset_name,
                                              run_params, test_id))

  def on_finish(self, status):
    """No-op; subclasses may flush or close their resources here."""
    pass
156 |
+
|
157 |
+
|
class BenchmarkFileLogger(BaseBenchmarkLogger):
  """Benchmark logger that persists metrics and run info to local disk."""

  def __init__(self, logging_dir):
    super(BenchmarkFileLogger, self).__init__()
    self._logging_dir = logging_dir
    if not tf.io.gfile.isdir(self._logging_dir):
      tf.io.gfile.makedirs(self._logging_dir)
    # Metrics are appended to one file for the lifetime of this logger.
    self._metric_file_handler = tf.io.gfile.GFile(
        os.path.join(self._logging_dir, METRIC_LOG_FILE_NAME), "a")

  def log_metric(self, name, value, unit=None, global_step=None, extras=None):
    """Appends one benchmark metric as a JSON line to the metric log file.

    Currently the logging is done synchronously; metrics that fail to
    serialize are skipped with a warning rather than raising.

    Args:
      name: string, the name of the metric to log.
      value: number, the value of the metric; skipped when not numeric.
      unit: string, the unit of the metric, e.g. "image per second".
      global_step: int, the global_step when the metric is logged.
      extras: map of string:string, extra information about the metric.
    """
    metric = _process_metric_to_json(name, value, unit, global_step, extras)
    if not metric:
      return
    try:
      json.dump(metric, self._metric_file_handler)
      self._metric_file_handler.write("\n")
      self._metric_file_handler.flush()
    except (TypeError, ValueError) as e:
      tf.compat.v1.logging.warning(
          "Failed to dump metric to log file: name %s, value %s, error %s",
          name, value, e)

  def log_run_info(self, model_name, dataset_name, run_params, test_id=None):
    """Writes the benchmark run information for the local env to disk.

    The schema of the run info follows official/benchmark/datastore/schema.

    Args:
      model_name: string, the name of the model.
      dataset_name: string, the name of dataset for training and evaluation.
      run_params: dict, parameters important for the run, e.g.
        hyperparameters.
      test_id: string, unique name of the test run built from key
        parameters such as batch size or GPU count; hardware independent.
    """
    run_info = _gather_run_info(model_name, dataset_name, run_params, test_id)

    with tf.io.gfile.GFile(os.path.join(
        self._logging_dir, BENCHMARK_RUN_LOG_FILE_NAME), "w") as f:
      try:
        json.dump(run_info, f)
        f.write("\n")
      except (TypeError, ValueError) as e:
        tf.compat.v1.logging.warning(
            "Failed to dump benchmark run info to log file: %s", e)

  def on_finish(self, status):
    """Flushes and closes the metric log file."""
    self._metric_file_handler.flush()
    self._metric_file_handler.close()
221 |
+
|
222 |
+
|
class BenchmarkBigQueryLogger(BaseBenchmarkLogger):
  """Benchmark logger that uploads results to the BigQuery data store."""

  def __init__(self,
               bigquery_uploader,
               bigquery_data_set,
               bigquery_run_table,
               bigquery_run_status_table,
               bigquery_metric_table,
               run_id):
    super(BenchmarkBigQueryLogger, self).__init__()
    self._bigquery_uploader = bigquery_uploader
    self._bigquery_data_set = bigquery_data_set
    self._bigquery_run_table = bigquery_run_table
    self._bigquery_run_status_table = bigquery_run_status_table
    self._bigquery_metric_table = bigquery_metric_table
    self._run_id = run_id

  def log_metric(self, name, value, unit=None, global_step=None, extras=None):
    """Uploads one benchmark metric to BigQuery on a background thread.

    Args:
      name: string, the name of the metric to log.
      value: number, the value of the metric; skipped when not numeric.
      unit: string, the unit of the metric, e.g. "image per second".
      global_step: int, the global_step when the metric is logged.
      extras: map of string:string, extra information about the metric.
    """
    metric = _process_metric_to_json(name, value, unit, global_step, extras)
    if not metric:
      return
    # Upload on a separate thread so a slow BigQuery round trip does not
    # skew the benchmark measurement. Starting a thread may still have a
    # small performance impact for models running on CPU.
    thread.start_new_thread(
        self._bigquery_uploader.upload_benchmark_metric_json,
        (self._bigquery_data_set,
         self._bigquery_metric_table,
         self._run_id,
         [metric]))

  def log_run_info(self, model_name, dataset_name, run_params, test_id=None):
    """Uploads the run info and a "running" status row to BigQuery.

    The schema of the run info follows official/benchmark/datastore/schema.

    Args:
      model_name: string, the name of the model.
      dataset_name: string, the name of dataset for training and evaluation.
      run_params: dict, parameters important for the run, e.g.
        hyperparameters.
      test_id: string, unique name of the test run built from key
        parameters such as batch size or GPU count; hardware independent.
    """
    run_info = _gather_run_info(model_name, dataset_name, run_params, test_id)
    # Background threads keep the potentially slow uploads off the
    # benchmark's critical path.
    thread.start_new_thread(
        self._bigquery_uploader.upload_benchmark_run_json,
        (self._bigquery_data_set,
         self._bigquery_run_table,
         self._run_id,
         run_info))
    thread.start_new_thread(
        self._bigquery_uploader.insert_run_status,
        (self._bigquery_data_set,
         self._bigquery_run_status_table,
         self._run_id,
         RUN_STATUS_RUNNING))

  def on_finish(self, status):
    """Synchronously records the final run status in BigQuery."""
    self._bigquery_uploader.update_run_status(
        self._bigquery_data_set,
        self._bigquery_run_status_table,
        self._run_id,
        status)
301 |
+
|
302 |
+
|
def _gather_run_info(model_name, dataset_name, run_params, test_id):
  """Collects the benchmark run information for the local environment."""
  run_info = {
      "model_name": model_name,
      "dataset": {"name": dataset_name},
      "machine_config": {},
      "test_id": test_id,
      "run_date": datetime.datetime.utcnow().strftime(
          _DATE_TIME_FORMAT_PATTERN)}
  # Each collector fills its own section of run_info in place; collectors
  # for optional dependencies (cpuinfo, psutil) degrade to a warning.
  _collect_tensorflow_info(run_info)
  _collect_tensorflow_environment_variables(run_info)
  _collect_run_params(run_info, run_params)
  _collect_cpu_info(run_info)
  _collect_memory_info(run_info)
  _collect_test_environment(run_info)
  return run_info
319 |
+
|
320 |
+
|
def _process_metric_to_json(
    name, value, unit=None, global_step=None, extras=None):
  """Validates the metric data and builds the JSON dict for insertion.

  Returns None (after emitting a warning) when `value` is not numeric.
  """
  if not isinstance(value, numbers.Number):
    tf.compat.v1.logging.warning(
        "Metric value to log should be a number. Got %s", type(value))
    return None

  return {
      "name": name,
      "value": float(value),
      "unit": unit,
      "global_step": global_step,
      "timestamp": datetime.datetime.utcnow().strftime(
          _DATE_TIME_FORMAT_PATTERN),
      "extras": _convert_to_json_dict(extras)}
338 |
+
|
339 |
+
|
def _collect_tensorflow_info(run_info):
  """Records the TensorFlow release version and git hash into run_info."""
  run_info["tensorflow_version"] = {
      "version": tf.version.VERSION, "git_hash": tf.version.GIT_VERSION}
343 |
+
|
344 |
+
|
345 |
+
def _collect_run_params(run_info, run_params):
|
346 |
+
"""Log the parameter information for the benchmark run."""
|
347 |
+
def process_param(name, value):
|
348 |
+
type_check = {
|
349 |
+
str: {"name": name, "string_value": value},
|
350 |
+
int: {"name": name, "long_value": value},
|
351 |
+
bool: {"name": name, "bool_value": str(value)},
|
352 |
+
float: {"name": name, "float_value": value},
|
353 |
+
}
|
354 |
+
return type_check.get(type(value),
|
355 |
+
{"name": name, "string_value": str(value)})
|
356 |
+
if run_params:
|
357 |
+
run_info["run_parameters"] = [
|
358 |
+
process_param(k, v) for k, v in sorted(run_params.items())]
|
359 |
+
|
360 |
+
|
361 |
+
def _collect_tensorflow_environment_variables(run_info):
|
362 |
+
run_info["tensorflow_environment_variables"] = [
|
363 |
+
{"name": k, "value": v}
|
364 |
+
for k, v in sorted(os.environ.items()) if k.startswith("TF_")]
|
365 |
+
|
366 |
+
|
367 |
+
# The following code is mirrored from tensorflow/tools/test/system_info_lib
|
368 |
+
# which is not exposed for import.
|
def _collect_cpu_info(run_info):
  """Collect the CPU information for the local environment.

  Populates run_info["machine_config"]["cpu_info"] with the core count,
  CPU model string and advertised clock rate in MHz. Everything beyond the
  core count needs the optional `cpuinfo` (py-cpuinfo) package; when it is
  missing only a warning is emitted.
  """
  cpu_info = {}

  cpu_info["num_cores"] = multiprocessing.cpu_count()

  try:
    # Note: cpuinfo is not installed in the TensorFlow OSS tree.
    # It is installable via pip.
    import cpuinfo  # pylint: disable=g-import-not-at-top

    info = cpuinfo.get_cpu_info()
    # py-cpuinfo 5.0 renamed "brand" -> "brand_raw" and
    # "hz_advertised_raw" -> "hz_advertised"; accept either spelling so
    # both old and new versions of the package work.
    cpu_info["cpu_info"] = info.get("brand_raw", info.get("brand"))
    hz_advertised = info.get("hz_advertised", info.get("hz_advertised_raw"))
    if hz_advertised:
      # Both spellings hold a (hz, scale) tuple; convert Hz to MHz.
      cpu_info["mhz_per_cpu"] = hz_advertised[0] / 1.0e6

    run_info["machine_config"]["cpu_info"] = cpu_info
  except ImportError:
    tf.compat.v1.logging.warn(
        "'cpuinfo' not imported. CPU info will not be logged.")
388 |
+
|
389 |
+
|
def _collect_memory_info(run_info):
  """Records total/available system memory; needs the optional psutil."""
  try:
    # Note: psutil is not installed in the TensorFlow OSS tree.
    # It is installable via pip.
    import psutil  # pylint: disable=g-import-not-at-top
  except ImportError:
    tf.compat.v1.logging.warn(
        "'psutil' not imported. Memory info will not be logged.")
    return
  vmem = psutil.virtual_memory()
  run_info["machine_config"]["memory_total"] = vmem.total
  run_info["machine_config"]["memory_available"] = vmem.available
401 |
+
|
402 |
+
|
def _collect_test_environment(run_info):
  """Detects the local test environment, e.g. GCE, AWS or DGX."""
  if cloud_lib.on_gcp():
    run_info["test_environment"] = GCP_TEST_ENV
  # TODO(scottzhu): Add more testing env detection for other platform
408 |
+
|
409 |
+
|
410 |
+
def _parse_gpu_model(physical_device_desc):
|
411 |
+
# Assume all the GPU connected are same model
|
412 |
+
for kv in physical_device_desc.split(","):
|
413 |
+
k, _, v = kv.partition(":")
|
414 |
+
if k.strip() == "name":
|
415 |
+
return v.strip()
|
416 |
+
return None
|
417 |
+
|
418 |
+
|
419 |
+
def _convert_to_json_dict(input_dict):
|
420 |
+
if input_dict:
|
421 |
+
return [{"name": k, "value": v} for k, v in sorted(input_dict.items())]
|
422 |
+
else:
|
423 |
+
return []
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/metric_hook.py
ADDED
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""Session hook for logging benchmark metric."""
|
16 |
+
|
17 |
+
from __future__ import absolute_import
|
18 |
+
from __future__ import division
|
19 |
+
from __future__ import print_function
|
20 |
+
|
21 |
+
import tensorflow as tf # pylint: disable=g-bad-import-order
|
22 |
+
|
23 |
+
|
24 |
+
class LoggingMetricHook(tf.estimator.LoggingTensorHook):
  """Hook to log benchmark metric information.

  This hook is very similar as tf.train.LoggingTensorHook, which logs given
  tensors every N local steps, every N seconds, or at the end. The metric
  information will be logged to given log_dir or via metric_logger in JSON
  format, which can be consumed by data analysis pipeline later.

  Note that if `at_end` is True, `tensors` should not include any tensor
  whose evaluation produces a side effect such as consuming additional inputs.
  """

  def __init__(self, tensors, metric_logger=None,
               every_n_iter=None, every_n_secs=None, at_end=False):
    """Initializer for LoggingMetricHook.

    Args:
      tensors: `dict` that maps string-valued tags to tensors/tensor names,
          or `iterable` of tensors/tensor names.
      metric_logger: instance of `BenchmarkLogger`, the benchmark logger that
          hook should use to write the log.
      every_n_iter: `int`, print the values of `tensors` once every N local
          steps taken on the current worker.
      every_n_secs: `int` or `float`, print the values of `tensors` once every N
          seconds. Exactly one of `every_n_iter` and `every_n_secs` should be
          provided.
      at_end: `bool` specifying whether to print the values of `tensors` at the
          end of the run.

    Raises:
      ValueError:
        1. `every_n_iter` is non-positive, or
        2. Exactly one of every_n_iter and every_n_secs should be provided.
        3. Exactly one of log_dir and metric_logger should be provided.
    """
    # The parent class validates the trigger arguments and populates
    # self._tag_order / self._log_at_end / self._timer.
    super(LoggingMetricHook, self).__init__(
        tensors=tensors,
        every_n_iter=every_n_iter,
        every_n_secs=every_n_secs,
        at_end=at_end)

    if metric_logger is None:
      raise ValueError("metric_logger should be provided.")
    self._logger = metric_logger

  def begin(self):
    super(LoggingMetricHook, self).begin()
    self._global_step_tensor = tf.compat.v1.train.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use LoggingMetricHook.")
    # Make sure the global step is always fetched alongside the watched
    # tensors, so _log_metric can attach a step number to every metric.
    if self._global_step_tensor.name not in self._current_tensors:
      self._current_tensors[self._global_step_tensor.name] = (
          self._global_step_tensor)

  def after_run(self, unused_run_context, run_values):
    # should_trigger is a internal state that populated at before_run, and it is
    # using self_timer to determine whether it should trigger.
    if self._should_trigger:
      self._log_metric(run_values.results)

    self._iter_count += 1

  def end(self, session):
    # _log_at_end is set by the parent from the `at_end` constructor argument.
    if self._log_at_end:
      values = session.run(self._current_tensors)
      self._log_metric(values)

  def _log_metric(self, tensor_values):
    # Reset the timer so the every-N trigger counts from this step.
    self._timer.update_last_triggered_step(self._iter_count)
    global_step = tensor_values[self._global_step_tensor.name]
    # self._tag_order is populated during the init of LoggingTensorHook
    for tag in self._tag_order:
      self._logger.log_metric(tag, tensor_values[tag], global_step=global_step)
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/logs/mlperf_helper.py
ADDED
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
|
16 |
+
"""Wrapper for the mlperf logging utils.
|
17 |
+
|
18 |
+
MLPerf compliance logging is only desired under a limited set of circumstances.
|
19 |
+
This module is intended to keep users from needing to consider logging (or
|
20 |
+
install the module) unless they are performing mlperf runs.
|
21 |
+
"""
|
22 |
+
|
23 |
+
from __future__ import absolute_import
|
24 |
+
from __future__ import division
|
25 |
+
from __future__ import print_function
|
26 |
+
|
27 |
+
from collections import namedtuple
|
28 |
+
import json
|
29 |
+
import os
|
30 |
+
import re
|
31 |
+
import subprocess
|
32 |
+
import sys
|
33 |
+
import typing
|
34 |
+
|
35 |
+
import tensorflow as tf
|
36 |
+
|
37 |
+
# Minimum acceptable mlperf_compliance package version.
_MIN_VERSION = (0, 0, 10)
# Frames to skip when attributing a log call site — TODO confirm against
# mlperf_compliance's ncf_print signature.
_STACK_OFFSET = 2

# Empty when already running as root, so commands can prepend it unconditionally.
SUDO = "sudo" if os.geteuid() else ""

# This indirection is used in docker.
DROP_CACHE_LOC = os.getenv("DROP_CACHE_LOC", "/proc/sys/vm/drop_caches")

# Optional prefix tagging raw NCF log lines.
_NCF_PREFIX = "NCF_RAW_"

# TODO(robieta): move line parsing to mlperf util
# Regex fragments for one compliance log line, e.g.
#   :::MLPv0.5.0 ncf 1234.5 (file.py:10) tag: value
_PREFIX = r"(?:{})?:::MLPv([0-9]+).([0-9]+).([0-9]+)".format(_NCF_PREFIX)
_BENCHMARK = r"([a-zA-Z0-9_]+)"
_TIMESTAMP = r"([0-9]+\.[0-9]+)"
_CALLSITE = r"\((.+):([0-9]+)\)"
_TAG = r"([a-zA-Z0-9_]+)"
_VALUE = r"(.*)"

# Structured form of one parsed log line; see parse_line/unparse_line.
ParsedLine = namedtuple("ParsedLine", ["version", "benchmark", "timestamp",
                                       "callsite", "tag", "value"])

LINE_PATTERN = re.compile(
    "^{prefix} {benchmark} {timestamp} {callsite} {tag}(: |$){value}?$".format(
        prefix=_PREFIX, benchmark=_BENCHMARK, timestamp=_TIMESTAMP,
        callsite=_CALLSITE, tag=_TAG, value=_VALUE))
|
62 |
+
|
63 |
+
|
64 |
+
def parse_line(line):  # type: (str) -> typing.Optional[ParsedLine]
  """Parse one MLPerf compliance log line, or return None if it doesn't match."""
  match = LINE_PATTERN.match(line.strip())
  if match is None:
    return None

  groups = match.groups()
  major, minor, micro = (int(part) for part in groups[:3])
  benchmark, timestamp = groups[3:5]
  call_file, call_line, tag, _, value = groups[5:]

  return ParsedLine(version=(major, minor, micro), benchmark=benchmark,
                    timestamp=timestamp, callsite=(call_file, call_line),
                    tag=tag, value=value)
|
75 |
+
|
76 |
+
|
77 |
+
def unparse_line(parsed_line):  # type: (ParsedLine) -> str
  """Render a ParsedLine back into its textual log-line form.

  Fix: the original template put a space between the tag and value_str while
  value_str itself began with ": ", producing "tag : value" (and a trailing
  space for empty values) — neither of which re-matches LINE_PATTERN. The
  separator is now folded entirely into value_str so an empty value yields
  neither a colon nor a trailing space, and parse_line round-trips.
  """
  version_str = "{}.{}.{}".format(*parsed_line.version)
  callsite_str = "({}:{})".format(*parsed_line.callsite)
  value_str = ": {}".format(parsed_line.value) if parsed_line.value else ""
  return ":::MLPv{} {} {} {} {}{}".format(
      version_str, parsed_line.benchmark, parsed_line.timestamp, callsite_str,
      parsed_line.tag, value_str)
|
84 |
+
|
85 |
+
|
86 |
+
def get_mlperf_log():
  """Shielded import of mlperf_log module.

  Returns mlperf_compliance.mlperf_log when an up-to-date mlperf_compliance
  package is installed, otherwise None.
  """
  try:
    import mlperf_compliance

    def _version_checked_module():
      """Return mlperf_log, raising ImportError if the package is too old."""
      import pkg_resources
      dist = pkg_resources.get_distribution("mlperf_compliance")
      installed = tuple(int(i) for i in dist.version.split("."))
      if installed < _MIN_VERSION:
        tf.compat.v1.logging.warning(
            "mlperf_compliance is version {}, must be >= {}".format(
                ".".join([str(i) for i in installed]),
                ".".join([str(i) for i in _MIN_VERSION])))
        raise ImportError
      return mlperf_compliance.mlperf_log

    mlperf_log = _version_checked_module()

  except ImportError:
    mlperf_log = None

  return mlperf_log
|
110 |
+
|
111 |
+
|
112 |
+
class Logger(object):
  """MLPerf logger indirection class.

  This logger only logs for MLPerf runs, and prevents various errors associated
  with not having the mlperf_compliance package installed.
  """
  class Tags(object):
    # Attribute proxy for mlperf_log tag constants; yields None for every
    # attribute while disabled or when mlperf_log is unavailable.
    def __init__(self, mlperf_log):
      self._enabled = False
      self._mlperf_log = mlperf_log

    def __getattr__(self, item):
      # Only called for attributes not found normally, so _enabled and
      # _mlperf_log (set in __init__) never recurse through here.
      if self._mlperf_log is None or not self._enabled:
        return
      return getattr(self._mlperf_log, item)

  def __init__(self):
    self._enabled = False
    self._mlperf_log = get_mlperf_log()
    self.tags = self.Tags(self._mlperf_log)

  def __call__(self, enable=False):
    """Toggle logging; returns self so it can be used as `with LOGGER(True):`."""
    if enable and self._mlperf_log is None:
      raise ImportError("MLPerf logging was requested, but mlperf_compliance "
                        "module could not be loaded.")

    self._enabled = enable
    self.tags._enabled = enable
    return self

  def __enter__(self):
    pass

  def __exit__(self, exc_type, exc_val, exc_tb):
    # Leaving the context always disables logging, regardless of how it
    # was enabled.
    self._enabled = False
    self.tags._enabled = False

  @property
  def log_file(self):
    # Path of the mlperf compliance log file, or None when unavailable.
    if self._mlperf_log is None:
      return
    return self._mlperf_log.LOG_FILE

  @property
  def enabled(self):
    return self._enabled

  def ncf_print(self, key, value=None, stack_offset=_STACK_OFFSET,
                deferred=False, extra_print=False, prefix=_NCF_PREFIX):
    # No-op unless logging is enabled and the package is importable.
    if self._mlperf_log is None or not self.enabled:
      return
    self._mlperf_log.ncf_print(key=key, value=value, stack_offset=stack_offset,
                               deferred=deferred, extra_print=extra_print,
                               prefix=prefix)

  def set_ncf_root(self, path):
    if self._mlperf_log is None:
      return
    self._mlperf_log.ROOT_DIR_NCF = path
|
171 |
+
|
172 |
+
|
173 |
+
# Module-level singleton; call LOGGER(True) (optionally as a context manager)
# to turn MLPerf logging on. The aliases below are the public entry points.
LOGGER = Logger()
ncf_print, set_ncf_root = LOGGER.ncf_print, LOGGER.set_ncf_root
TAGS = LOGGER.tags
|
176 |
+
|
177 |
+
|
178 |
+
def clear_system_caches():
  """Drop the OS page/dentry/inode caches before an MLPerf-compliant run.

  No-op unless MLPerf logging is enabled. Raises ValueError when the shell
  command fails.
  """
  if not LOGGER.enabled:
    return
  # NOTE(review): shell=True with interpolated SUDO/DROP_CACHE_LOC — both are
  # process-controlled (env var / euid), not untrusted input.
  command = "sync && echo 3 | {} tee {}".format(SUDO, DROP_CACHE_LOC)
  ret_code = subprocess.call([command], shell=True)

  if ret_code:
    raise ValueError("Failed to clear caches")
|
187 |
+
|
188 |
+
|
189 |
+
if __name__ == "__main__":
  # Smoke test: enable MLPerf logging and emit a RUN_START marker.
  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
  with LOGGER(True):
    ncf_print(key=TAGS.RUN_START)
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/misc/__init__.py
ADDED
File without changes
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/misc/callstack_sampler.py
ADDED
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""A simple Python callstack sampler."""
|
2 |
+
|
3 |
+
import contextlib
|
4 |
+
import datetime
|
5 |
+
import signal
|
6 |
+
import traceback
|
7 |
+
|
8 |
+
|
9 |
+
class CallstackSampler(object):
  """A simple signal-based Python callstack sampler."""

  def __init__(self, interval=None):
    # Each collected sample is a list: [utc timestamp, frame string, ...].
    self.stacks = []
    self.interval = 0.001 if interval is None else interval

  def _sample(self, signum, frame):
    """Signal handler: record the current stack and re-arm the timer."""
    del signum  # unused
    entry = [datetime.datetime.utcnow()]
    for filename, lineno, function_name, text in traceback.extract_stack(frame):
      entry.append('{}:{}({})({})'.format(filename, lineno, function_name,
                                          text))
    self.stacks.append(entry)
    # The timer is armed one-shot, so re-arm after every sample.
    signal.setitimer(signal.ITIMER_VIRTUAL, self.interval, 0)

  @contextlib.contextmanager
  def profile(self):
    """Context manager that samples the callstack while its body runs."""
    signal.signal(signal.SIGVTALRM, self._sample)
    signal.setitimer(signal.ITIMER_VIRTUAL, self.interval, 0)
    try:
      yield
    finally:
      # Disarm the timer even if the body raised.
      signal.setitimer(signal.ITIMER_VIRTUAL, 0)

  def save(self, fname):
    """Write the samples to `fname`, one blank-line-separated block each."""
    with open(fname, 'w') as out:
      for entry in self.stacks:
        for line in entry:
          out.write('%s\n' % line)
        out.write('\n')
|
45 |
+
|
46 |
+
|
47 |
+
@contextlib.contextmanager
def callstack_sampling(filename, interval=None):
  """Periodically sample the Python callstack while the body runs.

  Args:
    filename: path the collected samples are written to on exit.
    interval: the sampling interval, in seconds. Defaults to 0.001.

  Yields:
    nothing
  """
  profiler = CallstackSampler(interval=interval)
  with profiler.profile():
    yield
  # Note: samples are only flushed when the body exits without raising.
  profiler.save(filename)
|
62 |
+
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/misc/distribution_utils.py
ADDED
@@ -0,0 +1,345 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""Helper functions for running models in a distributed setting."""
|
16 |
+
|
17 |
+
from __future__ import absolute_import
|
18 |
+
from __future__ import division
|
19 |
+
from __future__ import print_function
|
20 |
+
|
21 |
+
import json
|
22 |
+
import os
|
23 |
+
import random
|
24 |
+
import string
|
25 |
+
import tensorflow.compat.v2 as tf
|
26 |
+
|
27 |
+
from TensorFlow.utils.misc import tpu_lib
|
28 |
+
|
29 |
+
from habana_frameworks.tensorflow.distribute import HPUStrategy
|
30 |
+
|
31 |
+
def _collective_communication(all_reduce_alg):
  """Return a CollectiveCommunication based on all_reduce_alg.

  Args:
    all_reduce_alg: a string specifying which collective communication to pick,
      or None.

  Returns:
    tf.distribute.experimental.CollectiveCommunication object

  Raises:
    ValueError: if `all_reduce_alg` not in [None, 'ring', 'nccl']
  """
  options = {
      None: tf.distribute.experimental.CollectiveCommunication.AUTO,
      "ring": tf.distribute.experimental.CollectiveCommunication.RING,
      "nccl": tf.distribute.experimental.CollectiveCommunication.NCCL,
  }
  try:
    return options[all_reduce_alg]
  except KeyError:
    raise ValueError(
        "When used with `multi_worker_mirrored`, valid values for "
        "all_reduce_alg are ['ring', 'nccl']. Supplied value: {}".format(
            all_reduce_alg))
|
55 |
+
|
56 |
+
|
57 |
+
def _mirrored_cross_device_ops(all_reduce_alg, num_packs):
  """Return a CrossDeviceOps based on all_reduce_alg and num_packs.

  Args:
    all_reduce_alg: a string specifying which cross device op to pick, or None.
    num_packs: an integer specifying number of packs for the cross device op.

  Returns:
    tf.distribute.CrossDeviceOps object or None.

  Raises:
    ValueError: if `all_reduce_alg` not in [None, 'nccl', 'hierarchical_copy'].
  """
  if all_reduce_alg is None:
    return None
  options = {
      "nccl": tf.distribute.NcclAllReduce,
      "hierarchical_copy": tf.distribute.HierarchicalCopyAllReduce,
  }
  if all_reduce_alg not in options:
    raise ValueError(
        "When used with `mirrored`, valid values for all_reduce_alg are "
        "['nccl', 'hierarchical_copy']. Supplied value: {}".format(
            all_reduce_alg))
  return options[all_reduce_alg](num_packs=num_packs)
|
83 |
+
|
84 |
+
|
85 |
+
def get_distribution_strategy(distribution_strategy="mirrored",
                              num_gpus=0,
                              num_hpus=0,
                              all_reduce_alg=None,
                              num_packs=1,
                              tpu_address=None):
  """Return a DistributionStrategy for running the model.

  Args:
    distribution_strategy: a string specifying which distribution strategy to
      use. Accepted values are 'off', 'one_device', 'mirrored',
      'parameter_server', 'multi_worker_mirrored', 'hpu', and 'tpu' -- case
      insensitive. 'off' means not to use Distribution Strategy; 'tpu' means
      to use TPUStrategy using `tpu_address`; 'hpu' means to use HPUStrategy
      on Habana devices.
    num_gpus: Number of GPUs to run this model.
    num_hpus: Number of Habana (HPU) devices to run this model.
    all_reduce_alg: Optional. Specifies which algorithm to use when performing
      all-reduce. For `MirroredStrategy`, valid values are "nccl" and
      "hierarchical_copy". For `MultiWorkerMirroredStrategy`, valid values are
      "ring" and "nccl". If None, DistributionStrategy will choose based on
      device topology.
    num_packs: Optional. Sets the `num_packs` in `tf.distribute.NcclAllReduce`
      or `tf.distribute.HierarchicalCopyAllReduce` for `MirroredStrategy`.
    tpu_address: Optional. String that represents TPU to connect to. Must not
      be None if `distribution_strategy` is set to `tpu`.
  Returns:
    tf.distribute.DistibutionStrategy object.
  Raises:
    ValueError: if `distribution_strategy` is 'off' or 'one_device' and
      `num_gpus` is larger than 1; or `num_gpus` is negative or if
      `distribution_strategy` is `tpu` but `tpu_address` is not specified.
  """
  if num_gpus < 0:
    raise ValueError("`num_gpus` can not be negative.")

  distribution_strategy = distribution_strategy.lower()
  if distribution_strategy == "off":
    if num_gpus > 1:
      raise ValueError(
          "When {} GPUs are specified, distribution_strategy "
          "flag cannot be set to 'off'.".format(num_gpus))
    # None tells callers to run without any distribution strategy.
    return None

  if distribution_strategy == "hpu":
    return HPUStrategy()

  if distribution_strategy == "tpu":
    # When tpu_address is an empty string, we communicate with local TPUs.
    cluster_resolver = tpu_lib.tpu_initialize(tpu_address)
    return tf.distribute.experimental.TPUStrategy(cluster_resolver)

  if distribution_strategy == "multi_worker_mirrored":
    return tf.distribute.experimental.MultiWorkerMirroredStrategy(
        communication=_collective_communication(all_reduce_alg))

  if distribution_strategy == "one_device":
    # Device preference order: CPU (no accelerators), then HPU, then GPU.
    if num_gpus == 0 and num_hpus == 0:
      return tf.distribute.OneDeviceStrategy("device:CPU:0")
    if num_hpus == 1:
      return tf.distribute.OneDeviceStrategy("device:HPU:0")
    if num_gpus > 1 or num_hpus > 1:
      raise ValueError("`OneDeviceStrategy` can not be used for more than "
                       "one device.")
    return tf.distribute.OneDeviceStrategy("device:GPU:0")

  if distribution_strategy == "mirrored":
    if num_gpus == 0:
      devices = ["device:CPU:0"]
    else:
      devices = ["device:GPU:%d" % i for i in range(num_gpus)]
    return tf.distribute.MirroredStrategy(
        devices=devices,
        cross_device_ops=_mirrored_cross_device_ops(all_reduce_alg, num_packs))

  if distribution_strategy == "parameter_server":
    return tf.distribute.experimental.ParameterServerStrategy()

  raise ValueError(
      "Unrecognized Distribution Strategy: %r" % distribution_strategy)
|
163 |
+
|
164 |
+
|
165 |
+
def per_replica_batch_size(batch_size, num_gpus):
  """For multi-gpu, batch-size must be a multiple of the number of GPUs.


  Note that distribution strategy handles this automatically when used with
  Keras. For using with Estimator, we need to get per GPU batch.

  Args:
    batch_size: Global batch size to be divided among devices. This should be
      equal to num_gpus times the single-GPU batch_size for multi-gpu training.
    num_gpus: How many GPUs are used with DistributionStrategies.

  Returns:
    Batch size per device.

  Raises:
    ValueError: if batch_size is not divisible by number of devices
  """
  if num_gpus <= 1:
    return batch_size

  quotient, remainder = divmod(batch_size, num_gpus)
  if remainder:
    raise ValueError(
        'When running with multiple GPUs, batch size '
        'must be a multiple of the number of available GPUs. Found {} '
        'GPUs with a batch size of {}; try --batch_size={} instead.'.format(
            num_gpus, batch_size, batch_size - remainder))
  return int(quotient)
|
194 |
+
|
195 |
+
|
196 |
+
# The `SyntheticDataset` is a temporary solution for generating synthetic data
|
197 |
+
# directly on devices. It is only useful for Keras with Distribution
|
198 |
+
# Strategies. We will have better support in `tf.data` or Distribution Strategy
|
199 |
+
# later.
|
200 |
+
class SyntheticDataset(object):
  """A dataset that generates synthetic data on each device.

  Caches the first element of `dataset` in local variables so the same batch
  can be replayed forever without touching the input pipeline.
  """

  def __init__(self, dataset, split_by=1):
    # dataset.take(1) doesn't have GPU kernel.
    with tf.device('device:CPU:0'):
      tensor = tf.data.experimental.get_single_element(dataset.take(1))
    flat_tensor = tf.nest.flatten(tensor)
    variable_data = []
    initializers = []
    for t in flat_tensor:
      # Keep only the first shard so each replica gets a per-replica batch.
      rebatched_t = tf.split(t, num_or_size_splits=split_by, axis=0)[0]
      assert rebatched_t.shape.is_fully_defined(), rebatched_t.shape
      # Local (non-trainable, non-replicated) variables hold the cached batch.
      v = tf.compat.v1.get_local_variable(self._random_name(),
                                          initializer=rebatched_t)
      variable_data.append(v)
      initializers.append(v.initializer)
    input_data = tf.nest.pack_sequence_as(tensor, variable_data)
    self._iterator = SyntheticIterator(input_data, initializers)

  def _random_name(self, size=10, chars=string.ascii_uppercase + string.digits):
    # Random variable names avoid collisions across multiple instances.
    return ''.join(random.choice(chars) for _ in range(size))

  def __iter__(self):
    return self._iterator

  def make_one_shot_iterator(self):
    # Estimator-style iterator API; both return the same replaying iterator.
    return self._iterator

  def make_initializable_iterator(self):
    return self._iterator
|
231 |
+
|
232 |
+
|
233 |
+
class SyntheticIterator(object):
  """An iterator that replays one cached batch of synthetic data forever."""

  def __init__(self, input_data, initializers):
    self._input_data = input_data
    self._initializers = initializers

  def get_next(self):
    return self._input_data

  def next(self):
    # Python 2 iterator protocol; delegates to __next__.
    return self.__next__()

  def __next__(self):
    try:
      return self.get_next()
    except tf.errors.OutOfRangeError:
      raise StopIteration

  def initialize(self):
    """Return the ops (or a no-op in eager mode) that fill the cached batch."""
    if tf.executing_eagerly():
      return tf.no_op()
    return self._initializers
|
257 |
+
|
258 |
+
|
259 |
+
def _monkey_patch_dataset_method(strategy):
  """Monkey-patch `strategy`'s dataset methods to serve synthetic data.

  The originals are stashed on the class so _undo_monkey_patch_dataset_method
  can restore them.
  """
  def make_dataset(self, dataset):
    tf.compat.v1.logging.info('Using pure synthetic data.')
    with self.scope():
      if self.extended._global_batch_size:  # pylint: disable=protected-access
        return SyntheticDataset(dataset, self.num_replicas_in_sync)
      return SyntheticDataset(dataset)

  def make_iterator(self, dataset):
    return iter(make_dataset(self, dataset))

  strategy.orig_make_dataset_iterator = strategy.make_dataset_iterator
  strategy.make_dataset_iterator = make_iterator
  strategy.orig_distribute_dataset = strategy.experimental_distribute_dataset
  strategy.experimental_distribute_dataset = make_dataset
|
277 |
+
|
278 |
+
|
279 |
+
def _undo_monkey_patch_dataset_method(strategy):
|
280 |
+
if hasattr(strategy, 'orig_make_dataset_iterator'):
|
281 |
+
strategy.make_dataset_iterator = strategy.orig_make_dataset_iterator
|
282 |
+
if hasattr(strategy, 'orig_distribute_dataset'):
|
283 |
+
strategy.make_dataset_iterator = strategy.orig_distribute_dataset
|
284 |
+
|
285 |
+
|
286 |
+
def set_up_synthetic_data():
  """Patch the common strategy classes to feed pure synthetic data."""
  for strategy_cls in (
      tf.distribute.OneDeviceStrategy,
      tf.distribute.MirroredStrategy,
      tf.distribute.experimental.MultiWorkerMirroredStrategy):
    _monkey_patch_dataset_method(strategy_cls)
|
291 |
+
|
292 |
+
|
293 |
+
def undo_set_up_synthetic_data():
  """Revert the synthetic-data patches applied by set_up_synthetic_data."""
  for strategy_cls in (
      tf.distribute.OneDeviceStrategy,
      tf.distribute.MirroredStrategy,
      tf.distribute.experimental.MultiWorkerMirroredStrategy):
    _undo_monkey_patch_dataset_method(strategy_cls)
|
298 |
+
|
299 |
+
|
300 |
+
def configure_cluster(worker_hosts=None, task_index=-1):
  """Set multi-worker cluster spec in TF_CONFIG environment variable.

  Args:
    worker_hosts: comma-separated list of worker ip:port pairs.
    task_index: index of the current worker within `worker_hosts`; required
      when more than one worker is given and TF_CONFIG is not already set.

  Returns:
    Number of workers in the cluster.
  """
  tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
  if tf_config:
    # An externally provided TF_CONFIG wins; just count its members.
    cluster = tf_config['cluster']
    num_workers = (len(cluster.get('chief', [])) +
                   len(cluster.get('worker', [])))
  elif worker_hosts:
    workers = worker_hosts.split(',')
    num_workers = len(workers)
    if num_workers > 1 and task_index < 0:
      raise ValueError('Must specify task_index when number of workers > 1')
    task_index = 0 if num_workers == 1 else task_index
    os.environ['TF_CONFIG'] = json.dumps({
        'cluster': {'worker': workers},
        'task': {'type': 'worker', 'index': task_index},
    })
  else:
    num_workers = 1
  return num_workers
|
328 |
+
|
329 |
+
|
330 |
+
def get_strategy_scope(strategy):
  """Return `strategy.scope()`, or a no-op context manager when strategy is falsy."""
  if not strategy:
    return DummyContextManager()
  return strategy.scope()
|
337 |
+
|
338 |
+
|
339 |
+
class DummyContextManager(object):
  """A no-op context manager used in place of `strategy.scope()`."""

  def __enter__(self):
    return None

  def __exit__(self, *args):
    return None
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/misc/keras_utils.py
ADDED
@@ -0,0 +1,262 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""Helper functions for the Keras implementations of models."""
|
16 |
+
|
17 |
+
from __future__ import absolute_import
|
18 |
+
from __future__ import division
|
19 |
+
from __future__ import print_function
|
20 |
+
|
21 |
+
import multiprocessing
|
22 |
+
import os
|
23 |
+
import time
|
24 |
+
|
25 |
+
from absl import logging
|
26 |
+
import tensorflow.compat.v2 as tf
|
27 |
+
from tensorflow.python import tf2
|
28 |
+
from tensorflow.python.profiler import profiler_v2 as profiler
|
29 |
+
|
30 |
+
|
31 |
+
class BatchTimestamp(object):
  """Pairs a batch index with the wall-clock time it was observed."""

  def __init__(self, batch_index, timestamp):
    self.batch_index = batch_index
    self.timestamp = timestamp

  def __repr__(self):
    template = "'BatchTimestamp<batch_index: {}, timestamp: {}>'"
    return template.format(self.batch_index, self.timestamp)
|
41 |
+
|
42 |
+
|
43 |
+
class TimeHistory(tf.keras.callbacks.Callback):
  """Keras callback that records step timing and logs throughput.

  Step counting is split into `steps_before_epoch` (completed epochs) plus
  `steps_in_epoch` (current epoch), so `global_steps` is a monotonically
  increasing 1-indexed counter across the whole training run.
  """

  def __init__(self, batch_size, log_steps, logdir=None):
    """Callback for logging performance.

    Args:
      batch_size: Total batch size.
      log_steps: Interval of steps between logging of batch level stats.
      logdir: Optional directory to write TensorBoard summaries.
    """
    # TODO(wcromar): remove this parameter and rely on `logs` parameter of
    # on_train_batch_end()
    self.batch_size = batch_size
    super(TimeHistory, self).__init__()
    self.log_steps = log_steps
    self.last_log_step = 0       # global step at which stats were last logged
    self.steps_before_epoch = 0  # steps completed in finished epochs
    self.steps_in_epoch = 0      # steps completed so far in the current epoch
    self.start_time = None       # wall-clock start of the current log window

    if logdir:
      self.summary_writer = tf.summary.create_file_writer(logdir)
    else:
      self.summary_writer = None

    # Logs start of step 1 then end of each step based on log_steps interval.
    self.timestamp_log = []

    # Records the time each epoch takes to run from start to finish of epoch.
    self.epoch_runtime_log = []

  @property
  def global_steps(self):
    """The current 1-indexed global step."""
    return self.steps_before_epoch + self.steps_in_epoch

  @property
  def average_steps_per_second(self):
    """The average training steps per second across all epochs."""
    # NOTE(review): raises ZeroDivisionError if read before any epoch has
    # finished (epoch_runtime_log empty) — confirm callers only use this
    # after training completes.
    return self.global_steps / sum(self.epoch_runtime_log)

  @property
  def average_examples_per_second(self):
    """The average number of training examples per second across all epochs."""
    return self.average_steps_per_second * self.batch_size

  def on_train_end(self, logs=None):
    # Flush any buffered TensorBoard summaries at the end of training.
    self.train_finish_time = time.time()

    if self.summary_writer:
      self.summary_writer.flush()

  def on_epoch_begin(self, epoch, logs=None):
    self.epoch_start = time.time()

  def on_batch_begin(self, batch, logs=None):
    # start_time is cleared after each log flush, so this marks the start of
    # a new logging window rather than of every individual batch.
    if not self.start_time:
      self.start_time = time.time()

    # Record the timestamp of the first global step
    if not self.timestamp_log:
      self.timestamp_log.append(BatchTimestamp(self.global_steps,
                                               self.start_time))

  def on_batch_end(self, batch, logs=None):
    """Records elapse time of the batch and calculates examples per second."""
    self.steps_in_epoch = batch + 1
    steps_since_last_log = self.global_steps - self.last_log_step
    if steps_since_last_log >= self.log_steps:
      now = time.time()
      elapsed_time = now - self.start_time
      steps_per_second = steps_since_last_log / elapsed_time
      examples_per_second = steps_per_second * self.batch_size

      self.timestamp_log.append(BatchTimestamp(self.global_steps, now))
      logging.info(
          'TimeHistory: %.2f seconds, %.2f examples/second between steps %d '
          'and %d', elapsed_time, examples_per_second, self.last_log_step,
          self.global_steps)

      if self.summary_writer:
        with self.summary_writer.as_default():
          tf.summary.scalar('global_step/sec', steps_per_second,
                            self.global_steps)
          tf.summary.scalar('examples/sec', examples_per_second,
                            self.global_steps)

      # Close the logging window; on_batch_begin restarts the timer.
      self.last_log_step = self.global_steps
      self.start_time = None

  def on_epoch_end(self, epoch, logs=None):
    epoch_run_time = time.time() - self.epoch_start
    self.epoch_runtime_log.append(epoch_run_time)

    # Fold this epoch's steps into the cross-epoch counter.
    self.steps_before_epoch += self.steps_in_epoch
    self.steps_in_epoch = 0
|
140 |
+
|
141 |
+
|
142 |
+
def get_profiler_callback(model_dir, profile_steps, enable_tensorboard,
                          steps_per_epoch):
  """Validate profile_steps flag value and return profiler callback.

  Args:
    model_dir: Directory where profiles will be written.
    profile_steps: String of the form "start,stop" giving the global step
      range to profile.
    enable_tensorboard: Whether a TensorBoard callback is also in use; if so,
      a warning about overlapping profiling is emitted.
    steps_per_epoch: Number of steps per epoch, used to translate global
      steps into epoch-relative steps.

  Returns:
    A ProfilerCallback covering the requested step range.

  Raises:
    ValueError: if profile_steps is not a valid "start,stop" pair.
  """
  bad_value_message = (
      'profile_steps must be a comma separated pair of positive integers, '
      'specifying the first and last steps to be profiled.'
  )
  try:
    parsed_steps = [int(step) for step in profile_steps.split(',')]
  except ValueError:
    raise ValueError(bad_value_message)
  if len(parsed_steps) != 2:
    raise ValueError(bad_value_message)
  start_step, stop_step = parsed_steps
  if start_step < 0 or start_step > stop_step:
    raise ValueError(bad_value_message)
  if enable_tensorboard:
    logging.warning(
        'Both TensorBoard and profiler callbacks are used. Note that the '
        'TensorBoard callback profiles the 2nd step (unless otherwise '
        'specified). Please make sure the steps profiled by the two callbacks '
        'do not overlap.')
  return ProfilerCallback(model_dir, start_step, stop_step, steps_per_epoch)
|
165 |
+
|
166 |
+
|
167 |
+
class ProfilerCallback(tf.keras.callbacks.Callback):
  """Save profiles in specified step range to log directory."""

  def __init__(self, log_dir, start_step, stop_step, steps_per_epoch):
    super(ProfilerCallback, self).__init__()
    self.log_dir = log_dir
    self.start_step = start_step
    self.stop_step = stop_step
    # Translate global step bounds into (epoch, step-within-epoch) pairs.
    self.start_epoch, self.start_step_in_epoch = divmod(
        start_step, steps_per_epoch)
    self.stop_epoch, self.stop_step_in_epoch = divmod(
        stop_step, steps_per_epoch)
    self.should_start = False
    self.should_stop = False

  def on_epoch_begin(self, epoch, logs=None):
    # Arm the start/stop triggers once their epoch is reached.
    self.should_start = self.should_start or epoch == self.start_epoch
    self.should_stop = self.should_stop or epoch == self.stop_epoch

  def on_batch_begin(self, batch, logs=None):
    if not (self.should_start and batch == self.start_step_in_epoch):
      return
    self.should_start = False
    profiler.start(self.log_dir)
    logging.info('Profiler started at Step %s', self.start_step)

  def on_batch_end(self, batch, logs=None):
    if not (self.should_stop and batch == self.stop_step_in_epoch):
      return
    self.should_stop = False
    profiler.stop()
    logging.info('Profiler saved profiles for steps between %s and %s to %s',
                 self.start_step, self.stop_step, self.log_dir)
|
200 |
+
|
201 |
+
|
202 |
+
def set_session_config(enable_eager=False,
                       enable_xla=False):
  """Sets the session config.

  Dispatches to the TF 2.0 eager-context configuration when running under
  TF 2.x, otherwise builds a v1 ConfigProto and installs it into either
  eager execution or a Keras-backed Session.
  """
  if not is_v2_0():
    config = get_config_proto_v1(enable_xla=enable_xla)
    if enable_eager:
      tf.compat.v1.enable_eager_execution(config=config)
    else:
      sess = tf.compat.v1.Session(config=config)
      tf.compat.v1.keras.backend.set_session(sess)
    return
  set_config_v2(enable_xla=enable_xla)
|
214 |
+
|
215 |
+
|
216 |
+
def get_config_proto_v1(enable_xla=False):
  """Return config proto according to flag settings, or None to use default.

  Args:
    enable_xla: Whether to enable global XLA JIT compilation in the returned
      ConfigProto.

  Returns:
    A tf.compat.v1.ConfigProto with JIT enabled when enable_xla is True,
    otherwise None (use TensorFlow defaults).
  """
  config = None
  if enable_xla:
    config = tf.compat.v1.ConfigProto()
    # Bug fix: this file imports `tensorflow.compat.v2 as tf`, which does not
    # expose `OptimizerOptions` at top level; it must be reached through
    # compat.v1 or this line raises AttributeError when enable_xla is True.
    config.graph_options.optimizer_options.global_jit_level = (
        tf.compat.v1.OptimizerOptions.ON_2)
  return config
|
224 |
+
|
225 |
+
|
226 |
+
def set_config_v2(enable_xla=False):
  """Config eager context according to flag values using TF 2.0 API."""
  if not enable_xla:
    return
  tf.config.optimizer.set_jit(True)
|
230 |
+
|
231 |
+
|
232 |
+
def is_v2_0():
  """Returns true if using tf 2.0."""
  # tf2.enabled() reports whether TF 2.x behavior is active in this process.
  return tf2.enabled()
|
235 |
+
|
236 |
+
|
237 |
+
def set_gpu_thread_mode_and_count(gpu_thread_mode,
                                  datasets_num_private_threads,
                                  num_gpus, per_gpu_thread_count):
  """Set GPU thread mode and count, and adjust dataset threads count.

  Args:
    gpu_thread_mode: Value written to the TF_GPU_THREAD_MODE environment
      variable.
    datasets_num_private_threads: Number of private tf.data threads; if
      falsy, a value is derived from the CPU and GPU counts.
    num_gpus: Number of GPUs in use.
    per_gpu_thread_count: Threads per GPU; defaults to 2 when falsy.

  Returns:
    The resolved datasets_num_private_threads. (Previously this value was
    computed and logged but discarded, leaving callers unable to apply it
    to their tf.data pipelines.)
  """
  cpu_count = multiprocessing.cpu_count()
  logging.info('Logical CPU cores: %s', cpu_count)

  # Allocate private thread pool for each GPU to schedule and launch kernels.
  per_gpu_thread_count = per_gpu_thread_count or 2
  os.environ['TF_GPU_THREAD_MODE'] = gpu_thread_mode
  os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
  logging.info('TF_GPU_THREAD_COUNT: %s',
               os.environ['TF_GPU_THREAD_COUNT'])
  logging.info('TF_GPU_THREAD_MODE: %s',
               os.environ['TF_GPU_THREAD_MODE'])

  # Limit data preprocessing threadpool to CPU cores minus number of total GPU
  # private threads and memory copy threads.
  total_gpu_thread_count = per_gpu_thread_count * num_gpus
  num_runtime_threads = num_gpus
  if not datasets_num_private_threads:
    datasets_num_private_threads = min(
        cpu_count - total_gpu_thread_count - num_runtime_threads,
        num_gpus * 8)
    logging.info('Set datasets_num_private_threads to %s',
                 datasets_num_private_threads)
  return datasets_num_private_threads
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/misc/model_helpers.py
ADDED
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""Miscellaneous functions that can be called by models."""
|
16 |
+
|
17 |
+
from __future__ import absolute_import
|
18 |
+
from __future__ import division
|
19 |
+
from __future__ import print_function
|
20 |
+
|
21 |
+
import numbers
|
22 |
+
|
23 |
+
import tensorflow as tf
|
24 |
+
from tensorflow.python.util import nest
|
25 |
+
|
26 |
+
|
27 |
+
def past_stop_threshold(stop_threshold, eval_metric):
  """Return a boolean representing whether a model should be stopped.

  Args:
    stop_threshold: float, the threshold above which a model should stop
      training.
    eval_metric: float, the current value of the relevant metric to check.

  Returns:
    True if training should stop, False otherwise.

  Raises:
    ValueError: if either stop_threshold or eval_metric is not a number
  """
  if stop_threshold is None:
    return False

  if not isinstance(stop_threshold, numbers.Number):
    raise ValueError("Threshold for checking stop conditions must be a number.")
  if not isinstance(eval_metric, numbers.Number):
    raise ValueError("Eval metric being checked against stop conditions "
                     "must be a number.")

  should_stop = eval_metric >= stop_threshold
  if should_stop:
    tf.compat.v1.logging.info(
        "Stop threshold of {} was passed with metric value {}.".format(
            stop_threshold, eval_metric))
  return should_stop
|
57 |
+
|
58 |
+
|
59 |
+
def generate_synthetic_data(
    input_shape, input_value=0, input_dtype=None, label_shape=None,
    label_value=0, label_dtype=None):
  """Create a repeating dataset with constant values.

  Args:
    input_shape: a tf.TensorShape object or nested tf.TensorShapes. The shape
      of the input data.
    input_value: Value of each input element.
    input_dtype: Input dtype. If None, will be inferred by the input value.
    label_shape: a tf.TensorShape object or nested tf.TensorShapes. The shape
      of the label data.
    label_value: Value of each input element.
    label_dtype: Input dtype. If None, will be inferred by the target value.

  Returns:
    Dataset of tensors or tuples of tensors (if label_shape is set).
  """
  # TODO(kathywu): Replace with SyntheticDataset once it is in contrib.
  def _constant_builder(value, dtype):
    return lambda shape: tf.constant(value, dtype, shape)

  element = nest.map_structure(_constant_builder(input_value, input_dtype),
                               input_shape)
  if label_shape:
    labels = nest.map_structure(_constant_builder(label_value, label_dtype),
                                label_shape)
    element = (element, labels)

  return tf.data.Dataset.from_tensors(element).repeat()
|
87 |
+
|
88 |
+
|
89 |
+
def apply_clean(flags_obj):
  """Remove the model directory when the --clean flag is set."""
  if not (flags_obj.clean and tf.io.gfile.exists(flags_obj.model_dir)):
    return
  tf.compat.v1.logging.info("--clean flag set. Removing existing model dir:"
                            " {}".format(flags_obj.model_dir))
  tf.io.gfile.rmtree(flags_obj.model_dir)
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/misc/tpu_lib.py
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""Initializes TPU system for TF 2.0."""
|
16 |
+
|
17 |
+
import tensorflow as tf
|
18 |
+
|
19 |
+
|
20 |
+
def tpu_initialize(tpu_address):
  """Initializes TPU for TF 2.0 training.

  Args:
    tpu_address: string, bns address of master TPU worker.

  Returns:
    A TPUClusterResolver.
  """
  resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
      tpu=tpu_address)
  # Remote TPUs need an explicit cluster connection; local ones do not.
  if tpu_address not in ('', 'local'):
    tf.config.experimental_connect_to_cluster(resolver)
  tf.tpu.experimental.initialize_tpu_system(resolver)
  return resolver
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/__init__.py
ADDED
File without changes
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/benchmark_wrappers.py
ADDED
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Lint as: python3
|
2 |
+
"""Utils to annotate and trace benchmarks."""
|
3 |
+
|
4 |
+
from __future__ import absolute_import
|
5 |
+
from __future__ import division
|
6 |
+
from __future__ import print_function
|
7 |
+
|
8 |
+
from absl import flags
|
9 |
+
from absl import logging
|
10 |
+
from absl.testing import flagsaver
|
11 |
+
|
12 |
+
FLAGS = flags.FLAGS

# Runtime flag overrides consumed by the @enable_runtime_flags decorator
# defined below in this module.
flags.DEFINE_multi_string(
    'benchmark_method_flags', None,
    'Optional list of runtime flags of the form key=value. Specify '
    'multiple times to specify different flags. These will override the FLAGS '
    'object directly after hardcoded settings in individual benchmark methods '
    'before they call _run_and_report benchmark. Example if we set '
    '--benchmark_method_flags=train_steps=10 and a benchmark method hardcodes '
    'FLAGS.train_steps=10000 and later calls _run_and_report_benchmark, '
    'it\'ll only run for 10 steps. This is useful for '
    'debugging/profiling workflows.')
|
24 |
+
|
25 |
+
|
26 |
+
def enable_runtime_flags(decorated_func):
  """Sets attributes from --benchmark_method_flags for method execution.

  @enable_runtime_flags decorator temporarily adds flags passed in via
  --benchmark_method_flags and runs the decorated function in that context.

  A user can set --benchmark_method_flags=train_steps=5 to run the benchmark
  method with FLAGS.train_steps=5 for debugging (without modifying the
  benchmark code).

  Args:
    decorated_func: The method that runs the benchmark after previous setup
      execution that set some flags.

  Returns:
    new_func: The same method which executes in a temporary context where flag
      overrides from --benchmark_method_flags are active.
  """
  import functools  # Local import keeps this decorator self-contained.

  # Fix: preserve the wrapped method's __name__/__doc__ so benchmark
  # reporting and introspection see the real method, not 'runner'.
  @functools.wraps(decorated_func)
  def runner(*args, **kwargs):
    """Creates a temporary context to activate --benchmark_method_flags."""
    if FLAGS.benchmark_method_flags:
      saved_flag_values = flagsaver.save_flag_values()
      for key_value in FLAGS.benchmark_method_flags:
        key, value = key_value.split('=', 1)
        try:
          numeric_float = float(value)
          numeric_int = int(numeric_float)
          # Prefer int when the value is integral (e.g. '10' or '10.0').
          if abs(numeric_int) == abs(numeric_float):
            flag_value = numeric_int
          else:
            flag_value = numeric_float
        except ValueError:
          flag_value = value  # Non-numeric values pass through as strings.
        logging.info('Setting --%s=%s', key, flag_value)
        setattr(FLAGS, key, flag_value)
    else:
      saved_flag_values = None
    try:
      result = decorated_func(*args, **kwargs)
      return result
    finally:
      # Restore original flag values even if the benchmark raises.
      if saved_flag_values:
        flagsaver.restore_flag_values(saved_flag_values)

  return runner
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/integration.py
ADDED
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""Helper code to run complete models from within python.
|
16 |
+
"""
|
17 |
+
|
18 |
+
from __future__ import absolute_import
|
19 |
+
from __future__ import division
|
20 |
+
from __future__ import print_function
|
21 |
+
|
22 |
+
import os
|
23 |
+
import shutil
|
24 |
+
import sys
|
25 |
+
import tempfile
|
26 |
+
|
27 |
+
from absl import flags
|
28 |
+
from absl.testing import flagsaver
|
29 |
+
|
30 |
+
from TensorFlow.utils.flags import core as flags_core
|
31 |
+
|
32 |
+
|
33 |
+
@flagsaver.flagsaver
def run_synthetic(main, tmp_root, extra_flags=None, synth=True, train_epochs=1,
                  epochs_between_evals=1):
  """Performs a minimal run of a model.

  This function is intended to test for syntax errors throughout a model. A
  very limited run is performed using synthetic data.

  Args:
    main: The primary function used to exercise a code path. Generally this
      function is "<MODULE>.main(argv)".
    tmp_root: Root path for the temp directory created by the test class.
    extra_flags: Additional flags passed by the caller of this function.
    synth: Use synthetic data.
    train_epochs: Value of the --train_epochs flag.
    epochs_between_evals: Value of the --epochs_between_evals flag.
  """
  model_dir = tempfile.mkdtemp(dir=tmp_root)

  args = [sys.argv[0], "--model_dir", model_dir]
  args.extend(extra_flags or [])

  if synth:
    args.append("--use_synthetic_data")
  if train_epochs is not None:
    args += ["--train_epochs", str(train_epochs)]
  if epochs_between_evals is not None:
    args += ["--epochs_between_evals", str(epochs_between_evals)]

  try:
    flags_core.parse_flags(argv=args)
    main(flags.FLAGS)
  finally:
    # Always clean up the temporary model directory.
    if os.path.exists(model_dir):
      shutil.rmtree(model_dir)
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/mock_lib.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
|
16 |
+
"""Mock objects and related functions for testing."""
|
17 |
+
|
18 |
+
from __future__ import absolute_import
|
19 |
+
from __future__ import division
|
20 |
+
from __future__ import print_function
|
21 |
+
|
22 |
+
|
23 |
+
class MockBenchmarkLogger(object):
  """This is a mock logger that can be used in dependent tests."""

  def __init__(self):
    self.logged_metric = []

  def log_metric(self, name, value, unit=None, global_step=None,
                 extras=None):
    # Record the metric as a plain dict; value is coerced to float like the
    # real logger does.
    record = {
        "name": name,
        "value": float(value),
        "unit": unit,
        "global_step": global_step,
        "extras": extras,
    }
    self.logged_metric.append(record)
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/perfzero_benchmark.py
ADDED
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
"""Utils for creating PerfZero benchmarks."""
|
16 |
+
from __future__ import absolute_import
|
17 |
+
from __future__ import division
|
18 |
+
from __future__ import print_function
|
19 |
+
|
20 |
+
import os
|
21 |
+
|
22 |
+
from absl import flags
|
23 |
+
from absl.testing import flagsaver
|
24 |
+
import tensorflow as tf # pylint: disable=g-bad-import-order
|
25 |
+
|
26 |
+
FLAGS = flags.FLAGS
|
27 |
+
|
28 |
+
|
29 |
+
class PerfZeroBenchmark(tf.test.Benchmark):
  """Common methods used in PerfZero Benchmarks.

  Handles the resetting of flags between tests, loading of default_flags,
  overriding of defaults. PerfZero (OSS) runs each test in a separate
  process reducing some need to reset the flags.
  """
  # Class-level cache of the flag values captured after the first _setup();
  # shared across all instances within the same process.
  local_flags = None

  def __init__(self,
               output_dir=None,
               default_flags=None,
               flag_methods=None,
               tpu=None):
    """Initialize class.

    Args:
      output_dir: Base directory to store all output for the test.
      default_flags: Set of flags to pass to model.
      flag_methods: Set of flag methods to run during setup.
      tpu: (optional) TPU name to use in a TPU benchmark.
    """
    # Environment variables take precedence over constructor arguments.
    if os.getenv('BENCHMARK_OUTPUT_DIR'):
      self.output_dir = os.getenv('BENCHMARK_OUTPUT_DIR')
    elif output_dir:
      self.output_dir = output_dir
    else:
      self.output_dir = '/tmp'
    self.default_flags = default_flags or {}
    # NOTE(review): defaults to a dict even though a list of callables is
    # expected; iterating an empty dict is still a harmless no-op — confirm
    # intent.
    self.flag_methods = flag_methods or {}

    if os.getenv('BENCHMARK_TPU'):
      resolved_tpu = os.getenv('BENCHMARK_TPU')
    elif tpu:
      resolved_tpu = tpu
    else:
      resolved_tpu = None

    if resolved_tpu:
      # TPU models are expected to accept a --tpu=name flag. PerfZero creates
      # the TPU at runtime and passes the TPU's name to this flag.
      self.default_flags['tpu'] = resolved_tpu

  def _get_model_dir(self, folder_name):
    """Returns directory to store info, e.g. saved model and event log."""
    return os.path.join(self.output_dir, folder_name)

  def _setup(self):
    """Sets up and resets flags before each test."""
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
    if PerfZeroBenchmark.local_flags is None:
      # First call in this process: run flag definers, parse defaults,
      # apply class-level overrides, then snapshot the result.
      for flag_method in self.flag_methods:
        flag_method()
      # Loads flags to get defaults to then override. List cannot be empty.
      flags.FLAGS(['foo'])
      # Overrides flag values with defaults for the class of tests.
      for k, v in self.default_flags.items():
        setattr(FLAGS, k, v)
      saved_flag_values = flagsaver.save_flag_values()
      PerfZeroBenchmark.local_flags = saved_flag_values
    else:
      # Subsequent calls: restore the snapshot taken on the first call.
      flagsaver.restore_flag_values(PerfZeroBenchmark.local_flags)
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/pylint.rcfile
ADDED
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[MESSAGES CONTROL]
|
2 |
+
disable=R,W,bad-option-value,trailing-newlines,no-name-in-module
|
3 |
+
|
4 |
+
[REPORTS]
|
5 |
+
# Tells whether to display a full report or only the messages
|
6 |
+
reports=no
|
7 |
+
|
8 |
+
# Activate the evaluation score.
|
9 |
+
score=no
|
10 |
+
|
11 |
+
[BASIC]
|
12 |
+
|
13 |
+
# Regular expression matching correct argument names
|
14 |
+
argument-rgx=^[a-z][a-z0-9_]*$
|
15 |
+
|
16 |
+
# Regular expression matching correct attribute names
|
17 |
+
attr-rgx=^_{0,2}[a-z][a-z0-9_]*$
|
18 |
+
|
19 |
+
# Regular expression matching correct class attribute names
|
20 |
+
class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
|
21 |
+
|
22 |
+
# Regular expression matching correct class names
|
23 |
+
class-rgx=^_?[A-Z][a-zA-Z0-9]*$
|
24 |
+
|
25 |
+
# Regular expression matching correct constant names
|
26 |
+
const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
|
27 |
+
|
28 |
+
# Minimum line length for functions/classes that require docstrings, shorter
|
29 |
+
# ones are exempt.
|
30 |
+
docstring-min-length=10
|
31 |
+
|
32 |
+
# Regular expression matching correct function names
|
33 |
+
function-rgx=^(?:(?P<camel_case>_?[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_?[a-z][a-z0-9_]*))$
|
34 |
+
|
35 |
+
# Good variable names which should always be accepted, separated by a comma
|
36 |
+
good-names=main,_
|
37 |
+
|
38 |
+
# Regular expression matching correct inline iteration names
|
39 |
+
inlinevar-rgx=^[a-z][a-z0-9_]*$
|
40 |
+
|
41 |
+
# Regular expression matching correct method names
|
42 |
+
method-rgx=^(?:(?P<exempt>__[a-z0-9_]+__|next)|(?P<camel_case>_{0,2}[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_{0,2}[a-z][a-z0-9_]*)|(setUp|tearDown))$
|
43 |
+
|
44 |
+
# Regular expression matching correct module names
|
45 |
+
module-rgx=^(_?[a-z][a-z0-9_]*)|__init__|PRESUBMIT|PRESUBMIT_unittest$
|
46 |
+
|
47 |
+
# Regular expression which should only match function or class names that do
|
48 |
+
# not require a docstring.
|
49 |
+
no-docstring-rgx=(__.*__|main|.*ArgParser)
|
50 |
+
|
51 |
+
# Naming hint for variable names
|
52 |
+
variable-name-hint=[a-z_][a-z0-9_]{2,30}$
|
53 |
+
|
54 |
+
# Regular expression matching correct variable names
|
55 |
+
variable-rgx=^[a-z][a-z0-9_]*$
|
56 |
+
|
57 |
+
[TYPECHECK]
|
58 |
+
|
59 |
+
# List of module names for which member attributes should not be checked
|
60 |
+
# (useful for modules/projects where namespaces are manipulated during runtime
|
61 |
+
# and thus existing member attributes cannot be deduced by static analysis. It
|
62 |
+
# supports qualified module names, as well as Unix pattern matching.
|
63 |
+
ignored-modules=absl, absl.*, official, official.*, tensorflow, tensorflow.*, LazyLoader, google, google.cloud.*
|
64 |
+
|
65 |
+
|
66 |
+
[CLASSES]
|
67 |
+
|
68 |
+
# List of method names used to declare (i.e. assign) instance attributes.
|
69 |
+
defining-attr-methods=__init__,__new__,setUp
|
70 |
+
|
71 |
+
# List of member names, which should be excluded from the protected access
|
72 |
+
# warning.
|
73 |
+
exclude-protected=_asdict,_fields,_replace,_source,_make
|
74 |
+
|
75 |
+
# This is deprecated, because it is not used anymore.
|
76 |
+
#ignore-iface-methods=
|
77 |
+
|
78 |
+
# List of valid names for the first argument in a class method.
|
79 |
+
valid-classmethod-first-arg=cls,class_
|
80 |
+
|
81 |
+
# List of valid names for the first argument in a metaclass class method.
|
82 |
+
valid-metaclass-classmethod-first-arg=mcs
|
83 |
+
|
84 |
+
|
85 |
+
[DESIGN]
|
86 |
+
|
87 |
+
# Argument names that match this expression will be ignored. Default to name
|
88 |
+
# with leading underscore
|
89 |
+
ignored-argument-names=_.*
|
90 |
+
|
91 |
+
# Maximum number of arguments for function / method
|
92 |
+
max-args=5
|
93 |
+
|
94 |
+
# Maximum number of attributes for a class (see R0902).
|
95 |
+
max-attributes=7
|
96 |
+
|
97 |
+
# Maximum number of branch for function / method body
|
98 |
+
max-branches=12
|
99 |
+
|
100 |
+
# Maximum number of locals for function / method body
|
101 |
+
max-locals=15
|
102 |
+
|
103 |
+
# Maximum number of parents for a class (see R0901).
|
104 |
+
max-parents=7
|
105 |
+
|
106 |
+
# Maximum number of public methods for a class (see R0904).
|
107 |
+
max-public-methods=20
|
108 |
+
|
109 |
+
# Maximum number of return / yield for function / method body
|
110 |
+
max-returns=6
|
111 |
+
|
112 |
+
# Maximum number of statements in function / method body
|
113 |
+
max-statements=50
|
114 |
+
|
115 |
+
# Minimum number of public methods for a class (see R0903).
|
116 |
+
min-public-methods=2
|
117 |
+
|
118 |
+
|
119 |
+
[EXCEPTIONS]
|
120 |
+
|
121 |
+
# Exceptions that will emit a warning when being caught. Defaults to
|
122 |
+
# "Exception"
|
123 |
+
overgeneral-exceptions=StandardError,Exception,BaseException
|
124 |
+
|
125 |
+
|
126 |
+
[FORMAT]
|
127 |
+
|
128 |
+
# Number of spaces of indent required inside a hanging or continued line.
|
129 |
+
indent-after-paren=4
|
130 |
+
|
131 |
+
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
|
132 |
+
# tab).
|
133 |
+
indent-string=' '
|
134 |
+
|
135 |
+
# Maximum number of characters on a single line.
|
136 |
+
max-line-length=80
|
137 |
+
|
138 |
+
# Maximum number of lines in a module
|
139 |
+
max-module-lines=99999
|
140 |
+
|
141 |
+
# List of optional constructs for which whitespace checking is disabled
|
142 |
+
no-space-check=
|
143 |
+
|
144 |
+
# Allow the body of an if to be on the same line as the test if there is no
|
145 |
+
# else.
|
146 |
+
single-line-if-stmt=yes
|
147 |
+
|
148 |
+
# Allow URLs and comment type annotations to exceed the max line length as neither can be easily
|
149 |
+
# split across lines.
|
150 |
+
ignore-long-lines=^\s*(?:(# )?<?https?://\S+>?$|# type:)
|
151 |
+
|
152 |
+
|
153 |
+
[VARIABLES]
|
154 |
+
|
155 |
+
# List of additional names supposed to be defined in builtins. Remember that
|
156 |
+
# you should avoid to define new builtins when possible.
|
157 |
+
additional-builtins=
|
158 |
+
|
159 |
+
# List of strings which can identify a callback function by name. A callback
|
160 |
+
# name must start or end with one of those strings.
|
161 |
+
callbacks=cb_,_cb
|
162 |
+
|
163 |
+
# A regular expression matching the name of dummy variables (i.e. expectedly
|
164 |
+
# not used).
|
165 |
+
dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_)
|
166 |
+
|
167 |
+
# Tells whether we should check for unused import in __init__ files.
|
168 |
+
init-import=no
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/scripts/builds_common.sh
ADDED
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env bash
|
2 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
# ==============================================================================
|
16 |
+
#
|
17 |
+
# Common Bash functions used by build scripts
|
18 |
+
|
19 |
+
COLOR_NC='\033[0m'
|
20 |
+
COLOR_BOLD='\033[1m'
|
21 |
+
COLOR_LIGHT_GRAY='\033[0;37m'
|
22 |
+
COLOR_GREEN='\033[0;32m'
|
23 |
+
COLOR_RED='\033[0;31m'
|
24 |
+
|
25 |
+
die() {
|
26 |
+
# Print a message and exit with code 1.
|
27 |
+
#
|
28 |
+
# Usage: die <error_message>
|
29 |
+
# e.g., die "Something bad happened."
|
30 |
+
|
31 |
+
echo $@
|
32 |
+
exit 1
|
33 |
+
}
|
34 |
+
|
35 |
+
num_cpus() {
|
36 |
+
# Get the number of CPUs
|
37 |
+
N_CPUS=$(grep -c ^processor /proc/cpuinfo)
|
38 |
+
if [[ -z ${N_CPUS} ]]; then
|
39 |
+
die "ERROR: Unable to determine the number of CPUs"
|
40 |
+
fi
|
41 |
+
|
42 |
+
echo ${N_CPUS}
|
43 |
+
}
|
44 |
+
|
45 |
+
# List files changed (i.e., added, or revised) from
|
46 |
+
# the common ancestor of HEAD and the latest master branch.
|
47 |
+
# Usage: get_changed_files_from_master_branch
|
48 |
+
get_changed_files_from_master_branch() {
|
49 |
+
ANCESTOR=$(git merge-base HEAD master origin/master)
|
50 |
+
git diff ${ANCESTOR} --diff-filter=d --name-only "$@"
|
51 |
+
}
|
52 |
+
|
53 |
+
# List python files changed that still exist,
|
54 |
+
# i.e., not removed.
|
55 |
+
# Usage: get_py_files_to_check [--incremental]
|
56 |
+
get_py_files_to_check() {
|
57 |
+
if [[ "$1" == "--incremental" ]]; then
|
58 |
+
get_changed_files_from_master_branch -- '*.py'
|
59 |
+
elif [[ -z "$1" ]]; then
|
60 |
+
find official/ -name '*.py'
|
61 |
+
else
|
62 |
+
die "Found unsupported args: $@ for get_py_files_to_check."
|
63 |
+
fi
|
64 |
+
}
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/scripts/ci_sanity.sh
ADDED
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
|
2 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
# ==============================================================================
|
16 |
+
|
17 |
+
# Sanity check script that runs tests and lint under local environment.
|
18 |
+
# Make sure that tensorflow and pylint is installed.
|
19 |
+
# usage: models >: ./official/utils/testing/scripts/ci_sanity.sh do_pylint --incremental
|
20 |
+
set +x
|
21 |
+
|
22 |
+
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
23 |
+
source "${SCRIPT_DIR}/builds_common.sh"
|
24 |
+
cd "$SCRIPT_DIR/../../../.."
|
25 |
+
MODEL_ROOT="$(pwd)"
|
26 |
+
|
27 |
+
export PYTHONPATH="$PYTHONPATH:${MODEL_ROOT}"
|
28 |
+
|
29 |
+
# Run pylint
|
30 |
+
do_pylint() {
|
31 |
+
# Usage: do_pylint [--incremental]
|
32 |
+
#
|
33 |
+
# Options:
|
34 |
+
# --incremental Performs check on only the python files changed in the
|
35 |
+
# last non-merge git commit.
|
36 |
+
|
37 |
+
# Use this list to whitelist pylint errors
|
38 |
+
ERROR_WHITELIST=""
|
39 |
+
|
40 |
+
echo "ERROR_WHITELIST=\"${ERROR_WHITELIST}\""
|
41 |
+
|
42 |
+
PYLINT_BIN="python3 -m pylint"
|
43 |
+
|
44 |
+
PYTHON_SRC_FILES=$(get_py_files_to_check $1)
|
45 |
+
if [[ -z ${PYTHON_SRC_FILES} ]]; then
|
46 |
+
echo "do_pylint found no Python files to check. Returning."
|
47 |
+
return 0
|
48 |
+
fi
|
49 |
+
|
50 |
+
PYLINTRC_FILE="official/utils/testing/pylint.rcfile"
|
51 |
+
|
52 |
+
if [[ ! -f "${PYLINTRC_FILE}" ]]; then
|
53 |
+
die "ERROR: Cannot find pylint rc file at ${PYLINTRC_FILE}"
|
54 |
+
fi
|
55 |
+
|
56 |
+
NUM_SRC_FILES=$(echo ${PYTHON_SRC_FILES} | wc -w)
|
57 |
+
NUM_CPUS=$(num_cpus)
|
58 |
+
|
59 |
+
echo "Running pylint on ${NUM_SRC_FILES} files with ${NUM_CPUS} "\
|
60 |
+
"parallel jobs..."
|
61 |
+
echo ""
|
62 |
+
|
63 |
+
PYLINT_START_TIME=$(date +'%s')
|
64 |
+
OUTPUT_FILE="$(mktemp)_pylint_output.log"
|
65 |
+
ERRORS_FILE="$(mktemp)_pylint_errors.log"
|
66 |
+
NONWL_ERRORS_FILE="$(mktemp)_pylint_nonwl_errors.log"
|
67 |
+
|
68 |
+
rm -rf ${OUTPUT_FILE}
|
69 |
+
rm -rf ${ERRORS_FILE}
|
70 |
+
rm -rf ${NONWL_ERRORS_FILE}
|
71 |
+
touch ${NONWL_ERRORS_FILE}
|
72 |
+
|
73 |
+
${PYLINT_BIN} --rcfile="${PYLINTRC_FILE}" --output-format=parseable \
|
74 |
+
--jobs=${NUM_CPUS} ${PYTHON_SRC_FILES} > ${OUTPUT_FILE} 2>&1
|
75 |
+
PYLINT_END_TIME=$(date +'%s')
|
76 |
+
|
77 |
+
echo ""
|
78 |
+
echo "pylint took $((PYLINT_END_TIME - PYLINT_START_TIME)) s"
|
79 |
+
echo ""
|
80 |
+
|
81 |
+
# Report only what we care about
|
82 |
+
# Ref https://pylint.readthedocs.io/en/latest/technical_reference/features.html
|
83 |
+
# E: all errors
|
84 |
+
# W0311 bad-indentation
|
85 |
+
# W0312 mixed-indentation
|
86 |
+
# C0330 bad-continuation
|
87 |
+
# C0301 line-too-long
|
88 |
+
# C0326 bad-whitespace
|
89 |
+
# W0611 unused-import
|
90 |
+
# W0622 redefined-builtin
|
91 |
+
grep -E '(\[E|\[W0311|\[W0312|\[C0330|\[C0301|\[C0326|\[W0611|\[W0622)' ${OUTPUT_FILE} > ${ERRORS_FILE}
|
92 |
+
|
93 |
+
N_ERRORS=0
|
94 |
+
while read -r LINE; do
|
95 |
+
IS_WHITELISTED=0
|
96 |
+
for WL_REGEX in ${ERROR_WHITELIST}; do
|
97 |
+
if echo ${LINE} | grep -q "${WL_REGEX}"; then
|
98 |
+
echo "Found a whitelisted error:"
|
99 |
+
echo " ${LINE}"
|
100 |
+
IS_WHITELISTED=1
|
101 |
+
fi
|
102 |
+
done
|
103 |
+
|
104 |
+
if [[ ${IS_WHITELISTED} == "0" ]]; then
|
105 |
+
echo "${LINE}" >> ${NONWL_ERRORS_FILE}
|
106 |
+
echo "" >> ${NONWL_ERRORS_FILE}
|
107 |
+
((N_ERRORS++))
|
108 |
+
fi
|
109 |
+
done <${ERRORS_FILE}
|
110 |
+
|
111 |
+
echo "Raw lint output file: ${OUTPUT_FILE}"
|
112 |
+
|
113 |
+
echo ""
|
114 |
+
if [[ ${N_ERRORS} != 0 ]]; then
|
115 |
+
echo "FAIL: Found ${N_ERRORS} non-whitelited pylint errors:"
|
116 |
+
cat "${NONWL_ERRORS_FILE}"
|
117 |
+
return 1
|
118 |
+
else
|
119 |
+
echo "PASS: No non-whitelisted pylint errors were found."
|
120 |
+
return 0
|
121 |
+
fi
|
122 |
+
}
|
123 |
+
|
124 |
+
test_result=0
|
125 |
+
|
126 |
+
TESTS="$@"
|
127 |
+
|
128 |
+
for t in "${TESTS}"; do
|
129 |
+
${t} || test_result=$?
|
130 |
+
done
|
131 |
+
|
132 |
+
exit "${test_result}"
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/TensorFlow/utils/testing/scripts/presubmit.sh
ADDED
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
|
2 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
# ==============================================================================
|
16 |
+
|
17 |
+
# Presubmit script that runs tests and lint under local environment.
|
18 |
+
# Make sure that tensorflow and pylint is installed.
|
19 |
+
# usage: models >: ./official/utils/testing/scripts/presubmit.sh
|
20 |
+
# usage: models >: ./official/utils/testing/scripts/presubmit.sh lint py2_test py3_test
|
21 |
+
set +x
|
22 |
+
|
23 |
+
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
24 |
+
cd "$SCRIPT_DIR/../../../.."
|
25 |
+
MODEL_ROOT="$(pwd)"
|
26 |
+
|
27 |
+
export PYTHONPATH="$PYTHONPATH:${MODEL_ROOT}"
|
28 |
+
|
29 |
+
py_test() {
|
30 |
+
local PY_BINARY="$1"
|
31 |
+
local exit_code=0
|
32 |
+
|
33 |
+
echo "===========Running Python test============"
|
34 |
+
|
35 |
+
for test_file in `find official/ -name '*test.py' -print`
|
36 |
+
do
|
37 |
+
echo "####=======Testing ${test_file}=======####"
|
38 |
+
${PY_BINARY} "${test_file}"
|
39 |
+
_exit_code=$?
|
40 |
+
if [[ $_exit_code != 0 ]]; then
|
41 |
+
exit_code=$_exit_code
|
42 |
+
echo "FAIL: ${test_file}"
|
43 |
+
fi
|
44 |
+
done
|
45 |
+
|
46 |
+
return "${exit_code}"
|
47 |
+
}
|
48 |
+
|
49 |
+
py2_test() {
|
50 |
+
local PY_BINARY=$(which python2)
|
51 |
+
py_test "$PY_BINARY"
|
52 |
+
return $?
|
53 |
+
}
|
54 |
+
|
55 |
+
py3_test() {
|
56 |
+
local PY_BINARY=$(which python3)
|
57 |
+
py_test "$PY_BINARY"
|
58 |
+
return $?
|
59 |
+
}
|
60 |
+
|
61 |
+
test_result=0
|
62 |
+
|
63 |
+
if [ "$#" -eq 0 ]; then
|
64 |
+
TESTS="lint py2_test py3_test"
|
65 |
+
else
|
66 |
+
TESTS="$@"
|
67 |
+
fi
|
68 |
+
|
69 |
+
for t in "${TESTS}"; do
|
70 |
+
${t} || test_result=$?
|
71 |
+
done
|
72 |
+
|
73 |
+
exit "${test_result}"
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/configs/train_01x08x08.yaml
ADDED
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
model:
|
2 |
+
base_learning_rate: 1.25e-7
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
4 |
+
params:
|
5 |
+
parameterization: "v"
|
6 |
+
linear_start: 0.00085
|
7 |
+
linear_end: 0.0120
|
8 |
+
num_timesteps_cond: 1
|
9 |
+
log_every_t: 200
|
10 |
+
timesteps: 1000
|
11 |
+
first_stage_key: npy
|
12 |
+
first_stage_type: moments
|
13 |
+
cond_stage_key: txt
|
14 |
+
image_size: 64
|
15 |
+
channels: 4
|
16 |
+
cond_stage_trainable: false
|
17 |
+
conditioning_key: crossattn
|
18 |
+
monitor: steps
|
19 |
+
scale_factor: 0.18215
|
20 |
+
use_ema: False
|
21 |
+
|
22 |
+
load_vae: True
|
23 |
+
load_unet: False
|
24 |
+
load_encoder: True
|
25 |
+
|
26 |
+
validation_config:
|
27 |
+
sampler: "ddim" # plms, ddim, dpm
|
28 |
+
steps: 50
|
29 |
+
scale: 8.0
|
30 |
+
ddim_eta: 0.0
|
31 |
+
prompt_key: "caption"
|
32 |
+
image_fname_key: "image_id"
|
33 |
+
|
34 |
+
save_images:
|
35 |
+
enabled: False
|
36 |
+
base_output_dir: "results/inference"
|
37 |
+
fid:
|
38 |
+
enabled: True
|
39 |
+
inception_weights_url: https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth
|
40 |
+
cache_dir: /tmp/checkpoints/inception
|
41 |
+
gt_path: /val_data/val2014_30k_stats.npz
|
42 |
+
clip:
|
43 |
+
enabled: True
|
44 |
+
clip_version: "ViT-H-14"
|
45 |
+
cache_dir: /tmp/checkpoints/clip
|
46 |
+
|
47 |
+
scheduler_config:
|
48 |
+
target: ldm.lr_scheduler.LambdaLinearScheduler
|
49 |
+
params:
|
50 |
+
warm_up_steps: [ 1000 ]
|
51 |
+
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
52 |
+
f_start: [ 1.e-6 ]
|
53 |
+
f_max: [ 1. ]
|
54 |
+
f_min: [ 1. ]
|
55 |
+
|
56 |
+
unet_config:
|
57 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
58 |
+
params:
|
59 |
+
use_checkpoint: False # gradient checkpointing
|
60 |
+
use_fp16: False
|
61 |
+
image_size: 32
|
62 |
+
in_channels: 4
|
63 |
+
out_channels: 4
|
64 |
+
model_channels: 320
|
65 |
+
attention_resolutions: [ 4, 2, 1 ]
|
66 |
+
num_res_blocks: 2
|
67 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
68 |
+
num_head_channels: 64 # need to fix for flash-attn
|
69 |
+
use_spatial_transformer: True
|
70 |
+
use_linear_in_transformer: True
|
71 |
+
transformer_depth: 1
|
72 |
+
context_dim: 1024
|
73 |
+
legacy: False
|
74 |
+
|
75 |
+
first_stage_config:
|
76 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
77 |
+
params:
|
78 |
+
embed_dim: 4
|
79 |
+
monitor: val/rec_loss
|
80 |
+
ddconfig:
|
81 |
+
double_z: true
|
82 |
+
z_channels: 4
|
83 |
+
resolution: 256
|
84 |
+
in_channels: 3
|
85 |
+
out_ch: 3
|
86 |
+
ch: 128
|
87 |
+
ch_mult:
|
88 |
+
- 1
|
89 |
+
- 2
|
90 |
+
- 4
|
91 |
+
- 4
|
92 |
+
num_res_blocks: 2
|
93 |
+
attn_resolutions: []
|
94 |
+
dropout: 0.0
|
95 |
+
lossconfig:
|
96 |
+
target: torch.nn.Identity
|
97 |
+
|
98 |
+
cond_stage_config:
|
99 |
+
target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
|
100 |
+
params:
|
101 |
+
arch: "ViT-H-14"
|
102 |
+
version: "laion2b_s32b_b79k"
|
103 |
+
freeze: True
|
104 |
+
layer: "penultimate"
|
105 |
+
cache_dir: /tmp/checkpoints/clip
|
106 |
+
device: "hpu"
|
107 |
+
hpu_graph: True
|
108 |
+
use_fused_adamw: True
|
109 |
+
|
110 |
+
data:
|
111 |
+
target: ldm.data.composable_data_module.ComposableDataModule
|
112 |
+
params:
|
113 |
+
train:
|
114 |
+
target: ldm.data.webdatasets.build_dataloader
|
115 |
+
params:
|
116 |
+
urls: /mnt/weka/data/mlperf_models/stable_diffusion/training/laion-400m/webdataset-moments-filtered/{00000..00831}.tar
|
117 |
+
batch_size: 8
|
118 |
+
shuffle: 1000
|
119 |
+
partial: False
|
120 |
+
keep_only_keys: ["npy", "txt"]
|
121 |
+
num_workers: 4
|
122 |
+
cache_size: 0
|
123 |
+
cache_dir:
|
124 |
+
persistent_workers: True
|
125 |
+
|
126 |
+
validation:
|
127 |
+
target: ldm.data.tsv.build_dataloader
|
128 |
+
params:
|
129 |
+
annotations_file: "/val_data/val2014_30k.tsv"
|
130 |
+
keys: ["image_id", "id", "caption"]
|
131 |
+
batch_size: 8
|
132 |
+
shuffle: False
|
133 |
+
num_workers: 4
|
134 |
+
|
135 |
+
lightning:
|
136 |
+
trainer:
|
137 |
+
accelerator: 'hpu'
|
138 |
+
num_nodes: 1
|
139 |
+
devices: 8
|
140 |
+
precision: 32
|
141 |
+
logger: False
|
142 |
+
log_every_n_steps: 5
|
143 |
+
enable_progress_bar: False
|
144 |
+
max_epochs: -1
|
145 |
+
max_steps: 10000000000000
|
146 |
+
val_check_interval: 1000
|
147 |
+
enable_checkpointing: True
|
148 |
+
num_sanity_val_steps: 0
|
149 |
+
strategy:
|
150 |
+
target: strategies.DDPStrategy
|
151 |
+
params:
|
152 |
+
find_unused_parameters: False
|
153 |
+
|
154 |
+
modelcheckpoint:
|
155 |
+
target: lightning.pytorch.callbacks.ModelCheckpoint
|
156 |
+
params:
|
157 |
+
save_top_k: -1
|
158 |
+
every_n_train_steps: 1000
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/configs/train_02x08x08.yaml
ADDED
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
model:
|
2 |
+
base_learning_rate: 1.25e-7
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
4 |
+
params:
|
5 |
+
parameterization: "v"
|
6 |
+
linear_start: 0.00085
|
7 |
+
linear_end: 0.0120
|
8 |
+
num_timesteps_cond: 1
|
9 |
+
log_every_t: 200
|
10 |
+
timesteps: 1000
|
11 |
+
first_stage_key: npy
|
12 |
+
first_stage_type: moments
|
13 |
+
cond_stage_key: txt
|
14 |
+
image_size: 64
|
15 |
+
channels: 4
|
16 |
+
cond_stage_trainable: false
|
17 |
+
conditioning_key: crossattn
|
18 |
+
monitor: steps
|
19 |
+
scale_factor: 0.18215
|
20 |
+
use_ema: False
|
21 |
+
|
22 |
+
load_vae: True
|
23 |
+
load_unet: False
|
24 |
+
load_encoder: True
|
25 |
+
|
26 |
+
validation_config:
|
27 |
+
sampler: "ddim" # plms, ddim, dpm
|
28 |
+
steps: 50
|
29 |
+
scale: 8.0
|
30 |
+
ddim_eta: 0.0
|
31 |
+
prompt_key: "caption"
|
32 |
+
image_fname_key: "image_id"
|
33 |
+
|
34 |
+
save_images:
|
35 |
+
enabled: False
|
36 |
+
base_output_dir: "/results/inference"
|
37 |
+
fid:
|
38 |
+
enabled: True
|
39 |
+
inception_weights_url: https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth
|
40 |
+
cache_dir: /tmp/checkpoints/inception
|
41 |
+
gt_path: /val_data/val2014_30k_stats.npz
|
42 |
+
clip:
|
43 |
+
enabled: True
|
44 |
+
clip_version: "ViT-H-14"
|
45 |
+
cache_dir: /tmp/checkpoints/clip
|
46 |
+
|
47 |
+
scheduler_config:
|
48 |
+
target: ldm.lr_scheduler.LambdaLinearScheduler
|
49 |
+
params:
|
50 |
+
warm_up_steps: [ 1000 ]
|
51 |
+
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
52 |
+
f_start: [ 1.e-6 ]
|
53 |
+
f_max: [ 1. ]
|
54 |
+
f_min: [ 1. ]
|
55 |
+
|
56 |
+
unet_config:
|
57 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
58 |
+
params:
|
59 |
+
use_checkpoint: False # gradient checkpointing
|
60 |
+
use_fp16: False
|
61 |
+
image_size: 32
|
62 |
+
in_channels: 4
|
63 |
+
out_channels: 4
|
64 |
+
model_channels: 320
|
65 |
+
attention_resolutions: [ 4, 2, 1 ]
|
66 |
+
num_res_blocks: 2
|
67 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
68 |
+
num_head_channels: 64 # need to fix for flash-attn
|
69 |
+
use_spatial_transformer: True
|
70 |
+
use_linear_in_transformer: True
|
71 |
+
transformer_depth: 1
|
72 |
+
context_dim: 1024
|
73 |
+
legacy: False
|
74 |
+
|
75 |
+
first_stage_config:
|
76 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
77 |
+
params:
|
78 |
+
embed_dim: 4
|
79 |
+
monitor: val/rec_loss
|
80 |
+
ddconfig:
|
81 |
+
double_z: true
|
82 |
+
z_channels: 4
|
83 |
+
resolution: 256
|
84 |
+
in_channels: 3
|
85 |
+
out_ch: 3
|
86 |
+
ch: 128
|
87 |
+
ch_mult:
|
88 |
+
- 1
|
89 |
+
- 2
|
90 |
+
- 4
|
91 |
+
- 4
|
92 |
+
num_res_blocks: 2
|
93 |
+
attn_resolutions: []
|
94 |
+
dropout: 0.0
|
95 |
+
lossconfig:
|
96 |
+
target: torch.nn.Identity
|
97 |
+
|
98 |
+
cond_stage_config:
|
99 |
+
target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
|
100 |
+
params:
|
101 |
+
arch: "ViT-H-14"
|
102 |
+
version: "laion2b_s32b_b79k"
|
103 |
+
freeze: True
|
104 |
+
layer: "penultimate"
|
105 |
+
cache_dir: /tmp/checkpoints/clip
|
106 |
+
device: "hpu"
|
107 |
+
hpu_graph: True
|
108 |
+
use_fused_adamw: True
|
109 |
+
|
110 |
+
data:
|
111 |
+
target: ldm.data.composable_data_module.ComposableDataModule
|
112 |
+
params:
|
113 |
+
train:
|
114 |
+
target: ldm.data.webdatasets.build_dataloader
|
115 |
+
params:
|
116 |
+
urls: /mnt/weka/data/mlperf_models/stable_diffusion/training/laion-400m/webdataset-moments-filtered/{00000..00831}.tar
|
117 |
+
batch_size: 8
|
118 |
+
shuffle: 1000
|
119 |
+
partial: False
|
120 |
+
keep_only_keys: ["npy", "txt"]
|
121 |
+
num_workers: 4
|
122 |
+
cache_size: 0
|
123 |
+
cache_dir:
|
124 |
+
persistent_workers: True
|
125 |
+
|
126 |
+
validation:
|
127 |
+
target: ldm.data.tsv.build_dataloader
|
128 |
+
params:
|
129 |
+
annotations_file: "/val_data/val2014_30k.tsv"
|
130 |
+
keys: ["image_id", "id", "caption"]
|
131 |
+
batch_size: 8
|
132 |
+
shuffle: False
|
133 |
+
num_workers: 1
|
134 |
+
|
135 |
+
lightning:
|
136 |
+
trainer:
|
137 |
+
accelerator: 'hpu'
|
138 |
+
num_nodes: 2
|
139 |
+
devices: 8
|
140 |
+
precision: 32
|
141 |
+
logger: False
|
142 |
+
log_every_n_steps: 5
|
143 |
+
enable_progress_bar: False
|
144 |
+
max_epochs: -1
|
145 |
+
max_steps: 10000000000000
|
146 |
+
val_check_interval: 1000
|
147 |
+
enable_checkpointing: True
|
148 |
+
num_sanity_val_steps: 0
|
149 |
+
strategy:
|
150 |
+
target: strategies.DDPStrategy
|
151 |
+
params:
|
152 |
+
find_unused_parameters: False
|
153 |
+
|
154 |
+
modelcheckpoint:
|
155 |
+
target: lightning.pytorch.callbacks.ModelCheckpoint
|
156 |
+
params:
|
157 |
+
save_top_k: -1
|
158 |
+
every_n_train_steps: 1000
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/configs/train_08x08x08.yaml
ADDED
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
model:
|
2 |
+
base_learning_rate: 1.25e-7
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
4 |
+
params:
|
5 |
+
parameterization: "v"
|
6 |
+
linear_start: 0.00085
|
7 |
+
linear_end: 0.0120
|
8 |
+
num_timesteps_cond: 1
|
9 |
+
log_every_t: 200
|
10 |
+
timesteps: 1000
|
11 |
+
first_stage_key: npy
|
12 |
+
first_stage_type: moments
|
13 |
+
cond_stage_key: txt
|
14 |
+
image_size: 64
|
15 |
+
channels: 4
|
16 |
+
cond_stage_trainable: false
|
17 |
+
conditioning_key: crossattn
|
18 |
+
monitor: steps
|
19 |
+
scale_factor: 0.18215
|
20 |
+
use_ema: False
|
21 |
+
|
22 |
+
load_vae: True
|
23 |
+
load_unet: False
|
24 |
+
load_encoder: True
|
25 |
+
|
26 |
+
validation_config:
|
27 |
+
sampler: "ddim" # plms, ddim, dpm
|
28 |
+
steps: 50
|
29 |
+
scale: 8.0
|
30 |
+
ddim_eta: 0.0
|
31 |
+
prompt_key: "caption"
|
32 |
+
image_fname_key: "image_id"
|
33 |
+
|
34 |
+
save_images:
|
35 |
+
enabled: False
|
36 |
+
base_output_dir: "/results/inference"
|
37 |
+
fid:
|
38 |
+
enabled: True
|
39 |
+
inception_weights_url: https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth
|
40 |
+
cache_dir: /tmp/checkpoints/inception
|
41 |
+
gt_path: /val_data/val2014_30k_stats.npz
|
42 |
+
clip:
|
43 |
+
enabled: True
|
44 |
+
clip_version: "ViT-H-14"
|
45 |
+
cache_dir: /tmp/checkpoints/clip
|
46 |
+
|
47 |
+
scheduler_config:
|
48 |
+
target: ldm.lr_scheduler.LambdaLinearScheduler
|
49 |
+
params:
|
50 |
+
warm_up_steps: [ 1000 ]
|
51 |
+
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
52 |
+
f_start: [ 1.e-6 ]
|
53 |
+
f_max: [ 1. ]
|
54 |
+
f_min: [ 1. ]
|
55 |
+
|
56 |
+
unet_config:
|
57 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
58 |
+
params:
|
59 |
+
use_checkpoint: False # gradient checkpointing
|
60 |
+
use_fp16: False
|
61 |
+
image_size: 32
|
62 |
+
in_channels: 4
|
63 |
+
out_channels: 4
|
64 |
+
model_channels: 320
|
65 |
+
attention_resolutions: [ 4, 2, 1 ]
|
66 |
+
num_res_blocks: 2
|
67 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
68 |
+
num_head_channels: 64 # need to fix for flash-attn
|
69 |
+
use_spatial_transformer: True
|
70 |
+
use_linear_in_transformer: True
|
71 |
+
transformer_depth: 1
|
72 |
+
context_dim: 1024
|
73 |
+
legacy: False
|
74 |
+
|
75 |
+
first_stage_config:
|
76 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
77 |
+
params:
|
78 |
+
embed_dim: 4
|
79 |
+
monitor: val/rec_loss
|
80 |
+
ddconfig:
|
81 |
+
double_z: true
|
82 |
+
z_channels: 4
|
83 |
+
resolution: 256
|
84 |
+
in_channels: 3
|
85 |
+
out_ch: 3
|
86 |
+
ch: 128
|
87 |
+
ch_mult:
|
88 |
+
- 1
|
89 |
+
- 2
|
90 |
+
- 4
|
91 |
+
- 4
|
92 |
+
num_res_blocks: 2
|
93 |
+
attn_resolutions: []
|
94 |
+
dropout: 0.0
|
95 |
+
lossconfig:
|
96 |
+
target: torch.nn.Identity
|
97 |
+
|
98 |
+
cond_stage_config:
|
99 |
+
target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
|
100 |
+
params:
|
101 |
+
arch: "ViT-H-14"
|
102 |
+
version: "laion2b_s32b_b79k"
|
103 |
+
freeze: True
|
104 |
+
layer: "penultimate"
|
105 |
+
cache_dir: /tmp/checkpoints/clip
|
106 |
+
device: "hpu"
|
107 |
+
hpu_graph: True
|
108 |
+
use_fused_adamw: True
|
109 |
+
|
110 |
+
data:
|
111 |
+
target: ldm.data.composable_data_module.ComposableDataModule
|
112 |
+
params:
|
113 |
+
train:
|
114 |
+
target: ldm.data.webdatasets.build_dataloader
|
115 |
+
params:
|
116 |
+
urls: /mnt/weka/data/mlperf_models/stable_diffusion/training/laion-400m/webdataset-moments-filtered/{00000..00831}.tar
|
117 |
+
batch_size: 8
|
118 |
+
shuffle: 1000
|
119 |
+
partial: False
|
120 |
+
keep_only_keys: ["npy", "txt"]
|
121 |
+
num_workers: 4
|
122 |
+
cache_size: 0
|
123 |
+
cache_dir:
|
124 |
+
persistent_workers: True
|
125 |
+
|
126 |
+
validation:
|
127 |
+
target: ldm.data.tsv.build_dataloader
|
128 |
+
params:
|
129 |
+
annotations_file: "/val_data/val2014_30k.tsv"
|
130 |
+
keys: ["image_id", "id", "caption"]
|
131 |
+
batch_size: 8
|
132 |
+
shuffle: False
|
133 |
+
num_workers: 1
|
134 |
+
|
135 |
+
lightning:
|
136 |
+
trainer:
|
137 |
+
accelerator: 'hpu'
|
138 |
+
num_nodes: 8
|
139 |
+
devices: 8
|
140 |
+
precision: 32
|
141 |
+
logger: False
|
142 |
+
log_every_n_steps: 5
|
143 |
+
enable_progress_bar: False
|
144 |
+
max_epochs: -1
|
145 |
+
max_steps: 10000000000000
|
146 |
+
val_check_interval: 1000
|
147 |
+
enable_checkpointing: True
|
148 |
+
num_sanity_val_steps: 0
|
149 |
+
strategy:
|
150 |
+
target: strategies.DDPStrategy
|
151 |
+
params:
|
152 |
+
find_unused_parameters: False
|
153 |
+
|
154 |
+
modelcheckpoint:
|
155 |
+
target: lightning.pytorch.callbacks.ModelCheckpoint
|
156 |
+
params:
|
157 |
+
save_top_k: -1
|
158 |
+
every_n_train_steps: 1000
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/configs/train_32x08x08.yaml
ADDED
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
model:
|
2 |
+
base_learning_rate: 1.25e-7
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
4 |
+
params:
|
5 |
+
parameterization: "v"
|
6 |
+
linear_start: 0.00085
|
7 |
+
linear_end: 0.0120
|
8 |
+
num_timesteps_cond: 1
|
9 |
+
log_every_t: 200
|
10 |
+
timesteps: 1000
|
11 |
+
first_stage_key: npy
|
12 |
+
first_stage_type: moments
|
13 |
+
cond_stage_key: txt
|
14 |
+
image_size: 64
|
15 |
+
channels: 4
|
16 |
+
cond_stage_trainable: false
|
17 |
+
conditioning_key: crossattn
|
18 |
+
monitor: steps
|
19 |
+
scale_factor: 0.18215
|
20 |
+
use_ema: False
|
21 |
+
|
22 |
+
load_vae: True
|
23 |
+
load_unet: False
|
24 |
+
load_encoder: True
|
25 |
+
|
26 |
+
validation_config:
|
27 |
+
sampler: "ddim" # plms, ddim, dpm
|
28 |
+
steps: 50
|
29 |
+
scale: 8.0
|
30 |
+
ddim_eta: 0.0
|
31 |
+
prompt_key: "caption"
|
32 |
+
image_fname_key: "image_id"
|
33 |
+
|
34 |
+
save_images:
|
35 |
+
enabled: False
|
36 |
+
base_output_dir: "/results/inference"
|
37 |
+
fid:
|
38 |
+
enabled: True
|
39 |
+
inception_weights_url: https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth
|
40 |
+
cache_dir: /tmp/checkpoints/inception
|
41 |
+
gt_path: /val_data/val2014_30k_stats.npz
|
42 |
+
clip:
|
43 |
+
enabled: True
|
44 |
+
clip_version: "ViT-H-14"
|
45 |
+
cache_dir: /tmp/checkpoints/clip
|
46 |
+
|
47 |
+
scheduler_config:
|
48 |
+
target: ldm.lr_scheduler.LambdaLinearScheduler
|
49 |
+
params:
|
50 |
+
warm_up_steps: [ 1000 ]
|
51 |
+
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
52 |
+
f_start: [ 1.e-6 ]
|
53 |
+
f_max: [ 1. ]
|
54 |
+
f_min: [ 1. ]
|
55 |
+
|
56 |
+
unet_config:
|
57 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
58 |
+
params:
|
59 |
+
use_checkpoint: False # gradient checkpointing
|
60 |
+
use_fp16: False
|
61 |
+
image_size: 32
|
62 |
+
in_channels: 4
|
63 |
+
out_channels: 4
|
64 |
+
model_channels: 320
|
65 |
+
attention_resolutions: [ 4, 2, 1 ]
|
66 |
+
num_res_blocks: 2
|
67 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
68 |
+
num_head_channels: 64 # need to fix for flash-attn
|
69 |
+
use_spatial_transformer: True
|
70 |
+
use_linear_in_transformer: True
|
71 |
+
transformer_depth: 1
|
72 |
+
context_dim: 1024
|
73 |
+
legacy: False
|
74 |
+
|
75 |
+
first_stage_config:
|
76 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
77 |
+
params:
|
78 |
+
embed_dim: 4
|
79 |
+
monitor: val/rec_loss
|
80 |
+
ddconfig:
|
81 |
+
double_z: true
|
82 |
+
z_channels: 4
|
83 |
+
resolution: 256
|
84 |
+
in_channels: 3
|
85 |
+
out_ch: 3
|
86 |
+
ch: 128
|
87 |
+
ch_mult:
|
88 |
+
- 1
|
89 |
+
- 2
|
90 |
+
- 4
|
91 |
+
- 4
|
92 |
+
num_res_blocks: 2
|
93 |
+
attn_resolutions: []
|
94 |
+
dropout: 0.0
|
95 |
+
lossconfig:
|
96 |
+
target: torch.nn.Identity
|
97 |
+
|
98 |
+
cond_stage_config:
|
99 |
+
target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
|
100 |
+
params:
|
101 |
+
arch: "ViT-H-14"
|
102 |
+
version: "laion2b_s32b_b79k"
|
103 |
+
freeze: True
|
104 |
+
layer: "penultimate"
|
105 |
+
cache_dir: /tmp/checkpoints/clip
|
106 |
+
device: "hpu"
|
107 |
+
hpu_graph: True
|
108 |
+
use_fused_adamw: True
|
109 |
+
|
110 |
+
data:
|
111 |
+
target: ldm.data.composable_data_module.ComposableDataModule
|
112 |
+
params:
|
113 |
+
train:
|
114 |
+
target: ldm.data.webdatasets.build_dataloader
|
115 |
+
params:
|
116 |
+
urls: /mnt/weka/data/mlperf_models/stable_diffusion/training/laion-400m/webdataset-moments-filtered/{00000..00831}.tar
|
117 |
+
batch_size: 8
|
118 |
+
shuffle: 1000
|
119 |
+
partial: False
|
120 |
+
keep_only_keys: ["npy", "txt"]
|
121 |
+
num_workers: 4
|
122 |
+
persistent_workers: True
|
123 |
+
|
124 |
+
validation:
|
125 |
+
target: ldm.data.tsv.build_dataloader
|
126 |
+
params:
|
127 |
+
annotations_file: "/val_data/val2014_30k.tsv"
|
128 |
+
keys: ["image_id", "id", "caption"]
|
129 |
+
batch_size: 8
|
130 |
+
shuffle: False
|
131 |
+
num_workers: 1
|
132 |
+
|
133 |
+
lightning:
|
134 |
+
trainer:
|
135 |
+
accelerator: 'hpu'
|
136 |
+
num_nodes: 32
|
137 |
+
devices: 8
|
138 |
+
precision: 32
|
139 |
+
logger: False
|
140 |
+
log_every_n_steps: 5
|
141 |
+
enable_progress_bar: False
|
142 |
+
max_epochs: -1
|
143 |
+
max_steps: 10000000000000
|
144 |
+
val_check_interval: 250
|
145 |
+
enable_checkpointing: True
|
146 |
+
num_sanity_val_steps: 0
|
147 |
+
strategy:
|
148 |
+
target: strategies.DDPStrategy
|
149 |
+
params:
|
150 |
+
find_unused_parameters: False
|
151 |
+
|
152 |
+
modelcheckpoint:
|
153 |
+
target: lightning.pytorch.callbacks.ModelCheckpoint
|
154 |
+
params:
|
155 |
+
save_top_k: -1
|
156 |
+
every_n_train_steps: 250
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/ldm/data/base.py
ADDED
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
import os
|
3 |
+
from abc import abstractmethod
|
4 |
+
|
5 |
+
import cv2
|
6 |
+
import numpy as np
|
7 |
+
import torch
|
8 |
+
from torch.utils.data import ChainDataset, ConcatDataset, Dataset, IterableDataset
|
9 |
+
|
10 |
+
|
11 |
+
class Txt2ImgIterableBaseDataset(IterableDataset):
    '''
    Define an interface to make the IterableDatasets for text2img data chainable
    '''

    def __init__(self, file_path: str, rank, world_size):
        """Index every (jpg, txt) pair found one folder level below *file_path*.

        Args:
            file_path: root directory; must end with a path separator because
                entries are joined by plain string concatenation.
            rank: rank of this process (kept for future sharding; unused).
            world_size: total number of processes (kept for future sharding; unused).
        """
        super().__init__()
        self.file_path = file_path
        self.folder_list = []
        self.file_list = []
        self.txt_list = []
        self.info = self._get_file_info(file_path)
        self.start = self.info['start']
        self.end = self.info['end']
        self.rank = rank

        self.world_size = world_size
        # NOTE(review): per-rank sharding (iter_start/iter_end) was disabled
        # upstream; every rank currently iterates the full [start, end) range.
        self.num_records = self.end - self.start
        self.valid_ids = list(range(self.end))

        print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.')

    def __len__(self):
        # Number of yielded samples (full range; no per-rank split).
        return self.end - self.start

    def __iter__(self):
        return self._sample_generator(self.start, self.end)

    def _sample_generator(self, start, end):
        """Yield dicts with the caption ('txt') and an RGB float image in [0, 1]."""
        for idx in range(start, end):
            file_name = self.file_list[idx]
            txt_name = self.txt_list[idx]
            # Fix: read the caption through a context manager so the file
            # handle is closed even if image decoding below raises.
            with open(txt_name, 'r') as caption_file:
                txt_ = caption_file.read()
            image = cv2.imdecode(np.fromfile(file_name, dtype=np.uint8), 1)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image = torch.from_numpy(image) / 255
            yield {"txt": txt_, "image": image}

    def _get_file_info(self, file_path):
        """Scan sub-folders of *file_path* and return {'start': 1, 'end': <count>}.

        Side effect: fills self.folder_list / self.file_list / self.txt_list.
        NOTE(review): 'start' is 1, so file_list[0] is never yielded — this
        mirrors the original behaviour; confirm whether it is intentional.
        """
        info = {
            "start": 1,
            "end": 0,
        }
        self.folder_list = [file_path + i for i in os.listdir(file_path) if '.' not in i]
        for folder in self.folder_list:
            files = [folder + '/' + i for i in os.listdir(folder) if 'jpg' in i]
            txts = [k.replace('jpg', 'txt') for k in files]
            self.file_list.extend(files)
            self.txt_list.extend(txts)
        info['end'] = len(self.file_list)
        return info
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/ldm/data/utils.py
ADDED
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from torchvision import transforms
|
2 |
+
from einops import rearrange
|
3 |
+
|
4 |
+
from ldm.util import instantiate_from_config
|
5 |
+
|
6 |
+
def instantiate_transforms_from_config(config):
    """Instantiate a torchvision transform from an OmegaConf-style config.

    For resize-like transforms, an ``interpolation`` given as a string is
    first converted to a ``transforms.InterpolationMode`` member.
    """
    resize_targets = (
        'torchvision.transforms.RandomResizedCrop',
        'torchvision.transforms.Resize',
    )
    if config.target in resize_targets:
        params = config['params']
        # This helper may be called repeatedly on the same config, so the
        # value may already have been cast from str to InterpolationMode;
        # the isinstance check makes the conversion idempotent.
        if "interpolation" in params and isinstance(params['interpolation'], str):
            config.params.interpolation = interpolation_from_string(params['interpolation'])
    return instantiate_from_config(config)
|
13 |
+
|
14 |
+
def interpolation_from_string(interpolation):
    """Map a lowercase interpolation name to its InterpolationMode member.

    Raises KeyError for names outside the supported set.
    """
    modes = transforms.InterpolationMode
    lookup = {
        'nearest': modes.NEAREST,
        'bilinear': modes.BILINEAR,
        'bicubic': modes.BICUBIC,
        'box': modes.BOX,
        'hamming': modes.HAMMING,
        'lanczos': modes.LANCZOS,
    }
    return lookup[interpolation]
|
24 |
+
|
25 |
+
def rearrange_transform(pattern):
    """Wrap einops.rearrange with a fixed *pattern* as a torchvision transform."""
    def _apply(tensor):
        return rearrange(tensor=tensor, pattern=pattern)
    return transforms.Lambda(_apply)
|
27 |
+
|
28 |
+
def identity(x):
    """Return *x* unchanged (no-op transform placeholder)."""
    return x
|
30 |
+
|
31 |
+
def keys_filter(keys):
    """Return a function that keeps only the entries of a sample whose key
    is in *keys*, dropping everything else."""
    wanted = set(keys)

    def filter_fn(sample):
        kept = {}
        for name, payload in sample.items():
            if name in wanted:
                kept[name] = payload
        return kept

    return filter_fn
|
35 |
+
|
36 |
+
def value_filter(key, predicate, value):
    """Build a predicate over samples comparing ``sample[key]`` to *value*.

    Args:
        key: dict key to read from each sample.
        predicate: one of "eq", "neq", "gt", "lt", "gte", "lte".
        value: right-hand side of the comparison.

    Returns:
        ``filter_fn(sample) -> bool``.  An unknown predicate raises
        ValueError when the filter is called (matching the original's
        call-time failure, not at construction time).
    """
    import operator

    # Dispatch table replaces the original six-way if/elif chain.
    comparators = {
        "eq": operator.eq,
        "neq": operator.ne,
        "gt": operator.gt,
        "lt": operator.lt,
        "gte": operator.ge,
        "lte": operator.le,
    }

    def filter_fn(sample):
        try:
            compare = comparators[predicate]
        except KeyError:
            raise ValueError(f"Unknown predicate: {predicate}") from None
        return compare(sample[key], value)

    return filter_fn
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/ldm/lr_scheduler.py
ADDED
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
|
3 |
+
|
4 |
+
class LambdaWarmUpCosineScheduler:
    """Linear warm-up followed by a cosine decay of the LR multiplier.

    note: use with a base_lr of 1.0
    """

    def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
        self.lr_warm_up_steps = warm_up_steps
        self.lr_start = lr_start
        self.lr_min = lr_min
        self.lr_max = lr_max
        self.lr_max_decay_steps = max_decay_steps
        self.last_lr = 0.
        self.verbosity_interval = verbosity_interval

    def schedule(self, n, **kwargs):
        if self.verbosity_interval > 0 and n % self.verbosity_interval == 0:
            print(f"current step: {n}, recent lr-multiplier: {self.last_lr}")
        if n < self.lr_warm_up_steps:
            # Linear ramp from lr_start up to lr_max over the warm-up window.
            slope = (self.lr_max - self.lr_start) / self.lr_warm_up_steps
            lr = slope * n + self.lr_start
        else:
            # Cosine decay from lr_max down to lr_min, clamped after the
            # decay horizon (progress saturates at 1.0).
            progress = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
            progress = min(progress, 1.0)
            lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (1 + np.cos(progress * np.pi))
        self.last_lr = lr
        return lr

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)
|
34 |
+
|
35 |
+
|
36 |
+
class LambdaWarmUpCosineScheduler2:
    """Warm-up + cosine decay with repeated cycles, configurable via lists.

    note: use with a base_lr of 1.0.
    """

    def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0):
        assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths)
        self.lr_warm_up_steps = warm_up_steps
        self.f_start = f_start
        self.f_min = f_min
        self.f_max = f_max
        self.cycle_lengths = cycle_lengths
        # Prefix sums give the absolute step at which each cycle starts.
        self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths))
        self.last_f = 0.
        self.verbosity_interval = verbosity_interval

    def find_in_interval(self, n):
        # Index of the cycle containing absolute step n.  Implicitly returns
        # None when n lies beyond the last cycle (preserved behaviour).
        for idx, boundary in enumerate(self.cum_cycles[1:]):
            if n <= boundary:
                return idx

    def schedule(self, n, **kwargs):
        cycle = self.find_in_interval(n)
        n = n - self.cum_cycles[cycle]
        if self.verbosity_interval > 0 and n % self.verbosity_interval == 0:
            print(f"current step: {n}, recent lr-multiplier: {self.last_f}, current cycle {cycle}")
        warm_up = self.lr_warm_up_steps[cycle]
        if n < warm_up:
            # Linear warm-up within the current cycle.
            f = (self.f_max[cycle] - self.f_start[cycle]) / warm_up * n + self.f_start[cycle]
        else:
            # Cosine decay over the remainder of the cycle, clamped at t=1.
            t = (n - warm_up) / (self.cycle_lengths[cycle] - warm_up)
            t = min(t, 1.0)
            f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * (1 + np.cos(t * np.pi))
        self.last_f = f
        return f

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)
|
79 |
+
|
80 |
+
|
81 |
+
class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):
    """Linear warm-up followed by a linear decay to f_min over each cycle."""

    def schedule(self, n, **kwargs):
        cycle = self.find_in_interval(n)
        step = n - self.cum_cycles[cycle]
        if self.verbosity_interval > 0 and step % self.verbosity_interval == 0:
            print(f"current step: {step}, recent lr-multiplier: {self.last_f}, current cycle {cycle}")

        warm_up = self.lr_warm_up_steps[cycle]
        if step < warm_up:
            # Linear warm-up from f_start to f_max.
            f = (self.f_max[cycle] - self.f_start[cycle]) / warm_up * step + self.f_start[cycle]
        else:
            # Linear decay: multiplier shrinks toward f_min as the cycle ends.
            remaining = self.cycle_lengths[cycle] - step
            f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * remaining / (self.cycle_lengths[cycle])
        self.last_f = f
        return f
|
98 |
+
|
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/stable_diffusion/ldm/models/autoencoder.py
ADDED
@@ -0,0 +1,227 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
try:
|
3 |
+
import lightning.pytorch as pl
|
4 |
+
except:
|
5 |
+
import pytorch_lightning as pl
|
6 |
+
|
7 |
+
import torch.nn.functional as F
|
8 |
+
from contextlib import contextmanager
|
9 |
+
|
10 |
+
from ldm.modules.diffusionmodules.model import Encoder, Decoder
|
11 |
+
from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
|
12 |
+
|
13 |
+
from ldm.util import instantiate_from_config
|
14 |
+
from ldm.modules.ema import LitEma
|
15 |
+
|
16 |
+
|
17 |
+
class AutoencoderKL(pl.LightningModule):
    """KL-regularized image autoencoder used as the latent-diffusion first stage.

    Encodes an image into the moments (mean, logvar) of a diagonal Gaussian
    posterior over a latent tensor, and decodes latents back to image space.
    Trained with two optimizers (autoencoder vs. discriminator) through the
    configured ``self.loss`` module.
    """

    def __init__(self,
                 ddconfig,
                 lossconfig,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 ema_decay=None,
                 learn_logvar=False
                 ):
        """Build encoder/decoder, optional EMA, and optionally restore weights.

        Args:
            ddconfig: kwargs for Encoder/Decoder; must set double_z=True so the
                encoder emits 2 * z_channels (mean and logvar).
            lossconfig: config instantiated into the training loss module.
            embed_dim: channel width of the latent produced by quant_conv.
            ckpt_path: optional checkpoint to restore from (strict=False).
            ignore_keys: state_dict key prefixes to drop when restoring.
                NOTE(review): mutable default list — harmless here, only read.
            image_key: batch key holding the input image.
            colorize_nlabels: if set, registers a random projection buffer used
                by to_rgb() for multi-channel (segmentation) inputs.
            monitor: metric name exposed for checkpoint callbacks.
            ema_decay: if not None, enables EMA weight tracking at this decay.
            learn_logvar: if True, adds self.loss.logvar to the AE optimizer.
        """
        super().__init__()
        self.learn_logvar = learn_logvar
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        assert ddconfig["double_z"]
        # 1x1 convs map encoder moments <-> embedding space of width embed_dim.
        self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        self.embed_dim = embed_dim
        if colorize_nlabels is not None:
            assert type(colorize_nlabels)==int
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor

        self.use_ema = ema_decay is not None
        if self.use_ema:
            self.ema_decay = ema_decay
            assert 0. < ema_decay < 1.
            self.model_ema = LitEma(self, decay=ema_decay)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)

    def init_from_ckpt(self, path, ignore_keys=list()):
        """Restore weights from *path*, dropping keys matching any prefix in
        *ignore_keys*; loads with strict=False so partial checkpoints work."""
        sd = torch.load(path, map_location="cpu")["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f"Restored from {path}")

    @contextmanager
    def ema_scope(self, context=None):
        """Temporarily swap in EMA weights; restore training weights on exit.

        No-op when EMA is disabled (use_ema is False).
        """
        if self.use_ema:
            self.model_ema.store(self.parameters())
            self.model_ema.copy_to(self)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def on_train_batch_end(self, *args, **kwargs):
        # Update the EMA shadow weights after each optimizer step.
        if self.use_ema:
            self.model_ema(self)

    def moments(self, x):
        """Return the raw posterior moments (mean/logvar, concatenated on the
        channel axis) for input image batch *x*."""
        h = self.encoder(x)
        moments = self.quant_conv(h)
        return moments

    def encode(self, x):
        """Encode *x* into a DiagonalGaussianDistribution posterior."""
        moments = self.moments(x)
        posterior = DiagonalGaussianDistribution(moments)
        return posterior

    def decode(self, z):
        """Decode a latent *z* back to image space."""
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        return dec

    def forward(self, input, sample_posterior=True):
        """Encode then decode; sample the posterior (or take its mode).

        Returns:
            (reconstruction, posterior) tuple.
        """
        posterior = self.encode(input)
        if sample_posterior:
            z = posterior.sample()
        else:
            z = posterior.mode()
        dec = self.decode(z)
        return dec, posterior

    def get_input(self, batch, k):
        """Fetch batch[k] and convert to contiguous float NCHW (adds a channel
        axis to 3-D inputs before permuting)."""
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
        return x

    def training_step(self, batch, batch_idx, optimizer_idx):
        """One optimization step (legacy Lightning multi-optimizer API).

        optimizer_idx 0 trains the autoencoder, 1 trains the discriminator;
        both paths call the same loss module with a different index.
        """
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)

        if optimizer_idx == 0:
            # train encoder+decoder+logvar
            aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")
            self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return aeloss

        if optimizer_idx == 1:
            # train the discriminator
            discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
                                                last_layer=self.get_last_layer(), split="train")

            self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return discloss

    def validation_step(self, batch, batch_idx):
        """Validate with both the current and (if enabled) the EMA weights."""
        log_dict = self._validation_step(batch, batch_idx)
        with self.ema_scope():
            # Runs for its logging side effects; the returned value is unused.
            log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema")
        return log_dict

    def _validation_step(self, batch, batch_idx, postfix=""):
        """Compute and log both loss branches on a validation batch.

        *postfix* distinguishes EMA-weight metrics ("_ema") from regular ones.
        """
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)
        aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
                                        last_layer=self.get_last_layer(), split="val"+postfix)

        discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
                                            last_layer=self.get_last_layer(), split="val"+postfix)

        self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"])
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        """Two Adam optimizers: one for the autoencoder path (optionally with
        the loss logvar parameter), one for the discriminator."""
        lr = self.learning_rate
        ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(
            self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())
        if self.learn_logvar:
            print(f"{self.__class__.__name__}: Learning logvar")
            ae_params_list.append(self.loss.logvar)
        opt_ae = torch.optim.Adam(ae_params_list,
                                  lr=lr, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
                                    lr=lr, betas=(0.5, 0.9))
        return [opt_ae, opt_disc], []

    def get_last_layer(self):
        # Final decoder conv weight; presumably used by the adversarial loss
        # to balance its terms — confirm against the lossconfig implementation.
        return self.decoder.conv_out.weight

    @torch.no_grad()
    def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):
        """Return a dict of input / reconstruction / random-sample images for
        visualization, optionally also with EMA weights."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if not only_inputs:
            xrec, posterior = self(x)
            if x.shape[1] > 3:
                # colorize with random projection
                assert xrec.shape[1] > 3
                x = self.to_rgb(x)
                xrec = self.to_rgb(xrec)
            log["samples"] = self.decode(torch.randn_like(posterior.sample()))
            log["reconstructions"] = xrec
            if log_ema or self.use_ema:
                with self.ema_scope():
                    xrec_ema, posterior_ema = self(x)
                    if x.shape[1] > 3:
                        # colorize with random projection
                        assert xrec_ema.shape[1] > 3
                        xrec_ema = self.to_rgb(xrec_ema)
                    log["samples_ema"] = self.decode(torch.randn_like(posterior_ema.sample()))
                    log["reconstructions_ema"] = xrec_ema
        log["inputs"] = x
        return log

    def to_rgb(self, x):
        """Project a multi-channel (segmentation) tensor to 3 channels via a
        fixed random 1x1 conv, then rescale to [-1, 1]."""
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
        return x
|
207 |
+
|
208 |
+
|
209 |
+
class IdentityFirstStage(torch.nn.Module):
    """A pass-through first stage: every operation returns its input unchanged.

    Useful when a diffusion model should operate directly in pixel space.
    """

    def __init__(self, *args, vq_interface=False, **kwargs):
        # vq_interface makes quantize() mimic a VQ model's 3-tuple return.
        self.vq_interface = vq_interface
        super().__init__()

    def encode(self, x, *args, **kwargs):
        """Identity: return *x* as its own "latent"."""
        return x

    def decode(self, x, *args, **kwargs):
        """Identity: return *x* as its own "reconstruction"."""
        return x

    def quantize(self, x, *args, **kwargs):
        """Return *x*, padded to the VQ (x, loss, info) shape when
        vq_interface is enabled."""
        if not self.vq_interface:
            return x
        return x, None, [None, None, None]

    def forward(self, x, *args, **kwargs):
        """Identity forward pass."""
        return x
|
227 |
+
|