python_code | repo_name | file_path
---|---|---|
# SPDX-License-Identifier: GPL-2.0
"""
Detect circular variable expansion.
If a recursively expanded variable references itself (eventually),
it should fail with an error message.
"""
def test(conf):
assert conf.oldaskconfig() != 0
assert conf.stderr_matches('expected_stderr')
| grace-kernel-grace-kernel-6.1.y | scripts/kconfig/tests/preprocess/circular_expansion/__init__.py |
# SPDX-License-Identifier: GPL-2.0
"""
Built-in function tests.
"""
def test(conf):
assert conf.oldaskconfig() == 0
assert conf.stdout_contains('expected_stdout')
assert conf.stderr_matches('expected_stderr')
| grace-kernel-grace-kernel-6.1.y | scripts/kconfig/tests/preprocess/builtin_func/__init__.py |
# SPDX-License-Identifier: GPL-2.0
"""
Basic choice tests.
The handling of 'choice' is a somewhat complicated part of Kconfig.
The behavior of 'y' choice is intuitive. If choice values are tristate,
the choice can be 'm' where each value can be enabled independently.
Also, if a choice is marked as 'optional', the whole choice can be
invisible.
"""
def test_oldask0(conf):
assert conf.oldaskconfig() == 0
assert conf.stdout_contains('oldask0_expected_stdout')
def test_oldask1(conf):
assert conf.oldaskconfig('oldask1_config') == 0
assert conf.stdout_contains('oldask1_expected_stdout')
def test_allyes(conf):
assert conf.allyesconfig() == 0
assert conf.config_contains('allyes_expected_config')
def test_allmod(conf):
assert conf.allmodconfig() == 0
assert conf.config_contains('allmod_expected_config')
def test_allno(conf):
assert conf.allnoconfig() == 0
assert conf.config_contains('allno_expected_config')
def test_alldef(conf):
assert conf.alldefconfig() == 0
assert conf.config_contains('alldef_expected_config')
| grace-kernel-grace-kernel-6.1.y | scripts/kconfig/tests/choice/__init__.py |
# SPDX-License-Identifier: GPL-2.0
"""
Detect recursive inclusion error.
If recursive inclusion is detected, it should fail with error messages.
"""
def test(conf):
assert conf.oldaskconfig() != 0
assert conf.stderr_contains('expected_stderr')
| grace-kernel-grace-kernel-6.1.y | scripts/kconfig/tests/err_recursive_inc/__init__.py |
# SPDX-License-Identifier: GPL-2.0
"""
Detect recursive dependency error.
Recursive dependency should be treated as an error.
"""
def test(conf):
assert conf.oldaskconfig() == 1
assert conf.stderr_contains('expected_stderr')
| grace-kernel-grace-kernel-6.1.y | scripts/kconfig/tests/err_recursive_dep/__init__.py |
# SPDX-License-Identifier: GPL-2.0
"""
Do not write choice values to .config if the dependency is unmet.
"# CONFIG_... is not set" should not be written into the .config file
for symbols with unmet dependency.
This was not working correctly for choice values because choices need
slightly different symbol computation.
This checks that no unneeded "# CONFIG_... is not set" is contained in
the .config file.
Related Linux commit: cb67ab2cd2b8abd9650292c986c79901e3073a59
"""
def test(conf):
assert conf.oldaskconfig('config', 'n') == 0
assert conf.config_matches('expected_config')
| grace-kernel-grace-kernel-6.1.y | scripts/kconfig/tests/no_write_if_dep_unmet/__init__.py |
# SPDX-License-Identifier: GPL-2.0
"""
Hide tristate choice values with mod dependency in y choice.
If tristate choice values depend on symbols set to 'm', they should be
hidden when the choice containing them is changed from 'm' to 'y'
(i.e. exclusive choice).
Related Linux commit: fa64e5f6a35efd5e77d639125d973077ca506074
"""
def test(conf):
assert conf.oldaskconfig('config', 'y') == 0
assert conf.config_contains('expected_config')
assert conf.stdout_contains('expected_stdout')
| grace-kernel-grace-kernel-6.1.y | scripts/kconfig/tests/choice_value_with_m_dep/__init__.py |
# SPDX-License-Identifier: GPL-2.0
"""
Ask new choice values when they become visible.
If new choice values are added with a new dependency, and they become
visible during user configuration, oldconfig should recognize them
as (NEW) and ask the user to make a choice.
Related Linux commit: 5d09598d488f081e3be23f885ed65cbbe2d073b5
"""
def test(conf):
assert conf.oldconfig('config', 'y') == 0
assert conf.stdout_contains('expected_stdout')
| grace-kernel-grace-kernel-6.1.y | scripts/kconfig/tests/new_choice_with_dep/__init__.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) Google LLC, 2020
#
# Author: Nathan Huckleberry <[email protected]>
#
"""A helper routine run clang-tidy and the clang static-analyzer on
compile_commands.json.
"""
import argparse
import json
import multiprocessing
import os
import subprocess
import sys
def parse_arguments():
"""Set up and parses command-line arguments.
Returns:
args: Dict of parsed args
Has keys: [path, type]
"""
usage = """Run clang-tidy or the clang static-analyzer on a
compilation database."""
parser = argparse.ArgumentParser(description=usage)
type_help = "Type of analysis to be performed"
parser.add_argument("type",
choices=["clang-tidy", "clang-analyzer"],
help=type_help)
path_help = "Path to the compilation database to parse"
parser.add_argument("path", type=str, help=path_help)
return parser.parse_args()
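# Pool initializer: each worker process stores the shared lock and the parsed
# arguments in module-level globals so run_analysis() below can use them.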
def init(l, a):
global lock
global args
lock = l
args = a
def run_analysis(entry):
# Disable all checks, then re-enable the ones we want
checks = []
checks.append("-checks=-*")
if args.type == "clang-tidy":
checks.append("linuxkernel-*")
else:
checks.append("clang-analyzer-*")
checks.append("-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling")
p = subprocess.run(["clang-tidy", "-p", args.path, ",".join(checks), entry["file"]],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=entry["directory"])
with lock:
sys.stderr.buffer.write(p.stdout)
def main():
try:
args = parse_arguments()
lock = multiprocessing.Lock()
pool = multiprocessing.Pool(initializer=init, initargs=(lock, args))
# Read JSON data into the datastore variable
with open(args.path, "r") as f:
datastore = json.load(f)
pool.map(run_analysis, datastore)
except BrokenPipeError:
# Python flushes standard streams on exit; redirect remaining output
# to devnull to avoid another BrokenPipeError at shutdown
devnull = os.open(os.devnull, os.O_WRONLY)
os.dup2(devnull, sys.stdout.fileno())
sys.exit(1) # Python exits with error code 1 on EPIPE
if __name__ == "__main__":
main()
| grace-kernel-grace-kernel-6.1.y | scripts/clang-tools/run-clang-tools.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) Google LLC, 2018
#
# Author: Tom Roeder <[email protected]>
#
"""A tool for generating compile_commands.json in the Linux kernel."""
import argparse
import json
import logging
import os
import re
import subprocess
import sys
_DEFAULT_OUTPUT = 'compile_commands.json'
_DEFAULT_LOG_LEVEL = 'WARNING'
_FILENAME_PATTERN = r'^\..*\.cmd$'
_LINE_PATTERN = r'^cmd_[^ ]*\.o := (.* )([^ ]*\.c) *(;|$)'
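# Illustrative .cmd line matched by _LINE_PATTERN (not taken from a real build):
#   cmd_init/main.o := gcc -Wp,-MMD,init/.main.o.d -c -o init/main.o init/main.c
# group(1) captures the command prefix, group(2) the trailing .c source file.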
_VALID_LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
# The tools/ directory adopts a different build system, and produces .cmd
# files in a different format. Do not support it.
_EXCLUDE_DIRS = ['.git', 'Documentation', 'include', 'tools']
def parse_arguments():
"""Sets up and parses command-line arguments.
Returns:
log_level: A logging level to filter log output.
directory: The work directory where the objects were built.
ar: Command used for parsing .a archives.
output: Where to write the compile-commands JSON file.
paths: The list of files/directories to handle to find .cmd files.
"""
usage = 'Creates a compile_commands.json database from kernel .cmd files'
parser = argparse.ArgumentParser(description=usage)
directory_help = ('specify the output directory used for the kernel build '
'(defaults to the working directory)')
parser.add_argument('-d', '--directory', type=str, default='.',
help=directory_help)
output_help = ('path to the output command database (defaults to ' +
_DEFAULT_OUTPUT + ')')
parser.add_argument('-o', '--output', type=str, default=_DEFAULT_OUTPUT,
help=output_help)
log_level_help = ('the level of log messages to produce (defaults to ' +
_DEFAULT_LOG_LEVEL + ')')
parser.add_argument('--log_level', choices=_VALID_LOG_LEVELS,
default=_DEFAULT_LOG_LEVEL, help=log_level_help)
ar_help = 'command used for parsing .a archives'
parser.add_argument('-a', '--ar', type=str, default='llvm-ar', help=ar_help)
paths_help = ('directories to search or files to parse '
'(files should be *.o, *.a, or modules.order). '
'If nothing is specified, the current directory is searched')
parser.add_argument('paths', type=str, nargs='*', help=paths_help)
args = parser.parse_args()
return (args.log_level,
os.path.abspath(args.directory),
args.output,
args.ar,
args.paths if len(args.paths) > 0 else [args.directory])
def cmdfiles_in_dir(directory):
"""Generate the iterator of .cmd files found under the directory.
Walk under the given directory, and yield every .cmd file found.
Args:
directory: The directory to search for .cmd files.
Yields:
The path to a .cmd file.
"""
filename_matcher = re.compile(_FILENAME_PATTERN)
exclude_dirs = [ os.path.join(directory, d) for d in _EXCLUDE_DIRS ]
for dirpath, dirnames, filenames in os.walk(directory, topdown=True):
# Prune unwanted directories.
if dirpath in exclude_dirs:
dirnames[:] = []
continue
for filename in filenames:
if filename_matcher.match(filename):
yield os.path.join(dirpath, filename)
def to_cmdfile(path):
"""Return the path of .cmd file used for the given build artifact
Args:
Path: file path
Returns:
The path to .cmd file
"""
dir, base = os.path.split(path)
return os.path.join(dir, '.' + base + '.cmd')
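# Example: to_cmdfile('init/main.o') returns 'init/.main.o.cmd'.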
def cmdfiles_for_a(archive, ar):
"""Generate the iterator of .cmd files associated with the archive.
Parse the given archive, and yield every .cmd file used to build it.
Args:
archive: The archive to parse
Yields:
The path to every .cmd file found
"""
for obj in subprocess.check_output([ar, '-t', archive]).decode().split():
yield to_cmdfile(obj)
def cmdfiles_for_modorder(modorder):
"""Generate the iterator of .cmd files associated with the modules.order.
Parse the given modules.order, and yield every .cmd file used to build the
contained modules.
Args:
modorder: The modules.order file to parse
Yields:
The path to every .cmd file found
"""
with open(modorder) as f:
for line in f:
ko = line.rstrip()
base, ext = os.path.splitext(ko)
if ext != '.ko':
sys.exit('{}: module path must end with .ko'.format(ko))
mod = base + '.mod'
# Read from *.mod, to get a list of objects that compose the module.
with open(mod) as m:
for mod_line in m:
yield to_cmdfile(mod_line.rstrip())
def process_line(root_directory, command_prefix, file_path):
"""Extracts information from a .cmd line and creates an entry from it.
Args:
root_directory: The directory that was searched for .cmd files. Usually
used directly in the "directory" entry in compile_commands.json.
command_prefix: The extracted command line, up to the last element.
file_path: The .c file from the end of the extracted command.
Usually relative to root_directory, but sometimes absolute.
Returns:
An entry to append to compile_commands.
Raises:
ValueError: Could not find the extracted file based on file_path and
root_directory or file_directory.
"""
# The .cmd files are intended to be included directly by Make, so they
# escape the pound sign '#', either as '\#' or '$(pound)' (depending on the
# kernel version). The compile_commands.json file is not interpreted
# by Make, so this code replaces the escaped version with '#'.
prefix = command_prefix.replace('\#', '#').replace('$(pound)', '#')
# Use os.path.abspath() to normalize the path resolving '.' and '..' .
abs_path = os.path.abspath(os.path.join(root_directory, file_path))
if not os.path.exists(abs_path):
raise ValueError('File %s not found' % abs_path)
return {
'directory': root_directory,
'file': abs_path,
'command': prefix + file_path,
}
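# An entry produced here looks like (illustrative values only):
#   {'directory': '/path/to/build', 'file': '/path/to/build/init/main.c',
#    'command': 'gcc ... -c -o init/main.o init/main.c'}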
def main():
"""Walks through the directory and finds and parses .cmd files."""
log_level, directory, output, ar, paths = parse_arguments()
level = getattr(logging, log_level)
logging.basicConfig(format='%(levelname)s: %(message)s', level=level)
line_matcher = re.compile(_LINE_PATTERN)
compile_commands = []
for path in paths:
# If 'path' is a directory, handle all .cmd files under it.
# Otherwise, handle .cmd files associated with the file.
# built-in objects are linked via vmlinux.a
# Modules are listed in modules.order.
if os.path.isdir(path):
cmdfiles = cmdfiles_in_dir(path)
elif path.endswith('.a'):
cmdfiles = cmdfiles_for_a(path, ar)
elif path.endswith('modules.order'):
cmdfiles = cmdfiles_for_modorder(path)
else:
sys.exit('{}: unknown file type'.format(path))
for cmdfile in cmdfiles:
with open(cmdfile, 'rt') as f:
result = line_matcher.match(f.readline())
if result:
try:
entry = process_line(directory, result.group(1),
result.group(2))
compile_commands.append(entry)
except ValueError as err:
logging.info('Could not add line from %s: %s',
cmdfile, err)
with open(output, 'wt') as f:
json.dump(compile_commands, f, indent=2, sort_keys=True)
if __name__ == '__main__':
main()
| grace-kernel-grace-kernel-6.1.y | scripts/clang-tools/gen_compile_commands.py |
import os
import sys
from PyTorch.Detection.SSD.ssd import nvidia_ssd, nvidia_ssd_processing_utils
sys.path.append(os.path.join(sys.path[0], 'PyTorch/Detection/SSD'))
from PyTorch.Classification.ConvNets.image_classification.models import resnet50 as nvidia_resnet50
from PyTorch.Classification.ConvNets.image_classification.models import resnext101_32x4d as nvidia_resnext101_32x4d
from PyTorch.Classification.ConvNets.image_classification.models import se_resnext101_32x4d as nvidia_se_resnext101_32x4d
from PyTorch.Classification.ConvNets.image_classification.models import efficientnet_b0 as nvidia_efficientnet_b0
from PyTorch.Classification.ConvNets.image_classification.models import efficientnet_b4 as nvidia_efficientnet_b4
from PyTorch.Classification.ConvNets.image_classification.models import efficientnet_widese_b0 as nvidia_efficientnet_widese_b0
from PyTorch.Classification.ConvNets.image_classification.models import efficientnet_widese_b4 as nvidia_efficientnet_widese_b4
from PyTorch.Classification.ConvNets.image_classification.models import nvidia_convnets_processing_utils
from PyTorch.Classification.ConvNets.image_classification.models import resnext101_32x4d as nvidia_resneXt
from PyTorch.Classification.ConvNets.image_classification.models import nvidia_efficientnet
sys.path.append(os.path.join(sys.path[0], 'PyTorch/Classification/ConvNets/image_classification'))
from PyTorch.Classification.GPUNet.configs.gpunet_torchhub import nvidia_gpunet
sys.path.append(os.path.join(sys.path[0], 'PyTorch/Classification/GPUNet/configs'))
from PyTorch.SpeechSynthesis.Tacotron2.tacotron2 import nvidia_tacotron2
from PyTorch.SpeechSynthesis.Tacotron2.tacotron2 import nvidia_tts_utils
from PyTorch.SpeechSynthesis.Tacotron2.waveglow import nvidia_waveglow
sys.path.append(os.path.join(sys.path[0], 'PyTorch/SpeechSynthesis/Tacotron2'))
from PyTorch.SpeechSynthesis.HiFiGAN.fastpitch import nvidia_fastpitch
from PyTorch.SpeechSynthesis.HiFiGAN.fastpitch import nvidia_textprocessing_utils
from PyTorch.SpeechSynthesis.HiFiGAN.hifigan import nvidia_hifigan
sys.path.append(os.path.join(sys.path[0], 'PyTorch/SpeechSynthesis/HiFiGAN'))
from PyTorch.Forecasting.TFT.tft_torchhub import nvidia_tft, nvidia_tft_data_utils
sys.path.append(os.path.join(sys.path[0], 'PyTorch/Forecasting/TFT'))
| DeepLearningExamples-master | hubconf.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import paddle
import program
from dali import build_dataloader
from utils.mode import Mode
from utils.save_load import init_ckpt
from utils.logger import setup_dllogger
from utils.config import parse_args, print_args
def main(args):
'''
Export saved model params to a Paddle inference model.
'''
setup_dllogger(args.trt_export_log_path)
if args.show_config:
print_args(args)
eval_dataloader = build_dataloader(args, Mode.EVAL)
startup_prog = paddle.static.Program()
eval_prog = paddle.static.Program()
eval_fetchs, _, eval_feeds, _ = program.build(
args,
eval_prog,
startup_prog,
step_each_epoch=len(eval_dataloader),
is_train=False)
eval_prog = eval_prog.clone(for_test=True)
device = paddle.set_device('gpu')
exe = paddle.static.Executor(device)
exe.run(startup_prog)
path_to_ckpt = args.from_checkpoint
if path_to_ckpt is None:
logging.warning(
'--from-checkpoint is not set, so model weights will not be initialized.'
)
else:
init_ckpt(path_to_ckpt, eval_prog, exe)
logging.info('Checkpoint path is %s', path_to_ckpt)
save_inference_dir = args.trt_inference_dir
paddle.static.save_inference_model(
path_prefix=os.path.join(save_inference_dir, args.model_arch_name),
feed_vars=[eval_feeds['data']],
fetch_vars=[eval_fetchs['label'][0]],
executor=exe,
program=eval_prog)
logging.info('Successfully exported inference model to %s',
save_inference_dir)
if __name__ == '__main__':
paddle.enable_static()
main(parse_args(including_trt=True))
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/export_model.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
import paddle
class Cosine:
"""
Cosine learning rate decay.
lr = eta_min + 0.5 * (learning_rate - eta_min) * (cos(epoch * (PI / epochs)) + 1)
Args:
args(Namespace): Arguments obtained from ArgumentParser.
step_each_epoch(int): The number of steps in each epoch.
last_epoch (int, optional): The index of last epoch. Can be set to restart training.
Default: -1, meaning initial learning rate.
"""
def __init__(self, args, step_each_epoch, last_epoch=-1):
super().__init__()
if args.warmup_epochs >= args.epochs:
args.warmup_epochs = args.epochs
self.learning_rate = args.lr
self.T_max = (args.epochs - args.warmup_epochs) * step_each_epoch
self.eta_min = 0.0
self.last_epoch = last_epoch
self.warmup_steps = round(args.warmup_epochs * step_each_epoch)
self.warmup_start_lr = args.warmup_start_lr
def __call__(self):
learning_rate = paddle.optimizer.lr.CosineAnnealingDecay(
learning_rate=self.learning_rate,
T_max=self.T_max,
eta_min=self.eta_min,
last_epoch=self.last_epoch) if self.T_max > 0 else self.learning_rate
if self.warmup_steps > 0:
learning_rate = paddle.optimizer.lr.LinearWarmup(
learning_rate=learning_rate,
warmup_steps=self.warmup_steps,
start_lr=self.warmup_start_lr,
end_lr=self.learning_rate,
last_epoch=self.last_epoch)
return learning_rate
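# Usage sketch (assumes an argparse Namespace providing lr, epochs, warmup_epochs
# and warmup_start_lr, as this repository's argument parser does):
#   lr_scheduler = Cosine(args, step_each_epoch=len(train_dataloader))()
#   for _ in range(total_steps):
#       ...  # run one training step
#       lr_scheduler.step()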
def build_lr_scheduler(args, step_each_epoch):
"""
Build a learning rate scheduler.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
step_each_epoch(int): The number of steps in each epoch.
return:
lr(paddle.optimizer.lr.LRScheduler): A learning rate scheduler.
"""
# Turn last_epoch to last_step, since we update lr each step instead of each epoch.
last_step = args.start_epoch * step_each_epoch - 1
learning_rate_mod = sys.modules[__name__]
lr = getattr(learning_rate_mod, args.lr_scheduler)(args, step_each_epoch,
last_step)
if not isinstance(lr, paddle.optimizer.lr.LRScheduler):
lr = lr()
logging.info("build lr %s success..", lr)
return lr
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/lr_scheduler.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from contextlib import contextmanager
from utils.cuda_bind import cuda_profile_start, cuda_profile_stop
from utils.cuda_bind import cuda_nvtx_range_push, cuda_nvtx_range_pop
class Profiler:
def __init__(self):
super().__init__()
self._enable_profile = int(os.environ.get('ENABLE_PROFILE', 0))
self._start_step = int(os.environ.get('PROFILE_START_STEP', 0))
self._stop_step = int(os.environ.get('PROFILE_STOP_STEP', 0))
if self._enable_profile:
log_msg = f"Profiling start at {self._start_step}-th and stop at {self._stop_step}-th iteration"
logging.info(log_msg)
def profile_setup(self, step):
"""
Setup profiling related status.
Args:
step (int): the index of iteration.
Return:
stop (bool): a signal to indicate whether profiling should stop or not.
"""
if self._enable_profile and step == self._start_step:
cuda_profile_start()
logging.info("Profiling start at %d-th iteration",
self._start_step)
if self._enable_profile and step == self._stop_step:
cuda_profile_stop()
logging.info("Profiling stop at %d-th iteration", self._stop_step)
return True
return False
def profile_tag_push(self, step, msg):
if self._enable_profile and \
step >= self._start_step and \
step < self._stop_step:
tag_msg = f"Iter-{step}-{msg}"
cuda_nvtx_range_push(tag_msg)
def profile_tag_pop(self):
if self._enable_profile:
cuda_nvtx_range_pop()
@contextmanager
def profile_tag(self, step, msg):
self.profile_tag_push(step, msg)
yield
self.profile_tag_pop()
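# Usage sketch (profiling is driven by the ENABLE_PROFILE, PROFILE_START_STEP and
# PROFILE_STOP_STEP environment variables read in __init__ above):
#   profiler = Profiler()
#   with profiler.profile_tag(step, "Training"):
#       run_one_step()  # hypothetical per-step work
#   if profiler.profile_setup(step):
#       pass  # the stop step was reached; stop iterating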
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/profile.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import os
from dataclasses import dataclass
from cuda import cudart
import paddle
import numpy as np
from nvidia.dali.backend import TensorListCPU
import nvidia.dali.ops as ops
import nvidia.dali.fn as fn
import nvidia.dali.types as types
from nvidia.dali.pipeline import Pipeline
from nvidia.dali.plugin.paddle import DALIGenericIterator
from utils.mode import Mode
from utils.utility import get_num_trainers, get_trainer_id
@dataclass
class PipeOpMeta:
crop: int
resize_shorter: int
min_area: float
max_area: float
lower: float
upper: float
interp: types.DALIInterpType
mean: float
std: float
output_dtype: types.DALIDataType
output_layout: str
pad_output: bool
class HybridPipeBase(Pipeline):
def __init__(self,
file_root,
batch_size,
device_id,
ops_meta,
num_threads=4,
seed=42,
shard_id=0,
num_shards=1,
random_shuffle=True,
dont_use_mmap=True):
super().__init__(batch_size, num_threads, device_id, seed=seed)
self.input = ops.readers.File(
file_root=file_root,
shard_id=shard_id,
num_shards=num_shards,
random_shuffle=random_shuffle,
dont_use_mmap=dont_use_mmap)
self.build_ops(ops_meta)
def build_ops(self, ops_meta):
pass
def __len__(self):
return self.epoch_size("Reader")
class HybridTrainPipe(HybridPipeBase):
def build_ops(self, ops_meta):
# Set internal nvJPEG buffers size to handle full-sized ImageNet images
# without additional reallocations
device_memory_padding = 211025920
host_memory_padding = 140544512
self.decode = ops.decoders.ImageRandomCrop(
device='mixed',
output_type=types.DALIImageType.RGB,
device_memory_padding=device_memory_padding,
host_memory_padding=host_memory_padding,
random_aspect_ratio=[ops_meta.lower, ops_meta.upper],
random_area=[ops_meta.min_area, ops_meta.max_area],
num_attempts=100)
self.res = ops.Resize(
device='gpu',
resize_x=ops_meta.crop,
resize_y=ops_meta.crop,
interp_type=ops_meta.interp)
self.cmnp = ops.CropMirrorNormalize(
device="gpu",
dtype=ops_meta.output_dtype,
output_layout=ops_meta.output_layout,
crop=(ops_meta.crop, ops_meta.crop),
mean=ops_meta.mean,
std=ops_meta.std,
pad_output=ops_meta.pad_output)
self.coin = ops.random.CoinFlip(probability=0.5)
self.to_int64 = ops.Cast(dtype=types.DALIDataType.INT64, device="gpu")
def define_graph(self):
rng = self.coin()
jpegs, labels = self.input(name="Reader")
images = self.decode(jpegs)
images = self.res(images)
output = self.cmnp(images.gpu(), mirror=rng)
return [output, self.to_int64(labels.gpu())]
class HybridValPipe(HybridPipeBase):
def build_ops(self, ops_meta):
self.decode = ops.decoders.Image(device="mixed")
self.res = ops.Resize(
device="gpu",
resize_shorter=ops_meta.resize_shorter,
interp_type=ops_meta.interp)
self.cmnp = ops.CropMirrorNormalize(
device="gpu",
dtype=ops_meta.output_dtype,
output_layout=ops_meta.output_layout,
crop=(ops_meta.crop, ops_meta.crop),
mean=ops_meta.mean,
std=ops_meta.std,
pad_output=ops_meta.pad_output)
self.to_int64 = ops.Cast(dtype=types.DALIDataType.INT64, device="gpu")
def define_graph(self):
jpegs, labels = self.input(name="Reader")
images = self.decode(jpegs)
images = self.res(images)
output = self.cmnp(images)
return [output, self.to_int64(labels.gpu())]
def dali_dataloader(args, mode, device):
"""
Define a DALI dataloader configured to operate on the dataset.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
mode(utils.Mode): Train or eval mode.
device(str): The device to load data on, e.g. 'gpu:0'.
Outputs:
DALIGenericIterator(nvidia.dali.plugin.paddle.DALIGenericIterator)
Iterable outputs of the DALI pipeline,
including "data" and "label" as Paddle Tensors.
"""
assert "gpu" in device, "gpu training is required for DALI"
assert mode in Mode, "Dataset mode should be in supported Modes"
device_id = int(device.split(':')[1])
seed = args.dali_random_seed
num_threads = args.dali_num_threads
batch_size = args.batch_size
interp = 1 # default to linear interpolation (cv2.INTER_LINEAR); see interp_map below
interp_map = {
# cv2.INTER_NEAREST
0: types.DALIInterpType.INTERP_NN,
# cv2.INTER_LINEAR
1: types.DALIInterpType.INTERP_LINEAR,
# cv2.INTER_CUBIC
2: types.DALIInterpType.INTERP_CUBIC,
# LANCZOS3 for cv2.INTER_LANCZOS4
3: types.DALIInterpType.INTERP_LANCZOS3
}
assert interp in interp_map, "interpolation method not supported by DALI"
interp = interp_map[interp]
normalize_scale = args.normalize_scale
normalize_mean = args.normalize_mean
normalize_std = args.normalize_std
normalize_mean = [v / normalize_scale for v in normalize_mean]
normalize_std = [v / normalize_scale for v in normalize_std]
output_layout = args.data_layout[1:] # NCHW -> CHW or NHWC -> HWC
pad_output = args.image_channel == 4
output_dtype = types.FLOAT16 if args.dali_output_fp16 else types.FLOAT
shard_id = get_trainer_id()
num_shards = get_num_trainers()
scale = args.rand_crop_scale
ratio = args.rand_crop_ratio
ops_meta = PipeOpMeta(
crop=args.crop_size,
resize_shorter=args.resize_short,
min_area=scale[0],
max_area=scale[1],
lower=ratio[0],
upper=ratio[1],
interp=interp,
mean=normalize_mean,
std=normalize_std,
output_dtype=output_dtype,
output_layout=output_layout,
pad_output=pad_output)
file_root = args.image_root
pipe_class = None
if mode == Mode.TRAIN:
file_root = os.path.join(file_root, 'train')
pipe_class = HybridTrainPipe
else:
file_root = os.path.join(file_root, 'val')
pipe_class = HybridValPipe
pipe = pipe_class(
file_root,
batch_size,
device_id,
ops_meta,
num_threads=num_threads,
seed=seed + shard_id,
shard_id=shard_id,
num_shards=num_shards)
pipe.build()
return DALIGenericIterator([pipe], ['data', 'label'], reader_name='Reader')
def build_dataloader(args, mode):
"""
Build a dataloader to process datasets. Only DALI dataloader is supported now.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
mode(utils.Mode): Train or eval mode.
Returns:
dataloader(nvidia.dali.plugin.paddle.DALIGenericIterator):
Iterable outputs of the DALI pipeline,
including "data" and "label" as Paddle Tensors.
"""
assert mode in Mode, "Dataset mode should be in supported Modes (train or eval)"
return dali_dataloader(args, mode, paddle.device.get_device())
def dali_synthetic_dataloader(args, device):
"""
Define a dali dataloader with synthetic data.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
device(str): The device to load data on, e.g. 'gpu:0'.
Outputs:
DALIGenericIterator(nvidia.dali.plugin.paddle.DALIGenericIterator)
Iterable outputs of the DALI pipeline,
including "data" as a Paddle Tensor.
"""
assert "gpu" in device, "gpu training is required for DALI"
device_id = int(device.split(':')[1])
batch_size = args.batch_size
image_shape = args.image_shape
output_dtype = types.FLOAT16 if args.dali_output_fp16 else types.FLOAT
num_threads = args.dali_num_threads
class ExternalInputIterator(object):
def __init__(self, batch_size, image_shape):
n_bytes = int(batch_size * np.prod(image_shape) * 4)
err, mem = cudart.cudaMallocHost(n_bytes)
assert err == cudart.cudaError_t.cudaSuccess
mem_ptr = ctypes.cast(mem, ctypes.POINTER(ctypes.c_float))
self.synthetic_data = np.ctypeslib.as_array(mem_ptr, shape=(batch_size, *image_shape))
self.n = args.benchmark_steps
def __iter__(self):
self.i = 0
return self
def __next__(self):
if self.i >= self.n:
self.__iter__()
raise StopIteration()
self.i += 1
return TensorListCPU(self.synthetic_data, is_pinned=True)
eli = ExternalInputIterator(batch_size, image_shape)
pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id)
with pipe:
images = fn.external_source(source=eli, no_copy=True, dtype=output_dtype)
images = images.gpu()
pipe.set_outputs(images)
pipe.build()
return DALIGenericIterator([pipe], ['data'])
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/dali.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import paddle
from paddle.distributed import fleet
from paddle.static import sparsity
from paddle.fluid.contrib.mixed_precision.fp16_utils import rewrite_program
from paddle.fluid.contrib.mixed_precision.fp16_lists import AutoMixedPrecisionLists
from dali import build_dataloader
from utils.config import parse_args, print_args
from utils.logger import setup_dllogger
from utils.save_load import init_program, save_model
from utils.affinity import set_cpu_affinity
from utils.mode import Mode, RunScope
import program
class MetricSummary:
def __init__(self):
super().__init__()
self.metric_dict = None
def update(self, new_metrics):
if not self.is_updated:
self.metric_dict = dict()
for key in new_metrics:
if key in self.metric_dict:
# top1, top5 and ips are "larger is better"
if key in ['top1', 'top5', 'ips']:
self.metric_dict[key] = new_metrics[key] if new_metrics[
key] > self.metric_dict[key] else self.metric_dict[key]
# Others are "Smaller is better"
else:
self.metric_dict[key] = new_metrics[key] if new_metrics[
key] < self.metric_dict[key] else self.metric_dict[key]
else:
self.metric_dict[key] = new_metrics[key]
@property
def is_updated(self):
return self.metric_dict is not None
def main(args):
"""
An entry point to train and evaluate a ResNet50 model, which consists of six steps.
1. Parse arguments from command line.
2. Initialize distributed training related setting, including CPU affinity.
3. Build dataloader via DALI.
4. Create training and evaluating Paddle.static.Program.
5. Load checkpoint or pretrained model if given.
6. Run program (train and evaluate with datasets, then save model if necessary).
"""
setup_dllogger(args.report_file)
if args.show_config:
print_args(args)
fleet.init(is_collective=True)
if args.enable_cpu_affinity:
set_cpu_affinity()
device = paddle.set_device('gpu')
startup_prog = paddle.static.Program()
train_dataloader = None
train_prog = None
optimizer = None
if args.run_scope in [RunScope.TRAIN_EVAL, RunScope.TRAIN_ONLY]:
train_dataloader = build_dataloader(args, Mode.TRAIN)
train_step_each_epoch = len(train_dataloader)
train_prog = paddle.static.Program()
train_fetchs, lr_scheduler, _, optimizer = program.build(
args,
train_prog,
startup_prog,
step_each_epoch=train_step_each_epoch,
is_train=True)
eval_dataloader = None
eval_prog = None
if args.run_scope in [RunScope.TRAIN_EVAL, RunScope.EVAL_ONLY]:
eval_dataloader = build_dataloader(args, Mode.EVAL)
eval_step_each_epoch = len(eval_dataloader)
eval_prog = paddle.static.Program()
eval_fetchs, _, _, _ = program.build(
args,
eval_prog,
startup_prog,
step_each_epoch=eval_step_each_epoch,
is_train=False)
# clone to prune some content which is irrelevant in eval_prog
eval_prog = eval_prog.clone(for_test=True)
exe = paddle.static.Executor(device)
exe.run(startup_prog)
init_program(
args,
exe=exe,
program=train_prog if train_prog is not None else eval_prog)
if args.amp:
if args.run_scope == RunScope.EVAL_ONLY:
rewrite_program(eval_prog, amp_lists=AutoMixedPrecisionLists())
else:
optimizer.amp_init(
device,
scope=paddle.static.global_scope(),
test_program=eval_prog,
use_fp16_test=True)
if args.asp and args.prune_model:
logging.info("Pruning model to 2:4 sparse pattern...")
sparsity.prune_model(train_prog, mask_algo=args.mask_algo)
logging.info("Pruning model done.")
if eval_prog is not None:
eval_prog = program.compile_prog(args, eval_prog, is_train=False)
train_summary = MetricSummary()
eval_summary = MetricSummary()
for epoch_id in range(args.start_epoch, args.epochs):
# Training
if train_prog is not None:
metric_summary = program.run(args, train_dataloader, exe,
train_prog, train_fetchs, epoch_id,
Mode.TRAIN, lr_scheduler)
train_summary.update(metric_summary)
# Save a checkpoint
if epoch_id % args.save_interval == 0:
model_path = os.path.join(args.output_dir,
args.model_arch_name)
save_model(train_prog, model_path, epoch_id, args.model_prefix)
# Evaluation
if (eval_prog is not None) and \
(epoch_id % args.eval_interval == 0):
metric_summary = program.run(args, eval_dataloader, exe, eval_prog,
eval_fetchs, epoch_id, Mode.EVAL)
eval_summary.update(metric_summary)
if train_summary.is_updated:
program.log_info(tuple(), train_summary.metric_dict, Mode.TRAIN)
if eval_summary.is_updated:
program.log_info(tuple(), eval_summary.metric_dict, Mode.EVAL)
if __name__ == '__main__':
paddle.enable_static()
main(parse_args())
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/train.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import logging
from profile import Profiler
import numpy as np
from optimizer import build_optimizer
from lr_scheduler import build_lr_scheduler
from utils.misc import AverageMeter
from utils.mode import Mode, RunScope
from utils.utility import get_num_trainers
import models
import dllogger
import paddle
import paddle.nn.functional as F
from paddle.distributed import fleet
from paddle.distributed.fleet import DistributedStrategy
from paddle.static import sparsity
from paddle.distributed.fleet.meta_optimizers.common import CollectiveHelper
def create_feeds(image_shape):
"""
Create a feeds mapping for the inputs of Program execution.
Args:
image_shape(list[int]): Model input shape, such as [4, 224, 224].
Returns:
feeds(dict): A dict mapping variables' names to their values.
key (string): Name of the variable to feed.
value: The variable created by paddle.static.data.
"""
feeds = dict()
feeds['data'] = paddle.static.data(
name="data", shape=[None] + image_shape, dtype="float32")
feeds['label'] = paddle.static.data(
name="label", shape=[None, 1], dtype="int64")
return feeds
def create_fetchs(out, feeds, class_num, label_smoothing=0, mode=Mode.TRAIN):
"""
Create fetchs to obtain specific outputs from Program execution (including loss and metrics).
Args:
out(variable): The model output variable.
feeds(dict): A dict mapping variables' names to their values
(The input of Program execution).
class_num(int): The number of classes.
label_smoothing(float, optional): Epsilon of label smoothing. Default: 0.
mode(utils.Mode, optional): Train or eval mode. Default: Mode.TRAIN
Returns:
fetchs(dict): A dict of outputs from Program execution (including loss and metrics).
key (string): Name of variable to fetch.
Value (tuple): (variable, AverageMeter).
"""
fetchs = dict()
target = paddle.reshape(feeds['label'], [-1, 1])
if mode == Mode.TRAIN:
if label_smoothing == 0:
loss = F.cross_entropy(out, target)
else:
label_one_hot = F.one_hot(target, class_num)
soft_target = F.label_smooth(
label_one_hot, epsilon=label_smoothing)
soft_target = paddle.reshape(soft_target, shape=[-1, class_num])
log_softmax = -F.log_softmax(out, axis=-1)
loss = paddle.sum(log_softmax * soft_target, axis=-1)
else:
loss = F.cross_entropy(out, target)
label = paddle.argmax(out, axis=-1, dtype='int32')
fetchs['label'] = (label, None)
loss = loss.mean()
fetchs['loss'] = (loss, AverageMeter('loss', '7.4f', need_avg=True))
acc_top1 = paddle.metric.accuracy(input=out, label=target, k=1)
acc_top5 = paddle.metric.accuracy(input=out, label=target, k=5)
metric_dict = dict()
metric_dict["top1"] = acc_top1
metric_dict["top5"] = acc_top5
for key in metric_dict:
if mode != Mode.TRAIN and paddle.distributed.get_world_size() > 1:
paddle.distributed.all_reduce(
metric_dict[key], op=paddle.distributed.ReduceOp.SUM)
metric_dict[key] = metric_dict[
key] / paddle.distributed.get_world_size()
fetchs[key] = (metric_dict[key], AverageMeter(
key, '7.4f', need_avg=True))
return fetchs
def create_strategy(args, is_train=True):
"""
Create paddle.static.BuildStrategy and paddle.static.ExecutionStrategy with arguments.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
is_train(bool, optional): Indicate whether the strategy is intended for training
or not. Default: True.
Returns:
build_strategy(paddle.static.BuildStrategy): An instance of BuildStrategy.
exec_strategy(paddle.static.ExecutionStrategy): An instance of ExecutionStrategy.
"""
build_strategy = paddle.static.BuildStrategy()
exec_strategy = paddle.static.ExecutionStrategy()
exec_strategy.num_threads = 1
exec_strategy.num_iteration_per_drop_scope = (10000 if args.amp and
args.use_pure_fp16 else 10)
paddle.set_flags({
'FLAGS_cudnn_exhaustive_search': True,
'FLAGS_conv_workspace_size_limit': 4096
})
if not is_train:
build_strategy.fix_op_run_order = True
if args.amp:
build_strategy.fuse_bn_act_ops = True
build_strategy.fuse_elewise_add_act_ops = True
build_strategy.fuse_bn_add_act_ops = True
build_strategy.enable_addto = True
if args.fuse_resunit and is_train:
build_strategy.fuse_resunit = True
return build_strategy, exec_strategy
def dist_optimizer(args, optimizer):
"""
Create a distributed optimizer based on a given optimizer.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
optimizer(paddle.optimizer): A normal optimizer.
Returns:
optimizer(fleet.distributed_optimizer): A distributed optimizer.
"""
build_strategy, exec_strategy = create_strategy(args)
dist_strategy = DistributedStrategy()
dist_strategy.execution_strategy = exec_strategy
dist_strategy.build_strategy = build_strategy
dist_strategy.fuse_all_reduce_ops = True
all_reduce_size = 16
dist_strategy.fuse_grad_size_in_MB = all_reduce_size
dist_strategy.nccl_comm_num = 1
dist_strategy.sync_nccl_allreduce = True
if args.amp:
dist_strategy.cudnn_batchnorm_spatial_persistent = True
dist_strategy.amp = True
dist_strategy.amp_configs = {
"init_loss_scaling": args.scale_loss,
"use_dynamic_loss_scaling": args.use_dynamic_loss_scaling,
"use_pure_fp16": args.use_pure_fp16
}
dist_strategy.asp = args.asp
optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
return optimizer
def build(args, main_prog, startup_prog, step_each_epoch, is_train=True):
"""
Build a executable paddle.static.Program via following four steps:
1. Create feeds.
2. Create a model.
3. Create fetchs.
4. Create an optimizer if is_train==True.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
main_prog(paddle.static.Program):The main program.
startup_prog(paddle.static.Program):The startup program.
step_each_epoch(int): The number of steps in each epoch.
is_train(bool, optional): Whether the main program created is for training. Default: True.
Returns:
fetchs(dict): A dict of outputs from Program execution (including loss and metrics).
lr_scheduler(paddle.optimizer.lr.LRScheduler): A learning rate scheduler.
feeds(dict): A dict to map variables'name to their values.
optimizer(Optimizer): An optimizer with distributed/AMP/ASP strategy.
"""
with paddle.static.program_guard(main_prog, startup_prog):
with paddle.utils.unique_name.guard():
mode = Mode.TRAIN if is_train else Mode.EVAL
feeds = create_feeds(args.image_shape)
model_name = args.model_arch_name
class_num = args.num_of_class
input_image_channel = args.image_channel
data_format = args.data_layout
use_pure_fp16 = args.use_pure_fp16
bn_weight_decay = args.bn_weight_decay
model = models.__dict__[model_name](
class_num=class_num,
input_image_channel=input_image_channel,
data_format=data_format,
use_pure_fp16=use_pure_fp16,
bn_weight_decay=bn_weight_decay)
out = model(feeds["data"])
fetchs = create_fetchs(
out, feeds, class_num, args.label_smoothing, mode=mode)
if args.asp:
sparsity.set_excluded_layers(main_prog, [model.fc.weight.name])
lr_scheduler = None
optimizer = None
if is_train:
lr_scheduler = build_lr_scheduler(args, step_each_epoch)
optimizer = build_optimizer(args, lr_scheduler)
optimizer = dist_optimizer(args, optimizer)
optimizer.minimize(fetchs['loss'][0], startup_prog)
# This is a workaround for "Communicator of ring id 0 has not been initialized.".
# By Paddle's design, the initialization is done inside the train program,
# so eval-only runs need to call the initialization manually.
if args.run_scope == RunScope.EVAL_ONLY and \
paddle.distributed.get_world_size() > 1:
collective_helper = CollectiveHelper(
role_maker=fleet.PaddleCloudRoleMaker(is_collective=True))
collective_helper.update_startup_program(startup_prog)
return fetchs, lr_scheduler, feeds, optimizer
def compile_prog(args, program, loss_name=None, is_train=True):
"""
Compile the given program, which fuses computing ops or optimizes the memory footprint
based on the build strategy in the config.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
program(paddle.static.Program): The main program to be compiled.
loss_name(str, optional): The name of loss variable. Default: None.
is_train(bool, optional): Indicate whether the strategy is intended for
training or not. Default: True.
Returns:
compiled_program(paddle.static.CompiledProgram): A compiled program.
"""
build_strategy, exec_strategy = create_strategy(args, is_train)
compiled_program = paddle.static.CompiledProgram(
program).with_data_parallel(
loss_name=loss_name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
return compiled_program
def run(args,
dataloader,
exe,
program,
fetchs,
epoch,
mode=Mode.TRAIN,
lr_scheduler=None):
"""
Execute program.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
dataloader(nvidia.dali.plugin.paddle.DALIGenericIterator):
Iterable output of the NVIDIA DALI pipeline,
please refer to dali_dataloader in dali.py for details.
exe(paddle.static.Executor): An executor to run the program.
program(paddle.static.Program): The program to be executed.
fetchs(dict): A dict of outputs from Program execution (including loss and metrics).
epoch(int): Current epoch id to run.
mode(utils.Mode, optional): Train or eval mode. Default: Mode.TRAIN.
lr_scheduler(paddle.optimizer.lr.LRScheduler, optional): A learning rate scheduler.
Default: None.
Returns:
metrics (dict): A dictionary to collect values of metrics.
"""
num_trainers = get_num_trainers()
fetch_list = [f[0] for f in fetchs.values()]
metric_dict = {"lr": AverageMeter('lr', 'f', postfix=",", need_avg=False)}
for k in fetchs:
if fetchs[k][1] is not None:
metric_dict[k] = fetchs[k][1]
metric_dict["batch_time"] = AverageMeter(
'batch_time', '.5f', postfix=" s,")
metric_dict["data_time"] = AverageMeter('data_time', '.5f', postfix=" s,")
metric_dict["compute_time"] = AverageMeter(
'compute_time', '.5f', postfix=" s,")
for m in metric_dict.values():
m.reset()
profiler = Profiler()
tic = time.perf_counter()
idx = 0
batch_size = None
latency = []
total_benchmark_steps = \
args.benchmark_steps + args.benchmark_warmup_steps
dataloader.reset()
while True:
# profiler.profile_setup returns True only when
# profiling is enabled and idx == the stop step
if profiler.profile_setup(idx):
break
idx += 1
try:
batch = next(dataloader)
except StopIteration:
# Reset dataloader when run benchmark to fill required steps.
if args.benchmark and (idx < total_benchmark_steps):
dataloader.reset()
# Reset tic timestamp to ignore exception handling time.
tic = time.perf_counter()
continue
break
except RuntimeError:
logging.warning(
"Except RuntimeError when reading data from dataloader, try to read once again..."
)
continue
reader_toc = time.perf_counter()
metric_dict['data_time'].update(reader_toc - tic)
batch_size = batch[0]["data"].shape()[0]
feed_dict = batch[0]
with profiler.profile_tag(idx, "Training"
if mode == Mode.TRAIN else "Evaluation"):
results = exe.run(program=program,
feed=feed_dict,
fetch_list=fetch_list)
for name, m in zip(fetchs.keys(), results):
if name in metric_dict:
metric_dict[name].update(np.mean(m), batch_size)
metric_dict["compute_time"].update(time.perf_counter() - reader_toc)
metric_dict["batch_time"].update(time.perf_counter() - tic)
if mode == Mode.TRAIN:
metric_dict['lr'].update(lr_scheduler.get_lr())
if lr_scheduler is not None:
with profiler.profile_tag(idx, "LR Step"):
lr_scheduler.step()
tic = time.perf_counter()
if idx % args.print_interval == 0:
log_msg = dict()
log_msg['loss'] = metric_dict['loss'].val.item()
log_msg['top1'] = metric_dict['top1'].val.item()
log_msg['top5'] = metric_dict['top5'].val.item()
log_msg['data_time'] = metric_dict['data_time'].val
log_msg['compute_time'] = metric_dict['compute_time'].val
log_msg['batch_time'] = metric_dict['batch_time'].val
log_msg['ips'] = \
batch_size * num_trainers / metric_dict['batch_time'].val
if mode == Mode.TRAIN:
log_msg['lr'] = metric_dict['lr'].val
log_info((epoch, idx), log_msg, mode)
if args.benchmark:
latency.append(metric_dict['batch_time'].val)
# Ignore the warmup iters
if idx == args.benchmark_warmup_steps:
metric_dict["compute_time"].reset()
metric_dict["data_time"].reset()
metric_dict["batch_time"].reset()
latency.clear()
logging.info("Begin benchmark at step %d", idx + 1)
if idx == total_benchmark_steps:
benchmark_data = dict()
benchmark_data[
'ips'] = batch_size * num_trainers / metric_dict[
'batch_time'].avg
if mode == Mode.EVAL:
latency = np.array(latency) * 1000
quantile = np.quantile(latency, [0.9, 0.95, 0.99])
benchmark_data['latency_avg'] = np.mean(latency)
benchmark_data['latency_p90'] = quantile[0]
benchmark_data['latency_p95'] = quantile[1]
benchmark_data['latency_p99'] = quantile[2]
logging.info("End benchmark at epoch step %d", idx)
return benchmark_data
epoch_data = dict()
epoch_data['loss'] = metric_dict['loss'].avg.item()
epoch_data['epoch_time'] = metric_dict['batch_time'].total
epoch_data['ips'] = batch_size * num_trainers * \
metric_dict["batch_time"].count / metric_dict["batch_time"].sum
if mode == Mode.EVAL:
epoch_data['top1'] = metric_dict['top1'].avg.item()
epoch_data['top5'] = metric_dict['top5'].avg.item()
log_info((epoch, ), epoch_data, mode)
return epoch_data
def log_info(step, metrics, mode):
"""
Log metrics with step and mode information.
Args:
step(tuple): Step, could be (epoch-id, iter-id). Use tuple() for a summary.
metrics(dict): A dictionary collected values of metrics.
mode(utils.Mode): Train or eval mode.
"""
prefix = 'train' if mode == Mode.TRAIN else 'val'
dllogger_iter_data = dict()
for key in metrics:
dllogger_iter_data[f"{prefix}.{key}"] = metrics[key]
dllogger.log(step=step, data=dllogger_iter_data)
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/program.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
from paddle import optimizer as optim
class Momentum:
"""
Simple Momentum optimizer with velocity state.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
learning_rate(float|LRScheduler): The learning rate used to update parameters.
Can be a float value or a paddle.optimizer.lr.LRScheduler.
"""
def __init__(self, args, learning_rate):
super().__init__()
self.learning_rate = learning_rate
self.momentum = args.momentum
self.weight_decay = args.weight_decay
self.grad_clip = None
self.multi_precision = args.amp
def __call__(self):
# model_list is None in static graph
parameters = None
opt = optim.Momentum(
learning_rate=self.learning_rate,
momentum=self.momentum,
weight_decay=self.weight_decay,
grad_clip=self.grad_clip,
multi_precision=self.multi_precision,
parameters=parameters)
return opt
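# Usage sketch (mirrors build_optimizer below; args is assumed to be a Namespace
# providing momentum, weight_decay and amp):
#   opt = Momentum(args, learning_rate=lr_scheduler)()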
def build_optimizer(args, lr):
"""
Build a raw optimizer with learning rate scheduler.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
lr(paddle.optimizer.lr.LRScheduler): A LRScheduler used for training.
return:
optim(paddle.optimizer): A normal optimizer.
"""
optimizer_mod = sys.modules[__name__]
opt = getattr(optimizer_mod, args.optimizer)(args, learning_rate=lr)()
logging.info("build optimizer %s success..", opt)
return opt
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/optimizer.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import glob
import numpy as np
import dllogger
from paddle.fluid import LoDTensor
from paddle.inference import Config, PrecisionType, create_predictor
from dali import dali_dataloader, dali_synthetic_dataloader
from utils.config import parse_args, print_args
from utils.mode import Mode
from utils.logger import setup_dllogger
def init_predictor(args):
infer_dir = args.trt_inference_dir
assert os.path.isdir(
infer_dir), f'inference_dir = "{infer_dir}" is not a directory'
pdiparams_path = glob.glob(os.path.join(infer_dir, '*.pdiparams'))
pdmodel_path = glob.glob(os.path.join(infer_dir, '*.pdmodel'))
assert len(pdiparams_path) == 1, \
f'There should be only 1 pdiparams in {infer_dir}, but there are {len(pdiparams_path)}'
assert len(pdmodel_path) == 1, \
f'There should be only 1 pdmodel in {infer_dir}, but there are {len(pdmodel_path)}'
predictor_config = Config(pdmodel_path[0], pdiparams_path[0])
predictor_config.enable_memory_optim()
predictor_config.enable_use_gpu(0, args.device)
precision = args.trt_precision
max_batch_size = args.batch_size
assert precision in ['FP32', 'FP16', 'INT8'], \
'precision should be FP32/FP16/INT8'
if precision == 'INT8':
precision_mode = PrecisionType.Int8
elif precision == 'FP16':
precision_mode = PrecisionType.Half
elif precision == 'FP32':
precision_mode = PrecisionType.Float32
else:
raise NotImplementedError
predictor_config.enable_tensorrt_engine(
workspace_size=args.trt_workspace_size,
max_batch_size=max_batch_size,
min_subgraph_size=args.trt_min_subgraph_size,
precision_mode=precision_mode,
use_static=args.trt_use_static,
use_calib_mode=args.trt_use_calib_mode)
predictor = create_predictor(predictor_config)
return predictor
def predict(predictor, input_data):
'''
Args:
predictor: Paddle inference predictor
input_data: A list of inputs
Returns:
output_data: A list of outputs
'''
# copy image data to input tensor
input_names = predictor.get_input_names()
for i, name in enumerate(input_names):
input_tensor = predictor.get_input_handle(name)
if isinstance(input_data[i], LoDTensor):
input_tensor.share_external_data(input_data[i])
else:
input_tensor.reshape(input_data[i].shape)
input_tensor.copy_from_cpu(input_data[i])
# do the inference
predictor.run()
results = []
# get out data from output tensor
output_names = predictor.get_output_names()
for i, name in enumerate(output_names):
output_tensor = predictor.get_output_handle(name)
output_data = output_tensor.copy_to_cpu()
results.append(output_data)
return results
def benchmark_dataset(args):
"""
Benchmark on a DALI-format dataset, which reflects the real pipeline throughput, including:
1. Read images
2. Pre-processing
3. Inference
4. H2D, D2H
"""
predictor = init_predictor(args)
dali_iter = dali_dataloader(args, Mode.EVAL, 'gpu:' + str(args.device))
# Warm up on some samples to get stable performance numbers
batch_size = args.batch_size
image_shape = args.image_shape
images = np.zeros((batch_size, *image_shape)).astype(np.float32)
for _ in range(args.benchmark_warmup_steps):
predict(predictor, [images])[0]
total_images = 0
correct_predict = 0
latency = []
start = time.perf_counter()
last_time_step = time.perf_counter()
for dali_data in dali_iter:
for data in dali_data:
label = np.asarray(data['label'])
total_images += label.shape[0]
label = label.flatten()
images = data['data']
predict_label = predict(predictor, [images])[0]
correct_predict += (label == predict_label).sum()
batch_end_time_step = time.perf_counter()
batch_latency = batch_end_time_step - last_time_step
latency.append(batch_latency)
last_time_step = time.perf_counter()
end = time.perf_counter()
latency = np.array(latency) * 1000
quantile = np.quantile(latency, [0.9, 0.95, 0.99])
statistics = {
'precision': args.trt_precision,
'batch_size': batch_size,
'throughput': total_images / (end - start),
'accuracy': correct_predict / total_images,
'eval_latency_avg': np.mean(latency),
'eval_latency_p90': quantile[0],
'eval_latency_p95': quantile[1],
'eval_latency_p99': quantile[2],
}
return statistics
def benchmark_synthetic(args):
"""
Benchmark on synthetic data, bypassing all pre-processing.
The host-to-device copy is still included.
This is used to find the upper throughput bound when tuning the full input pipeline.
"""
predictor = init_predictor(args)
dali_iter = dali_synthetic_dataloader(args, 'gpu:' + str(args.device))
batch_size = args.batch_size
image_shape = args.image_shape
images = np.random.random((batch_size, *image_shape)).astype(np.float32)
latency = []
# warmup
for _ in range(args.benchmark_warmup_steps):
predict(predictor, [images])[0]
# benchmark
start = time.perf_counter()
last_time_step = time.perf_counter()
for dali_data in dali_iter:
for data in dali_data:
images = data['data']
predict(predictor, [images])[0]
batch_end_time_step = time.perf_counter()
batch_latency = batch_end_time_step - last_time_step
latency.append(batch_latency)
last_time_step = time.perf_counter()
end = time.perf_counter()
latency = np.array(latency) * 1000
quantile = np.quantile(latency, [0.9, 0.95, 0.99])
statistics = {
'precision': args.trt_precision,
'batch_size': batch_size,
'throughput': args.benchmark_steps * batch_size / (end - start),
'eval_latency_avg': np.mean(latency),
'eval_latency_p90': quantile[0],
'eval_latency_p95': quantile[1],
'eval_latency_p99': quantile[2],
}
return statistics
def main(args):
setup_dllogger(args.trt_log_path)
if args.show_config:
print_args(args)
if args.trt_use_synthetic:
statistics = benchmark_synthetic(args)
else:
statistics = benchmark_dataset(args)
dllogger.log(step=tuple(), data=statistics)
if __name__ == '__main__':
main(parse_args(including_trt=True))
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/inference.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['AverageMeter']
class AverageMeter:
"""
A container that keeps a running sum, mean, and the last value.
"""
def __init__(self, name='', fmt='f', postfix="", need_avg=True):
self.name = name
self.fmt = fmt
self.postfix = postfix
self.need_avg = need_avg
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
@property
def total(self):
return '{self.sum:{self.fmt}}{self.postfix}'.format(self=self)
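# Minimal usage sketch (values are hypothetical):
# meter = AverageMeter('batch_time', fmt='.3f', postfix='s')
# meter.update(0.125, n=32)  # one batch of 32 samples took 0.125 s
# print(meter.avg)           # running mean over all recorded updates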
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/utils/misc.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import re
import shutil
import tempfile
import logging
import paddle
_PDOPT_SUFFIX = '.pdopt'
_PDPARAMS_SUFFIX = '.pdparams'
def _mkdir_if_not_exist(path):
"""
Make the directory if it does not exist, ignoring the race when multiple processes create it at the same time.
"""
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
logging.warning(
'Directory %s has already been created by another process.', path)
else:
raise OSError(f'Failed to mkdir {path}')
def _load_state(path):
"""
Load model parameters from .pdparams file.
Args:
path(str): Path to .pdparams file.
Returns:
state(dict): Dict of parameters loaded from file.
"""
if os.path.exists(path + _PDOPT_SUFFIX):
tmp = tempfile.mkdtemp()
dst = os.path.join(tmp, os.path.basename(os.path.normpath(path)))
shutil.copy(path + _PDPARAMS_SUFFIX, dst + _PDPARAMS_SUFFIX)
state = paddle.static.load_program_state(dst)
shutil.rmtree(tmp)
else:
state = paddle.static.load_program_state(path)
return state
def load_params(prog, path, ignore_params=None):
"""
Load model from the given path.
Args:
prog (paddle.static.Program): Load weight to which Program object.
path (string): Model path.
ignore_params (list): Ignore variable to load when finetuning.
"""
if not (os.path.isdir(path) or os.path.exists(path + _PDPARAMS_SUFFIX)):
raise ValueError(f"Model pretrain path {path} does not exists.")
logging.info("Loading parameters from %s...", path)
ignore_set = set()
state = _load_state(path)
# ignore the parameter which mismatch the shape
# between the model and pretrain weight.
all_var_shape = {}
for block in prog.blocks:
for param in block.all_parameters():
all_var_shape[param.name] = param.shape
ignore_set.update([
name for name, shape in all_var_shape.items()
if name in state and shape != state[name].shape
])
if ignore_params:
all_var_names = [var.name for var in prog.list_vars()]
ignore_list = filter(
lambda var: any([re.match(name, var) for name in ignore_params]),
all_var_names)
ignore_set.update(list(ignore_list))
if len(ignore_set) > 0:
for k in ignore_set:
if k in state:
logging.warning(
'variable %s is already excluded automatically', k)
del state[k]
paddle.static.set_program_state(prog, state)
def init_ckpt(path_to_ckpt, program, exe):
"""
Initialize from a checkpoint in the given path.
Args:
path_to_ckpt(str): The path to files of checkpoints,
including '.pdparams' and '.pdopt'.
program(paddle.static.Program): The program to init model.
exe(paddle.static.Executor): The executor to run program.
"""
paddle.static.load(program, path_to_ckpt, exe)
logging.info("Finish initalizing the checkpoint from %s", path_to_ckpt)
def init_pretrained(path_to_pretrained, program):
"""
Initialize from a pretrained model in the given path.
Args:
path_to_pretrained(str): The path to file of pretrained model.
program(paddle.static.Program): The program to init model.
"""
if not isinstance(path_to_pretrained, list):
pretrained_model = [path_to_pretrained]
else:
pretrained_model = path_to_pretrained
for pretrain in pretrained_model:
load_params(program, pretrain)
logging.info("Finished initializing pretrained parameters from %s",
pretrained_model)
def init_program(args, program, exe):
"""
Initialize from a given checkpoint or pretrained parameters.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
program(paddle.static.Program): The program to init model.
exe(paddle.static.Executor): The executor to run program.
"""
if args.from_checkpoint is not None:
init_ckpt(args.from_checkpoint, program, exe)
logging.info("Training will start at the %d-th epoch",
args.start_epoch)
elif args.from_pretrained_params is not None:
init_pretrained(args.from_pretrained_params, program)
def save_model(program, model_path, epoch_id, prefix):
"""
Save a model to given path.
Args:
program(paddle.static.Program): The program to be saved.
model_path(str): The path to save model.
epoch_id(int): The current epoch id.
prefix(str): The prefix of saved model files.
"""
if paddle.distributed.get_rank() != 0:
return
model_path = os.path.join(model_path, str(epoch_id))
_mkdir_if_not_exist(model_path)
model_prefix = os.path.join(model_path, prefix)
paddle.static.save(program, model_prefix)
logging.info("Already save model in %s", model_path)
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/utils/save_load.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import argparse
import logging
import distutils.util
import dllogger
from utils.mode import RunScope
from utils.utility import get_num_trainers
from utils.save_load import _PDOPT_SUFFIX, _PDPARAMS_SUFFIX
_AUTO_LAST_EPOCH = 'auto'
def _get_full_path_of_ckpt(args):
if args.from_checkpoint is None:
args.last_epoch_of_checkpoint = -1
return
def _check_file_exist(path_with_prefix):
pdopt_path = path_with_prefix + _PDOPT_SUFFIX
pdparams_path = path_with_prefix + _PDPARAMS_SUFFIX
found = False
if os.path.exists(pdopt_path) and os.path.exists(pdparams_path):
found = True
return found, pdopt_path, pdparams_path
target_from_checkpoint = os.path.join(args.from_checkpoint,
args.model_prefix)
if args.last_epoch_of_checkpoint is None:
args.last_epoch_of_checkpoint = -1
elif args.last_epoch_of_checkpoint == _AUTO_LAST_EPOCH:
folders = os.listdir(args.from_checkpoint)
args.last_epoch_of_checkpoint = -1
for folder in folders:
tmp_ckpt_path = os.path.join(args.from_checkpoint, folder,
args.model_prefix)
try:
folder = int(folder)
except ValueError:
logging.warning(
f"Skip folder '{folder}' since its name is not integer-convertable."
)
continue
if folder > args.last_epoch_of_checkpoint and \
_check_file_exist(tmp_ckpt_path)[0]:
args.last_epoch_of_checkpoint = folder
epoch_with_prefix = os.path.join(str(args.last_epoch_of_checkpoint), args.model_prefix) \
if args.last_epoch_of_checkpoint > -1 else args.model_prefix
target_from_checkpoint = os.path.join(args.from_checkpoint,
epoch_with_prefix)
else:
try:
args.last_epoch_of_checkpoint = int(args.last_epoch_of_checkpoint)
except ValueError:
raise ValueError(f"The value of --last-epoch-of-checkpoint should be None, {_AUTO_LAST_EPOCH}" \
f" or integer >= 0, but receive {args.last_epoch_of_checkpoint}")
args.from_checkpoint = target_from_checkpoint
found, pdopt_path, pdparams_path = _check_file_exist(args.from_checkpoint)
if not found:
args.from_checkpoint = None
args.last_epoch_of_checkpoint = -1
logging.warning(
f"Cannot find {pdopt_path} and {pdparams_path}, disable --from-checkpoint."
)
def _get_full_path_of_pretrained_params(args):
if args.from_pretrained_params is None:
args.last_epoch_of_checkpoint = -1
return
args.from_pretrained_params = os.path.join(args.from_pretrained_params,
args.model_prefix)
pdparams_path = args.from_pretrained_params + _PDPARAMS_SUFFIX
if not os.path.exists(pdparams_path):
args.from_pretrained_params = None
logging.warning(
f"Cannot find {pdparams_path}, disable --from-pretrained-params.")
args.last_epoch_of_checkpoint = -1
def print_args(args):
args_for_log = copy.deepcopy(args)
# dllogger cannot serialize Enum values into JSON.
args_for_log.run_scope = args_for_log.run_scope.value
dllogger.log(step='PARAMETER', data=vars(args_for_log))
def check_and_process_args(args):
# Process the run scope
run_scope = None
for scope in RunScope:
if args.run_scope == scope.value:
run_scope = scope
break
assert run_scope is not None, \
f"only support {[scope.value for scope in RunScope]} as run_scope"
args.run_scope = run_scope
# Process image layout and channel
args.image_channel = args.image_shape[0]
if args.data_layout == "NHWC":
args.image_shape = [
args.image_shape[1], args.image_shape[2], args.image_shape[0]
]
# Process learning rate: scale by the number of trainers
args.lr = get_num_trainers() * args.lr
# Process model loading
assert not (args.from_checkpoint is not None and \
args.from_pretrained_params is not None), \
"--from-pretrained-params and --from-checkpoint should " \
"not be set simultaneously."
_get_full_path_of_pretrained_params(args)
_get_full_path_of_ckpt(args)
args.start_epoch = args.last_epoch_of_checkpoint + 1
# Process benchmark settings
if args.benchmark:
assert args.run_scope in [
RunScope.TRAIN_ONLY, RunScope.EVAL_ONLY
], "If benchmark enabled, run_scope must be `train_only` or `eval_only`"
# Only run one epoch when benchmark or eval_only.
if args.benchmark or \
(args.run_scope == RunScope.EVAL_ONLY):
args.epochs = args.start_epoch + 1
if args.run_scope == RunScope.EVAL_ONLY:
args.eval_interval = 1
def add_global_args(parser):
group = parser.add_argument_group('Global')
group.add_argument(
'--output-dir',
type=str,
default='./output/',
help='A path to store trained models.')
group.add_argument(
'--run-scope',
default='train_eval',
choices=('train_eval', 'train_only', 'eval_only'),
help='Running scope. It should be one of {train_eval, train_only, eval_only}.'
)
group.add_argument(
'--epochs',
type=int,
default=90,
help='The number of epochs for training.')
group.add_argument(
'--save-interval',
type=int,
default=1,
help='The iteration interval to save checkpoints.')
group.add_argument(
'--eval-interval',
type=int,
default=1,
help='The iteration interval to test trained models on a given validation dataset. ' \
'Ignored when --run-scope is train_only.'
)
group.add_argument(
'--print-interval',
type=int,
default=10,
help='The iteration interval to show training/evaluation message.')
group.add_argument(
'--report-file',
type=str,
default='./report.json',
help='A file in which to store JSON experiment report.')
group.add_argument(
'--data-layout',
default='NCHW',
choices=('NCHW', 'NHWC'),
help='Data format. It should be one of {NCHW, NHWC}.')
group.add_argument(
'--benchmark', action='store_true', help='To enable benchmark mode.')
group.add_argument(
'--benchmark-steps',
type=int,
default=100,
help='Steps for the benchmark run; only applied when --benchmark is set.'
)
group.add_argument(
'--benchmark-warmup-steps',
type=int,
default=100,
help='Warmup steps for the benchmark run; only applied when --benchmark is set.'
)
group.add_argument(
'--model-prefix',
type=str,
default="resnet_50_paddle",
help='The prefix name of model files to save/load.')
group.add_argument(
'--from-pretrained-params',
type=str,
default=None,
help='A folder path which contains pretrained parameters, i.e. a file named' \
' --model-prefix + .pdparams. It should not be set together with' \
' --from-checkpoint.'
)
group.add_argument(
'--from-checkpoint',
type=str,
default=None,
help='A checkpoint path to resume training. It should not be set ' \
'together with --from-pretrained-params. The path provided ' \
'can be a folder containing < epoch_id/ckpt_files > or < ckpt_files >.'
)
group.add_argument(
'--last-epoch-of-checkpoint',
type=str,
default=None,
help='The epoch id of the checkpoint given by --from-checkpoint. ' \
'It should be None, auto or an integer >= 0. If it is set to ' \
'None, training will start from the 0-th epoch. If it is set to ' \
'auto, the largest integer-named folder under --from-checkpoint ' \
'that contains the required checkpoint will be used. ' \
'Default is None.'
)
group.add_argument(
'--show-config',
type=distutils.util.strtobool,
default=True,
help='To show arguments.')
group.add_argument(
'--enable-cpu-affinity',
type=distutils.util.strtobool,
default=True,
help='Enable built-in GPU-CPU affinity binding.')
return parser
def add_advance_args(parser):
group = parser.add_argument_group('Advanced Training')
# AMP
group.add_argument(
'--amp',
action='store_true',
help='Enable automatic mixed precision training (AMP).')
group.add_argument(
'--scale-loss',
type=float,
default=1.0,
help='The loss scaling factor for AMP training; only applied when --amp is set.'
)
group.add_argument(
'--use-dynamic-loss-scaling',
action='store_true',
help='Enable dynamic loss scaling in AMP training; only applies when --amp is set.'
)
group.add_argument(
'--use-pure-fp16',
action='store_true',
help='Enable pure FP16 training; only applies when --amp is set.')
group.add_argument(
'--fuse-resunit',
action='store_true',
help='Enable cuDNN v8 ResUnit fusion; only applies when --amp is set.')
# ASP
group.add_argument(
'--asp',
action='store_true',
help='Enable automatic sparse training (ASP).')
group.add_argument(
'--prune-model',
action='store_true',
help='Prune the model to a 2:4 sparse pattern; only applies when --asp is set.'
)
group.add_argument(
'--mask-algo',
default='mask_1d',
choices=('mask_1d', 'mask_2d_greedy', 'mask_2d_best'),
help='The algorithm to generate sparse masks. It should be one of ' \
'{mask_1d, mask_2d_greedy, mask_2d_best}. This only applies ' \
'when --asp and --prune-model are set.'
)
return parser
def add_dataset_args(parser):
def float_list(x):
return list(map(float, x.split(',')))
def int_list(x):
return list(map(int, x.split(',')))
dataset_group = parser.add_argument_group('Dataset')
dataset_group.add_argument(
'--image-root',
type=str,
default='/imagenet',
help='A root folder of train/val images. It should contain train and val folders, ' \
'which store corresponding images.'
)
dataset_group.add_argument(
'--image-shape',
type=int_list,
default=[4, 224, 224],
help='The image shape. Its shape should be [channel, height, width].')
# Data Loader
dataset_group.add_argument(
'--batch-size',
type=int,
default=256,
help='The batch size for both training and evaluation.')
dataset_group.add_argument(
'--dali-random-seed',
type=int,
default=42,
help='The random seed for DALI data loader.')
dataset_group.add_argument(
'--dali-num-threads',
type=int,
default=4,
help='The number of threads applied to DALI data loader.')
dataset_group.add_argument(
'--dali-output-fp16',
action='store_true',
help='Output FP16 data from DALI data loader.')
# Augmentation
augmentation_group = parser.add_argument_group('Data Augmentation')
augmentation_group.add_argument(
'--crop-size',
type=int,
default=224,
help='The size to crop input images.')
augmentation_group.add_argument(
'--rand-crop-scale',
type=float_list,
default=[0.08, 1.],
help='Range from which to choose a random area fraction.')
augmentation_group.add_argument(
'--rand-crop-ratio',
type=float_list,
default=[3.0 / 4, 4.0 / 3],
help='Range from which to choose a random aspect ratio (width/height).')
augmentation_group.add_argument(
'--normalize-scale',
type=float,
default=1.0 / 255.0,
help='A scalar to normalize images.')
augmentation_group.add_argument(
'--normalize-mean',
type=float_list,
default=[0.485, 0.456, 0.406],
help='The mean values to normalize RGB images.')
augmentation_group.add_argument(
'--normalize-std',
type=float_list,
default=[0.229, 0.224, 0.225],
help='The std values to normalize RGB images.')
augmentation_group.add_argument(
'--resize-short',
type=int,
default=256,
help='The length of the shorter dimension of the resized image.')
return parser
def add_model_args(parser):
group = parser.add_argument_group('Model')
group.add_argument(
'--model-arch-name',
type=str,
default='ResNet50',
help='The model architecture name. It should be one of {ResNet50}.')
group.add_argument(
'--num-of-class',
type=int,
default=1000,
help='The number of image classes.')
group.add_argument(
'--bn-weight-decay',
action='store_true',
help='Apply weight decay to BatchNorm shift and scale.')
return parser
def add_training_args(parser):
group = parser.add_argument_group('Training')
group.add_argument(
'--label-smoothing',
type=float,
default=0.1,
help='The ratio of label smoothing.')
group.add_argument(
'--optimizer',
default='Momentum',
metavar="OPTIMIZER",
choices=('Momentum',),
help='The name of optimizer. It should be one of {Momentum}.')
group.add_argument(
'--momentum',
type=float,
default=0.875,
help='The momentum value of optimizer.')
group.add_argument(
'--weight-decay',
type=float,
default=3.0517578125e-05,
help='The coefficient of weight decay.')
group.add_argument(
'--lr-scheduler',
default='Cosine',
metavar="LR_SCHEDULER",
choices=('Cosine',),
help='The name of learning rate scheduler. It should be one of {Cosine}.'
)
group.add_argument(
'--lr', type=float, default=0.256, help='The initial learning rate.')
group.add_argument(
'--warmup-epochs',
type=int,
default=5,
help='The number of epochs for learning rate warmup.')
group.add_argument(
'--warmup-start-lr',
type=float,
default=0.0,
help='The initial learning rate for warmup.')
return parser
def add_trt_args(parser):
group = parser.add_argument_group('Paddle-TRT')
group.add_argument(
'--device',
type=int,
default=0,
help='The GPU device id for Paddle-TRT inference.'
)
group.add_argument(
'--trt-inference-dir',
type=str,
default='./inference',
help='A path to store/load inference models. ' \
'export_model.py would export models to this folder, ' \
'then inference.py would load from here.'
)
group.add_argument(
'--trt-precision',
default='FP32',
choices=('FP32', 'FP16', 'INT8'),
help='The precision of TensorRT. It should be one of {FP32, FP16, INT8}.'
)
group.add_argument(
'--trt-workspace-size',
type=int,
default=(1 << 30),
help='The TensorRT workspace size in bytes.')
group.add_argument(
'--trt-min-subgraph-size',
type=int,
default=3,
help='The minimum subgraph size to enable Paddle-TRT.')
group.add_argument(
'--trt-use-static',
type=distutils.util.strtobool,
default=False,
help='Serialize the TensorRT engine on the first run and reuse it afterwards.')
group.add_argument(
'--trt-use-calib-mode',
type=distutils.util.strtobool,
default=False,
help='Use PTQ calibration for Paddle-TRT INT8.')
group.add_argument(
'--trt-export-log-path',
type=str,
default='./export.json',
help='A file in which to store JSON model exporting report.')
group.add_argument(
'--trt-log-path',
type=str,
default='./inference.json',
help='A file in which to store JSON inference report.')
group.add_argument(
'--trt-use-synthetic',
type=distutils.util.strtobool,
default=False,
help='Apply synthetic data for benchmark.')
return parser
def parse_args(including_trt=False):
parser = argparse.ArgumentParser(
description="PaddlePaddle RN50v1.5 training script",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = add_global_args(parser)
parser = add_dataset_args(parser)
parser = add_model_args(parser)
parser = add_training_args(parser)
parser = add_advance_args(parser)
if including_trt:
parser = add_trt_args(parser)
args = parser.parse_args()
check_and_process_args(args)
return args
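# Usage sketch: training entry points call parse_args(), while the export and
# inference scripts pass including_trt=True to also register the Paddle-TRT
# arguments (see inference.py, which calls parse_args(including_trt=True)).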
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/utils/config.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ctypes
_cuda_home = os.environ.get('CUDA_HOME', '/usr/local/cuda')
_cudart = ctypes.CDLL(os.path.join(_cuda_home, 'lib64/libcudart.so'))
def cuda_profile_start():
_cudart.cudaProfilerStart()
def cuda_profile_stop():
_cudart.cudaProfilerStop()
_nvtx = ctypes.CDLL(os.path.join(_cuda_home, 'lib64/libnvToolsExt.so'))
def cuda_nvtx_range_push(name):
_nvtx.nvtxRangePushW(ctypes.c_wchar_p(name))
def cuda_nvtx_range_pop():
_nvtx.nvtxRangePop()
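# Usage sketch: wrap a region of interest so it appears as a named range in
# Nsight Systems / nvprof timelines (the function name below is a placeholder):
# cuda_nvtx_range_push('train_step')
# run_one_training_step()
# cuda_nvtx_range_pop()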
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/utils/cuda_bind.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def get_num_trainers():
num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
return num_trainers
def get_trainer_id():
trainer_id = int(os.environ.get('PADDLE_TRAINER_ID', 0))
return trainer_id
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/utils/utility.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import paddle
def _get_gpu_affinity_table():
"""
Generate three dict objects, gpu_cpu_affinity_map, cpu_socket_gpus_list, cpu_core_groups.
gpu_cpu_affinity_map (dict): Key is GPU ID and value is cpu_affinity string.
cpu_socket_gpus_list (dict): Key is cpu_affinity string and value is a list
of all GPU IDs that have affinity to this cpu socket.
cpu_core_groups (dict): Key is cpu_affinity string and value is cpu core groups.
cpu core groups contain #GPUs groups; each group has a
nearly equal amount of CPU cores.
Example:
$ nvidia-smi topo -m
GPU0 GPU1 GPU2 GPU3 CPU Affinity NUMA Affinity
GPU0 X SYS SYS SYS 0-9,20-29 0
GPU1 SYS X SYS SYS 0-9,20-29 0
GPU2 SYS SYS X SYS 10-19,30-39 1
GPU3 SYS SYS SYS X 10-19,30-39 1
gpu_cpu_affinity_map =
{ 0: '0-9,20-29', # GPU0's cpu affinity is '0-9,20-29'
1: '0-9,20-29', # GPU1's cpu affinity is '0-9,20-29'
2: '10-19,30-39', # GPU2's cpu affinity is '10-19,30-39'
3: '10-19,30-39' } # GPU3's cpu affinity is '10-19,30-39'
cpu_socket_gpus_list =
{ '0-9,20-29': [0, 1], # There are 2 GPUs, 0 and 1, belong to cpu affinity '0-9,20-29'.
'10-19,30-39': [2, 3] # There are 2 GPUs, 2 and 3, belong to cpu affinity '10-19,30-39'.
}
cpu_core_groups =
# There are 2 GPUs belong to cpu affinity '0-9,20-29', then
# cores [0, 1, ..., 8, 9] would be split into two groups by taking
# every 2nd element:
# [0, 2, 4, 6, 8] and [1, 3, 5, 7, 9]
# The same for cores [20, 21, ..., 28, 29].
{'0-9,20-29': [
[[0, 2, 4, 6, 8], [1, 3, 5, 7, 9]],
[[20, 22, 24, 26, 28], [21, 23, 25, 27, 29]]
],
# The same as '0-9,20-29'
'10-19,30-39': [
[[10, 12, 14, 16, 18], [11, 13, 15, 17, 19]],
[[30, 32, 34, 36, 38], [31, 33, 35, 37, 39]]
]}
"""
lines = os.popen('nvidia-smi topo -m').readlines()
cpu_affinity_idx = -1
titles = lines[0].split('\t')
for idx in range(len(titles)):
if 'CPU Affinity' in titles[idx]:
cpu_affinity_idx = idx
assert cpu_affinity_idx > 0, \
"Can not obtain correct CPU affinity column index via nvidia-smi!"
gpu_cpu_affinity_map = dict()
cpu_socket_gpus_list = dict()
# Skip title
for idx in range(1, len(lines)):
line = lines[idx]
items = line.split('\t')
if 'GPU' in items[0]:
gpu_id = int(items[0][3:])
affinity = items[cpu_affinity_idx]
gpu_cpu_affinity_map[gpu_id] = affinity
if affinity in cpu_socket_gpus_list:
cpu_socket_gpus_list[affinity].append(gpu_id)
else:
cpu_socket_gpus_list[affinity] = [gpu_id]
cpu_core_groups = _group_cpu_cores(cpu_socket_gpus_list)
return gpu_cpu_affinity_map, cpu_socket_gpus_list, cpu_core_groups
def _group_cpu_cores(cpu_socket_gpus_list):
"""
Generate a dictionary whose key is the cpu_affinity string and value is the cpu core groups.
cpu core groups contain #GPUs groups; each group has a nearly equal amount of CPU cores.
The grouping collects CPU cores at every #GPUs-th element, to account for hyperthreading indices.
For example, consider 4 physical cores and 8 logical cores with hyperthreading. The CPU indices [0, 1, 2, 3] are
physical cores, and [4, 5, 6, 7] are hyperthreads. In this case, distributing physical cores
first and hyperthreads second yields better performance.
Args:
cpu_socket_gpus_list (dict): a dict that map cpu_affinity_str to all GPUs that belong to it.
Return:
cpu_core_groups (dict): a dict that map cpu_affinity_str to cpu core groups.
Example:
cpu_socket_gpus_list = { '0-9,20-29': [0, 1], '10-19,30-39': [2, 3] },
which means there are 2 GPUs, 0 and 1, belong to '0-9,20-29' and
2 GPUs, 2 and 3, belong to '10-19,30-39'
therefore, cpu_core_groups =
{'0-9,20-29': [
[[0, 2, 4, 6, 8], [1, 3, 5, 7, 9]],
[[20, 22, 24, 26, 28], [21, 23, 25, 27, 29]]
],
'10-19,30-39': [
[[10, 12, 14, 16, 18], [11, 13, 15, 17, 19]],
[[30, 32, 34, 36, 38], [31, 33, 35, 37, 39]]
]}
"""
cpu_core_groups = dict()
for cpu_socket in cpu_socket_gpus_list:
cpu_core_groups[cpu_socket] = list()
gpu_count = len(cpu_socket_gpus_list[cpu_socket])
cores = cpu_socket.split(',')
for core in cores:
core_indices = _get_core_indices(core)
core_group = list()
for i in range(gpu_count):
start = i % len(core_indices)
sub_core_set = core_indices[start::gpu_count]
core_group.append(sub_core_set)
cpu_core_groups[cpu_socket].append(core_group)
return cpu_core_groups
def _get_core_indices(cores_str):
"""
Generate a list of CPU core indices.
Args:
cores_str (str): a string with format "start_idx-end_idx".
Return:
cpu_core_indices (list): a list of all indices in [start_idx, end_idx].
Example:
cores_str = '0-20'
cpu_core_indices = [0, 1, 2, ..., 18, 19, 20]
"""
start, end = cores_str.split('-')
return [*range(int(start), int(end) + 1)]
def set_cpu_affinity():
"""
Setup CPU affinity.
Each GPU is bound to a specific set of CPU cores for optimal and stable performance.
This function obtains the GPU-CPU affinity via "nvidia-smi topo -m", then equally distributes
CPU cores to each GPU.
"""
gpu_cpu_affinity_map, cpu_socket_gpus_list, cpu_core_groups = \
_get_gpu_affinity_table()
node_num = paddle.distributed.fleet.node_num()
gpu_per_node = paddle.distributed.get_world_size() // node_num
local_rank = paddle.distributed.get_rank() % gpu_per_node
# gpu_cpu_affinity_map (dict): Key is GPU ID and value is cpu_affinity string.
# cpu_socket_gpus_list (dict): Key is cpu_affinity string and value is a list
# of all GPU IDs that have affinity to this cpu socket.
# cpu_core_groups (dict): Key is cpu_affinity string and value is cpu core groups.
# cpu core groups contain #GPUs groups; each group has a
# nearly equal amount of CPU cores.
# Example:
# $ nvidia-smi topo -m
# GPU0 GPU1 GPU2 GPU3 CPU Affinity NUMA Affinity
# GPU0 X SYS SYS SYS 0-9,20-29 0
# GPU1 SYS X SYS SYS 0-9,20-29 0
# GPU2 SYS SYS X SYS 10-19,30-39 1
# GPU3 SYS SYS SYS X 10-19,30-39 1
#
# gpu_cpu_affinity_map =
# { 0: '0-9,20-29',
# 1: '0-9,20-29',
# 2: '10-19,30-39',
# 3: '10-19,30-39' }
# cpu_socket_gpus_list =
# { '0-9,20-29': [0, 1],
# '10-19,30-39': [2, 3] }
# cpu_core_groups =
# {'0-9,20-29': [
# [[0, 2, 4, 6, 8], [1, 3, 5, 7, 9]],
# [[20, 22, 24, 26, 28], [21, 23, 25, 27, 29]]
# ],
# '10-19,30-39': [
# [[10, 12, 14, 16, 18], [11, 13, 15, 17, 19]],
# [[30, 32, 34, 36, 38], [31, 33, 35, 37, 39]]
# ]}
#
# for rank-0, it belongs to the '0-9,20-29' cpu_affinity_key,
# and it is located at index-0 of cpu_socket_gpus_list['0-9,20-29'],
# therefore, affinity_mask would be a collection of all cpu cores
# in index-0 of cpu_core_groups['0-9,20-29'], that is [0, 2, 4, 6, 8]
# and [20, 22, 24, 26, 28].
# affinity_mask = [0, 2, 4, 6, 8, 20, 22, 24, 26, 28]
affinity_mask = list()
cpu_affinity_key = gpu_cpu_affinity_map[local_rank]
cpu_core_idx = cpu_socket_gpus_list[cpu_affinity_key].index(local_rank)
for cpu_core_group in cpu_core_groups[cpu_affinity_key]:
affinity_mask.extend(cpu_core_group[cpu_core_idx])
pid = os.getpid()
os.sched_setaffinity(pid, affinity_mask)
logging.info("Set CPU affinity of rank-%d (Process %d) "
"to %s.", local_rank, pid, str(os.sched_getaffinity(pid)))
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/utils/affinity.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/utils/__init__.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import paddle.distributed as dist
import dllogger
def format_step(step):
"""
Build the prefix message for a dllogger step.
Args:
step(str|tuple): Dllogger step format.
Returns:
s(str): String to print in log.
"""
if isinstance(step, str):
return step
s = ""
if len(step) > 0:
s += f"Epoch: {step[0]} "
if len(step) > 1:
s += f"Iteration: {step[1]} "
if len(step) > 2:
s += f"Validation Iteration: {step[2]} "
if len(step) == 0:
s = "Summary:"
return s
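# Example (sketch): format_step((0, 10)) returns "Epoch: 0 Iteration: 10 ",
# while format_step(tuple()) returns "Summary:".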
def setup_dllogger(log_file):
"""
Setup logging and dllogger.
Args:
log_file(str): Path to log file.
"""
logging.basicConfig(
level=logging.DEBUG,
format='{asctime}:{levelname}: {message}',
style='{')
if dist.get_rank() == 0:
dllogger.init(backends=[
dllogger.StdOutBackend(
dllogger.Verbosity.DEFAULT, step_format=format_step),
dllogger.JSONStreamBackend(dllogger.Verbosity.VERBOSE, log_file),
])
else:
dllogger.init([])
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/utils/logger.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
class Mode(Enum):
TRAIN = 'Train'
EVAL = 'Eval'
class RunScope(Enum):
TRAIN_ONLY = 'train_only'
EVAL_ONLY = 'eval_only'
TRAIN_EVAL = 'train_eval'
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/utils/mode.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .resnet import ResNet50
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/models/__init__.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
from paddle import ParamAttr
import paddle.nn as nn
from paddle.nn import Conv2D, BatchNorm, Linear
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.nn.initializer import Uniform, Constant, KaimingNormal
MODELS = ["ResNet50"]
__all__ = MODELS
class ConvBNLayer(nn.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
groups=1,
act=None,
lr_mult=1.0,
data_format="NCHW",
bn_weight_decay=True):
super().__init__()
self.act = act
self.avg_pool = AvgPool2D(
kernel_size=2, stride=2, padding=0, ceil_mode=True)
self.conv = Conv2D(
in_channels=num_channels,
out_channels=num_filters,
kernel_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(
learning_rate=lr_mult, initializer=KaimingNormal()),
bias_attr=False,
data_format=data_format)
self.bn = BatchNorm(
num_filters,
param_attr=ParamAttr(
learning_rate=lr_mult,
regularizer=None
if bn_weight_decay else paddle.regularizer.L2Decay(0.0),
initializer=Constant(1.0)),
bias_attr=ParamAttr(
learning_rate=lr_mult,
regularizer=None
if bn_weight_decay else paddle.regularizer.L2Decay(0.0),
initializer=Constant(0.0)),
data_layout=data_format)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
if self.act:
x = self.relu(x)
return x
class BottleneckBlock(nn.Layer):
def __init__(self,
num_channels,
num_filters,
stride,
shortcut=True,
lr_mult=1.0,
data_format="NCHW",
bn_weight_decay=True):
super().__init__()
self.conv0 = ConvBNLayer(
num_channels=num_channels,
num_filters=num_filters,
filter_size=1,
act="relu",
lr_mult=lr_mult,
data_format=data_format,
bn_weight_decay=bn_weight_decay)
self.conv1 = ConvBNLayer(
num_channels=num_filters,
num_filters=num_filters,
filter_size=3,
stride=stride,
act="relu",
lr_mult=lr_mult,
data_format=data_format,
bn_weight_decay=bn_weight_decay)
self.conv2 = ConvBNLayer(
num_channels=num_filters,
num_filters=num_filters * 4,
filter_size=1,
act=None,
lr_mult=lr_mult,
data_format=data_format,
bn_weight_decay=bn_weight_decay)
if not shortcut:
self.short = ConvBNLayer(
num_channels=num_channels,
num_filters=num_filters * 4,
filter_size=1,
stride=stride,
lr_mult=lr_mult,
data_format=data_format,
bn_weight_decay=bn_weight_decay)
self.relu = nn.ReLU()
self.shortcut = shortcut
def forward(self, x):
identity = x
x = self.conv0(x)
x = self.conv1(x)
x = self.conv2(x)
if self.shortcut:
short = identity
else:
short = self.short(identity)
x = paddle.add(x=x, y=short)
x = self.relu(x)
return x
class ResNet(nn.Layer):
def __init__(self,
class_num=1000,
data_format="NCHW",
input_image_channel=3,
use_pure_fp16=False,
bn_weight_decay=True):
super().__init__()
self.class_num = class_num
self.num_filters = [64, 128, 256, 512]
self.block_depth = [3, 4, 6, 3]
self.num_channels = [64, 256, 512, 1024]
self.channels_mult = 1 if self.num_channels[-1] == 256 else 4
self.use_pure_fp16 = use_pure_fp16
self.stem_cfg = {
#num_channels, num_filters, filter_size, stride
"vb": [[input_image_channel, 64, 7, 2]],
}
self.stem = nn.Sequential(* [
ConvBNLayer(
num_channels=in_c,
num_filters=out_c,
filter_size=k,
stride=s,
act="relu",
data_format=data_format,
bn_weight_decay=bn_weight_decay)
for in_c, out_c, k, s in self.stem_cfg['vb']
])
self.max_pool = MaxPool2D(
kernel_size=3, stride=2, padding=1, data_format=data_format)
block_list = []
for block_idx in range(len(self.block_depth)):
shortcut = False
for i in range(self.block_depth[block_idx]):
block_list.append(
BottleneckBlock(
num_channels=self.num_channels[block_idx] if i == 0
else self.num_filters[block_idx] * self.channels_mult,
num_filters=self.num_filters[block_idx],
stride=2 if i == 0 and block_idx != 0 else 1,
shortcut=shortcut,
data_format=data_format,
bn_weight_decay=bn_weight_decay))
shortcut = True
self.blocks = nn.Sequential(*block_list)
self.avg_pool = AdaptiveAvgPool2D(1, data_format=data_format)
self.flatten = nn.Flatten()
self.avg_pool_channels = self.num_channels[-1] * 2
stdv = 1.0 / math.sqrt(self.avg_pool_channels * 1.0)
self.fc = Linear(
self.avg_pool_channels,
self.class_num,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
def forward(self, x):
if self.use_pure_fp16:
with paddle.static.amp.fp16_guard():
x = self.stem(x)
x = self.max_pool(x)
x = self.blocks(x)
x = self.avg_pool(x)
x = self.flatten(x)
x = self.fc(x)
else:
x = self.stem(x)
x = self.max_pool(x)
x = self.blocks(x)
x = self.avg_pool(x)
x = self.flatten(x)
x = self.fc(x)
return x
def ResNet50(**kwargs):
model = ResNet(**kwargs)
return model
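# Usage sketch (dynamic-graph style; the input shape assumes the default NCHW
# layout and 3 input channels):
# model = ResNet50(class_num=1000)
# logits = model(paddle.randn([8, 3, 224, 224]))  # logits shape: [8, 1000]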
| DeepLearningExamples-master | PaddlePaddle/Classification/RN50v1.5/models/resnet.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import paddle
from utils.utility import is_integer
class Poly:
"""
Polynomial learning rate decay.
lr = (learning_rate - end_lr) * (1 - min(step, decay_steps) / decay_steps) ^ power + end_lr
If `power` is 1.0, it is equivalent to linear learning rate decay.
Args:
learning_rate (float): The initial learning rate.
num_steps(int): The total number of training steps.
end_lr(float, optional): The minimum final learning rate. Default: 0.0.
power(float, optional): Power of polynomial. Default: 1.0.
warmup(int|float, optional):
If warmup is int, it indicates the number of warmup steps. Default: 0.
If warmup is float, it indicates the proportion of warmup steps.
warmup_start_lr(float, optional): Initial learning rate of warm up. Default: 0.0.
last_step(int, optional): The step id of the last run. Can be set to resume training.
Default: 0.
"""
def __init__(self,
learning_rate,
num_steps,
end_lr=0.0,
power=1.0,
warmup=0,
warmup_start_lr=0.0,
last_step=0):
super().__init__()
self.end_lr = end_lr
self.power = power
self.learning_rate = learning_rate
self.warmup_start_lr = warmup_start_lr
self.last_step = last_step
self.total_steps = num_steps
self.warmup_steps = warmup if is_integer(warmup) else int(
math.floor(warmup * self.total_steps))
self.steps = self.total_steps - self.warmup_steps
assert self.warmup_steps <= self.total_steps, "warmup steps can't be larger than total steps"
def __call__(self):
learning_rate = paddle.optimizer.lr.PolynomialDecay(
learning_rate=self.learning_rate,
decay_steps=self.steps,
end_lr=self.end_lr,
power=self.power,
last_epoch=self.last_step) if self.steps > 0 else self.learning_rate
if self.warmup_steps > 0:
learning_rate = paddle.optimizer.lr.LinearWarmup(
learning_rate=learning_rate,
warmup_steps=self.warmup_steps,
start_lr=self.warmup_start_lr,
end_lr=self.learning_rate,
last_epoch=self.last_step)
return learning_rate
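# Minimal usage sketch (hyper-parameters are hypothetical): calling a Poly
# instance returns a paddle.optimizer.lr.LRScheduler, stepped like any other.
# scheduler = Poly(learning_rate=1e-4, num_steps=1000, warmup=0.1)()
# for _ in range(1000):
#     scheduler.step()  # advance after each optimizer update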
def build_lr_scheduler(args):
"""
Build a learning rate scheduler.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
return:
lr(paddle.optimizer.lr.LRScheduler): A learning rate scheduler.
"""
lr = Poly(
args.learning_rate,
args.max_steps,
warmup=args.warmup_proportion,
last_step=args.last_step_of_checkpoint)
if not isinstance(lr, paddle.optimizer.lr.LRScheduler):
lr = lr()
logging.info("build lr %s success..", lr)
return lr
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/lr_scheduler.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import paddle
from tokenizer import _is_whitespace
def create_squad_data_holder():
input_ids = paddle.static.data(
name="input_ids", shape=[-1, -1], dtype="int64")
segment_ids = paddle.static.data(
name="segment_ids", shape=[-1, -1], dtype="int64")
start_positions = paddle.static.data(
name="start_positions", shape=[-1, 1], dtype="int64")
end_positions = paddle.static.data(
name="end_positions", shape=[-1, 1], dtype="int64")
unique_id = paddle.static.data(
name="unique_id", shape=[-1, 1], dtype="int64")
return input_ids, segment_ids, start_positions, end_positions, unique_id
class SquadExample:
"""
A single training/test example for the SQuAD question answering task.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
class InputFeatures:
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
class SQuAD(paddle.io.Dataset):
def __init__(self,
tokenizer,
mode='train',
version_2_with_negative=False,
path=None,
doc_stride=128,
max_query_length=64,
max_seq_length=512):
self.version_2_with_negative = version_2_with_negative
self.path = path
self.tokenizer = tokenizer
self.doc_stride = doc_stride
self.max_query_length = max_query_length
self.max_seq_length = max_seq_length
self._transform_func = None
if mode == 'train':
self.is_training = True
else:
self.is_training = False
self._read()
self.features = self.convert_examples_to_features(
self.examples,
tokenizer=self.tokenizer,
doc_stride=self.doc_stride,
max_query_length=self.max_query_length,
max_seq_length=self.max_seq_length)
def convert_examples_to_features(self, examples, tokenizer, max_seq_length,
doc_stride, max_query_length):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
features = []
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if self.is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if self.is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position +
1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position,
tok_end_position) = self._improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position,
tokenizer, example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[
split_token_index]
is_max_context = self._check_is_max_context(
doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = input_ids + [
tokenizer.vocab[tokenizer.pad_token]
for _ in range(self.max_seq_length - len(input_ids))
]
segment_ids = segment_ids + [
tokenizer.vocab[tokenizer.pad_token]
for _ in range(self.max_seq_length - len(segment_ids))
]
input_mask = [1] * len(input_ids)
start_position = None
end_position = None
if self.is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if self.is_training and example.is_impossible:
start_position = 0
end_position = 0
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible))
unique_id += 1
return features
def _improve_answer_span(self, doc_tokens, input_start, input_end,
tokenizer, orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electronics?
# Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(self, doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context,
num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def _read(self):
with open(self.path, "r", encoding="utf8") as reader:
input_data = json.load(reader)["data"]
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if _is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if self.is_training:
if self.version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer."
)
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
try:
end_position = char_to_word_offset[
answer_offset + answer_length - 1]
except IndexError:
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
else:
if self.version_2_with_negative:
is_impossible = qa["is_impossible"]
orig_answer_text = []
if not is_impossible and 'answers' in qa.keys():
answers = qa["answers"]
for answer in answers:
orig_answer_text.append(answer["text"])
else:
start_position = -1
end_position = -1
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
self.examples = examples
def __len__(self):
return len(self.features)
def __getitem__(self, idx):
feature = self.features[idx]
if self.is_training:
return feature.input_ids, feature.segment_ids, feature.unique_id, feature.start_position, feature.end_position
else:
return feature.input_ids, feature.segment_ids, feature.unique_id
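# Usage sketch (the file path is a placeholder and `tokenizer` is assumed to
# be a BertTokenizer built from this repo's tokenizer module):
# dev_ds = SQuAD(tokenizer, mode='dev', path='dev-v1.1.json', max_seq_length=384)
# input_ids, segment_ids, unique_id = dev_ds[0]  # eval mode returns 3 fields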
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/squad_dataset.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import time
import collections
import sys
import subprocess
import numpy as np
import paddle
import paddle.distributed.fleet as fleet
from paddle.fluid.contrib.mixed_precision.fp16_utils import rewrite_program
from paddle.fluid.contrib.mixed_precision.fp16_lists import AutoMixedPrecisionLists
from modeling import BertForQuestionAnswering, BertConfig
from tokenizer import BertTokenizer
from squad_utils import get_answers
from loss import CrossEntropyLossForSQuAD
from squad_dataset import SQuAD, create_squad_data_holder
from utils.collate import Pad, Stack, Tuple
from utils.utility import get_num_trainers, get_trainer_id, set_seed
from utils.logger import setup_loggers
from utils.affinity import set_cpu_affinity
from utils.save_load import mkdir_if_not_exist, init_program, save_model
from utils.config import print_args, parse_args
from utils.task import Task
from optimizer import AdamW
from lr_scheduler import Poly
from program import dist_optimizer
import dllogger
def evaluate(args, exe, logits, dev_program, data_loader):
RawResult = collections.namedtuple(
"RawResult", ["unique_id", "start_logits", "end_logits"])
all_results = []
infer_start = time.time()
tic_eval = time.time()
tic_benchmark_begin = 0
tic_benchmark_end = 0
dllogger.log(step="PARAMETER", data={"eval_start": True})
for step, batch in enumerate(data_loader):
start_logits_tensor, end_logits_tensor = exe.run(dev_program,
feed=batch,
fetch_list=[*logits])
if args.benchmark and step == args.benchmark_warmup_steps:
tic_benchmark_begin = time.time()
if args.benchmark and step == args.benchmark_warmup_steps + args.benchmark_steps:
tic_benchmark_end = time.time()
unique_ids = np.array(batch[0]['unique_id'])
for idx in range(unique_ids.shape[0]):
if len(all_results) % 1000 == 0 and len(all_results):
dllogger.log(step="PARAMETER",
data={
"sample_number": len(all_results),
"time_per_1000": time.time() - tic_eval
})
tic_eval = time.time()
unique_id = int(unique_ids[idx])
start_logits = [float(x) for x in start_logits_tensor[idx]]
end_logits = [float(x) for x in end_logits_tensor[idx]]
all_results.append(
RawResult(
unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
if args.benchmark:
time_to_benchmark = tic_benchmark_end - tic_benchmark_begin
dllogger.log(step=tuple(),
data={
"inference_sequences_per_second":
args.predict_batch_size * args.benchmark_steps /
time_to_benchmark
})
return
else:
time_to_infer = time.time() - infer_start
dllogger.log(step=tuple(),
data={
"e2e_inference_time": time_to_infer,
"inference_sequences_per_second":
len(data_loader.dataset.features) / time_to_infer
})
output_dir = os.path.join(args.output_dir, args.bert_model, "squad")
mkdir_if_not_exist(output_dir)
output_prediction_file = os.path.join(output_dir, "predictions.json")
output_nbest_file = os.path.join(output_dir, "nbest_predictions.json")
answers, nbest_answers = get_answers(args, data_loader.dataset.examples,
data_loader.dataset.features,
all_results)
with open(output_prediction_file, "w") as f:
f.write(json.dumps(answers, indent=4) + "\n")
with open(output_nbest_file, "w") as f:
f.write(json.dumps(nbest_answers, indent=4) + "\n")
if args.do_eval:
eval_out = subprocess.check_output([
sys.executable, args.eval_script, args.predict_file,
output_prediction_file
])
scores = str(eval_out).strip()
exact_match = float(scores.split(":")[1].split(",")[0])
f1 = float(scores.split(":")[2].split("}")[0])
dllogger.log(step=tuple(), data={"exact_match": exact_match, "F1": f1})
def main(args):
setup_loggers(args.report_file)
if args.show_config:
print_args(args)
trainer_id = get_trainer_id()
num_trainers = get_num_trainers()
    # Set the Paddle execution environment
fleet.init(is_collective=True)
if args.enable_cpu_affinity:
set_cpu_affinity()
place = paddle.set_device('gpu')
set_seed(args.seed)
dllogger.log(step="PARAMETER", data={"SEED": args.seed})
# Create the main_program for the training and dev_program for the validation
main_program = paddle.static.default_main_program()
startup_program = paddle.static.default_startup_program()
tokenizer = BertTokenizer(
vocab_file=args.vocab_file,
do_lower_case=args.do_lower_case,
max_len=512)
with paddle.static.program_guard(main_program, startup_program):
input_ids, segment_ids, start_positions, end_positions, unique_id = create_squad_data_holder(
)
if args.do_train:
train_dataset = SQuAD(
tokenizer=tokenizer,
doc_stride=args.doc_stride,
path=args.train_file,
version_2_with_negative=args.version_2_with_negative,
max_query_length=args.max_query_length,
max_seq_length=args.max_seq_length,
mode="train")
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_dataset, batch_size=args.train_batch_size, shuffle=True)
train_batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.vocab[tokenizer.pad_token]), # input
Pad(axis=0, pad_val=tokenizer.vocab[tokenizer.pad_token]), # segment
Stack(), # unique_id
Stack(dtype="int64"), # start_pos
Stack(dtype="int64") # end_pos
): [data for i, data in enumerate(fn(samples)) if i != 2]
train_data_loader = paddle.io.DataLoader(
dataset=train_dataset,
feed_list=[
input_ids, segment_ids, start_positions, end_positions
],
batch_sampler=train_batch_sampler,
collate_fn=train_batchify_fn,
num_workers=0,
return_list=False)
with paddle.static.program_guard(main_program, startup_program):
bert_config = BertConfig.from_json_file(args.config_file)
bert_config.fuse_mha = args.fuse_mha
if bert_config.vocab_size % 8 != 0:
bert_config.vocab_size += 8 - (bert_config.vocab_size % 8)
model = BertForQuestionAnswering(bert_config)
criterion = CrossEntropyLossForSQuAD()
logits = model(input_ids=input_ids, token_type_ids=segment_ids)
if args.do_predict:
dev_program = main_program.clone(for_test=True)
if args.do_train:
loss = criterion(logits, (start_positions, end_positions))
num_train_steps = len(train_data_loader) * args.epochs
if args.max_steps is not None and args.max_steps > 0:
num_train_steps = min(num_train_steps, args.max_steps)
lr_scheduler = Poly(
learning_rate=args.learning_rate, num_steps=num_train_steps)()
optimizer = AdamW(args, learning_rate=lr_scheduler)()
optimizer = dist_optimizer(args, optimizer)
optimizer.minimize(loss)
exe = paddle.static.Executor(place)
exe.run(startup_program)
init_program(
args, program=main_program, exe=exe, model=model, task=Task.squad)
if args.do_train:
dllogger.log(step="PARAMETER", data={"train_start": True})
dllogger.log(step="PARAMETER",
data={
"training_samples":
len(train_data_loader.dataset.examples)
})
dllogger.log(step="PARAMETER",
data={
"training_features":
len(train_data_loader.dataset.features)
})
dllogger.log(step="PARAMETER",
data={"train_batch_size": args.train_batch_size})
dllogger.log(step="PARAMETER", data={"steps": num_train_steps})
global_step = 0
tic_benchmark_begin = 0
tic_benchmark_end = 0
tic_train_begin = time.time()
for epoch in range(args.epochs):
for batch in train_data_loader:
if global_step >= num_train_steps:
break
if args.benchmark and global_step >= args.benchmark_warmup_steps + args.benchmark_steps:
break
loss_return = exe.run(main_program,
feed=batch,
fetch_list=[loss])
lr = lr_scheduler.get_lr()
lr_scheduler.step()
global_step += 1
if args.benchmark and global_step == args.benchmark_warmup_steps:
tic_benchmark_begin = time.time()
if args.benchmark and global_step == args.benchmark_warmup_steps + args.benchmark_steps:
tic_benchmark_end = time.time()
if global_step % args.log_freq == 0:
dllogger_it_data = {
'loss': loss_return[0].item(),
'learning_rate': lr
}
dllogger.log((epoch, global_step), data=dllogger_it_data)
if not args.benchmark:
time_to_train = time.time() - tic_train_begin
dllogger.log(step=tuple(),
data={
"e2e_train_time": time_to_train,
"training_sequences_per_second":
args.train_batch_size * num_train_steps *
num_trainers / time_to_train
})
else:
time_to_benchmark = tic_benchmark_end - tic_benchmark_begin
dllogger.log(step=tuple(),
data={
"training_sequences_per_second":
args.train_batch_size * args.benchmark_steps *
num_trainers / time_to_benchmark
})
if trainer_id == 0:
model_path = os.path.join(args.output_dir, args.bert_model,
"squad")
save_model(main_program, model_path, args.model_prefix)
if args.do_predict and trainer_id == 0:
dev_dataset = SQuAD(
tokenizer=tokenizer,
doc_stride=args.doc_stride,
path=args.predict_file,
version_2_with_negative=args.version_2_with_negative,
max_query_length=args.max_query_length,
max_seq_length=args.max_seq_length,
mode="dev")
dev_batch_sampler = paddle.io.BatchSampler(
dev_dataset, batch_size=args.predict_batch_size, shuffle=False)
dev_batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.vocab[tokenizer.pad_token]), # input
Pad(axis=0, pad_val=tokenizer.vocab[tokenizer.pad_token]), # segment
Stack() # unique_id
): fn(samples)
dev_data_loader = paddle.io.DataLoader(
dataset=dev_dataset,
feed_list=[input_ids, segment_ids, unique_id],
batch_sampler=dev_batch_sampler,
collate_fn=dev_batchify_fn,
num_workers=0,
return_list=False)
dllogger.log(step="PARAMETER", data={"predict_start": True})
dllogger.log(
step="PARAMETER",
data={"eval_samples": len(dev_data_loader.dataset.examples)})
dllogger.log(
step="PARAMETER",
data={"eval_features": len(dev_data_loader.dataset.features)})
dllogger.log(step="PARAMETER",
data={"predict_batch_size": args.predict_batch_size})
if args.amp:
amp_lists = AutoMixedPrecisionLists(
custom_white_list=['softmax', 'layer_norm', 'gelu'])
rewrite_program(dev_program, amp_lists=amp_lists)
evaluate(args, exe, logits, dev_program, dev_data_loader)
if __name__ == "__main__":
paddle.enable_static()
main(parse_args(Task.squad))
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/run_squad.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/__init__.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import logging
import paddle
import paddle.distributed.fleet as fleet
from utils.config import parse_args, print_args
from utils.save_load import init_program
from utils.logger import setup_loggers
from utils.affinity import set_cpu_affinity
from utils.utility import set_seed, get_trainer_id, get_num_trainers
import program
import dllogger
from lddl.paddle import get_bert_pretrain_data_loader
def main():
"""
    An entry point to train a BERT model, which contains five steps.
1. Parse arguments from command line.
2. Initialize distributed training related setting, including CPU affinity.
3. Create training Paddle.static.Program.
4. Load checkpoint or pretrained model if given.
5. Run program (train with datasets and save model if necessary).
"""
now = time.time()
args = parse_args()
setup_loggers(args.report_file)
if args.show_config:
print_args(args)
device = paddle.set_device('gpu')
fleet.init(is_collective=True)
if args.enable_cpu_affinity:
set_cpu_affinity()
# Create the random seed for the worker
set_seed(args.seed + get_trainer_id())
dllogger.log(step="PARAMETER", data={"SEED": args.seed})
dllogger.log(step="PARAMETER", data={"train_start": True})
dllogger.log(step="PARAMETER",
data={"batch_size_per_gpu": args.batch_size})
dllogger.log(step="PARAMETER", data={"learning_rate": args.learning_rate})
main_program = paddle.static.default_main_program()
startup_program = paddle.static.default_startup_program()
model, lr_scheduler, optimizer, loss, feeds = program.build(
args, main_program, startup_program)
exe = paddle.static.Executor(device)
exe.run(startup_program)
progress = init_program(args, program=main_program, exe=exe, model=model)
train_dataloader = get_bert_pretrain_data_loader(
args.input_dir,
vocab_file=args.vocab_file,
data_loader_kwargs={
'batch_size': args.batch_size,
'num_workers': args.num_workers,
'persistent_workers': True,
'feed_list': feeds
},
base_seed=args.seed,
log_dir=None if args.output_dir is None else
os.path.join(args.output_dir, 'lddl_log'),
log_level=logging.WARNING,
start_epoch=0 if progress is None else progress.get("epoch", 0),
sequence_length_alignment=64)
if args.amp:
optimizer.amp_init(device)
global_step, actual_steps_this_run, final_loss, train_time_raw = program.run(
exe, main_program, args, lr_scheduler, loss, train_dataloader,
progress)
if get_trainer_id() == 0:
e2e_time = time.time() - now
if args.benchmark:
training_perf = args.batch_size * args.gradient_merge_steps * (
actual_steps_this_run - args.benchmark_warmup_steps
) * get_num_trainers() / train_time_raw
else:
training_perf = args.batch_size * args.gradient_merge_steps * actual_steps_this_run * get_num_trainers(
) / train_time_raw
dllogger.log(step=tuple(),
data={
"e2e_train_time": e2e_time,
"training_sequences_per_second": training_perf,
"final_loss": final_loss,
"raw_train_time": train_time_raw
})
if __name__ == "__main__":
paddle.enable_static()
main()
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/run_pretraining.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import os
import unicodedata
from io import open
def convert_to_unicode(text):
"""
Converts `text` to Unicode (if it's not already), assuming utf-8 input.
Args:
text(str|bytes): Text to be converted to unicode.
Returns:
str: converted text.
"""
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError(f"Unsupported string type: {type(text)}")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def whitespace_tokenize(text):
"""
    Runs basic whitespace cleaning and splitting on a piece of text.
    Args:
        text(str): Text to be tokenized.
Returns:
tokens(list): Token list.
"""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer:
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
pad_token = "[PAD]"
def __init__(self,
vocab_file,
do_lower_case=True,
max_len=512,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
if not os.path.isfile(vocab_file):
raise ValueError(
f"Can't find a vocabulary file at path {vocab_file}")
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.basic_tokenizer = BasicTokenizer(
do_lower_case=do_lower_case, never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
def tokenize(self, text):
"""Tokenize a piece of text."""
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
raise ValueError(
f"Token indices sequence length is longer than the specified maximum "
f"sequence length for this BERT model ({len(ids)} > {self.max_len}). "
f"Running this sequence through BERT will result in indexing errors"
)
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
class BasicTokenizer:
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self,
do_lower_case=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""
Constructs a BasicTokenizer.
Args:
do_lower_case(bool, optional): Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in self.never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer:
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
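# Illustrative sketch (not part of the original module, never called): a
# minimal run of the greedy longest-match-first WordPiece algorithm above on a
# toy vocabulary. The vocabulary below is hypothetical and only serves to show
# how "unaffable" is split into "un", "##aff", "##able".
def _demo_wordpiece():
    toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
    tokenizer = WordpieceTokenizer(vocab=toy_vocab)
    return tokenizer.tokenize("unaffable")  # ["un", "##aff", "##able"]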
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/tokenizer.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
class CrossEntropyLossForSQuAD(paddle.nn.Layer):
"""
Loss function for SQuAD
"""
def __init__(self):
super().__init__()
def forward(self, y, label):
start_logits, end_logits = y
start_position, end_position = label
start_position = paddle.unsqueeze(start_position, axis=-1)
end_position = paddle.unsqueeze(end_position, axis=-1)
start_loss = paddle.nn.functional.softmax_with_cross_entropy(
logits=start_logits, label=start_position, soft_label=False)
start_loss = paddle.mean(start_loss)
end_loss = paddle.nn.functional.softmax_with_cross_entropy(
logits=end_logits, label=end_position, soft_label=False)
end_loss = paddle.mean(end_loss)
loss = (start_loss + end_loss) / 2
return loss
class BertPretrainingCriterion(paddle.nn.Layer):
"""
Loss function for BertPretraining.
Args:
vocab_size(int):
Vocabulary size of `inputs_ids` in `BertModel`.
"""
def __init__(self, vocab_size):
super().__init__()
self.loss_fn = paddle.nn.loss.CrossEntropyLoss(ignore_index=-1)
self.vocab_size = vocab_size
def forward(self, prediction_scores, seq_relationship_score,
masked_lm_labels, next_sentence_labels):
"""
Args:
prediction_scores(Tensor):
The scores of masked token prediction. Its data type should be float32.
If `masked_positions` is None, its shape is [batch_size, sequence_length, vocab_size].
Otherwise, its shape is [batch_size, mask_token_num, vocab_size]
seq_relationship_score(Tensor):
The scores of next sentence prediction. Its data type should be float32 and
its shape is [batch_size, 2]
masked_lm_labels(Tensor):
The labels of the masked language modeling, its dimensionality is equal to `prediction_scores`.
Its data type should be int64. If `masked_positions` is None, its shape is [batch_size, sequence_length, 1].
Otherwise, its shape is [batch_size, mask_token_num, 1]
next_sentence_labels(Tensor):
                The labels of the next sentence prediction task. Its data type should be int64 and
                its shape is [batch_size, 1].
        Returns:
            Tensor: The pretraining loss, equal to the sum of `masked_lm_loss` and `next_sentence_loss`.
Its data type should be float32 and its shape is [1].
"""
with paddle.static.amp.fp16_guard():
masked_lm_labels_flat = masked_lm_labels.reshape([-1])
mlm_labels = masked_lm_labels_flat[masked_lm_labels_flat != -1]
masked_lm_loss = self.loss_fn(prediction_scores, mlm_labels)
if next_sentence_labels.ndim == 1:
next_sentence_labels = next_sentence_labels.unsqueeze(axis=-1)
next_sentence_loss = self.loss_fn(seq_relationship_score,
next_sentence_labels)
return masked_lm_loss + next_sentence_loss
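# Illustrative sketch (numpy stand-in, never called by the training code): the
# criterion above flattens `masked_lm_labels` and keeps only the positions
# whose label is not -1 before computing the masked-LM cross entropy. The toy
# arrays below show that boolean-mask selection on its own; the values are
# placeholders.
def _demo_select_masked_labels():
    import numpy as np  # local import to keep this sketch self-contained
    labels = np.array([[-1, 5, -1], [7, -1, -1]], dtype="int64")
    flat = labels.reshape([-1])
    return flat[flat != -1]  # array([5, 7])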
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/loss.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create masked LM/next sentence masked_lm examples for BERT."""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import os
import random
from io import open
import collections
import h5py
import numpy as np
from tqdm import tqdm
from tokenizer import BertTokenizer, convert_to_unicode
class TrainingInstance:
"""A single training instance (sentence pair)."""
def __init__(self, tokens, segment_ids, masked_lm_positions,
masked_lm_labels, is_random_next):
self.tokens = tokens
self.segment_ids = segment_ids
self.is_random_next = is_random_next
self.masked_lm_positions = masked_lm_positions
self.masked_lm_labels = masked_lm_labels
def write_instance_to_example_file(instances, tokenizer, max_seq_length,
max_predictions_per_seq, output_file):
"""Create example files from `TrainingInstance`s."""
total_written = 0
features = collections.OrderedDict()
num_instances = len(instances)
features["input_ids"] = np.zeros(
[num_instances, max_seq_length], dtype="int32")
features["input_mask"] = np.zeros(
[num_instances, max_seq_length], dtype="int32")
features["segment_ids"] = np.zeros(
[num_instances, max_seq_length], dtype="int32")
features["masked_lm_positions"] = np.zeros(
[num_instances, max_predictions_per_seq], dtype="int32")
features["masked_lm_ids"] = np.zeros(
[num_instances, max_predictions_per_seq], dtype="int32")
features["next_sentence_labels"] = np.zeros(num_instances, dtype="int32")
for inst_index, instance in enumerate(tqdm(instances)):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(
instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features["input_ids"][inst_index] = input_ids
features["input_mask"][inst_index] = input_mask
features["segment_ids"][inst_index] = segment_ids
features["masked_lm_positions"][inst_index] = masked_lm_positions
features["masked_lm_ids"][inst_index] = masked_lm_ids
features["next_sentence_labels"][inst_index] = next_sentence_label
total_written += 1
logging.info("saving data")
f = h5py.File(output_file, 'w')
f.create_dataset(
"input_ids",
data=features["input_ids"],
dtype='i4',
compression='gzip')
f.create_dataset(
"input_mask",
data=features["input_mask"],
dtype='i1',
compression='gzip')
f.create_dataset(
"segment_ids",
data=features["segment_ids"],
dtype='i1',
compression='gzip')
f.create_dataset(
"masked_lm_positions",
data=features["masked_lm_positions"],
dtype='i4',
compression='gzip')
f.create_dataset(
"masked_lm_ids",
data=features["masked_lm_ids"],
dtype='i4',
compression='gzip')
f.create_dataset(
"next_sentence_labels",
data=features["next_sentence_labels"],
dtype='i1',
compression='gzip')
f.flush()
f.close()
def create_training_instances(input_files, tokenizer, max_seq_length,
dupe_factor, short_seq_prob, masked_lm_prob,
max_predictions_per_seq, rng):
"""Create `TrainingInstance`s from raw text."""
all_documents = [[]]
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
for input_file in input_files:
logging.info(f"creating instance from {input_file}")
with open(input_file, "r", encoding="UTF-8") as reader:
while True:
line = convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
# Remove empty documents
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
vocab_words = list(tokenizer.vocab.keys())
instances = []
for _ in range(dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(
create_instances_from_document(
all_documents, document_index, max_seq_length,
short_seq_prob, masked_lm_prob, max_predictions_per_seq,
vocab_words, rng))
rng.shuffle(instances)
return instances
def create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
# Account for [CLS], [SEP], [SEP]
max_num_tokens = max_seq_length - 3
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = rng.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
# Random next
is_random_next = False
if len(current_chunk) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
random_document_index = rng.randint(
0, len(all_documents) - 1)
if random_document_index != document_index:
break
                    # If the picked random document is the same as the current document
if random_document_index == document_index:
is_random_next = False
random_document = all_documents[random_document_index]
random_start = rng.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
# Actual next
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
(tokens, masked_lm_positions,
masked_lm_labels) = create_masked_lm_predictions(
tokens, masked_lm_prob, max_predictions_per_seq,
vocab_words, rng)
instance = TrainingInstance(
tokens=tokens,
segment_ids=segment_ids,
is_random_next=is_random_next,
masked_lm_positions=masked_lm_positions,
masked_lm_labels=masked_lm_labels)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
["index", "label"])
def create_masked_lm_predictions(tokens, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng):
"""Creates the predictions for the masked LM objective."""
cand_indexes = []
for (i, token) in enumerate(tokens):
if token == "[CLS]" or token == "[SEP]":
continue
cand_indexes.append(i)
rng.shuffle(cand_indexes)
output_tokens = list(tokens)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
masked_lms = []
covered_indexes = set()
for index in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
if index in covered_indexes:
continue
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if rng.random() < 0.8:
masked_token = "[MASK]"
else:
# 10% of the time, keep original
if rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_words[rng.randint(0, len(vocab_words) -
1)]
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels)
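# Illustrative sketch (hypothetical helper, never called by this script): the
# 80/10/10 masking rule used above, isolated for a single candidate token.
# `rng` stands for any random.Random instance and `vocab_words` for any list of
# vocabulary tokens; both are placeholders here.
def _demo_choose_masked_token(original_token, vocab_words, rng):
    if rng.random() < 0.8:
        return "[MASK]"  # 80% of the time: replace with [MASK]
    if rng.random() < 0.5:
        return original_token  # 10% of the time: keep the original token
    return vocab_words[rng.randint(0, len(vocab_words) - 1)]  # 10%: random token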
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
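# Illustrative sketch (hypothetical, never called): `truncate_seq_pair` above
# trims the longer of the two token lists in place, dropping tokens from the
# front or the back at random, until their combined length fits the budget.
def _demo_truncate_seq_pair():
    rng = random.Random(0)
    tokens_a = ["a1", "a2", "a3", "a4", "a5"]
    tokens_b = ["b1", "b2"]
    truncate_seq_pair(tokens_a, tokens_b, max_num_tokens=5, rng=rng)
    # tokens_a now keeps 3 of its original tokens; tokens_b is untouched.
    return tokens_a, tokens_b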
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_file",
default=None,
type=str,
required=True,
help="The input train corpus. can be directory with .txt files or a path to a single file"
)
parser.add_argument(
"--output_file",
default=None,
type=str,
required=True,
help="The output file where created hdf5 formatted data will be written."
)
parser.add_argument(
"--vocab_file",
default=None,
type=str,
required=False,
help="The vocabulary the BERT model will train on. "
"Use bert_model argument would ignore this. "
"The bert_model argument is recommended.")
parser.add_argument(
"--do_lower_case",
action='store_true',
default=True,
help="Whether to lower case the input text. True for uncased models, False for cased models. "
"Use bert_model argument would ignore this. The bert_model argument is recommended."
)
## Other parameters
#int
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument(
"--dupe_factor",
default=10,
type=int,
help="Number of times to duplicate the input data (with different masks)."
)
parser.add_argument(
"--max_predictions_per_seq",
default=20,
type=int,
help="Maximum number of masked LM predictions per sequence.")
# floats
parser.add_argument(
"--masked_lm_prob",
default=0.15,
type=float,
help="Masked LM probability.")
parser.add_argument(
"--short_seq_prob",
default=0.1,
type=float,
help="Probability to create a sequence shorter than maximum sequence length"
)
parser.add_argument(
'--random_seed',
type=int,
default=12345,
help="random seed for initialization")
args = parser.parse_args()
print(args)
tokenizer = BertTokenizer(
args.vocab_file, do_lower_case=args.do_lower_case, max_len=512)
input_files = []
if os.path.isfile(args.input_file):
input_files.append(args.input_file)
elif os.path.isdir(args.input_file):
input_files = [
os.path.join(args.input_file, f)
for f in os.listdir(args.input_file)
if (os.path.isfile(os.path.join(args.input_file, f)) and
f.endswith('.txt'))
]
else:
raise ValueError(f"{args.input_file} is not a valid path")
rng = random.Random(args.random_seed)
instances = create_training_instances(
input_files, tokenizer, args.max_seq_length, args.dupe_factor,
args.short_seq_prob, args.masked_lm_prob, args.max_predictions_per_seq,
rng)
output_file = args.output_file
write_instance_to_example_file(instances, tokenizer, args.max_seq_length,
args.max_predictions_per_seq, output_file)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/create_pretraining_data.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import logging
import shutil
import paddle
import paddle.distributed.fleet as fleet
from modeling import BertForPretraining, BertConfig
from loss import BertPretrainingCriterion
from utils.save_load import save_model
from utils.utility import get_trainer_id
from lr_scheduler import build_lr_scheduler
from optimizer import build_optimizer
import dllogger
def create_pretraining_data_holder():
input_ids = paddle.static.data(
name="input_ids", shape=[-1, -1], dtype="int64")
token_type_ids = paddle.static.data(
name="token_type_ids", shape=[-1, -1], dtype="int64")
attention_mask = paddle.static.data(
name="attention_mask", shape=[-1, 1, 1, -1], dtype="int64")
next_sentence_labels = paddle.static.data(
name="next_sentence_labels", shape=[-1, 1], dtype="int64")
masked_lm_labels = paddle.static.data(
name="masked_lm_labels", shape=[-1, -1], dtype="int64")
return [
input_ids, token_type_ids, attention_mask, next_sentence_labels,
masked_lm_labels
]
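# Illustrative sketch (assumed shapes, never executed): the data holders above
# are fed with a dict keyed by holder name. For a batch of 8 sequences of
# length 512, such a feed might look like the numpy arrays built below; the
# concrete values are placeholders only.
def _demo_pretraining_feed(batch_size=8, seq_len=512):
    import numpy as np  # local import to keep this sketch self-contained
    return {
        "input_ids": np.zeros([batch_size, seq_len], dtype="int64"),
        "token_type_ids": np.zeros([batch_size, seq_len], dtype="int64"),
        "attention_mask": np.ones([batch_size, 1, 1, seq_len], dtype="int64"),
        "next_sentence_labels": np.zeros([batch_size, 1], dtype="int64"),
        "masked_lm_labels": np.full([batch_size, seq_len], -1, dtype="int64"),
    }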
def create_strategy(args, use_distributed_fused_lamb=False):
"""
Create paddle.static.BuildStrategy and paddle.static.ExecutionStrategy with arguments.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
use_distributed_fused_lamb(bool, optional): Whether to use distributed fused lamb.
Returns:
        build_strategy(paddle.static.BuildStrategy): An instance of BuildStrategy.
        exec_strategy(paddle.static.ExecutionStrategy): An instance of ExecutionStrategy.
"""
build_strategy = paddle.static.BuildStrategy()
exec_strategy = paddle.static.ExecutionStrategy()
build_strategy.enable_addto = True
if args.amp:
build_strategy.fuse_gemm_epilogue = True
build_strategy.fuse_dot_product_attention = args.fuse_mha
if use_distributed_fused_lamb:
build_strategy.fuse_all_reduce_ops = False
build_strategy.reduce_strategy = paddle.static.BuildStrategy.ReduceStrategy._NoReduce
else:
build_strategy.fuse_all_reduce_ops = True
build_strategy.reduce_strategy = paddle.static.BuildStrategy.ReduceStrategy.AllReduce
exec_strategy.num_threads = 1
exec_strategy.num_iteration_per_drop_scope = 10000
return build_strategy, exec_strategy
def dist_optimizer(args, optimizer):
"""
Create a distributed optimizer based on a given optimizer.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
optimizer(paddle.optimizer): A normal optimizer.
Returns:
optimizer(fleet.distributed_optimizer): A distributed optimizer.
"""
use_distributed_fused_lamb = True if args.optimizer == 'DistributedFusedLamb' else False
build_strategy, exec_strategy = create_strategy(args,
use_distributed_fused_lamb)
dist_strategy = fleet.DistributedStrategy()
if use_distributed_fused_lamb:
dist_strategy.gradient_scale_configs = {'scale_strategy': 'sum'}
dist_strategy.execution_strategy = exec_strategy
dist_strategy.build_strategy = build_strategy
if use_distributed_fused_lamb:
dist_strategy.fuse_all_reduce_ops = False
else:
dist_strategy.fuse_all_reduce_ops = True
dist_strategy.fuse_grad_size_in_MB = 0
if args.amp:
dist_strategy.amp = True
custom_white_list = ['softmax', 'layer_norm', 'gelu']
custom_black_list = ['lookup_table',
'lookup_table_v2'] if args.use_pure_fp16 else None
dist_strategy.amp_configs = {
'custom_white_list': custom_white_list,
'custom_black_list': custom_black_list,
'init_loss_scaling': args.scale_loss,
'use_dynamic_loss_scaling': True,
'incr_every_n_steps': 2000,
'decr_every_n_nan_or_inf': 1,
'incr_ratio': 2.0,
'decr_ratio': 0.5,
'use_pure_fp16': args.use_pure_fp16,
'use_fp16_guard': args.use_pure_fp16
}
if not use_distributed_fused_lamb and args.gradient_merge_steps > 1:
dist_strategy.gradient_merge = True
dist_strategy.gradient_merge_configs = {
'k_steps': args.gradient_merge_steps
}
optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
return optimizer
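# Illustrative sketch (worked arithmetic, not used by the pipeline): with
# gradient merging enabled, one optimizer step consumes
# batch_size * gradient_merge_steps samples per GPU, so the effective global
# batch size is that value times the number of trainers. For example,
# batch_size=256, gradient_merge_steps=32 and 8 trainers give 65536 sequences
# per optimizer step.
def _effective_global_batch_size(batch_size, gradient_merge_steps, num_trainers):
    return batch_size * gradient_merge_steps * num_trainers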
def build(args, main_prog, startup_prog, is_train=True):
"""
    Build an executable paddle.static.Program via the following four steps:
1. Create feeds.
2. Create model.
3. Create loss.
4. Create optimizer if is_train==True.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
main_prog(paddle.static.Program):The main program.
startup_prog(paddle.static.Program):The startup program.
        is_train(bool, optional): Whether the main program created is for training. Default: True.
Returns:
model(paddle.nn.Layer): An instance of BERT Model defined in modeling.py.
lr_scheduler(paddle.optimizer.lr.LRScheduler): A learning rate scheduler.
optimizer(Optimizer): An optimizer with distributed/AMP strategy.
loss(variable): The output variable of loss function.
        feeds(list): A list of the data holder variables to be fed to the program.
"""
with paddle.static.program_guard(main_prog, startup_prog):
with paddle.utils.unique_name.guard():
feeds = create_pretraining_data_holder()
[
input_ids, token_type_ids, attention_mask,
next_sentence_labels, masked_lm_labels
] = feeds
bert_config = BertConfig.from_json_file(args.config_file)
if bert_config.vocab_size % 8 != 0:
bert_config.vocab_size += 8 - (bert_config.vocab_size % 8)
bert_config.fuse_mha = args.fuse_mha
model = BertForPretraining(bert_config)
criterion = BertPretrainingCriterion(bert_config.vocab_size)
prediction_scores, seq_relationship_score = model(
input_ids=input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
masked_lm_labels=masked_lm_labels)
loss = criterion(prediction_scores, seq_relationship_score,
masked_lm_labels, next_sentence_labels)
lr_scheduler = None
optimizer = None
if is_train:
lr_scheduler = build_lr_scheduler(args)
optimizer = build_optimizer(args, lr_scheduler)
optimizer = dist_optimizer(args, optimizer)
optimizer.minimize(loss)
return model, lr_scheduler, optimizer, loss, feeds
def run(exe,
program,
args,
lr_scheduler,
loss,
train_dataloader,
progress=None):
"""
Execute program.
Args:
exe(paddle.static.Executor): A executor to run program.
program(paddle.static.Program): The program to be executed.
args(Namespace): Arguments obtained from ArgumentParser.
lr_scheduler(paddle.optimizer.lr.LRScheduler): A learning rate scheduler.
Default: None.
loss(variable): The output variable of loss function.
progress(dict, optional): A dict to record the training progress of checkpoint.
    Returns:
        global_step(int): Final step id of this run.
        actual_steps_this_run(int): Number of optimizer steps actually performed in this run.
        loss_return(float): Final loss of this run.
        train_time_raw(float): Time spent training in this run.
"""
trainer_id = get_trainer_id()
batch_size_per_gpu = args.batch_size
log_steps = args.log_freq
save_steps = args.num_steps_per_checkpoint
gradient_merge_steps = args.gradient_merge_steps
most_recent_ckpts_paths = []
last_step = args.last_step_of_checkpoint
train_iter = 0
epoch = 0
train_time_raw = 0
if progress is None:
progress = dict()
else:
epoch = progress.get('epoch', 0)
global_step = 0 + last_step
logging.info(f"Training will start at the {last_step+1}th step")
max_steps = args.max_steps
steps_this_run = max_steps
if args.steps_this_run is not None:
if args.steps_this_run + last_step > max_steps:
logging.info(
f"Only {max_steps - last_step} steps will be performed in this run due to the limit of --max-steps."
)
else:
steps_this_run = args.steps_this_run
max_steps = steps_this_run + last_step
logging.warning(
f"{steps_this_run} steps will be performed in this run.")
if args.benchmark:
max_steps = args.benchmark_warmup_steps + args.benchmark_steps + last_step
total_samples = 0
raw_train_start = time.time()
step_start = time.time()
avg_loss = 0
while True:
for batch in train_dataloader:
train_iter += 1
loss_return = exe.run(program, feed=batch, fetch_list=[loss])
total_samples += batch_size_per_gpu
avg_loss += loss_return[0].item()
lr = lr_scheduler.get_lr()
if train_iter % (log_steps * gradient_merge_steps) == 0:
step_cost = time.time() - step_start
dllogger_it_data = {
'loss': avg_loss / gradient_merge_steps,
'learning_rate': lr,
'step_cost': step_cost,
'step_samples': total_samples,
'seqs_per_sec': total_samples / step_cost,
}
dllogger.log((epoch, global_step + 1), data=dllogger_it_data)
total_samples = 0
step_start = time.time()
if train_iter % gradient_merge_steps == 0:
global_step += 1
lr_scheduler.step()
avg_loss = 0
if args.benchmark and train_iter == (args.benchmark_warmup_steps *
gradient_merge_steps):
raw_train_start = time.time()
if train_iter % (save_steps * gradient_merge_steps
) == 0 or global_step >= max_steps:
train_time_raw = time.time() - raw_train_start
if trainer_id == 0:
model_path = os.path.join(
args.output_dir, args.bert_model, "phase1"
if args.phase1 else "phase2", f"{global_step}")
progress = {
'epoch': epoch,
'global_step': global_step,
'phase': 1 if args.phase1 else 2,
}
save_model(program, model_path, args.model_prefix,
progress)
most_recent_ckpts_paths.append(model_path)
if len(most_recent_ckpts_paths) > 3:
ckpt_to_be_removed = most_recent_ckpts_paths.pop(0)
shutil.rmtree(ckpt_to_be_removed)
if global_step >= max_steps:
actual_steps_this_run = global_step - last_step
return global_step, actual_steps_this_run, loss_return[0].item(), train_time_raw
epoch += 1
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/program.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
import paddle
from paddle import optimizer as optim
_EXCLUDE_FROM_DECAY = ["b_0", "norm"]
class AdamW:
"""
AdamW optimizer.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
learning_rate(float|LRScheduler, optional): The learning rate used to update parameters. Default: 0.001
Can be a float value or a paddle.optimizer.lr.LRScheduler.
"""
def __init__(self, args, learning_rate):
self.learning_rate = learning_rate
self.beta1 = args.beta1
self.beta2 = args.beta2
self.epsilon = args.epsilon
self.weight_decay = args.weight_decay
self.multi_precision = args.amp
def __call__(self):
# not apply weight decay to all bias and layer_norm
def apply_decay_func(name):
return False if any(key in name
for key in _EXCLUDE_FROM_DECAY) else True
# add grad clipping to prevent exploding gradients
clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
opt = optim.AdamW(
learning_rate=self.learning_rate,
beta1=self.beta1,
beta2=self.beta2,
epsilon=self.epsilon,
weight_decay=self.weight_decay,
apply_decay_param_fun=apply_decay_func,
grad_clip=clip,
multi_precision=self.multi_precision)
return opt
class Lamb:
"""
Lamb optimizer.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
learning_rate(float|LRScheduler, optional): The learning rate used to update parameters. Default: 0.001
Can be a float value or a paddle.optimizer.lr.LRScheduler.
"""
def __init__(self, args, learning_rate):
self.learning_rate = learning_rate
self.beta1 = args.beta1
self.beta2 = args.beta2
self.epsilon = args.epsilon
self.lamb_weight_decay = args.weight_decay
self.multi_precision = args.amp
def __call__(self):
# not apply weight decay to all bias and layer_norm
def exclude_from_decay_func(param):
return True if any(key in param.name
for key in _EXCLUDE_FROM_DECAY) else False
clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
opt = optim.Lamb(
learning_rate=self.learning_rate,
lamb_weight_decay=self.lamb_weight_decay,
beta1=self.beta1,
beta2=self.beta2,
epsilon=self.epsilon,
exclude_from_weight_decay_fn=exclude_from_decay_func,
grad_clip=clip)
opt._multi_precision = True if self.multi_precision else False
return opt
class DistributedFusedLamb:
"""
DistributedFusedLamb optimizer.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
learning_rate(float|LRScheduler, optional): The learning rate used to update parameters. Default: 0.001
Can be a float value or a paddle.optimizer.lr.LRScheduler.
"""
def __init__(self, args, learning_rate):
self.learning_rate = learning_rate
self.beta1 = args.beta1
self.beta2 = args.beta2
self.epsilon = args.epsilon
self.lamb_weight_decay = args.weight_decay
self.gradient_merge_steps = args.gradient_merge_steps
def __call__(self):
# not apply weight decay to all bias and layer_norm
def exclude_from_decay_func(param):
return True if any(key in param.name
for key in _EXCLUDE_FROM_DECAY) else False
clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
opt = paddle.incubate.DistributedFusedLamb(
learning_rate=self.learning_rate,
lamb_weight_decay=self.lamb_weight_decay,
beta1=self.beta1,
beta2=self.beta2,
epsilon=self.epsilon,
exclude_from_weight_decay_fn=exclude_from_decay_func,
grad_clip=clip,
clip_after_allreduce=True,
is_grad_scaled_by_nranks=False,
use_master_param_norm=True,
gradient_accumulation_steps=self.gradient_merge_steps,
use_master_acc_grad=True)
return opt
def build_optimizer(args, lr):
"""
Build a raw optimizer with learning rate scheduler.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
lr(paddle.optimizer.lr.LRScheduler): A LRScheduler used for training.
    Returns:
        optim(paddle.optimizer): A normal optimizer.
"""
optimizer_mod = sys.modules[__name__]
opt = getattr(optimizer_mod, args.optimizer)(args, learning_rate=lr)()
logging.info("build optimizer %s success..", opt)
return opt
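# Illustrative sketch (hypothetical argument values, never executed here):
# `build_optimizer` looks the optimizer class up by name in this module, so an
# args namespace only needs the fields read by that class. The field values
# below are placeholders for what the real ArgumentParser provides.
def _demo_build_optimizer(lr_scheduler):
    from types import SimpleNamespace
    args = SimpleNamespace(
        optimizer="Lamb",
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-6,
        weight_decay=0.01,
        amp=False)
    return build_optimizer(args, lr_scheduler)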
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/optimizer.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import copy
from dataclasses import dataclass
import logging
import paddle
import paddle.nn as nn
try:
from paddle.incubate.nn import FusedTransformerEncoderLayer
except ImportError:
FusedTransformerEncoderLayer = None
__all__ = [
'BertModel', 'BertForPretraining', 'BertPretrainingHeads',
'BertForQuestionAnswering'
]
@dataclass
class BertConfig:
vocab_size: int = 30528
hidden_size: int = 768
num_hidden_layers: int = 12
num_attention_heads: int = 12
intermediate_size: int = 3072
hidden_act: str = "gelu"
hidden_dropout_prob: float = 0.1
attention_probs_dropout_prob: float = 0.1
max_position_embeddings: int = 512
type_vocab_size: int = 2
initializer_range: float = 0.02
output_all_encoded_layers: bool = False
pad_token_id: int = 0
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig()
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
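# Illustrative sketch (hypothetical values, never executed at import time):
# BertConfig can be built from a plain dict; fields that are not supplied keep
# the defaults declared on the dataclass above. Only `hidden_size` and
# `num_hidden_layers` are overridden in this toy example.
def _demo_bert_config():
    config = BertConfig.from_dict({"hidden_size": 1024, "num_hidden_layers": 24})
    return config.to_json_string()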
class BertEmbeddings(nn.Layer):
"""
Include embeddings from word, position and token_type embeddings
"""
def __init__(self, bert_config):
super().__init__()
self.word_embeddings = nn.Embedding(bert_config.vocab_size,
bert_config.hidden_size)
self.position_embeddings = nn.Embedding(
bert_config.max_position_embeddings, bert_config.hidden_size)
self.token_type_embeddings = nn.Embedding(bert_config.type_vocab_size,
bert_config.hidden_size)
self.layer_norm = nn.LayerNorm(bert_config.hidden_size, epsilon=1e-12)
self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
"""
Args:
See class `BertModel`.
"""
ones = paddle.ones_like(input_ids, dtype="int64")
seq_length = paddle.cumsum(ones, axis=-1)
position_ids = seq_length - ones
position_ids.stop_gradient = True
if token_type_ids is None:
token_type_ids = paddle.zeros_like(input_ids, dtype="int64")
input_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = input_embeddings + position_embeddings + token_type_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertPooler(nn.Layer):
"""
Pool the result of BertEncoder.
"""
def __init__(self, hidden_size, pool_act=nn.Tanh()):
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation = pool_act
def forward(self, hidden_states):
"""
Args:
hidden_states(Tensor): A Tensor of hidden_states.
"""
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertModel(nn.Layer):
"""
The bare BERT Model transformer outputting raw hidden-states.
Args:
bert_config(BertConfig): A BertConfig class instance with the configuration
to build a new model
"""
def __init__(self, bert_config):
super().__init__()
self.bert_config = bert_config
self.embeddings = BertEmbeddings(bert_config)
        self.fuse = True if FusedTransformerEncoderLayer is not None else False
        # NOTE: the fused encoder path is force-disabled here; the standard
        # TransformerEncoder built below is used instead.
        self.fuse = False
if self.fuse:
self.encoder = nn.LayerList([
FusedTransformerEncoderLayer(
bert_config.hidden_size,
bert_config.num_attention_heads,
bert_config.intermediate_size,
dropout_rate=bert_config.hidden_dropout_prob,
activation=bert_config.hidden_act,
attn_dropout_rate=bert_config.attention_probs_dropout_prob,
act_dropout_rate=0.)
for _ in range(bert_config.num_hidden_layers)
])
else:
logging.warning(
"FusedTransformerEncoderLayer is not supported by the running Paddle. "
"TransformerEncoderLayer will be used.")
encoder_layer = nn.TransformerEncoderLayer(
bert_config.hidden_size,
bert_config.num_attention_heads,
bert_config.intermediate_size,
dropout=bert_config.hidden_dropout_prob,
activation=bert_config.hidden_act,
attn_dropout=bert_config.attention_probs_dropout_prob,
act_dropout=0,
fuse_qkv=bert_config.fuse_mha)
self.encoder = nn.TransformerEncoder(encoder_layer,
bert_config.num_hidden_layers)
self.pooler = BertPooler(bert_config.hidden_size)
def forward(self, input_ids, token_type_ids=None, attention_mask=None):
"""
Args:
input_ids(Tensor):
A Tensor of shape [batch_size, sequence_length] with the word token
indices in the vocabulary. Data type should be `int64`.
token_type_ids(Tensor, optional):
An optional Tensor of shape [batch_size, sequence_length] with the token types
indices selected in [0, type_vocab_size - 1].
If `type_vocab_size` is 2, indices can either be 0 or 1. Type 0 corresponds
to a `sentence A` and type 1 corresponds to a `sentence B` token.
                (see the BERT paper for more details). Its data type should be `int64`.
                Defaults to None, which is equivalent to marking every token as type 0 (`sentence A`).
attention_mask(Tensor, optional):
An optional Tensor of shape [batch_size, sequence_length] with indices of
                mask used in multi-head attention to avoid attending to some unwanted positions,
                usually the paddings or the subsequent positions.
Its data type can be int, float and bool.
When the data type is bool, the `masked` tokens have `False` values and the others have `True` values.
When the data type is int, the `masked` tokens have `0` values and the others have `1` values.
When the data type is float, the `masked` tokens have `-INF` values and the others have `0` values.
Defaults: None.
Returns:
encoder_output(Tensor):
A Tensor of shape [batch_size, sequence_length, hidden_size] contains hidden-states at the last
layer of the model. The data type should be float32.
pooled_output(Tensor):
                A Tensor of shape [batch_size, hidden_size] which is the further-processed output of
                the hidden state associated with the first input token (`[CLS]`), as used for the
                next sentence prediction task during pretraining (see the BERT paper).
"""
if attention_mask is None:
attention_mask = paddle.unsqueeze(
(input_ids != self.bert_config.pad_token_id).astype('int32'),
axis=[1, 2])
else:
if attention_mask.ndim == 2:
# attention_mask [batch_size, sequence_length] -> [batch_size, 1, 1, sequence_length]
attention_mask = attention_mask.unsqueeze(axis=[1, 2])
embedding_output = self.embeddings(
input_ids=input_ids, token_type_ids=token_type_ids)
if self.fuse:
encoder_output = embedding_output
for layer in self.encoder:
encoder_output = layer(encoder_output, attention_mask)
else:
encoder_output = self.encoder(embedding_output, attention_mask)
pooled_output = self.pooler(encoder_output)
return encoder_output, pooled_output
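# Illustrative sketch (added for exposition; not part of the original file):
# when no attention_mask is given, one is derived from the pad token id and
# unsqueezed to the [batch_size, 1, 1, sequence_length] shape broadcast by
# multi-head attention, e.g. with pad_token_id == 0:
#
#     input_ids = paddle.to_tensor([[101, 2023, 102, 0, 0]])
#     mask = paddle.unsqueeze((input_ids != 0).astype('int32'), axis=[1, 2])
#     # mask.shape -> [1, 1, 1, 5], zeros at the two padded positions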
class BertForQuestionAnswering(nn.Layer):
"""
BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Args:
bert_config(BertConfig): a BertConfig class instance with the configuration to build a new model.
"""
def __init__(self, bert_config):
super().__init__()
self.bert = BertModel(bert_config)
self.classifier = nn.Linear(bert_config.hidden_size, 2)
def forward(self, input_ids, token_type_ids=None, attention_mask=None):
"""
Args:
See class `BertModel`.
Returns:
start_logits(Tensor):
A tensor of shape [batch_size, sequence_length] indicates the start position token.
end_logits(Tensor):
A tensor of shape [batch_size, sequence_length] indicates the end position token.
"""
encoder_output, _ = self.bert(
input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask)
logits = self.classifier(encoder_output)
logits = paddle.transpose(logits, perm=[2, 0, 1])
start_logits, end_logits = paddle.unstack(x=logits, axis=0)
return start_logits, end_logits
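# Illustrative sketch (added for exposition; not part of the original file):
# the classifier above yields [batch_size, sequence_length, 2] logits; the
# transpose/unstack pair splits them into two [batch_size, sequence_length]
# tensors, one for span starts and one for span ends, e.g. (hypothetical shapes):
#
#     logits = paddle.rand([8, 384, 2])
#     start, end = paddle.unstack(paddle.transpose(logits, perm=[2, 0, 1]), axis=0)
#     # start.shape == end.shape == [8, 384]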
class BertLMPredictionHead(nn.Layer):
"""
    Prediction head that maps hidden states to vocabulary logits for the masked language modeling task.
Args:
hidden_size(int): See class `BertConfig`.
vocab_size(int): See class `BertConfig`.
activation(str): Activation function used in the language modeling task.
embedding_weights(Tensor, optional):
An optional Tensor of shape [vocab_size, hidden_size] used to map hidden_states
to logits of the masked token prediction. The data type should be float32.
Defaults: None, which means use the same weights of the embedding layer.
"""
def __init__(self,
hidden_size,
vocab_size,
activation,
embedding_weights=None):
super().__init__()
self.transform = nn.Linear(hidden_size, hidden_size)
self.activation = getattr(nn.functional, activation)
self.layer_norm = nn.LayerNorm(hidden_size, epsilon=1e-12)
self.decoder_weight = self.create_parameter(
shape=[vocab_size, hidden_size],
dtype=self.transform.weight.dtype,
is_bias=False) if embedding_weights is None else embedding_weights
self.decoder_bias = self.create_parameter(
shape=[vocab_size], dtype=self.decoder_weight.dtype, is_bias=True)
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = paddle.tensor.matmul(
hidden_states, self.decoder_weight,
transpose_y=True) + self.decoder_bias
return hidden_states
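# Illustrative note (added for exposition; not part of the original file):
# when `embedding_weights` is supplied (see BertForPretraining below), the
# decoder reuses the [vocab_size, hidden_size] word-embedding matrix, so the
# vocabulary projection is effectively
#
#     [num_tokens, hidden_size] @ [vocab_size, hidden_size].T -> [num_tokens, vocab_size]
#
# and no separate output embedding has to be learned (weight tying).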
class BertPretrainingHeads(nn.Layer):
"""
    Perform the masked language modeling task and the next sentence prediction task.
Args:
hidden_size(int): See class `BertConfig`.
vocab_size(int): See class `BertConfig`.
activation(str): Activation function used in the language modeling task.
embedding_weights (Tensor, optional):
An optional Tensor of shape [vocab_size, hidden_size] used to map hidden_states
to logits of the masked token prediction. The data type should be float32.
Defaults: None, which means use the same weights of the embedding layer.
"""
def __init__(self,
hidden_size,
vocab_size,
activation,
embedding_weights=None):
super().__init__()
self.predictions = BertLMPredictionHead(hidden_size, vocab_size,
activation, embedding_weights)
self.seq_relationship = nn.Linear(hidden_size, 2)
def forward(self, encoder_output, pooled_output, masked_lm_labels):
"""
        Args:
            encoder_output(Tensor):
                A Tensor of shape [batch_size, sequence_length, hidden_size] with hidden-states
                at the last layer of the bert model. Its data type should be float32.
            pooled_output(Tensor):
                A Tensor of shape [batch_size, hidden_size] with the output of the first token (`[CLS]`)
                in the sequence. We "pool" the model by simply taking the hidden state corresponding
                to the first token. Its data type should be float32.
            masked_lm_labels(Tensor):
                A Tensor of shape [batch_size, sequence_length] with the masked LM labels.
                Positions labeled -1 are not masked and are skipped by the prediction head.
                Its data type should be int64.
        Returns:
            prediction_scores(Tensor):
                A Tensor of shape [num_masked_tokens, vocab_size] with the scores of masked token
                prediction for the positions whose label is not -1. Its data type should be float32.
            seq_relationship_score(Tensor):
                A Tensor of shape [batch_size, 2] with the scores of next sentence prediction.
                Its data type should be float32.
"""
sequence_flattened = paddle.index_select(
encoder_output.reshape([-1, encoder_output.shape[-1]]),
paddle.nonzero(masked_lm_labels.reshape([-1]) != -1).squeeze(),
axis=0)
prediction_scores = self.predictions(sequence_flattened)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
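# Illustrative sketch (added for exposition; not part of the original file):
# only positions whose label is not -1 reach the LM head, which avoids computing
# vocabulary logits for unmasked tokens, e.g.
#
#     labels = paddle.to_tensor([[-1, 2054, -1], [-1, -1, 3000]])
#     keep = paddle.nonzero(labels.reshape([-1]) != -1).squeeze()
#     # keep -> [1, 5]; index_select with these indices keeps 2 of the 6 positions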
class BertForPretraining(nn.Layer):
"""
Bert Model with pretraining tasks on top.
Args:
bert_config(Class BertConfig): An instance of class `BertConfig`.
"""
def __init__(self, bert_config):
super().__init__()
self.bert = BertModel(bert_config)
self.cls = BertPretrainingHeads(
bert_config.hidden_size,
bert_config.vocab_size,
bert_config.hidden_act,
embedding_weights=self.bert.embeddings.word_embeddings.weight)
def forward(self, input_ids, token_type_ids, attention_mask,
masked_lm_labels):
"""
Args:
input_ids(Tensor): See class `BertModel`.
token_type_ids(Tensor, optional): See class `BertModel`.
attention_mask(Tensor, optional): See class `BertModel`.
            masked_lm_labels(Tensor): See class `BertPretrainingHeads`.
        Returns:
            prediction_scores(Tensor):
                A Tensor of shape [num_masked_tokens, vocab_size] with the scores of masked token
                prediction for the positions whose label is not -1. Its data type should be float32.
seq_relationship_score(Tensor):
A Tensor of shape [batch_size, 2] with the scores of next sentence prediction.
Its data type should be float32.
"""
with paddle.static.amp.fp16_guard():
outputs = self.bert(
input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(
sequence_output, pooled_output, masked_lm_labels)
return prediction_scores, seq_relationship_score
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/modeling.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import math
import logging
from tokenizer import BasicTokenizer
def get_answers(args, examples, features, results):
predictions = collections.defaultdict(
list) #it is possible that one example corresponds to multiple features
Prediction = collections.namedtuple('Prediction',
['text', 'start_logit', 'end_logit'])
if args.version_2_with_negative:
null_vals = collections.defaultdict(lambda: (float("inf"), 0, 0))
for ex, feat, result in match_results(examples, features, results):
start_indices = _get_best_indices(result.start_logits,
args.n_best_size)
end_indices = _get_best_indices(result.end_logits, args.n_best_size)
prelim_predictions = get_valid_prelim_predictions(
args, start_indices, end_indices, feat, result)
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
if args.version_2_with_negative:
score = result.start_logits[0] + result.end_logits[0]
if score < null_vals[ex.qas_id][0]:
null_vals[ex.qas_id] = (score, result.start_logits[0],
result.end_logits[0])
curr_predictions = []
seen_predictions = []
for pred in prelim_predictions:
if len(curr_predictions) == args.n_best_size:
break
if pred.start_index > 0: # this is a non-null prediction TODO: this probably is irrelevant
final_text = get_answer_text(args, ex, feat, pred)
if final_text in seen_predictions:
continue
else:
final_text = ""
seen_predictions.append(final_text)
curr_predictions.append(
Prediction(final_text, pred.start_logit, pred.end_logit))
predictions[ex.qas_id] += curr_predictions
#Add empty prediction
if args.version_2_with_negative:
for qas_id in predictions.keys():
            predictions[qas_id].append(
                Prediction('', null_vals[qas_id][1], null_vals[qas_id][2]))
nbest_answers = collections.defaultdict(list)
answers = {}
for qas_id, preds in predictions.items():
nbest = sorted(
preds, key=lambda x: (x.start_logit + x.end_logit),
reverse=True)[:args.n_best_size]
        # In very rare edge cases we could have only a single null prediction.
        # So we just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
Prediction(
text="empty", start_logit=0.0, end_logit=0.0))
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry and entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_answers[qas_id].append(output)
if args.version_2_with_negative:
score_diff = null_vals[qas_id][
0] - best_non_null_entry.start_logit - best_non_null_entry.end_logit
if score_diff > args.null_score_diff_threshold:
answers[qas_id] = ""
else:
answers[qas_id] = best_non_null_entry.text
else:
answers[qas_id] = nbest_answers[qas_id][0]['text']
return answers, nbest_answers
def get_answer_text(args, example, feature, pred):
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, args.do_lower_case,
args.verbose_logging)
return final_text
def get_valid_prelim_predictions(args, start_indices, end_indices, feature,
result):
_PrelimPrediction = collections.namedtuple(
"PrelimPrediction",
["start_index", "end_index", "start_logit", "end_logit"])
prelim_predictions = []
for start_index in start_indices:
for end_index in end_indices:
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > args.max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
return prelim_predictions
def match_results(examples, features, results):
unique_f_ids = set([f.unique_id for f in features])
unique_r_ids = set([r.unique_id for r in results])
matching_ids = unique_f_ids & unique_r_ids
features = [f for f in features if f.unique_id in matching_ids]
results = [r for r in results if r.unique_id in matching_ids]
features.sort(key=lambda x: x.unique_id)
results.sort(key=lambda x: x.unique_id)
for f, r in zip(
features, results
): #original code assumes strict ordering of examples. TODO: rewrite this
yield examples[f.example_index], f, r
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logging.info(f"Unable to find text: {pred_text} in {orig_text}")
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logging.info(
"Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logging.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logging.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _get_best_indices(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(
enumerate(logits), key=lambda x: x[1], reverse=True)
best_indices = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indices.append(index_and_score[i][0])
return best_indices
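# Illustrative sketch (added for exposition; not part of the original file):
# _get_best_indices simply returns the positions of the n highest logits, e.g.
#
#     _get_best_indices([0.1, 0.7, 0.2, 0.5], n_best_size=2)  # -> [1, 3]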
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
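# Illustrative sketch (added for exposition; not part of the original file):
# subtracting the max score is the usual numerical-stability trick and does not
# change the result, e.g.
#
#     _compute_softmax([1.0, 2.0, 3.0])
#     # -> [0.0900..., 0.2447..., 0.6652...]  (sums to 1.0)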
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/squad_utils.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
class Task(Enum):
pretrain = 'Pretrain'
squad = 'SQuAD'
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/utils/task.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class Stack:
"""
Stacks the input data samples to construct the batch. The input samples
must have the same shape/length.
Args:
axis (int, optional): The axis in the result data along which the input
data are stacked. Default: 0.
dtype (str|numpy.dtype, optional): The value type of the output. If it
is set to None, the type of input data is used. Default: None.
"""
def __init__(self, axis=0, dtype=None):
self._axis = axis
self._dtype = dtype
def __call__(self, data):
"""
Batchifies the input data by stacking.
Args:
data (list[numpy.ndarray]): The input data samples. It is a list.
Each element is a numpy.ndarray or list.
Returns:
numpy.ndarray: Stacked batch data.
Example:
.. code-block:: python
from data import Stack
a = [1, 2, 3]
b = [4, 5, 6]
c = [7, 8, 9]
result = Stack()([a, b, c])
'''
[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
'''
"""
data = np.stack(
data,
axis=self._axis).astype(self._dtype) if self._dtype else np.stack(
data, axis=self._axis)
return data
class Pad:
"""
Stacks the input data samples with padding.
Args:
pad_val (float|int, optional): The padding value. Default: 0.
axis (int, optional): The axis to pad the arrays. The arrays will be
padded to the largest dimension at axis. For example,
assume the input arrays have shape (10, 8, 5), (6, 8, 5), (3, 8, 5)
and the axis is 0. Each input will be padded into
(10, 8, 5) and then stacked to form the final output, which has
            shape (3, 10, 8, 5). Default: 0.
ret_length (bool|numpy.dtype, optional): If it is bool, indicate whether
to return the valid length in the output, and the data type of
returned length is int32 if True. If it is numpy.dtype, indicate the
data type of returned length. Default: False.
dtype (numpy.dtype, optional): The value type of the output. If it is
set to None, the input data type is used. Default: None.
pad_right (bool, optional): Boolean argument indicating whether the
padding direction is right-side. If True, it indicates we pad to the right side,
while False indicates we pad to the left side. Default: True.
Example:
.. code-block:: python
from data import Pad
# Inputs are multiple lists
a = [1, 2, 3, 4]
b = [5, 6, 7]
c = [8, 9]
Pad(pad_val=0)([a, b, c])
'''
[[1, 2, 3, 4],
[5, 6, 7, 0],
[8, 9, 0, 0]]
'''
"""
def __init__(self,
pad_val=0,
axis=0,
ret_length=None,
dtype=None,
pad_right=True):
self._pad_val = pad_val
self._axis = axis
self._ret_length = ret_length
self._dtype = dtype
self._pad_right = pad_right
def __call__(self, data):
"""
        Batchify the input data by padding. The input can be a list of numpy.ndarray.
The arrays will be padded to the largest dimension at axis and then
stacked to form the final output. In addition, the function will output
the original dimensions at the axis if ret_length is not None.
Args:
data (list(numpy.ndarray)|list(list)): List of samples to pad and stack.
Returns:
numpy.ndarray|tuple: If `ret_length` is False, it is a numpy.ndarray \
representing the padded batch data and the shape is (N, …). \
Otherwise, it is a tuple, except for the padded batch data, the \
tuple also includes a numpy.ndarray representing all samples' \
original length shaped `(N,)`.
"""
arrs = [np.asarray(ele) for ele in data]
original_length = [ele.shape[self._axis] for ele in arrs]
max_size = max(original_length)
ret_shape = list(arrs[0].shape)
ret_shape[self._axis] = max_size
ret_shape = (len(arrs), ) + tuple(ret_shape)
ret = np.full(
shape=ret_shape,
fill_value=self._pad_val,
dtype=arrs[0].dtype if self._dtype is None else self._dtype)
for i, arr in enumerate(arrs):
if arr.shape[self._axis] == max_size:
ret[i] = arr
else:
slices = [slice(None) for _ in range(arr.ndim)]
if self._pad_right:
slices[self._axis] = slice(0, arr.shape[self._axis])
else:
slices[self._axis] = slice(
max_size - arr.shape[self._axis], max_size)
if slices[self._axis].start != slices[self._axis].stop:
slices = [slice(i, i + 1)] + slices
ret[tuple(slices)] = arr
if self._ret_length:
return ret, np.asarray(
original_length,
dtype="int32") if self._ret_length == True else np.asarray(
original_length, self._ret_length)
else:
return ret
class Tuple:
"""
Wrap multiple batchify functions together. The input functions will be applied
to the corresponding input fields.
Each sample should be a list or tuple containing multiple fields. The i'th
batchify function stored in Tuple will be applied on the i'th field.
For example, when data sample is (nd_data, label), you can wrap two batchify
functions using `Tuple(DataBatchify, LabelBatchify)` to batchify nd_data and
label correspondingly.
Args:
fn (list|tuple|callable): The batchify functions to wrap.
*args (tuple of callable): The additional batchify functions to wrap.
Example:
.. code-block:: python
from data import Tuple, Pad, Stack
batchify_fn = Tuple(Pad(axis=0, pad_val=0), Stack())
"""
def __init__(self, fn, *args):
if isinstance(fn, (list, tuple)):
assert len(args) == 0, f"Input pattern not understood. The input of Tuple can be " \
f"Tuple(A, B, C) or Tuple([A, B, C]) or Tuple((A, B, C)). " \
f"Received fn={fn}, args={args}"
self._fn = fn
else:
self._fn = (fn, ) + args
for i, ele_fn in enumerate(self._fn):
assert callable(
ele_fn
), f"Batchify functions must be callable! type(fn[{i}]) = {str(type(ele_fn))}"
def __call__(self, data):
"""
Batchify data samples by applying each function on the corresponding data
field, and each data field is produced by stacking the field data of samples.
Args:
            data (list): The samples to batchify. Each sample should contain N fields.
Returns:
tuple: A tuple composed of results from all including batchifying functions.
"""
        assert len(data[0]) == len(self._fn), \
            f"Each data sample should contain {len(self._fn)} fields, " \
            f"one per batchify function."
ret = []
for i, ele_fn in enumerate(self._fn):
result = ele_fn([ele[i] for ele in data])
if isinstance(result, (tuple, list)):
ret.extend(result)
else:
ret.append(result)
return tuple(ret)
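# --- Illustrative sketch (added for exposition; not part of the original file) ---
# A typical composition pads variable-length token ids while stacking labels.
# The two (ids, label) samples below are hypothetical.
def _example_batchify():
    batchify_fn = Tuple(Pad(pad_val=0), Stack(dtype="int64"))
    ids, labels = batchify_fn([([1, 2, 3], 0), ([4, 5], 1)])
    # ids    -> [[1, 2, 3], [4, 5, 0]]
    # labels -> [0, 1]
    return ids, labels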
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/utils/collate.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import re
import io
import shutil
import tempfile
import logging
import json
import paddle
import numpy as np
from utils.task import Task
_PROGRESS_SUFFIX = '_progress.json'
_PDOPT_SUFFIX = '.pdopt'
_PDPARAMS_SUFFIX = '.pdparams'
def mkdir_if_not_exist(path):
"""
    Make the directory if it does not exist, ignoring the error raised when multiple processes create it simultaneously.
"""
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
                logging.warning(
                    f"{path} has already been created by another process")
else:
raise OSError(f"Failed to mkdir {path}")
def load_train_progress(progress_file):
"""
Load train progress info (such as file_list, epoch_id, step_id) from a given
file, which is used to resume training.
Args:
progress_file(str): Path to a file named `progress.json` with progress info.
Returns:
        progress_dict(dict): A dict with progress info.
"""
progress_dict = {}
if os.path.isfile(progress_file):
with open(progress_file, "r", encoding='utf-8') as reader:
json_obj = json.loads(reader.read())
for k, v in json_obj.items():
progress_dict[k] = v
else:
logging.warning("progress file is not found")
return progress_dict
def _load_state(path):
"""
Load model parameters from .pdparams file.
Args:
path(str): Path to .pdparams file.
Returns:
state(dict): Dict of parameters loaded from file.
"""
if os.path.exists(path + _PDOPT_SUFFIX):
tmp = tempfile.mkdtemp()
dst = os.path.join(tmp, os.path.basename(os.path.normpath(path)))
shutil.copy(path + _PDPARAMS_SUFFIX, dst + _PDPARAMS_SUFFIX)
state = paddle.static.load_program_state(dst)
shutil.rmtree(tmp)
else:
state = paddle.static.load_program_state(path)
return state
def load_params(prog, path, ignore_params=None):
"""
Load model from the given path.
Args:
prog (paddle.static.Program): Load weight to which Program object.
path (string): Model path.
ignore_params (list): Ignore variable to load when finetuning.
"""
if not (os.path.isdir(path) or os.path.exists(path + _PDPARAMS_SUFFIX)):
raise ValueError(f"Model pretrain path {path} does not exists.")
logging.info(f"Loading parameters from {path}...")
ignore_set = set()
state = _load_state(path)
    # Ignore parameters whose shape mismatches between the model
    # and the pretrained weights.
all_var_shape = {}
for block in prog.blocks:
for param in block.all_parameters():
all_var_shape[param.name] = param.shape
ignore_set.update([
name for name, shape in all_var_shape.items()
if name in state and shape != state[name].shape
])
if ignore_params:
all_var_names = [var.name for var in prog.list_vars()]
ignore_list = filter(
lambda var: any([re.match(name, var) for name in ignore_params]),
all_var_names)
ignore_set.update(list(ignore_list))
if len(ignore_set) > 0:
for k in ignore_set:
if k in state:
                logging.warning(
                    f"variable {k} is excluded and will not be loaded")
del state[k]
for n, p in state.items():
state[n] = p.astype(np.float32)
paddle.static.set_program_state(prog, state)
def init_ckpt(path_to_ckpt, program, exe):
"""
    Initialize the program from a checkpoint in the given path.
Args:
path_to_ckpt(str): The path to files of checkpoints,
including '.pdparams' and '.pdopt'.
program(paddle.static.Program): The program to init model.
exe(paddle.static.Executor): The executor to run program.
"""
if path_to_ckpt:
paddle.static.load(program, path_to_ckpt, exe)
logging.info(f"Finish initing checkpoint from {path_to_ckpt}")
return
def init_pretrained(path_to_pretrained, program):
"""
    Initialize the program from pretrained parameters in the given path.
Args:
path_to_pretrained(str): The path to file of pretrained model.
program(paddle.static.Program): The program to init model.
"""
if path_to_pretrained:
if not isinstance(path_to_pretrained, list):
pretrained_model = [path_to_pretrained]
for pretrain in pretrained_model:
load_params(program, pretrain)
logging.info(
f"Finish initing pretrained model from {pretrained_model}")
def reset_program_state_dict(model, pretrained_file=None):
"""
    Initialize the parameters from the bert config (or from a pretrained file if given),
    and return a state dict used to reset the program state.
"""
state_dict = model.state_dict()
pretrained_state_dict = None
if pretrained_file is not None:
pretrained_state_dict = _load_state(pretrained_file)
reset_state_dict = {}
scale = model.bert.bert_config.initializer_range
reset_parameter_names = []
for n, p in state_dict.items():
if pretrained_state_dict is not None and n in pretrained_state_dict:
reset_state_dict[p.name] = np.array(
pretrained_state_dict[n], dtype=np.float32)
reset_parameter_names.append(n)
elif pretrained_state_dict is not None and p.name in pretrained_state_dict and "bert" in n:
reset_state_dict[p.name] = np.array(
pretrained_state_dict[p.name], dtype=np.float32)
reset_parameter_names.append(n)
elif "layer_norm" not in p.name and "b_0" not in p.name:
reset_state_dict[p.name] = np.random.normal(
loc=0.0, scale=scale, size=p.shape).astype("float32")
    logging.info(
        f"The following parameters have been reset, please check: {reset_parameter_names}"
    )
return reset_state_dict
def init_program(args, program, exe, model, task=Task.pretrain):
"""
Init from given checkpoint or pretrained parameters.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
program(paddle.static.Program): The program to init model.
exe(paddle.static.Executor): The executor to run program.
model(paddle.nn.Layer): An instance of BERT model defined in modeling.py.
"""
progress = None
if args.from_checkpoint is not None:
init_ckpt(args.from_checkpoint, program, exe)
progress = load_train_progress(args.from_checkpoint + _PROGRESS_SUFFIX)
#elif task == Task.pretrain and args.from_pretrained_params is not None:
elif args.from_pretrained_params is not None:
init_pretrained(args.from_pretrained_params, program)
else:
reset_state_dict = reset_program_state_dict(
model, args.from_pretrained_params)
paddle.static.set_program_state(program, reset_state_dict)
return progress
def save_model(program, model_path, prefix, progress=None):
"""
Save a model to given path.
Args:
program(paddle.static.Program): The program to be saved.
model_path(str): The path to save model.
prefix(str): The prefix of model files.
"""
if paddle.distributed.get_rank() != 0:
return
mkdir_if_not_exist(model_path)
model_prefix = os.path.join(model_path, prefix)
paddle.static.save(program, model_prefix)
if progress is not None:
progress_file = os.path.join(model_path, prefix + _PROGRESS_SUFFIX)
out_json = json.dumps(progress, indent=2, sort_keys=True) + "\n"
with io.open(progress_file, 'w', encoding="utf-8") as f:
f.write(out_json)
logging.info(f"Already save model in {model_path}")
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/utils/save_load.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import argparse
import distutils.util
import logging
import dllogger
import paddle
from utils.task import Task
from utils.save_load import _PDOPT_SUFFIX, _PDPARAMS_SUFFIX, _PROGRESS_SUFFIX
_AUTO_LAST_EPOCH = 'auto'
_DEFAULT_BERT_CONFIG = {
'bert-large-uncased': './bert_configs/bert-large-uncased.json',
'bert-large-cased': './bert_configs/bert-large-cased.json',
'bert-base-uncased': './bert_configs/bert-base-uncased.json',
'bert-base-cased': './bert_configs/bert-base-cased.json',
}
def _get_full_path_of_ckpt(args):
if args.from_checkpoint is None:
args.last_step_of_checkpoint = 0
return
def _check_file_exist(path_with_prefix):
pdopt_path = path_with_prefix + _PDOPT_SUFFIX
pdparams_path = path_with_prefix + _PDPARAMS_SUFFIX
progress_path = path_with_prefix + _PROGRESS_SUFFIX
found = False
if (
os.path.exists(pdopt_path)
and os.path.exists(pdparams_path)
and os.path.exists(progress_path)
):
found = True
return found, pdopt_path, pdparams_path, progress_path
if not os.path.exists(args.from_checkpoint):
        logging.warning(
            f"Start training from scratch since no checkpoint is found in {args.from_checkpoint}."
        )
args.from_checkpoint = None
args.last_step_of_checkpoint = 0
return
target_from_checkpoint = os.path.join(
args.from_checkpoint, args.model_prefix
)
if args.last_step_of_checkpoint is None:
args.last_step_of_checkpoint = 0
elif args.last_step_of_checkpoint == _AUTO_LAST_EPOCH:
folders = os.listdir(args.from_checkpoint)
args.last_step_of_checkpoint = 0
for folder in folders:
tmp_ckpt_path = os.path.join(
args.from_checkpoint, folder, args.model_prefix
)
try:
folder = int(folder)
except ValueError:
                logging.warning(
                    f"Skip folder '{folder}' since its name is not an integer."
                )
continue
if (
folder > args.last_step_of_checkpoint
and _check_file_exist(tmp_ckpt_path)[0]
):
args.last_step_of_checkpoint = folder
step_with_prefix = (
os.path.join(str(args.last_step_of_checkpoint), args.model_prefix)
if args.last_step_of_checkpoint > 0
else args.model_prefix
)
target_from_checkpoint = os.path.join(
args.from_checkpoint, step_with_prefix
)
else:
try:
args.last_step_of_checkpoint = int(args.last_step_of_checkpoint)
except ValueError:
raise ValueError(
f"The value of --last-step-of-checkpoint should be None, {_AUTO_LAST_EPOCH}"
f" or integer >= 0, but receive {args.last_step_of_checkpoint}"
)
args.from_checkpoint = target_from_checkpoint
found, pdopt_path, pdparams_path, progress_path = _check_file_exist(
args.from_checkpoint
)
if not found:
args.from_checkpoint = None
args.last_step_of_checkpoint = 0
logging.warning(
f"Cannot find {pdopt_path} and {pdparams_path} and {progress_path}, disable --from-checkpoint."
)
def _get_full_path_of_pretrained_params(args, task=Task.pretrain):
if (
args.from_pretrained_params is None
and args.from_phase1_final_params is None
):
args.last_step_of_checkpoint = 0
return
if (
task == Task.pretrain
and args.from_phase1_final_params is not None
and args.last_step_of_checkpoint == 0
):
args.from_pretrained_params = args.from_phase1_final_params
args.from_pretrained_params = os.path.join(
args.from_pretrained_params, args.model_prefix
)
pdparams_path = args.from_pretrained_params + _PDPARAMS_SUFFIX
if not os.path.exists(pdparams_path):
args.from_pretrained_params = None
logging.warning(
f"Cannot find {pdparams_path}, disable --from-pretrained-params."
)
args.last_step_of_checkpoint = 0
def print_args(args):
args_for_log = copy.deepcopy(args)
dllogger.log(step='PARAMETER', data=vars(args_for_log))
def check_and_process_args(args, task=Task.pretrain):
if task == Task.pretrain:
assert not (
args.from_checkpoint is not None
and args.from_pretrained_params is not None
), (
"--from-pretrained-params and --from-checkpoint should "
"not be set simultaneously."
)
assert not (
args.phase1 and args.phase2
), "--phase1 and --phase2 should not be set simultaneously in bert pretraining."
if args.from_phase1_final_params is not None:
assert (
args.phase2
), "--from-phase1-final-params should only be used in phase2"
# SQuAD finetuning does not support suspend-resume yet.(TODO)
_get_full_path_of_ckpt(args)
if args.bert_model == 'custom':
assert (
args.config_file is not None
), "--config-file must be specified if --bert-model=custom"
elif args.config_file is None:
args.config_file = _DEFAULT_BERT_CONFIG[args.bert_model]
logging.info(
f"According to the name of bert_model, the default config_file: {args.config_file} will be used."
)
if args.from_checkpoint is None:
_get_full_path_of_pretrained_params(args, task)
assert os.path.isfile(
args.config_file
), f"Cannot find config file in {args.config_file}"
# cudnn mha fusion is only supported after v8.9.1 on Ampere and Hopper GPU
device_capability = paddle.device.cuda.get_device_capability()
cudnn_mha_supported = paddle.get_cudnn_version() >= 8901 and (
device_capability == (8, 0) or device_capability == (9, 0)
)
if (not cudnn_mha_supported or args.amp is False) and args.fuse_mha is True:
        logging.info(
            "cudnn mha fusion is not supported or AMP is disabled, "
            "falling back to unfused mha"
        )
args.fuse_mha = False
def add_global_args(parser, task=Task.pretrain):
group = parser.add_argument_group('Global')
if task == Task.pretrain:
group.add_argument(
'--input-dir',
type=str,
default=None,
required=True,
help='The input data directory. Should be specified by users and contain .hdf5 files for the task.',
)
group.add_argument('--num-workers', default=1, type=int)
if task == Task.squad:
group.add_argument(
'--train-file',
type=str,
default=None,
help='SQuAD json for training. E.g., train-v1.1.json',
)
group.add_argument(
'--predict-file',
type=str,
default=None,
help='SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json',
)
group.add_argument(
"--eval-script",
help="Script to evaluate squad predictions",
default="evaluate.py",
type=str,
)
group.add_argument(
'--epochs',
type=int,
default=3,
help='The number of epochs for training.',
)
group.add_argument(
'--vocab-file',
type=str,
default=None,
required=True,
help="Vocabulary mapping/file BERT was pretrainined on",
)
group.add_argument(
'--output-dir',
type=str,
default=None,
required=True,
help='The output directory where the model checkpoints will be written. Should be specified by users.',
)
group.add_argument(
'--bert-model',
type=str,
default='bert-large-uncased',
choices=(
'bert-base-uncased',
'bert-base-cased',
'bert-large-uncased',
'bert-large-cased',
'custom',
),
help='Specifies the type of BERT model to use. If it is set as custom, '
'the path to the config file must be given by specifying --config-file',
)
group.add_argument(
'--config-file',
type=str,
default=None,
help='The BERT model config. If set to None, `<--bert-model>.json` in folder `bert_configs` will be used.',
)
group.add_argument(
'--max-steps',
type=int,
default=None,
required=True if task == Task.pretrain else False,
help='Total number of training steps to perform.',
)
group.add_argument(
'--log-freq', type=int, default=10, help='Frequency of logging loss.'
)
group.add_argument(
'--num-steps-per-checkpoint',
type=int,
default=100,
help='Number of update steps until a model checkpoint is saved to disk.',
)
# Init model
group.add_argument(
'--from-pretrained-params',
type=str,
default=None,
help='Path to pretrained parameters. If set to None, no pretrained params will be used.',
)
group.add_argument(
'--from-checkpoint',
type=str,
default=None,
help='A checkpoint path to resume training. If set to None, no checkpoint will be used. '
'If not None, --from-pretrained-params will be ignored.',
)
group.add_argument(
'--last-step-of-checkpoint',
type=str,
default=None,
        help='The step id of the checkpoint given by --from-checkpoint. '
        'It should be None, auto, or an integer > 0. If set to None, training '
        'starts from the beginning. If set to auto, the largest integer-named '
        'folder under --from-checkpoint that contains a valid checkpoint will be used.',
)
if task == Task.pretrain:
group.add_argument(
'--from-phase1-final-params',
type=str,
default=None,
help='Path to final checkpoint of phase1, which will be used to '
'initialize the parameter in the first step of phase2, and '
'ignored in the rest steps of phase2.',
)
group.add_argument(
'--steps-this-run',
type=int,
default=None,
help='If provided, only run this many steps before exiting.',
)
group.add_argument(
'--seed', type=int, default=42, help="random seed for initialization"
)
group.add_argument(
'--report-file',
type=str,
default='./report.json',
help='A file in which to store JSON experiment report.',
)
group.add_argument(
'--model-prefix',
type=str,
default='bert_paddle',
help='The prefix name of model files to save/load.',
)
group.add_argument(
'--show-config',
type=distutils.util.strtobool,
default=True,
help='To show arguments.',
)
group.add_argument(
'--enable-cpu-affinity',
type=distutils.util.strtobool,
default=True,
help='To enable in-built GPU-CPU affinity.',
)
group.add_argument(
'--benchmark', action='store_true', help='To enable benchmark mode.'
)
group.add_argument(
'--benchmark-steps',
type=int,
default=20,
help='Steps for a benchmark run, only applied when --benchmark is set.',
)
group.add_argument(
'--benchmark-warmup-steps',
type=int,
default=20,
help='Warmup steps for a benchmark run, only applied when --benchmark is set.',
)
return parser
def add_training_args(parser, task=Task.pretrain):
group = parser.add_argument_group('Training')
group.add_argument(
'--optimizer',
default='Lamb',
metavar="OPTIMIZER",
choices=('Lamb', 'AdamW'),
help='The name of optimizer. It should be one of {Lamb, AdamW}.',
)
group.add_argument(
'--gradient-merge-steps',
type=int,
default=1,
help="Number of update steps to accumualte before performing a backward/update pass.",
)
group.add_argument(
'--learning-rate',
type=float,
default=1e-4,
help='The initial learning rate.',
)
group.add_argument(
'--warmup-start-lr',
type=float,
default=0.0,
help='The initial learning rate for warmup.',
)
group.add_argument(
'--warmup-proportion',
type=float,
default=0.01,
help='Proportion of training to perform linear learning rate warmup for. '
'For example, 0.1 = 10%% of training.',
)
group.add_argument(
'--beta1',
type=float,
default=0.9,
help='The exponential decay rate for the 1st moment estimates.',
)
group.add_argument(
'--beta2',
type=float,
default=0.999,
        help='The exponential decay rate for the 2nd moment estimates.',
)
group.add_argument(
'--epsilon',
type=float,
default=1e-6,
help='A small float value for numerical stability.',
)
group.add_argument(
'--weight-decay',
type=float,
default=0.01,
help='The weight decay coefficient.',
)
group.add_argument(
'--max-seq-length',
default=512,
type=int,
help='The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, and sequences shorter \n'
'than this will be padded.',
)
if task == Task.pretrain:
group.add_argument(
'--batch-size',
type=int,
default=32,
help='The batch size for training',
)
group.add_argument(
'--phase1',
action='store_true',
help='The phase of BERT pretraining. It should not be set '
'with --phase2 at the same time.',
)
group.add_argument(
'--phase2',
action='store_true',
help='The phase of BERT pretraining. It should not be set '
'with --phase1 at the same time.',
)
group.add_argument(
'--max-predictions-per-seq',
default=80,
type=int,
            help='The maximum total number of masked tokens in the input sequence.',
)
if task == Task.squad:
group.add_argument(
"--do-train", action='store_true', help="Whether to run training."
)
group.add_argument(
"--do-predict",
action='store_true',
help="Whether to run eval on the dev set.",
)
group.add_argument(
"--do-eval",
action='store_true',
help="Whether to use evaluate accuracy of predictions",
)
group.add_argument(
"--train-batch-size",
default=32,
type=int,
help="Total batch size for training.",
)
group.add_argument(
"--predict-batch-size",
default=8,
type=int,
help="Total batch size for predictions.",
)
group.add_argument(
"--verbose-logging",
action='store_true',
help="If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.",
)
group.add_argument(
"--doc-stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take "
"between chunks.",
)
group.add_argument(
"--max-query-length",
default=64,
type=int,
help="The maximum number of tokens for the question. Questions longer than this "
"will be truncated to this length.",
)
group.add_argument(
"--n-best-size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json "
"output file.",
)
group.add_argument(
"--max-answer-length",
default=30,
type=int,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.",
)
group.add_argument(
"--do-lower-case",
action='store_true',
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
group.add_argument(
'--version-2-with-negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
group.add_argument(
'--null-score-diff-threshold',
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
return parser
def add_advance_args(parser):
group = parser.add_argument_group('Advanced Training')
group.add_argument(
'--amp',
action='store_true',
help='Enable automatic mixed precision training (AMP).',
)
group.add_argument(
'--scale-loss',
type=float,
default=1.0,
help='The loss scalar for AMP training, only applied when --amp is set.',
)
group.add_argument(
'--use-dynamic-loss-scaling',
action='store_true',
help='Enable dynamic loss scaling in AMP training, only applied when --amp is set.',
)
group.add_argument(
'--use-pure-fp16',
action='store_true',
help='Enable pure FP16 training, only applied when --amp is set.',
)
group.add_argument(
'--fuse-mha',
action='store_true',
help='Enable multihead attention fusion. Require cudnn version >= 8.9.1',
)
return parser
def parse_args(task=Task.pretrain):
parser = argparse.ArgumentParser(
description="PaddlePaddle BERT pretraining script"
if task == Task.pretrain
else "PaddlePaddle SQuAD finetuning script",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser = add_global_args(parser, task)
parser = add_training_args(parser, task)
parser = add_advance_args(parser)
args = parser.parse_args()
check_and_process_args(args, task)
return args
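# Illustrative sketch (added for exposition; not part of the original file):
# a minimal pretraining invocation only needs the required flags defined above;
# the script name and paths below are hypothetical.
#
#     python run_pretraining.py \
#         --input-dir /path/to/hdf5/shards \
#         --vocab-file vocab.txt \
#         --output-dir ./results \
#         --bert-model bert-large-uncased \
#         --max-steps 1000 \
#         --amp --use-dynamic-loss-scaling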
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/utils/config.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ctypes
_cuda_home = os.environ.get('CUDA_HOME', '/usr/local/cuda')
_cudart = ctypes.CDLL(os.path.join(_cuda_home, 'lib64/libcudart.so'))
def cuda_profile_start():
_cudart.cudaProfilerStart()
def cuda_profile_stop():
_cudart.cudaProfilerStop()
_nvtx = ctypes.CDLL(os.path.join(_cuda_home, 'lib64/libnvToolsExt.so'))
def cuda_nvtx_range_push(name):
_nvtx.nvtxRangePushW(ctypes.c_wchar_p(name))
def cuda_nvtx_range_pop():
_nvtx.nvtxRangePop()
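# Illustrative usage sketch (added for exposition; not part of the original file):
# wrap a region of interest with an NVTX range and the profiler start/stop calls
# so it shows up as a named span when captured with Nsight Systems or nvprof.
#
#     cuda_profile_start()
#     cuda_nvtx_range_push("forward")
#     ...  # run the region to be profiled here
#     cuda_nvtx_range_pop()
#     cuda_profile_stop()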
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/utils/cuda_bind.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import random
import numpy as np
import paddle
def get_num_trainers():
"""Get number of trainers in distributed training."""
num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
return num_trainers
def get_trainer_id():
"""Get index of trainer in distributed training."""
trainer_id = int(os.environ.get('PADDLE_TRAINER_ID', 0))
return trainer_id
def is_integer(number):
"""Whether a number is integer."""
if sys.version > '3':
return isinstance(number, int)
return isinstance(number, (int, long))
def set_seed(seed):
"""Set random seed."""
random.seed(seed)
np.random.seed(seed)
paddle.seed(seed)
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/utils/utility.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import paddle
def _get_gpu_affinity_table():
"""
Generate three dict objects, gpu_cpu_affinity_map, cpu_socket_gpus_list, cpu_core_groups.
gpu_cpu_affinity_map (dict): Key is GPU ID and value is cpu_affinity string.
    cpu_socket_gpus_list (dict): Key is cpu_affinity string and value is a list
                                 collecting all GPU IDs that have affinity to this cpu socket.
    cpu_core_groups (dict): Key is cpu_affinity string and value is cpu core groups.
                            The cpu core groups contain #GPUs groups; each group has a
                            nearly equal number of cpu cores.
Example:
        $nvidia-smi topo -m
GPU0 GPU1 GPU2 GPU3 CPU Affinity NUMA Affinity
GPU0 X SYS SYS SYS 0-9,20-29 0
GPU1 SYS X SYS SYS 0-9,20-29 0
GPU2 SYS SYS X SYS 10-19,30-39 1
GPU3 SYS SYS SYS X 10-19,30-39 1
gpu_cpu_affinity_map =
        { 0: '0-9,20-29',     # GPU0's cpu affinity is '0-9,20-29'
          1: '0-9,20-29',     # GPU1's cpu affinity is '0-9,20-29'
          2: '10-19,30-39',   # GPU2's cpu affinity is '10-19,30-39'
          3: '10-19,30-39' }  # GPU3's cpu affinity is '10-19,30-39'
cpu_socket_gpus_list =
{ '0-9,20-29': [0, 1], # There are 2 GPUs, 0 and 1, belong to cpu affinity '0-9,20-29'.
'10-19,30-39': [2, 3] # There are 2 GPUs, 2 and 3, belong to cpu affinity '10-19,30-39'.
}
cpu_core_groups =
# There are 2 GPUs belong to cpu affinity '0-9,20-29', then
# cores [0, 1, ..., 8, 9] would be split to two groups every
# 2-th elements
# [0, 2, 4, 6, 8] and [1, 3, 5, 7, 9]
# The same for cores [20, 21, ..., 28, 29].
{'0-9,20-29': [
[[0, 2, 4, 6, 8], [1, 3, 5, 7, 9]],
[[20, 22, 24, 26, 28], [21, 23, 25, 27, 29]]
],
# The same as '0-9,20-29'
'10-19,30-39': [
[[10, 12, 14, 16, 18], [11, 13, 15, 17, 19]],
[[30, 32, 34, 36, 38], [31, 33, 35, 37, 39]]
]}
"""
lines = os.popen('nvidia-smi topo -m').readlines()
cpu_affinity_idx = -1
titles = lines[0].split('\t')
for idx in range(len(titles)):
if 'CPU Affinity' in titles[idx]:
cpu_affinity_idx = idx
assert cpu_affinity_idx > 0, \
"Can not obtain correct CPU affinity column index via nvidia-smi!"
gpu_cpu_affinity_map = dict()
cpu_socket_gpus_list = dict()
# Skip title
for idx in range(1, len(lines)):
line = lines[idx]
items = line.split('\t')
if 'GPU' in items[0]:
gpu_id = int(items[0][3:])
affinity = items[cpu_affinity_idx]
gpu_cpu_affinity_map[gpu_id] = affinity
if affinity in cpu_socket_gpus_list:
cpu_socket_gpus_list[affinity].append(gpu_id)
else:
cpu_socket_gpus_list[affinity] = [gpu_id]
cpu_core_groups = _group_cpu_cores(cpu_socket_gpus_list)
return gpu_cpu_affinity_map, cpu_socket_gpus_list, cpu_core_groups
def _group_cpu_cores(cpu_socket_gpus_list):
"""
    Generate a dictionary whose key is a cpu_affinity string and whose value is a list of cpu core groups.
    The cpu core groups contain #GPUs groups; each group has a nearly equal number of cpu cores.
    The grouping collects cpu cores every #GPUs-th element, to account for hyperthreading indices.
    For example, with 4 physical cores there are 8 logical cores under hyperthreading: CPU indices
    [0, 1, 2, 3] are physical cores and [4, 5, 6, 7] are hyperthreads. In this case, distributing the
    physical cores first, then the hyperthreads, reaches better performance.
Args:
cpu_socket_gpus_list (dict): a dict that map cpu_affinity_str to all GPUs that belong to it.
Return:
cpu_core_groups (dict): a dict that map cpu_affinity_str to cpu core groups.
Example:
cpu_socket_gpus_list = { '0-9,20-29': [0, 1], '10-19,30-39': [2, 3] },
which means there are 2 GPUs, 0 and 1, belong to '0-9,20-29' and
2 GPUs, 2 and 3, belong to '10-19,30-39'
therefore, cpu_core_groups =
{'0-9,20-29': [
[[0, 2, 4, 6, 8], [1, 3, 5, 7, 9]],
[[20, 22, 24, 26, 28], [21, 23, 25, 27, 29]]
],
'10-19,30-39': [
[[10, 12, 14, 16, 18], [11, 13, 15, 17, 19]],
[[30, 32, 34, 36, 38], [31, 33, 35, 37, 39]]
]}
"""
cpu_core_groups = dict()
for cpu_socket in cpu_socket_gpus_list:
cpu_core_groups[cpu_socket] = list()
gpu_count = len(cpu_socket_gpus_list[cpu_socket])
cores = cpu_socket.split(',')
for core in cores:
core_indices = _get_core_indices(core)
core_group = list()
for i in range(gpu_count):
start = i % len(core_indices)
sub_core_set = core_indices[start::gpu_count]
core_group.append(sub_core_set)
cpu_core_groups[cpu_socket].append(core_group)
return cpu_core_groups
def _get_core_indices(cores_str):
"""
Generate a dictionary of cpu core indices.
Args:
cores_str (str): a string with format "start_idx-end_idx".
Return:
cpu_core_indices (list): a list collected all indices in [start_idx, end_idx].
Example:
cores_str = '0-20'
cpu_core_indices = [0, 1, 2, ..., 18, 19, 20]
"""
start, end = cores_str.split('-')
return [*range(int(start), int(end) + 1)]
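# Illustrative sketch (added for exposition; not part of the original file):
#
#     _get_core_indices('0-3')    # -> [0, 1, 2, 3]
#     _get_core_indices('20-23')  # -> [20, 21, 22, 23]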
def set_cpu_affinity():
"""
Setup CPU affinity.
Each GPU would be bound to a specific set of CPU cores for optimal and stable performance.
This function would obtain GPU-CPU affinity via "nvidia-smi topo -m", then equally distribute
CPU cores to each GPU.
"""
gpu_cpu_affinity_map, cpu_socket_gpus_list, cpu_core_groups = \
_get_gpu_affinity_table()
node_num = paddle.distributed.fleet.node_num()
gpu_per_node = paddle.distributed.get_world_size() // node_num
local_rank = paddle.distributed.get_rank() % gpu_per_node
# gpu_cpu_affinity_map (dict): Key is GPU ID and value is cpu_affinity string.
    # cpu_socket_gpus_list (dict): Key is cpu_affinity string and value is a list
    #                              of all GPU IDs with affinity to this cpu socket.
    # cpu_core_groups (dict): Key is cpu_affinity string and value is cpu core groups.
    #                         cpu core groups contain #GPUs groups, and each group has
    #                         a nearly equal number of cpu cores.
# Example:
    # $ nvidia-smi topo -m
# GPU0 GPU1 GPU2 GPU3 CPU Affinity NUMA Affinity
# GPU0 X SYS SYS SYS 0-9,20-29 0
# GPU1 SYS X SYS SYS 0-9,20-29 0
# GPU2 SYS SYS X SYS 10-19,30-39 1
# GPU3 SYS SYS SYS X 10-19,30-39 1
#
# gpu_cpu_affinity_map =
# { 0: '0-9,20-29',
# 1: '0-9,20-29',
# 2: '10-19,30-39',
# 3: '10-19,30-39' }
# cpu_socket_gpus_list =
# { '0-9,20-29': [0, 1],
# '10-19,30-39': [2, 3] }
# cpu_core_groups =
# {'0-9,20-29': [
# [[0, 2, 4, 6, 8], [1, 3, 5, 7, 9]],
# [[20, 22, 24, 26, 28], [21, 23, 25, 27, 29]]
# ],
# '10-19,30-39': [
# [[10, 12, 14, 16, 18], [11, 13, 15, 17, 19]],
# [[30, 32, 34, 36, 38], [31, 33, 35, 37, 39]]
# ]}
#
    # rank-0 belongs to the '0-9,20-29' cpu_affinity_key
    # and sits at index 0 of cpu_socket_gpus_list['0-9,20-29'];
    # therefore, affinity_mask is the collection of all cpu cores
    # at index 0 of cpu_core_groups['0-9,20-29'], that is, [0, 2, 4, 6, 8]
    # and [20, 22, 24, 26, 28].
# affinity_mask = [0, 2, 4, 6, 8, 20, 22, 24, 26, 28]
affinity_mask = list()
cpu_affinity_key = gpu_cpu_affinity_map[local_rank]
cpu_core_idx = cpu_socket_gpus_list[cpu_affinity_key].index(local_rank)
for cpu_core_group in cpu_core_groups[cpu_affinity_key]:
affinity_mask.extend(cpu_core_group[cpu_core_idx])
pid = os.getpid()
os.sched_setaffinity(pid, affinity_mask)
logging.info("Set CPU affinity of rank-%d (Process %d) "
"to %s.", local_rank, pid, str(os.sched_getaffinity(pid)))
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/utils/affinity.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/utils/__init__.py |
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import paddle.distributed as dist
import dllogger
def format_step(step):
"""
    Build the log-message prefix for dllogger based on the step value.
Args:
step(str|tuple): Dllogger step format.
Returns:
s(str): String to print in log.
"""
if isinstance(step, str):
return step
s = ""
if len(step) > 0:
s += f"Epoch: {step[0]} "
if len(step) > 1:
s += f"Step: {step[1]} "
if len(step) > 2:
s += f"Validation Iteration: {step[2]} "
if len(step) == 0:
s = "Summary:"
return s
def setup_loggers(log_file):
"""
Setup logging and dllogger.
Args:
log_file(str): Path to log file.
"""
logging.basicConfig(
level=logging.DEBUG,
format='{asctime}:{levelname}: {message}',
style='{')
if dist.get_rank() == 0:
dllogger.init(backends=[
dllogger.StdOutBackend(
dllogger.Verbosity.DEFAULT, step_format=format_step),
dllogger.JSONStreamBackend(dllogger.Verbosity.VERBOSE, log_file),
])
else:
dllogger.init([])
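# Hedged usage sketch (illustrative only; not part of the original module).
# setup_loggers() is normally called once at startup by the training scripts; it is
# left commented out here because it configures global logging state.
if __name__ == "__main__":
    # setup_loggers("/results/bert_log.json")  # assumed log path
    print(format_step((1, 200)))  # "Epoch: 1 Step: 200 "
    print(format_step(tuple()))   # "Summary:"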
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/utils/logger.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import urllib.request
class SquadDownloader:
def __init__(self, save_path):
self.save_path = save_path + '/squad'
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
if not os.path.exists(self.save_path + '/v1.1'):
os.makedirs(self.save_path + '/v1.1')
if not os.path.exists(self.save_path + '/v2.0'):
os.makedirs(self.save_path + '/v2.0')
self.download_urls = {
'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json':
'v1.1/train-v1.1.json',
'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json':
'v1.1/dev-v1.1.json',
'https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/':
'v1.1/evaluate-v1.1.py',
'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json':
'v2.0/train-v2.0.json',
'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json':
'v2.0/dev-v2.0.json',
'https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/':
'v2.0/evaluate-v2.0.py',
}
def download(self):
for item in self.download_urls:
url = item
file = self.download_urls[item]
print('Downloading:', url)
if os.path.isfile(self.save_path + '/' + file):
print('** Download file already exists, skipping download')
else:
response = urllib.request.urlopen(url)
with open(self.save_path + '/' + file, "wb") as handle:
handle.write(response.read())
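# Hedged usage sketch (not part of the original file). Instantiating the class creates
# <save_path>/squad/v1.1 and <save_path>/squad/v2.0; download() then fetches the SQuAD
# files over the network, so only run this where downloads are acceptable.
if __name__ == "__main__":
    SquadDownloader("./download").download()  # "./download" is an assumed save path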
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/data/SquadDownloader.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from WikiDownloader import WikiDownloader
from BooksDownloader import BooksDownloader
from SquadDownloader import SquadDownloader
class Downloader:
def __init__(self, dataset_name, save_path):
self.dataset_name = dataset_name
self.save_path = save_path
def download(self):
if self.dataset_name == 'bookscorpus':
self.download_bookscorpus()
elif self.dataset_name == 'wikicorpus_en':
self.download_wikicorpus('en')
elif self.dataset_name == 'wikicorpus_zh':
self.download_wikicorpus('zh')
elif self.dataset_name == 'squad':
self.download_squad()
elif self.dataset_name == 'all':
self.download_bookscorpus()
self.download_wikicorpus('en')
self.download_wikicorpus('zh')
self.download_squad()
else:
print(self.dataset_name)
assert False, 'Unknown dataset_name provided to downloader'
def download_bookscorpus(self):
downloader = BooksDownloader(self.save_path)
downloader.download()
def download_wikicorpus(self, language):
downloader = WikiDownloader(language, self.save_path)
downloader.download()
def download_squad(self):
downloader = SquadDownloader(self.save_path)
downloader.download()
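# Hedged usage sketch (not part of the original file). The dispatcher is normally
# driven by bertPrep.py, but it can also be used directly.
if __name__ == "__main__":
    Downloader("squad", "./download").download()  # "./download" is an assumed save path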
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/data/Downloader.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
class BookscorpusTextFormatting:
def __init__(self, books_path, output_filename, recursive=False):
self.books_path = books_path
self.recursive = recursive
self.output_filename = output_filename
# This puts one book per line
def merge(self):
with open(self.output_filename, mode='w', newline='\n') as ofile:
for filename in glob.glob(
self.books_path + '/' + '*.txt', recursive=True):
with open(
filename, mode='r', encoding='utf-8-sig',
newline='\n') as file:
for line in file:
if line.strip() != '':
ofile.write(line.strip() + ' ')
ofile.write("\n\n")
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/data/BookscorpusTextFormatting.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/data/__init__.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
class WikiDownloader:
def __init__(self, language, save_path):
self.save_path = save_path + '/wikicorpus_' + language
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
self.language = language
# Use a mirror from https://dumps.wikimedia.org/mirrors.html if the below links do not work
self.download_urls = {
'en':
'https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2',
'zh':
'https://dumps.wikimedia.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2'
}
self.output_files = {
'en': 'wikicorpus_en.xml.bz2',
'zh': 'wikicorpus_zh.xml.bz2'
}
def download(self):
if self.language in self.download_urls:
url = self.download_urls[self.language]
filename = self.output_files[self.language]
print('Downloading:', url)
if os.path.isfile(self.save_path + '/' + filename):
print('** Download file already exists, skipping download')
else:
cmd = [
'wget', url,
f"--output-document={os.path.join(self.save_path, filename)}"
]
print('Running:', cmd)
status = subprocess.run(cmd)
if status.returncode != 0:
raise RuntimeError('Wiki download not successful')
# Always unzipping since this is relatively fast and will overwrite
print('Unzipping:', self.output_files[self.language])
subprocess.run('bzip2 -dk ' + self.save_path + '/' + filename,
shell=True,
check=True)
else:
assert False, 'WikiDownloader not implemented for this language yet.'
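# Hedged usage sketch (not part of the original file). The English dump is tens of
# gigabytes, so this is illustrative rather than something to run casually.
if __name__ == "__main__":
    WikiDownloader("en", "./download").download()  # "./download" is an assumed save path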
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/data/WikiDownloader.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
class WikicorpusTextFormatting:
def __init__(self, wiki_path, output_filename, recursive=False):
self.wiki_path = wiki_path
self.recursive = recursive
self.output_filename = output_filename
# This puts one article per line
def merge(self):
with open(self.output_filename, mode='w', newline='\n') as ofile:
for dirname in glob.glob(self.wiki_path + '/*/', recursive=False):
for filename in glob.glob(
dirname + 'wiki_*', recursive=self.recursive):
print(filename)
article_lines = []
article_open = False
with open(filename, mode='r', newline='\n') as file:
for line in file:
if '<doc id=' in line:
article_open = True
elif '</doc>' in line:
article_open = False
for oline in article_lines[1:]:
if oline != '\n':
ofile.write(oline.rstrip() + " ")
ofile.write("\n\n")
article_lines = []
else:
if article_open:
article_lines.append(line)
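# Hedged usage sketch (not part of the original file): expects the directory layout
# produced by WikiExtractor (<wiki_path>/<subdir>/wiki_*). Both paths are assumptions
# and the output directory must already exist.
if __name__ == "__main__":
    formatter = WikicorpusTextFormatting(
        "./extracted/wikicorpus_en",
        "./formatted_one_article_per_line/wikicorpus_en_one_article_per_line.txt",
        recursive=True)
    formatter.merge()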
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/data/WikicorpusTextFormatting.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import pprint
import subprocess
import BookscorpusTextFormatting
import Downloader
import TextSharding
import WikicorpusTextFormatting
def main(args):
working_dir = os.environ['BERT_PREP_WORKING_DIR']
print('Working Directory:', working_dir)
print('Action:', args.action)
print('Dataset Name:', args.dataset)
if args.input_files:
args.input_files = args.input_files.split(',')
hdf5_tfrecord_folder_prefix = "_lower_case_" + str(args.do_lower_case) + "_seq_len_" + str(args.max_seq_length) \
+ "_max_pred_" + str(args.max_predictions_per_seq) + "_masked_lm_prob_" + str(args.masked_lm_prob) \
+ "_random_seed_" + str(args.random_seed) + "_dupe_factor_" + str(args.dupe_factor)
directory_structure = {
'download': working_dir + '/download', # Downloaded and decompressed
'extracted': working_dir +
'/extracted', # Extracted from whatever the initial format is (e.g., wikiextractor)
'formatted': working_dir +
'/formatted_one_article_per_line', # This is the level where all sources should look the same
'sharded': working_dir + '/sharded_' + "training_shards_" +
str(args.n_training_shards) + "_test_shards_" + str(args.n_test_shards)
+ "_fraction_" + str(args.fraction_test_set),
'tfrecord': working_dir + '/tfrecord' + hdf5_tfrecord_folder_prefix,
'hdf5': working_dir + '/hdf5' + hdf5_tfrecord_folder_prefix
}
print('\nDirectory Structure:')
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(directory_structure)
print('')
if args.action == 'download':
if not os.path.exists(directory_structure['download']):
os.makedirs(directory_structure['download'])
downloader = Downloader.Downloader(args.dataset,
directory_structure['download'])
downloader.download()
elif args.action == 'text_formatting':
assert args.dataset != 'squad', 'Cannot perform text_formatting on squad or pretrained weights'
if not os.path.exists(directory_structure['extracted']):
os.makedirs(directory_structure['extracted'])
if not os.path.exists(directory_structure['formatted']):
os.makedirs(directory_structure['formatted'])
if args.dataset == 'bookscorpus':
books_path = directory_structure['download'] + '/bookscorpus'
#books_path = directory_structure['download']
output_filename = directory_structure[
'formatted'] + '/bookscorpus_one_book_per_line.txt'
books_formatter = BookscorpusTextFormatting.BookscorpusTextFormatting(
books_path, output_filename, recursive=True)
books_formatter.merge()
elif args.dataset == 'wikicorpus_en':
if args.skip_wikiextractor == 0:
path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py'
wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure[
'download'] + '/' + args.dataset + '/wikicorpus_en.xml ' + '-b 100M --processes ' + str(
args.n_processes) + ' -o ' + directory_structure[
'extracted'] + '/' + args.dataset
print('WikiExtractor Command:', wikiextractor_command)
# wikiextractor_process = subprocess.run(wikiextractor_command,
subprocess.run(wikiextractor_command, shell=True, check=True)
#wikiextractor_process.communicate()
wiki_path = directory_structure['extracted'] + '/wikicorpus_en'
output_filename = directory_structure[
'formatted'] + '/wikicorpus_en_one_article_per_line.txt'
wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(
wiki_path, output_filename, recursive=True)
wiki_formatter.merge()
elif args.dataset == 'wikicorpus_zh':
assert False, 'wikicorpus_zh not fully supported at this time. The simplified/tradition Chinese data needs to be translated and properly segmented still, and should work once this step is added.'
if args.skip_wikiextractor == 0:
path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py'
wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure[
'download'] + '/' + args.dataset + '/wikicorpus_zh.xml ' + '-b 100M --processes ' + str(
args.n_processes) + ' -o ' + directory_structure[
'extracted'] + '/' + args.dataset
print('WikiExtractor Command:', wikiextractor_command)
# wikiextractor_process = subprocess.run(wikiextractor_command,
subprocess.run(wikiextractor_command, shell=True, check=True)
#wikiextractor_process.communicate()
wiki_path = directory_structure['extracted'] + '/wikicorpus_zh'
output_filename = directory_structure[
'formatted'] + '/wikicorpus_zh_one_article_per_line.txt'
wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(
wiki_path, output_filename, recursive=True)
wiki_formatter.merge()
assert os.stat(
output_filename
).st_size > 0, 'File glob did not pick up extracted wiki files from WikiExtractor.'
elif args.action == 'sharding':
# Note: books+wiki requires user to provide list of input_files (comma-separated with no spaces)
if args.dataset == 'bookscorpus' or 'wikicorpus' in args.dataset or 'books_wiki' in args.dataset:
if args.input_files is None:
if args.dataset == 'bookscorpus':
args.input_files = [
directory_structure['formatted'] +
'/bookscorpus_one_book_per_line.txt'
]
elif args.dataset == 'wikicorpus_en':
args.input_files = [
directory_structure['formatted'] +
'/wikicorpus_en_one_article_per_line.txt'
]
elif args.dataset == 'wikicorpus_zh':
args.input_files = [
directory_structure['formatted'] +
'/wikicorpus_zh_one_article_per_line.txt'
]
elif args.dataset == 'books_wiki_en_corpus':
args.input_files = [
directory_structure['formatted'] +
'/bookscorpus_one_book_per_line.txt',
directory_structure['formatted'] +
'/wikicorpus_en_one_article_per_line.txt'
]
output_file_prefix = directory_structure[
'sharded'] + '/' + args.dataset + '/' + args.dataset
if not os.path.exists(directory_structure['sharded']):
os.makedirs(directory_structure['sharded'])
if not os.path.exists(directory_structure['sharded'] + '/' +
args.dataset):
os.makedirs(directory_structure['sharded'] + '/' +
args.dataset)
# Segmentation is here because all datasets look the same in one article/book/whatever per line format, and
# it seemed unnecessarily complicated to add an additional preprocessing step to call just for this.
# Different languages (e.g., Chinese simplified/traditional) may require translation and
# other packages to be called from here -- just add a conditional branch for those extra steps
segmenter = TextSharding.NLTKSegmenter()
sharding = TextSharding.Sharding(
args.input_files, output_file_prefix, args.n_training_shards,
args.n_test_shards, args.fraction_test_set)
sharding.load_articles()
sharding.segment_articles_into_sentences(segmenter)
sharding.distribute_articles_over_shards()
sharding.write_shards_to_disk()
else:
assert False, 'Unsupported dataset for sharding'
elif args.action == 'create_tfrecord_files':
        assert False, 'TFrecord creation not supported in this model example release.'
if not os.path.exists(directory_structure['tfrecord'] + "/" +
args.dataset):
os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset)
def create_record_worker(filename_prefix,
shard_id,
output_format='tfrecord'):
bert_preprocessing_command = 'python /workspace/bert/create_pretraining_data.py'
bert_preprocessing_command += ' --input_file=' + directory_structure[
'sharded'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(
shard_id) + '.txt'
bert_preprocessing_command += ' --output_file=' + directory_structure[
'tfrecord'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(
shard_id) + '.' + output_format
bert_preprocessing_command += ' --vocab_file=' + args.vocab_file
bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else ''
bert_preprocessing_command += ' --max_seq_length=' + str(
args.max_seq_length)
bert_preprocessing_command += ' --max_predictions_per_seq=' + str(
args.max_predictions_per_seq)
bert_preprocessing_command += ' --masked_lm_prob=' + str(
args.masked_lm_prob)
bert_preprocessing_command += ' --random_seed=' + str(
args.random_seed)
bert_preprocessing_command += ' --dupe_factor=' + str(
args.dupe_factor)
bert_preprocessing_process = subprocess.Popen(
bert_preprocessing_command, shell=True)
last_process = bert_preprocessing_process
# This could be better optimized (fine if all take equal time)
if shard_id % args.n_processes == 0 and shard_id > 0:
bert_preprocessing_process.wait()
return last_process
output_file_prefix = args.dataset
for i in range(args.n_training_shards):
last_process = create_record_worker(
output_file_prefix + '_training', i)
last_process.wait()
for i in range(args.n_test_shards):
last_process = create_record_worker(output_file_prefix + '_test',
i)
last_process.wait()
elif args.action == 'create_hdf5_files':
last_process = None
if not os.path.exists(directory_structure['hdf5'] + "/" +
args.dataset):
os.makedirs(directory_structure['hdf5'] + "/" + args.dataset)
def create_record_worker(filename_prefix,
shard_id,
output_format='hdf5'):
bert_preprocessing_command = 'python /workspace/bert/create_pretraining_data.py'
bert_preprocessing_command += ' --input_file=' + directory_structure[
'sharded'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(
shard_id) + '.txt'
bert_preprocessing_command += ' --output_file=' + directory_structure[
'hdf5'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(
shard_id) + '.' + output_format
bert_preprocessing_command += ' --vocab_file=' + args.vocab_file
bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else ''
bert_preprocessing_command += ' --max_seq_length=' + str(
args.max_seq_length)
bert_preprocessing_command += ' --max_predictions_per_seq=' + str(
args.max_predictions_per_seq)
bert_preprocessing_command += ' --masked_lm_prob=' + str(
args.masked_lm_prob)
bert_preprocessing_command += ' --random_seed=' + str(
args.random_seed)
bert_preprocessing_command += ' --dupe_factor=' + str(
args.dupe_factor)
bert_preprocessing_process = subprocess.Popen(
bert_preprocessing_command, shell=True)
last_process = bert_preprocessing_process
# This could be better optimized (fine if all take equal time)
if shard_id % args.n_processes == 0 and shard_id > 0:
bert_preprocessing_process.wait()
return last_process
output_file_prefix = args.dataset
for i in range(args.n_training_shards):
last_process = create_record_worker(
output_file_prefix + '_training', i)
last_process.wait()
for i in range(args.n_test_shards):
last_process = create_record_worker(output_file_prefix + '_test',
i)
last_process.wait()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Preprocessing Application for Everything BERT-related')
parser.add_argument(
'--action',
type=str,
help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords',
choices={
            'download', # Download and verify md5/sha sums
'text_formatting', # Convert into a file that contains one article/book per line
'sharding', # Convert previous formatted text into shards containing one sentence per line
'create_tfrecord_files', # Turn each shard into a TFrecord with masking and next sentence prediction info
'create_hdf5_files' # Turn each shard into a HDF5 file with masking and next sentence prediction info
})
parser.add_argument(
'--dataset',
type=str,
help='Specify the dataset to perform --action on',
choices={
'bookscorpus', 'wikicorpus_en', 'wikicorpus_zh',
'books_wiki_en_corpus', 'squad', 'all'
})
parser.add_argument(
'--input_files',
type=str,
help='Specify the input files in a comma-separated list (no spaces)')
parser.add_argument(
'--n_training_shards',
type=int,
help='Specify the number of training shards to generate',
default=256)
parser.add_argument(
'--n_test_shards',
type=int,
help='Specify the number of test shards to generate',
default=256)
parser.add_argument(
'--fraction_test_set',
type=float,
help='Specify the fraction (0..1) of the data to withhold for the test data split (based on number of sequences)',
default=0.1)
parser.add_argument(
'--segmentation_method',
type=str,
help='Specify your choice of sentence segmentation',
choices={'nltk'},
default='nltk')
parser.add_argument(
'--n_processes',
type=int,
help='Specify the max number of processes to allow at one time',
default=4)
parser.add_argument(
'--random_seed',
type=int,
help='Specify the base seed to use for any random number generation',
default=12345)
parser.add_argument(
'--dupe_factor',
type=int,
help='Specify the duplication factor',
default=5)
parser.add_argument(
'--masked_lm_prob',
type=float,
help='Specify the probability for masked lm',
default=0.15)
parser.add_argument(
'--max_seq_length',
type=int,
help='Specify the maximum sequence length',
default=512)
parser.add_argument(
'--max_predictions_per_seq',
type=int,
help='Specify the maximum number of masked words per sequence',
default=20)
parser.add_argument(
'--do_lower_case',
type=int,
help='Specify whether it is cased (0) or uncased (1) (any number greater than 0 will be treated as uncased)',
default=1)
parser.add_argument(
'--vocab_file',
type=str,
        help='Specify absolute path to vocab file to use')
parser.add_argument(
'--skip_wikiextractor',
type=int,
help='Specify whether to skip wikiextractor step 0=False, 1=True',
default=0)
parser.add_argument(
'--interactive_json_config_generator',
type=str,
help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords'
)
main(parser.parse_args())
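# Example invocations (illustrative; paths and numeric values are assumptions, and
# the BERT_PREP_WORKING_DIR environment variable must point at the working directory):
#   python3 bertPrep.py --action download          --dataset squad
#   python3 bertPrep.py --action text_formatting   --dataset wikicorpus_en
#   python3 bertPrep.py --action sharding          --dataset books_wiki_en_corpus
#   python3 bertPrep.py --action create_hdf5_files --dataset books_wiki_en_corpus \
#       --max_seq_length 512 --max_predictions_per_seq 80 \
#       --vocab_file /path/to/vocab.txt --do_lower_case 1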
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/data/bertPrep.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
class BooksDownloader:
def __init__(self, save_path):
        self.save_path = save_path
def download(self):
bookscorpus_download_command = 'python3 /workspace/bookcorpus/download_files.py --list /workspace/bookcorpus/url_list.jsonl --out'
bookscorpus_download_command += ' ' + self.save_path + '/bookscorpus'
bookscorpus_download_command += ' --trash-bad-count'
subprocess.run(bookscorpus_download_command, shell=True, check=True)
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/data/BooksDownloader.py |
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from itertools import islice
import multiprocessing
import statistics
class Sharding:
def __init__(self, input_files, output_name_prefix, n_training_shards,
n_test_shards, fraction_test_set):
assert len(input_files
) > 0, 'The input file list must contain at least one file.'
assert n_training_shards > 0, 'There must be at least one output shard.'
assert n_test_shards > 0, 'There must be at least one output shard.'
self.n_training_shards = n_training_shards
self.n_test_shards = n_test_shards
self.fraction_test_set = fraction_test_set
self.input_files = input_files
self.output_name_prefix = output_name_prefix
self.output_training_identifier = '_training'
self.output_test_identifier = '_test'
self.output_file_extension = '.txt'
self.articles = {} # key: integer identifier, value: list of articles
self.sentences = {
} # key: integer identifier, value: list of sentences
self.output_training_files = {
} # key: filename, value: list of articles to go into file
self.output_test_files = {
} # key: filename, value: list of articles to go into file
self.init_output_files()
# Remember, the input files contain one article per line (the whitespace check is to skip extraneous blank lines)
def load_articles(self):
print('Start: Loading Articles')
global_article_count = 0
for input_file in self.input_files:
print('input file:', input_file)
with open(input_file, mode='r', newline='\n') as f:
for _, line in enumerate(f):
if line.strip():
self.articles[global_article_count] = line.rstrip()
global_article_count += 1
print('End: Loading Articles: There are',
len(self.articles), 'articles.')
def segment_articles_into_sentences(self, segmenter):
print('Start: Sentence Segmentation')
if len(self.articles) == 0:
self.load_articles()
assert len(
self.articles
) != 0, 'Please check that input files are present and contain data.'
use_multiprocessing = 'serial'
def chunks(data, size=len(self.articles)):
it = iter(data)
for _ in range(0, len(data), size):
yield {k: data[k] for k in islice(it, size)}
if use_multiprocessing == 'manager':
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
n_processes = 7 # in addition to the main process, total = n_proc+1
def work(articles, return_dict):
sentences = {}
for i, article in enumerate(articles):
sentences[i] = segmenter.segment_string(articles[article])
if i % 5000 == 0:
print('Segmenting article', i)
return_dict.update(sentences)
for item in chunks(self.articles, len(self.articles)):
p = multiprocessing.Process(
target=work, args=(item, return_dict))
# Busy wait
while len(jobs) >= n_processes:
pass
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
elif use_multiprocessing == 'queue':
multiprocessing.Queue()
jobs = []
for item in chunks(self.articles, len(self.articles)):
pass
else: # serial option
for i, article in enumerate(self.articles):
self.sentences[i] = segmenter.segment_string(self.articles[
article])
if i % 5000 == 0:
print('Segmenting article', i)
print('End: Sentence Segmentation')
def init_output_files(self):
print('Start: Init Output Files')
assert len(
self.output_training_files
) == 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.'
assert len(
self.output_test_files
) == 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.'
for i in range(self.n_training_shards):
name = self.output_name_prefix + self.output_training_identifier + '_' + str(
i) + self.output_file_extension
self.output_training_files[name] = []
for i in range(self.n_test_shards):
name = self.output_name_prefix + self.output_test_identifier + '_' + str(
i) + self.output_file_extension
self.output_test_files[name] = []
print('End: Init Output Files')
def get_sentences_per_shard(self, shard):
result = 0
for article_id in shard:
result += len(self.sentences[article_id])
return result
def distribute_articles_over_shards(self):
print('Start: Distribute Articles Over Shards')
assert len(
self.articles
) >= self.n_training_shards + self.n_test_shards, 'There are fewer articles than shards. Please add more data or reduce the number of shards requested.'
# Create dictionary with - key: sentence count per article, value: article id number
sentence_counts = defaultdict(lambda: [])
max_sentences = 0
total_sentences = 0
for article_id in self.sentences:
current_length = len(self.sentences[article_id])
sentence_counts[current_length].append(article_id)
max_sentences = max(max_sentences, current_length)
total_sentences += current_length
n_sentences_assigned_to_training = int(
(1 - self.fraction_test_set) * total_sentences)
nominal_sentences_per_training_shard = n_sentences_assigned_to_training // self.n_training_shards
nominal_sentences_per_test_shard = (
total_sentences - n_sentences_assigned_to_training
) // self.n_test_shards
consumed_article_set = set({})
unused_article_set = set(self.articles.keys())
# Make first pass and add one article worth of lines per file
for file in self.output_training_files:
current_article_id = sentence_counts[max_sentences][-1]
sentence_counts[max_sentences].pop(-1)
self.output_training_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
# Maintain the max sentence count
while len(sentence_counts[
max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
if len(self.sentences[current_article_id]
) > nominal_sentences_per_training_shard:
nominal_sentences_per_training_shard = len(self.sentences[
current_article_id])
print(
'Warning: A single article contains more than the nominal number of sentences per training shard.'
)
for file in self.output_test_files:
current_article_id = sentence_counts[max_sentences][-1]
sentence_counts[max_sentences].pop(-1)
self.output_test_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
# Maintain the max sentence count
while len(sentence_counts[
max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
if len(self.sentences[
current_article_id]) > nominal_sentences_per_test_shard:
nominal_sentences_per_test_shard = len(self.sentences[
current_article_id])
print(
'Warning: A single article contains more than the nominal number of sentences per test shard.'
)
training_counts = []
test_counts = []
for shard in self.output_training_files:
training_counts.append(
self.get_sentences_per_shard(self.output_training_files[
shard]))
for shard in self.output_test_files:
test_counts.append(
self.get_sentences_per_shard(self.output_test_files[shard]))
training_median = statistics.median(training_counts)
test_median = statistics.median(test_counts)
# Make subsequent passes over files to find articles to add without going over limit
history_remaining = []
n_history_remaining = 4
while len(consumed_article_set) < len(self.articles):
for fidx, file in enumerate(self.output_training_files):
nominal_next_article_size = min(
nominal_sentences_per_training_shard -
training_counts[fidx], max_sentences)
# Maintain the max sentence count
while len(sentence_counts[
max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
while len(sentence_counts[nominal_next_article_size]
) == 0 and nominal_next_article_size > 0:
nominal_next_article_size -= 1
if nominal_next_article_size not in sentence_counts or nominal_next_article_size == 0 or training_counts[
fidx] > training_median:
continue # skip adding to this file, will come back later if no file can accept unused articles
current_article_id = sentence_counts[
nominal_next_article_size][-1]
sentence_counts[nominal_next_article_size].pop(-1)
self.output_training_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
for fidx, file in enumerate(self.output_test_files):
nominal_next_article_size = min(
nominal_sentences_per_test_shard - test_counts[fidx],
max_sentences)
# Maintain the max sentence count
while len(sentence_counts[
max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
while len(sentence_counts[nominal_next_article_size]
) == 0 and nominal_next_article_size > 0:
nominal_next_article_size -= 1
if nominal_next_article_size not in sentence_counts or nominal_next_article_size == 0 or test_counts[
fidx] > test_median:
continue # skip adding to this file, will come back later if no file can accept unused articles
current_article_id = sentence_counts[
nominal_next_article_size][-1]
sentence_counts[nominal_next_article_size].pop(-1)
self.output_test_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
# If unable to place articles a few times, bump up nominal sizes by fraction until articles get placed
if len(history_remaining) == n_history_remaining:
history_remaining.pop(0)
history_remaining.append(len(unused_article_set))
history_same = True
for i in range(1, len(history_remaining)):
history_same = history_same and (
history_remaining[i - 1] == history_remaining[i])
if history_same:
nominal_sentences_per_training_shard += 1
# nominal_sentences_per_test_shard += 1
training_counts = []
test_counts = []
for shard in self.output_training_files:
training_counts.append(
self.get_sentences_per_shard(self.output_training_files[
shard]))
for shard in self.output_test_files:
test_counts.append(
self.get_sentences_per_shard(self.output_test_files[
shard]))
training_median = statistics.median(training_counts)
test_median = statistics.median(test_counts)
print('Distributing data over shards:',
len(unused_article_set), 'articles remaining.')
if len(unused_article_set) != 0:
print('Warning: Some articles did not make it into output files.')
for shard in self.output_training_files:
print('Training shard:',
self.get_sentences_per_shard(self.output_training_files[
shard]))
for shard in self.output_test_files:
print('Test shard:',
self.get_sentences_per_shard(self.output_test_files[shard]))
print('End: Distribute Articles Over Shards')
def write_shards_to_disk(self):
print('Start: Write Shards to Disk')
for shard in self.output_training_files:
self.write_single_shard(shard, self.output_training_files[shard])
for shard in self.output_test_files:
self.write_single_shard(shard, self.output_test_files[shard])
print('End: Write Shards to Disk')
def write_single_shard(self, shard_name, shard):
with open(shard_name, mode='w', newline='\n') as f:
for article_id in shard:
for line in self.sentences[article_id]:
f.write(line + '\n')
f.write('\n') # Line break between articles
import nltk
nltk.download('punkt')
class NLTKSegmenter:
    def __init__(self):
pass
def segment_string(self, article):
return nltk.tokenize.sent_tokenize(article)
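# Hedged usage sketch (not part of the original file); it mirrors how bertPrep.py drives
# the sharding step. Input and output paths are assumptions, and the output directory
# is assumed to exist already.
if __name__ == "__main__":
    segmenter = NLTKSegmenter()
    sharding = Sharding(
        input_files=["./formatted_one_article_per_line/wikicorpus_en_one_article_per_line.txt"],
        output_name_prefix="./sharded/wikicorpus_en/wikicorpus_en",
        n_training_shards=256,
        n_test_shards=256,
        fraction_test_set=0.1)
    sharding.load_articles()
    sharding.segment_articles_into_sentences(segmenter)
    sharding.distribute_articles_over_shards()
    sharding.write_shards_to_disk()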
| DeepLearningExamples-master | PaddlePaddle/LanguageModeling/BERT/data/TextSharding.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import pandas as pd
import numpy as np
from pathlib import Path
if __name__ == '__main__':
data_path = sys.argv[1]
# - path containing ieee-fraud-detection data
# https://www.kaggle.com/competitions/ieee-fraud-detection
data_path = Path(data_path)
# - concat data files
train_trn = pd.read_csv(data_path / 'train_transaction.csv')
test_trn = pd.read_csv(data_path / 'test_transaction.csv')
    # - not every TransactionID has an associated identity record ...
data = pd.concat([train_trn, test_trn], axis=0)
user_cols = ['addr1', 'addr2', 'card1', 'card2', 'card3', 'card4', 'card5', 'card6']
# - product columns that can be used to create unique id
product_cols = ['ProductCD', 'R_emaildomain']
for c in user_cols:
data.loc[:, c] = data[c].fillna('').astype(str)
for c in product_cols:
data.loc[:, c] = data[c].fillna('').astype(str)
data['user_id'] = ''
user_cols_selected = ['card1'] # - select only card1
for c in user_cols_selected:
data.loc[:, 'user_id'] = data['user_id'] + data[c]
data['product_id'] = ''
for c in product_cols:
data.loc[:, 'product_id'] = data['product_id'] + data[c]
# - drop id cols
data.drop(columns=user_cols + product_cols, inplace=True)
# - select last transaction
data = data.sort_values('TransactionDT').groupby(['user_id', 'product_id']).tail(1)
# - dump data
save_path = os.path.join(data_path, 'data.csv')
data.to_csv(save_path, index=False)
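# Example invocation (the directory is an assumption; it should contain the extracted
# train_transaction.csv and test_transaction.csv from the Kaggle competition):
#   python ieee_fraud.py /path/to/ieee-fraud-detection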
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/scripts/ieee_fraud.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pandas as pd
from pathlib import Path
if __name__ == '__main__':
tabformer_path = sys.argv[1]
save_path = Path(tabformer_path).parent
save_path = save_path / 'card_transaction.v2.csv'
df = pd.read_csv(tabformer_path)
# - create seconds columns to sort transactions by
t = df["Time"].str.split(":", expand=True)
t = t[0].apply(int) * 3600 + t[1].apply(int) * 60
df.loc[:, "Seconds"] = t
df['Card ID'] = df["User"].astype(str) + df["Card"].astype(str)
sorted_df = df.sort_values(by="Seconds")
# - get last element
tdf = sorted_df.groupby(by=["Card ID", "Merchant Name"],
axis=0).tail(1).reset_index(drop=True)
tdf = tdf.drop(columns=["Card ID", "Seconds"])
# - save data
tdf.to_csv(save_path, index=False)
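# Example invocation (the path is an assumption; it should point at the raw TabFormer
# transaction CSV containing the "User", "Card", "Time", and "Merchant Name" columns):
#   python time_filter_tabformer.py /path/to/tabformer/transactions.csv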
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/scripts/time_filter_tabformer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pandas as pd
from pathlib import Path
if __name__ == '__main__':
data_path = sys.argv[1]
save_path = Path(data_path).parent
save_path = save_path / 'data.csv'
df = pd.read_csv(data_path)
df['user'] = df['first'] + df['last']
df = df.groupby(['user', 'merchant'], axis=0).tail(1).reset_index(drop=True)
df = df.drop(columns=['user'])
# - save data
df.to_csv(save_path, index=False)
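# Example invocation (the path is an assumption; the CSV must contain the "first",
# "last", and "merchant" columns used above):
#   python time_filter_credit.py /path/to/credit_transactions.csv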
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/scripts/time_filter_credit.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import traceback
from syngen.cli import get_parser
logging.basicConfig()
logging.root.setLevel(logging.NOTSET)
logger = logging.getLogger(__name__)
log = logger
def get_args():
parser = get_parser()
try:
args = parser.parse_args()
except TypeError:
parser.print_help()
sys.exit(0)
return args, sys.argv
def main():
args, argv = get_args()
log.info("=========================================")
log.info("| Synthetic Graph Generation Tool |")
log.info("=========================================")
try:
_ = args.action(args)
except Exception as error:
print(f"{error}")
traceback.print_tb(error.__traceback__)
sys.exit(0)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/__main__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from time import perf_counter, time
import dgl
import numpy as np
import torch
from syngen.benchmark.data_loader.datasets.edge_ds import EdgeDS
from syngen.benchmark.models import MODELS
from syngen.utils.types import MetaData
from syngen.configuration import SynGenDatasetFeatureSpec
logger = logging.getLogger(__name__)
log = logger
_NAME = "edge classification"
def train_ec(
args,
finetune_feature_spec: SynGenDatasetFeatureSpec,
*,
pretrain_feature_spec: SynGenDatasetFeatureSpec = None,
):
"""Example edge classification training loop to pre-train on generated dataset
with option to further finetune on a `finetune_source` dataset.
"""
model = MODELS[args.model]
optimizer = None
out = {}
dataset = EdgeDS(**vars(args))
# - pre-training
if pretrain_feature_spec is not None:
# - dataset
g, edge_ids = dataset.get_graph(
pretrain_feature_spec, args.pretraining_edge_name
)
sampler = dgl.dataloading.MultiLayerFullNeighborSampler(args.n_layers)
dataloader = dgl.dataloading.EdgeDataLoader(
g,
edge_ids,
sampler,
batch_size=args.batch_size,
shuffle=args.shuffle,
drop_last=False,
num_workers=args.num_workers,
)
# - Model
in_feats = g.ndata.get("feat").shape[1]
in_feats_edge = g.edata.get("feat").shape[1]
model = model(in_dim=in_feats, in_dim_edge=in_feats_edge, **vars(args))
model = model.cuda()
# - Optimizer
optimizer = torch.optim.Adam(
model.parameters(),
lr=args.learning_rate,
weight_decay=args.weight_decay,
)
log.info("Running pretraining ...")
losses, times = [], []
best_val_acc, best_test_acc = 0, 0
# - Training loop
for e in range(args.pretrain_epochs):
if args.timeit:
            t0 = time()
train_acc, val_acc, test_acc, losses = train_epoch(
model, dataloader, optimizer
)
if args.timeit:
            t1 = time()
times.append(t1 - t0)
val_acc = np.mean(val_acc)
test_acc = np.mean(test_acc)
train_acc = np.mean(train_acc)
loss = np.mean(losses)
if best_val_acc < val_acc:
best_val_acc = val_acc
best_test_acc = test_acc
if e % args.log_interval == 0:
log.info(
"Pretraining epoch {}, loss: {:.3f}, val acc: {:.3f} (best {:.3f}), test acc: {:.3f} (best {:.3f})".format(
e, loss, val_acc, best_val_acc, test_acc, best_test_acc
)
)
out = {
"pretrain-loss": loss,
"pretrain-val-acc": val_acc,
"pretrain-test-acc": test_acc,
**out,
}
if args.timeit:
out["pretrain-epoch-times"] = times
g, edge_ids = dataset.get_graph(
finetune_feature_spec, args.edge_name,
)
sampler = dgl.dataloading.MultiLayerFullNeighborSampler(args.n_layers)
dataloader = dgl.dataloading.EdgeDataLoader(
g,
edge_ids,
sampler,
batch_size=args.batch_size,
shuffle=args.shuffle,
drop_last=False,
num_workers=args.num_workers,
)
if optimizer is None:
in_feats = g.ndata.get("feat").shape[1]
in_feats_edge = g.edata.get("feat").shape[1]
model = model(
in_dim=in_feats, in_dim_edge=in_feats_edge, **vars(args)
)
model = model.cuda()
optimizer = torch.optim.Adam(
model.parameters(),
lr=args.learning_rate,
weight_decay=args.weight_decay,
)
# - finetune
    best_val_acc, best_test_acc = 0, 0
    times = []  # per-epoch times for the finetuning stage (also defined when pretraining is skipped)
for e in range(args.finetune_epochs):
if args.timeit:
            t0 = time()
train_acc, val_acc, test_acc, losses = train_epoch(
model, dataloader, optimizer
)
if args.timeit:
            t1 = time()
times.append(t1 - t0)
val_acc = np.mean(val_acc)
test_acc = np.mean(test_acc)
train_acc = np.mean(train_acc)
loss = np.mean(losses)
if best_val_acc < val_acc:
best_val_acc = val_acc
best_test_acc = test_acc
if e % args.log_interval == 0:
log.info(
"Finetuning: In epoch {}, loss: {:.3f}, val acc: {:.3f} (best {:.3f}), test acc: {:.3f} (best {:.3f})".format(
e, loss, val_acc, best_val_acc, test_acc, best_test_acc
)
)
out = {
"finetune-loss": loss,
"finetune-val-acc": val_acc,
"finetune-test-acc": test_acc,
**out,
}
if args.timeit:
out["finetune-epoch-times"] = times
return out
def train_epoch(model, dataloader, optimizer, verbose=False):
train_acc = []
val_acc = []
test_acc = []
losses = []
if verbose:
times = []
epoch_start = perf_counter()
for input_nodes, edge_subgraph, blocks in dataloader:
blocks = [b.to(torch.device("cuda")) for b in blocks]
edge_subgraph = edge_subgraph.to(torch.device("cuda"))
input_features = blocks[0].srcdata["feat"]
edge_labels = edge_subgraph.edata["labels"]
edge_features = None
if "feat" in edge_subgraph.edata:
edge_features = edge_subgraph.edata["feat"]
edge_predictions = model(
blocks=blocks,
edge_subgraph=edge_subgraph,
input_features=input_features,
edge_features=edge_features,
)
train_mask = edge_subgraph.edata["train_mask"]
val_mask = edge_subgraph.edata["val_mask"]
test_mask = edge_subgraph.edata["test_mask"]
loss = model.loss(
edge_predictions[train_mask],
torch.nn.functional.one_hot(
edge_labels[train_mask].long(),
num_classes=edge_predictions.shape[-1],
).float(),
)
# - store results
losses.append(loss.item())
preds = edge_predictions.argmax(1)
train_acc.append(
(preds[train_mask] == edge_labels[train_mask]).float().mean().item()
)
val_acc.append(
(preds[val_mask] == edge_labels[val_mask]).float().mean().item()
)
test_acc.append(
(preds[test_mask] == edge_labels[test_mask]).float().mean().item()
)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if verbose:
times.append(perf_counter() - epoch_start)
epoch_start = perf_counter()
if verbose:
return train_acc, val_acc, test_acc, losses, times
return train_acc, val_acc, test_acc, losses
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/tasks/ec.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
from .ec import train_ec
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/tasks/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
from .gat_ec import GATEC
from .gcn_ec import GCNEC
MODELS = {
"gat_ec": GATEC,
"gcn_ec": GCNEC,
}
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/models/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
from syngen.benchmark.models.layers.gat_layers import (
CustomGATLayer,
CustomGATLayerEdgeReprFeat,
CustomGATLayerIsotropic,
GATLayer,
)
from syngen.benchmark.models.layers.score_predictor import ScorePredictor
class GATEC(nn.Module):
@staticmethod
def add_args(parser):
parser.add_argument(
"--in-feat-dropout",
type=float,
default=0.1,
help="input feature dropout (default: 0.1)",
)
parser.add_argument(
"--dropout", type=float, default=0.1, help="dropout (default: 0.1)"
)
parser.add_argument("--batch_norm", action="store_true", default=False)
parser.add_argument("--n-heads", type=int, default=2)
parser.add_argument("--layer-type", type=str, default="dgl")
parser.add_argument("--residual", action="store_true", default=False)
parser.add_argument("--edge_feat", action="store_true", default=False)
def __init__(
self,
in_dim,
in_dim_edge,
hidden_dim,
out_dim,
num_classes,
n_heads,
in_feat_dropout,
dropout,
n_layers,
readout=False,
edge_feat=False,
batch_norm=False,
residual=False,
layer_type="dgl",
device="cuda",
**kwargs,
):
super().__init__()
self.in_dim = in_dim
self.in_dim_edge = in_dim_edge
self.hidden_dim = hidden_dim
self.out_dim = out_dim
self.num_classes = num_classes
self.n_heads = n_heads
self.dropout = dropout
self.n_layers = n_layers
self.readout = readout
self.batch_norm = batch_norm
self.residual = residual
self.device = device
self.layer_type = {
"dgl": GATLayer,
"edgereprfeat": CustomGATLayerEdgeReprFeat,
"edgefeat": CustomGATLayer,
"isotropic": CustomGATLayerIsotropic,
}.get(layer_type, GATLayer)
self.embedding_h = nn.Linear(
self.in_dim, self.hidden_dim * self.n_heads
)
if self.layer_type != GATLayer:
self.edge_feat = edge_feat
self.embedding_e = nn.Linear(
self.in_dim_edge, self.hidden_dim * self.n_heads
)
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList(
[
self.layer_type(
self.hidden_dim * self.n_heads,
self.hidden_dim,
self.n_heads,
self.dropout,
self.batch_norm,
self.residual,
)
for _ in range(n_layers - 1)
]
)
self.layers.append(
self.layer_type(
self.hidden_dim * self.n_heads,
self.out_dim,
1,
self.dropout,
self.batch_norm,
self.residual,
)
)
self.edge_score = ScorePredictor(2 * out_dim, num_classes)
def forward(
self,
blocks,
edge_subgraph,
input_features,
edge_features,
*args,
**kwargs,
):
h = self.embedding_h(input_features.float())
h = self.in_feat_dropout(h)
if self.layer_type == GATLayer:
for idx, conv in enumerate(self.layers):
h = conv(blocks[idx], h)
else:
            if not self.edge_feat:
                # edge features disabled: feed a constant vector into the embedding instead
                edge_features = torch.ones_like(edge_features).to(self.device)
            e = self.embedding_e(edge_features.float())
for idx, conv in enumerate(self.layers):
h, e = conv(blocks[idx], h, e)
edge_subgraph.ndata["h"] = h
def _edge_feat(edges):
e = torch.cat([edges.src["h"], edges.dst["h"]], dim=1)
e = self.edge_score(e)
return {"e": e}
edge_subgraph.apply_edges(_edge_feat)
return edge_subgraph.edata["e"]
def loss(self, pred, label):
criterion = nn.CrossEntropyLoss(weight=None)
loss = criterion(pred, label)
return loss
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/models/gat_ec.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dgl
import dgl.nn as dglnn
import torch
import torch.nn as nn
import torch.nn.functional as F
class GCNEC(nn.Module):
@staticmethod
def add_args(parser):
return parser
def __init__(
self, in_dim, hidden_dim, out_dim, num_classes, n_layers, **kwargs
):
super().__init__()
self.gcn = StochasticLayerGCN(in_dim, hidden_dim, out_dim, n_layers)
self.predictor = ScorePredictor(num_classes, out_dim)
def forward(self, blocks, edge_subgraph, input_features, *args, **kwargs):
x = self.gcn(blocks, input_features)
return self.predictor(edge_subgraph, x)
def loss(self, pred, label):
loss = torch.nn.functional.binary_cross_entropy_with_logits(
pred, label
)
return loss
class StochasticLayerGCN(nn.Module):
def __init__(self, in_feats, h_feats, out_feats, n_layers):
super().__init__()
self.layers = []
if n_layers <= 1:
self.layers.append(dglnn.GraphConv(in_feats, out_feats))
else:
self.layers.append(dglnn.GraphConv(in_feats, h_feats))
for _ in range(n_layers - 2):
self.layers.append(dglnn.GraphConv(h_feats, h_feats))
self.layers.append(dglnn.GraphConv(h_feats, out_feats))
self.layers = nn.ModuleList(self.layers)
def forward(self, blocks, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(blocks[i], x))
return x
class ScorePredictor(nn.Module):
def __init__(self, num_classes, in_feats):
super().__init__()
self.W = nn.Linear(2 * in_feats, num_classes)
def apply_edges(self, edges):
data = torch.cat([edges.src["x"], edges.dst["x"]], dim=1)
return {"score": self.W(data)}
def forward(self, edge_subgraph, x):
with edge_subgraph.local_scope():
edge_subgraph.ndata["x"] = x
edge_subgraph.apply_edges(self.apply_edges)
return edge_subgraph.edata["score"]
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/models/gcn_ec.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Predictor layer
"""
class ScorePredictor(nn.Module):
def __init__(self, input_dim, output_dim, L=2):
super().__init__()
list_FC_layers = [
nn.Linear(
input_dim // 2 ** l, input_dim // 2 ** (l + 1), bias=True
)
for l in range(L)
]
list_FC_layers.append(
nn.Linear(input_dim // 2 ** L, output_dim, bias=True)
)
self.FC_layers = nn.ModuleList(list_FC_layers)
self.L = L
def forward(self, x):
y = x
for l in range(self.L):
y = self.FC_layers[l](y)
y = F.relu(y)
y = self.FC_layers[self.L](y)
return y
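# Minimal self-check sketch (illustrative dimensions, not part of the benchmark):
# the MLP halves its width at each of the L hidden layers before projecting to
# `output_dim` scores per input row.
if __name__ == "__main__":
    predictor = ScorePredictor(input_dim=16, output_dim=4, L=2)
    scores = predictor(torch.randn(8, 16))
    assert scores.shape == (8, 4)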
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/models/layers/score_predictor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/models/layers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import GATConv
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
class GATLayer(nn.Module):
"""
Parameters
----------
in_dim :
Number of input features.
out_dim :
Number of output features.
num_heads : int
Number of heads in Multi-Head Attention.
dropout :
Required for dropout of attn and feat in GATConv
batch_norm :
boolean flag for batch_norm layer.
residual :
If True, use residual connection inside this layer. Default: ``False``.
activation : callable activation function/layer or None, optional.
If not None, applies an activation function to the updated node features.
Using dgl builtin GATConv by default:
https://github.com/graphdeeplearning/benchmarking-gnns/commit/206e888ecc0f8d941c54e061d5dffcc7ae2142fc
"""
def __init__(
self,
in_dim,
out_dim,
num_heads,
dropout,
batch_norm,
residual=False,
activation=F.elu,
):
super().__init__()
self.residual = residual
self.activation = activation
self.batch_norm = batch_norm
if in_dim != (out_dim * num_heads):
self.residual = False
if dgl.__version__ < "0.5":
self.gatconv = GATConv(
in_dim, out_dim, num_heads, dropout, dropout
)
else:
self.gatconv = GATConv(
in_dim,
out_dim,
num_heads,
dropout,
dropout,
allow_zero_in_degree=True,
)
if self.batch_norm:
self.batchnorm_h = nn.BatchNorm1d(out_dim * num_heads)
def forward(self, g, h):
h_in = h # for residual connection
h = self.gatconv(g, h).flatten(1)
if self.batch_norm:
h = self.batchnorm_h(h)
if self.activation:
h = self.activation(h)
if self.residual:
h = h_in + h # residual connection
return h
##############################################################
#
# Additional layers for edge feature/representation analysis
#
##############################################################
class CustomGATHeadLayer(nn.Module):
def __init__(self, in_dim, out_dim, dropout, batch_norm):
super().__init__()
self.dropout = dropout
self.batch_norm = batch_norm
self.fc = nn.Linear(in_dim, out_dim, bias=False)
self.attn_fc = nn.Linear(2 * out_dim, 1, bias=False)
self.batchnorm_h = nn.BatchNorm1d(out_dim)
def edge_attention(self, edges):
z2 = torch.cat([edges.src["z"], edges.dst["z"]], dim=1)
a = self.attn_fc(z2)
return {"e": F.leaky_relu(a)}
def message_func(self, edges):
return {"z": edges.src["z"], "e": edges.data["e"]}
def reduce_func(self, nodes):
alpha = F.softmax(nodes.mailbox["e"], dim=1)
alpha = F.dropout(alpha, self.dropout, training=self.training)
h = torch.sum(alpha * nodes.mailbox["z"], dim=1)
return {"h": h}
def forward(self, g, h):
z = self.fc(h)
g.ndata["z"] = z
g.apply_edges(self.edge_attention)
g.update_all(self.message_func, self.reduce_func)
h = g.ndata["h"]
if self.batch_norm:
h = self.batchnorm_h(h)
h = F.elu(h)
h = F.dropout(h, self.dropout, training=self.training)
return h
class CustomGATLayer(nn.Module):
"""
Param: [in_dim, out_dim, n_heads]
"""
def __init__(
self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=True
):
super().__init__()
self.in_channels = in_dim
self.out_channels = out_dim
self.num_heads = num_heads
self.residual = residual
if in_dim != (out_dim * num_heads):
self.residual = False
self.heads = nn.ModuleList()
for i in range(num_heads):
self.heads.append(
CustomGATHeadLayer(in_dim, out_dim, dropout, batch_norm)
)
self.merge = "cat"
def forward(self, g, h, e):
h_in = h # for residual connection
head_outs = [attn_head(g, h) for attn_head in self.heads]
if self.merge == "cat":
h = torch.cat(head_outs, dim=1)
else:
h = torch.mean(torch.stack(head_outs))
if self.residual:
h = h_in + h # residual connection
return h, e
def __repr__(self):
return "{}(in_channels={}, out_channels={}, heads={}, residual={})".format(
self.__class__.__name__,
self.in_channels,
self.out_channels,
self.num_heads,
self.residual,
)
##############################################################
class CustomGATHeadLayerEdgeReprFeat(nn.Module):
def __init__(self, in_dim, out_dim, dropout, batch_norm):
super().__init__()
self.dropout = dropout
self.batch_norm = batch_norm
self.fc_h = nn.Linear(in_dim, out_dim, bias=False)
self.fc_e = nn.Linear(in_dim, out_dim, bias=False)
self.fc_proj = nn.Linear(3 * out_dim, out_dim)
self.attn_fc = nn.Linear(3 * out_dim, 1, bias=False)
self.batchnorm_h = nn.BatchNorm1d(out_dim)
self.batchnorm_e = nn.BatchNorm1d(out_dim)
def edge_attention(self, edges):
z = torch.cat(
[edges.data["z_e"], edges.src["z_h"], edges.dst["z_h"]], dim=1
)
e_proj = self.fc_proj(z)
attn = F.leaky_relu(self.attn_fc(z))
return {"attn": attn, "e_proj": e_proj}
def message_func(self, edges):
return {"z": edges.src["z_h"], "attn": edges.data["attn"]}
def reduce_func(self, nodes):
alpha = F.softmax(nodes.mailbox["attn"], dim=1)
h = torch.sum(alpha * nodes.mailbox["z"], dim=1)
return {"h": h}
def forward(self, g, h, e):
z_h = self.fc_h(h)
z_e = self.fc_e(e)
g.ndata["z_h"] = z_h
g.edata["z_e"] = z_e
g.apply_edges(self.edge_attention)
g.update_all(self.message_func, self.reduce_func)
h = g.ndata["h"]
e = g.edata["e_proj"]
if self.batch_norm:
h = self.batchnorm_h(h)
e = self.batchnorm_e(e)
h = F.elu(h)
e = F.elu(e)
h = F.dropout(h, self.dropout, training=self.training)
e = F.dropout(e, self.dropout, training=self.training)
return h, e
class CustomGATLayerEdgeReprFeat(nn.Module):
"""
Param: [in_dim, out_dim, n_heads]
"""
def __init__(
self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=True
):
super().__init__()
self.in_channels = in_dim
self.out_channels = out_dim
self.num_heads = num_heads
self.residual = residual
if in_dim != (out_dim * num_heads):
self.residual = False
self.heads = nn.ModuleList()
for i in range(num_heads):
self.heads.append(
CustomGATHeadLayerEdgeReprFeat(
in_dim, out_dim, dropout, batch_norm
)
)
self.merge = "cat"
def forward(self, g, h, e):
h_in = h # for residual connection
e_in = e
head_outs_h = []
head_outs_e = []
for attn_head in self.heads:
h_temp, e_temp = attn_head(g, h, e)
head_outs_h.append(h_temp)
head_outs_e.append(e_temp)
if self.merge == "cat":
h = torch.cat(head_outs_h, dim=1)
e = torch.cat(head_outs_e, dim=1)
else:
raise NotImplementedError
if self.residual:
h = h_in + h # residual connection
e = e_in + e
return h, e
def __repr__(self):
return "{}(in_channels={}, out_channels={}, heads={}, residual={})".format(
self.__class__.__name__,
self.in_channels,
self.out_channels,
self.num_heads,
self.residual,
)
##############################################################
class CustomGATHeadLayerIsotropic(nn.Module):
def __init__(self, in_dim, out_dim, dropout, batch_norm):
super().__init__()
self.dropout = dropout
self.batch_norm = batch_norm
self.fc = nn.Linear(in_dim, out_dim, bias=False)
self.batchnorm_h = nn.BatchNorm1d(out_dim)
def message_func(self, edges):
return {"z": edges.src["z"]}
def reduce_func(self, nodes):
h = torch.sum(nodes.mailbox["z"], dim=1)
return {"h": h}
def forward(self, g, h):
z = self.fc(h)
g.ndata["z"] = z
g.update_all(self.message_func, self.reduce_func)
h = g.ndata["h"]
if self.batch_norm:
h = self.batchnorm_h(h)
h = F.elu(h)
h = F.dropout(h, self.dropout, training=self.training)
return h
class CustomGATLayerIsotropic(nn.Module):
"""
Param: [in_dim, out_dim, n_heads]
"""
def __init__(
self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=True
):
super().__init__()
self.in_channels = in_dim
self.out_channels = out_dim
self.num_heads = num_heads
self.residual = residual
if in_dim != (out_dim * num_heads):
self.residual = False
self.heads = nn.ModuleList()
for i in range(num_heads):
self.heads.append(
CustomGATHeadLayerIsotropic(
in_dim, out_dim, dropout, batch_norm
)
)
self.merge = "cat"
def forward(self, g, h, e):
h_in = h # for residual connection
head_outs = [attn_head(g, h) for attn_head in self.heads]
if self.merge == "cat":
h = torch.cat(head_outs, dim=1)
else:
h = torch.mean(torch.stack(head_outs))
if self.residual:
h = h_in + h # residual connection
return h, e
def __repr__(self):
return "{}(in_channels={}, out_channels={}, heads={}, residual={})".format(
self.__class__.__name__,
self.in_channels,
self.out_channels,
self.num_heads,
self.residual,
)
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/models/layers/gat_layers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
class BaseDataset(ABC):
def get_graph(self, *args, **kwargs):
raise NotImplementedError("`get_graph` fn not implemented")
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/data_loader/datasets/base_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
from .edge_ds import EdgeDS
DATASET_SOURCE = {
"edge_ds": EdgeDS,
}
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/data_loader/datasets/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import dgl
import numpy as np
import torch
from syngen.configuration import SynGenDatasetFeatureSpec
from syngen.utils.types import DataFrameType, MetaData
from .base_dataset import BaseDataset
class EdgeDS(BaseDataset):
"""
    Lean DGL graph builder for edge classification.
"""
def __init__(
self,
target_col: str = "label",
add_reverse: bool = True,
train_ratio: float = 0.8,
test_ratio: float = 0.1,
val_ratio: float = 0.1,
**kwargs,
):
self.target_col = target_col
self.add_reverse = add_reverse
self.train_ratio = train_ratio
self.test_ratio = test_ratio
self.val_ratio = val_ratio
def get_graph(
self,
feature_spec: SynGenDatasetFeatureSpec,
edge_name
):
struct_data = feature_spec.get_structural_data(edge_name)
edge_info = feature_spec.get_edge_info(edge_name)
is_bipartite = edge_info[MetaData.SRC_NODE_TYPE] != edge_info[MetaData.DST_NODE_TYPE]
if is_bipartite:
offset = struct_data[:, 0].max() + 16
struct_data[:, 1] = struct_data[:, 1] + offset
# - construct dgl graph
g = dgl.graph((struct_data[:, 0], struct_data[:, 1]))
g.ndata["feat"] = torch.rand((g.num_nodes(), 32))
assert g.num_nodes() == (struct_data.max() + 1), f"expected {(struct_data.max() + 1)}, got {g.num_nodes()}"
if self.add_reverse:
edge_reverse = np.zeros_like(struct_data)
edge_reverse[:, 0] = struct_data[:, 1]
edge_reverse[:, 1] = struct_data[:, 0]
g.add_edges(edge_reverse[:, 0], edge_reverse[:, 1])
edge_data = feature_spec.get_tabular_data(MetaData.EDGES, edge_name)
feature_cols = list(set(edge_data.columns) - {self.target_col})
num_rows = len(edge_data)
num_edges = g.num_edges()
# - extract edge features + labels
features = edge_data[feature_cols].astype(np.float32).values
labels = edge_data[self.target_col].fillna(0).astype(np.float32).values
if num_rows == num_edges // 2:
# - add reverse features
features = np.concatenate([features, features], axis=0)
# - add reverse labels
labels = np.concatenate([labels, labels], axis=0)
# - add edge data
g.edata["feat"] = torch.Tensor(features)
g.edata["labels"] = torch.Tensor(labels)
# - dataset split
num_train = int(self.train_ratio * num_edges)
num_val = int(self.val_ratio * num_edges)
num_test = int(self.test_ratio * num_edges)
masks = torch.randperm(len(features))
train_idx = masks[:num_train]
val_idx = masks[num_train : num_train + num_val]
test_idx = masks[num_train + num_val : num_train + num_val + num_test]
train_mask = torch.zeros(len(features), dtype=torch.bool)
train_mask[train_idx] = True
val_mask = torch.zeros(len(features), dtype=torch.bool)
val_mask[val_idx] = True
test_mask = torch.zeros(len(features), dtype=torch.bool)
test_mask[test_idx] = True
g.edata["train_mask"] = train_mask
g.edata["val_mask"] = val_mask
g.edata["test_mask"] = test_mask
edge_eids = np.arange(0, len(struct_data))
return g, edge_eids
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/data_loader/datasets/edge_ds.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import warnings
from typing import Dict, Optional, Union
from syngen.configuration.utils import optional_comparison, one_field_from_list_of_dicts
from syngen.utils.io_utils import load_dataframe, load_graph
from syngen.utils.types import MetaData, DataSourceInputType
class SynGenDatasetFeatureSpec(dict):
""" SynGenDatasetFeatureSpec is an util class to simply the work with SynGen Dataset Format
Args:
graph_metadata (Dict): dict in SynGen Format
"""
def __init__(self, graph_metadata: Dict):
super().__init__(graph_metadata)
@staticmethod
def instantiate_from_preprocessed(path: str):
""" Creates a SynGenDatasetFeatureSpec and checks all specified files
Args:
path: path to the directory with a dataset in SynGen Format
"""
if os.path.isfile(path):
file_path = path
dir_path = os.path.dirname(file_path)
elif os.path.isdir(path):
file_path = os.path.join(path, 'graph_metadata.json')
dir_path = path
else:
raise ValueError(f"expected path to existing file or directory. got {path}")
with open(file_path, 'r') as f:
graph_metadata = json.load(f)
graph_metadata[MetaData.PATH] = dir_path
config = SynGenDatasetFeatureSpec(graph_metadata)
config.validate()
return config
def get_tabular_data(self, part, name, cache=False, absolute_path=None, return_cat_feats=False):
part_info = self.get_info(part, name)
if MetaData.FEATURES_DATA in part_info:
return part_info[MetaData.FEATURES_DATA]
part_features_info = part_info[MetaData.FEATURES]
part_features_path = part_info[MetaData.FEATURES_PATH]
if part_features_path is None:
raise ValueError()
if MetaData.PATH not in self:
if absolute_path is None:
raise ValueError("Please specify the absolute path for the feature spec: "
"by passing absolute_path argument or specifying MetaData.PATH in the Feature Spec")
else:
self[MetaData.PATH] = absolute_path
features_df = load_dataframe(os.path.join(self[MetaData.PATH], part_features_path),
feature_info=part_features_info)
if cache:
part_info[MetaData.FEATURES_DATA] = features_df
if return_cat_feats:
cat_features = [
feature_info[MetaData.NAME]
for feature_info in part_info[MetaData.FEATURES]
if feature_info[MetaData.FEATURE_TYPE] == MetaData.CATEGORICAL
]
return features_df, cat_features
return features_df
def get_structural_data(self, edge_name, cache=False, absolute_path=None, ):
edge_info = self.get_edge_info(edge_name)
if MetaData.STRUCTURE_DATA in edge_info:
return edge_info[MetaData.STRUCTURE_DATA]
structure_path = edge_info[MetaData.STRUCTURE_PATH]
if structure_path is None:
raise ValueError()
if MetaData.PATH not in self:
if absolute_path is None:
raise ValueError("Please specify the absolute path for the feature spec: "
"by passing absolute_path argument or specifying MetaData.PATH in the Feature Spec")
else:
self[MetaData.PATH] = absolute_path
graph = load_graph(os.path.join(self[MetaData.PATH], structure_path))
if cache:
edge_info[MetaData.STRUCTURE_DATA] = graph
return graph
def get_edge_info(self, name: Union[str, list], src_node_type: Optional[str] = None,
dst_node_type: Optional[str] = None):
if isinstance(name, list):
src_node_type, name, dst_node_type = name
for edge_type in self[MetaData.EDGES]:
if edge_type[MetaData.NAME] == name \
and optional_comparison(src_node_type, edge_type[MetaData.SRC_NODE_TYPE]) \
and optional_comparison(dst_node_type, edge_type[MetaData.DST_NODE_TYPE]):
return edge_type
def get_node_info(self, name: str):
for node_type in self[MetaData.NODES]:
if node_type[MetaData.NAME] == name:
return node_type
def get_info(self, part, name):
if part == MetaData.NODES:
return self.get_node_info(name)
elif part == MetaData.EDGES:
return self.get_edge_info(name)
else:
raise ValueError(f"unsupported FeatureSpec part expected [{MetaData.NODES}, {MetaData.EDGES}], got {part}")
def validate(self):
for part in [MetaData.NODES, MetaData.EDGES]:
for part_info in self[part]:
if part_info[MetaData.FEATURES_PATH]:
tab_path = os.path.join(self[MetaData.PATH], part_info[MetaData.FEATURES_PATH])
assert os.path.exists(tab_path), f"{part}-{part_info[MetaData.NAME]}: {tab_path} does not exist"
assert len(part_info[MetaData.FEATURES]) > 0, \
f"{part}-{part_info[MetaData.NAME]}: tabular features are not specified"
feature_files = one_field_from_list_of_dicts(
part_info[MetaData.FEATURES], MetaData.FEATURE_FILE, res_aggregator=set)
if len(feature_files) > 1:
assert os.path.isdir(tab_path), \
"different feature files are specified MetaData. FEATURES_PATH should be a directory"
for ff in feature_files:
ff_path = os.path.join(tab_path, ff)
assert os.path.exists(ff_path), \
f"{part}-{part_info[MetaData.NAME]}: {ff_path} does not exist"
if part == MetaData.EDGES:
struct_path = os.path.join(self[MetaData.PATH], part_info[MetaData.STRUCTURE_PATH])
assert os.path.exists(struct_path), \
f"{part}-{part_info[MetaData.NAME]}: {struct_path} does not exist"
def copy(self):
res = {}
keys_to_ignore = {MetaData.STRUCTURE_DATA, MetaData.FEATURES_DATA}
for part in (MetaData.EDGES, MetaData.NODES):
res[part] = [
{
k: copy.deepcopy(v)
for k, v in part_info.items() if k not in keys_to_ignore
}
for part_info in self[part]
]
return SynGenDatasetFeatureSpec(res)
class SynGenConfiguration(SynGenDatasetFeatureSpec):
""" SynGen Configuration
"""
def __init__(self, configuration: Dict):
super().__init__(configuration)
self._fill_missing_values()
self.validate()
def validate(self):
if MetaData.ALIGNERS in self:
for aligner_info in self[MetaData.ALIGNERS]:
for edge_name in aligner_info[MetaData.EDGES]:
if not self.get_edge_info(edge_name)[MetaData.FEATURES_PATH].endswith(".parquet"):
raise ValueError("Alignment supports only .parquet files right now")
for node_name in aligner_info[MetaData.NODES]:
if not self.get_node_info(node_name)[MetaData.FEATURES_PATH].endswith(".parquet"):
raise ValueError("Alignment supports only .parquet files right now")
def _process_tabular_generators(self, graph_part_info, part):
if MetaData.TABULAR_GENERATORS not in graph_part_info:
return
if graph_part_info[MetaData.FEATURES] == -1:
assert len(graph_part_info[MetaData.TABULAR_GENERATORS]) == 1
tab_gen_cfg = graph_part_info[MetaData.TABULAR_GENERATORS][0]
assert tab_gen_cfg[MetaData.DATA_SOURCE][MetaData.TYPE] == DataSourceInputType.CONFIGURATION
cfg = SynGenConfiguration.instantiate_from_preprocessed(tab_gen_cfg[MetaData.DATA_SOURCE][MetaData.PATH])
data_source_part_info = cfg.get_info(part, tab_gen_cfg[MetaData.DATA_SOURCE][MetaData.NAME])
graph_part_info[MetaData.FEATURES] = data_source_part_info[MetaData.FEATURES]
for tab_gen_cfg in graph_part_info[MetaData.TABULAR_GENERATORS]:
if tab_gen_cfg[MetaData.FEATURES_LIST] == -1:
assert len(graph_part_info[MetaData.TABULAR_GENERATORS]) == 1, \
"you may use mimic value (-1) only if you specify a single tabular generator"
tab_gen_cfg[MetaData.FEATURES_LIST] = [f[MetaData.NAME] for f in graph_part_info[MetaData.FEATURES]]
if tab_gen_cfg[MetaData.DATA_SOURCE][MetaData.TYPE] == DataSourceInputType.RANDOM:
edge_features = [f[MetaData.NAME] for f in graph_part_info[MetaData.FEATURES]]
for feature_name in tab_gen_cfg[MetaData.FEATURES_LIST]:
if feature_name not in edge_features:
graph_part_info[MetaData.FEATURES].append(
{
MetaData.NAME: feature_name,
MetaData.DTYPE: 'float32',
MetaData.FEATURE_TYPE: MetaData.CONTINUOUS,
# Now random generator supports only continuous features
}
)
def _fill_missing_values(self):
for part in [MetaData.NODES, MetaData.EDGES]:
for part_info in self[part]:
if MetaData.FEATURES not in part_info:
part_info[MetaData.FEATURES] = []
warnings.warn(
f"{part}-{part_info[MetaData.NAME]}: no {MetaData.FEATURES} specified, default is []")
if MetaData.FEATURES_PATH not in part_info:
part_info[MetaData.FEATURES_PATH] = None
warnings.warn(
f"{part}-{part_info[MetaData.NAME]}: no {MetaData.FEATURES_PATH} specified, default is None")
if MetaData.COUNT not in part_info:
part_info[MetaData.COUNT] = -1
warnings.warn(
f"{part}-{part_info[MetaData.NAME]}: no {MetaData.COUNT} specified, "
f"try to mimic based on generators data")
self._process_tabular_generators(part_info, part)
if part == MetaData.EDGES:
if MetaData.DIRECTED not in part_info:
part_info[MetaData.DIRECTED] = False
if part_info[MetaData.COUNT] == -1:
data_source_info = part_info[MetaData.STRUCTURE_GENERATOR][MetaData.DATA_SOURCE]
if data_source_info[MetaData.TYPE] == DataSourceInputType.CONFIGURATION:
cfg = SynGenConfiguration.instantiate_from_preprocessed(data_source_info[MetaData.PATH])
data_source_part_info = cfg.get_info(part, data_source_info[MetaData.NAME])
elif data_source_info[MetaData.TYPE] == DataSourceInputType.RANDOM:
                            raise ValueError("Cannot mimic the count for a random data source; please specify MetaData.COUNT explicitly")
else:
raise ValueError("unsupported structure generator datasource type")
if part_info[MetaData.COUNT] == -1:
part_info[MetaData.COUNT] = data_source_part_info[MetaData.COUNT]
def copy(self):
res = super().copy()
if MetaData.ALIGNERS in self:
res[MetaData.ALIGNERS] = copy.deepcopy(self[MetaData.ALIGNERS])
return SynGenConfiguration(res)
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/configuration/configuration.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .configuration import SynGenDatasetFeatureSpec, SynGenConfiguration
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/configuration/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Iterable
def optional_comparison(optional, value):
if optional is None:
return True
return optional == value
def one_field_from_list_of_dicts(dicts: Iterable[Dict], field: str, res_aggregator=list):
return res_aggregator(d[field] for d in dicts if field in d)
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/configuration/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import logging
import os
import warnings
from collections import defaultdict
from pathlib import PosixPath
from typing import Dict, Union, Literal
import cudf
import cupy
import numpy as np
import pandas as pd
import xgboost
try:
from cuml.preprocessing import LabelEncoder
from pylibraft.random import rmat # rmat needs to be imported before cuml
except ImportError:
from sklearn.preprocessing import OrdinalEncoder as LabelEncoder
from syngen.graph_aligner.base_graph_aligner import BaseGraphAligner
from syngen.graph_aligner.utils import (
get_graph,
get_preproc_dict,
get_preproc_fn,
merge_dfs,
spread_ranks, merge_graph_vertex_feat,
)
from syngen.graph_aligner.utils import get_features as default_features
from syngen.utils.types import ColumnType, DataFrameType, MetaData
from syngen.utils.utils import df_to_cudf, df_to_pandas
# - suppress numba in debug mode
numba_logger = logging.getLogger("numba")
numba_logger.setLevel(logging.WARNING)
warnings.filterwarnings('ignore')
class XGBoostAligner(BaseGraphAligner):
"""Aligns two graphs via correlating structural graph features
and tabular features using a xgboost predictor.
Args:
xgboost_params: `dict`
key-value parameters to pass to `xgboost.train`. To use
different parameters for each feature pass a
`dict` of `dict` corresponding to each feature,
with keys as the feature name and values as the xgboost_params.
num_boost_round: `dict` or int
number of boosting rounds for xgboost. The same `num_boost_round`
is used for all features unless a `dict` with keys as feature name
and values as `num_boost_round` is passed.
batch_size: int
the size of the chunk during the alignment process
topk: int
the number of candidates with the highest ranks to be chosen from during alignment
"""
def __init__(
self,
xgboost_params: Union[Dict[str, dict], dict] = {
"learning_rate": 0.1,
"colsample_bytree": 0.3,
"max_depth": 5,
"n_estimators": 100,
"alpha": 10,
"tree_method": "gpu_hist",
},
num_boost_round: Union[Dict[str, int], int] = 10,
batch_size: int = 100000,
topk: int = 4,
get_features=default_features,
verbose=False,
**kwargs,
):
        self.xgboost_params = dict(xgboost_params)  # copy to avoid mutating the shared default dict
self.num_boost_round = num_boost_round
self.batch_size = batch_size
self.topk = topk
self.col_maps_edge = None
self.col_maps_node = None
self.get_features = get_features
self.verbose = verbose
self.xgboost_params['verbosity'] = int(xgboost_params.get('verbosity', self.verbose))
self.xgboost_params['silent'] = int(xgboost_params.get('silent', not self.verbose))
self.features_to_correlate_edge = None
self.features_to_correlate_node = None
self.col_maps_edge = None
self.col_maps_node = None
self.meta_dict_edge = None
self.meta_dict_node = None
self.edge_trained_models = None
self.node_trained_models = None
def _extract_structural_features(self, graphs):
structural_features = {}
for graph_name, graph_info in graphs.items():
is_hetero = graph_info[MetaData.SRC_NODE_TYPE] != graph_info[MetaData.DST_NODE_TYPE]
if is_hetero:
offset = graph_info['src_size'] + 10
graph_info[MetaData.STRUCTURE_DATA][:, 1] = graph_info[MetaData.STRUCTURE_DATA][:, 1] + offset
edge_list_df = cudf.DataFrame(graph_info[MetaData.STRUCTURE_DATA], columns=["src", "dst"])
graph = get_graph(edge_list_df, src="src", dst="dst").to_undirected()
graph_feat_dfs = self.get_features(edge_list_df, graph, src="src", dst="dst")
graph_feat_df = merge_dfs(graph_feat_dfs, on="vertex")
graph_feat_df = graph_feat_df.fillna(0)
if is_hetero:
src_nodes = graph_feat_df['vertex'] <= graph_info['src_size']
structural_features[graph_info[MetaData.SRC_NODE_TYPE]] = merge_graph_vertex_feat(
structural_features.get(graph_info[MetaData.SRC_NODE_TYPE]),
graph_feat_df.loc[src_nodes])
dst_nodes = graph_feat_df['vertex'] > graph_info['src_size']
dst_graph_feat_df = graph_feat_df.loc[dst_nodes]
dst_graph_feat_df["vertex"] -= offset
structural_features[graph_info[MetaData.DST_NODE_TYPE]] = merge_graph_vertex_feat(
structural_features.get(graph_info[MetaData.DST_NODE_TYPE]),
dst_graph_feat_df)
graph_info[MetaData.STRUCTURE_DATA][:, 1] = graph_info[MetaData.STRUCTURE_DATA][:, 1] - offset
else:
structural_features[graph_info[MetaData.SRC_NODE_TYPE]] = merge_graph_vertex_feat(
structural_features.get(graph_info[MetaData.SRC_NODE_TYPE]), graph_feat_df)
for _, df in structural_features.items():
df['vertex'] = df['vertex'].values.astype(int)
df.set_index('vertex', inplace=True)
return structural_features
def fit(
self,
graphs,
node_features,
edge_features,
**kwargs,
):
structural_features = self._extract_structural_features(graphs)
self._fit_node(node_features, structural_features)
self._fit_edge(edge_features, structural_features, graphs)
def _fit_edge(
self,
edge_features,
structural_features,
graphs
):
self.features_to_correlate_edge = {}
self.edge_trained_models = {}
self.col_maps_edge = {}
self.meta_dict_edge = {}
for edge_name, edge_features_data in edge_features.items():
self.features_to_correlate_edge[edge_name] = {}
cat_cols = edge_features_data[MetaData.CATEGORICAL_COLUMNS]
cont_columns = list(set(edge_features_data[MetaData.FEATURES_LIST]) - set(cat_cols))
for c in cat_cols:
self.features_to_correlate_edge[edge_name][c] = MetaData.CATEGORICAL
for c in cont_columns:
self.features_to_correlate_edge[edge_name][c] = MetaData.CONTINUOUS
self.meta_dict_edge[edge_name] = defaultdict(None)
preproc_dict = get_preproc_dict(self.features_to_correlate_edge[edge_name])
for feat, v in preproc_dict.items():
preproc_fn = get_preproc_fn(v["preproc"])
edge_features_data[MetaData.FEATURES_DATA][feat], meta = \
preproc_fn(edge_features_data[MetaData.FEATURES_DATA][feat])
            self.meta_dict_edge[edge_name][feat] = meta
graph_info = graphs[edge_name]
edge_list = graph_info[MetaData.STRUCTURE_DATA]
src_ids = edge_list[:, 0]
dst_ids = edge_list[:, 1]
src_struct_feat = structural_features[graph_info[MetaData.SRC_NODE_TYPE]].loc[src_ids].values
dst_struct_feat = structural_features[graph_info[MetaData.DST_NODE_TYPE]].loc[dst_ids].values
X_train = np.concatenate([src_struct_feat, dst_struct_feat], axis=1).astype(float)
self.edge_trained_models[edge_name] = {}
self.col_maps_edge[edge_name] = {}
edge_features_df = cudf.DataFrame.from_pandas(edge_features_data[MetaData.FEATURES_DATA])
for col_name, col_type in self.features_to_correlate_edge[edge_name].items():
if col_name in self.xgboost_params:
xgboost_params = dict(self.xgboost_params[col_name])
else:
xgboost_params = dict(self.xgboost_params)
y_train = edge_features_df[col_name]
if "objective" not in xgboost_params:
if col_type == ColumnType.CONTINUOUS:
xgboost_params["objective"] = "reg:squarederror"
elif col_type == ColumnType.CATEGORICAL:
xgboost_params["objective"] = "multi:softmax"
vals = edge_features_df[col_name]
encoder = LabelEncoder()
encoder.fit(vals)
self.col_maps_edge[edge_name][col_name] = encoder
num_classes = len(encoder.classes_)
xgboost_params["num_class"] = num_classes
y_train = encoder.transform(y_train)
y_train = y_train.values
dtrain = xgboost.DMatrix(X_train, y_train)
# - train the model
trained_model = xgboost.train(
xgboost_params,
dtrain,
num_boost_round=self.num_boost_round,
evals=[(dtrain, "train")],
verbose_eval=self.verbose,
)
self.edge_trained_models[edge_name][col_name] = trained_model
def _fit_node(
self,
node_features,
structural_features
):
self.features_to_correlate_node = {}
self.node_trained_models = {}
self.col_maps_node = {}
self.meta_dict_node = {}
# fit nodes
for node_name, node_features_data in node_features.items():
self.features_to_correlate_node[node_name] = {}
cat_cols = node_features_data[MetaData.CATEGORICAL_COLUMNS]
cont_columns = list(set(node_features_data[MetaData.FEATURES_LIST]) - set(cat_cols))
for c in cat_cols:
self.features_to_correlate_node[node_name][c] = MetaData.CATEGORICAL
for c in cont_columns:
self.features_to_correlate_node[node_name][c] = MetaData.CONTINUOUS
self.meta_dict_node[node_name] = defaultdict(None)
preproc_dict = get_preproc_dict(self.features_to_correlate_node[node_name])
for feat, v in preproc_dict.items():
preproc_fn = get_preproc_fn(v["preproc"])
node_features_data[MetaData.FEATURES_DATA][feat], meta = \
preproc_fn(node_features_data[MetaData.FEATURES_DATA][feat])
            self.meta_dict_node[node_name][feat] = meta
nodes = structural_features[node_name].index.values.astype(int)
node_struct_feat = structural_features[node_name].loc[nodes].values
X_train = node_struct_feat.astype(float)
self.node_trained_models[node_name] = {}
self.col_maps_node[node_name] = {}
node_features_df = cudf.DataFrame.from_pandas(node_features_data[MetaData.FEATURES_DATA])
for col_name, col_type in self.features_to_correlate_node[node_name].items():
if col_name in self.xgboost_params:
xgboost_params = dict(self.xgboost_params[col_name])
else:
xgboost_params = dict(self.xgboost_params)
y_train = node_features_df[col_name].loc[nodes]
if "objective" not in xgboost_params:
if col_type == ColumnType.CONTINUOUS:
xgboost_params["objective"] = "reg:squarederror"
elif col_type == ColumnType.CATEGORICAL:
xgboost_params["objective"] = "multi:softmax"
vals = node_features_df[col_name].loc[nodes]
encoder = LabelEncoder()
encoder.fit(vals)
self.col_maps_node[node_name][col_name] = encoder
num_classes = len(encoder.classes_)
xgboost_params["num_class"] = num_classes
y_train = encoder.transform(y_train)
y_train = y_train.values
dtrain = xgboost.DMatrix(X_train, y_train)
trained_model = xgboost.train(
xgboost_params,
dtrain,
num_boost_round=self.num_boost_round,
evals=[(dtrain, "train")],
verbose_eval=self.verbose,
)
self.node_trained_models[node_name][col_name] = trained_model
def align(
self,
graphs,
node_features,
edge_features,
) -> pd.DataFrame:
structural_features = self._extract_structural_features(graphs)
for k, v in structural_features.items():
structural_features[k] = df_to_pandas(v)
res = {
MetaData.NODES: {},
MetaData.EDGES: {},
}
if self.features_to_correlate_node:
res[MetaData.NODES] = self._align(
structural_features,
node_features,
None,
self.features_to_correlate_node,
self.col_maps_node,
self.node_trained_models,
MetaData.NODES,
)
if self.features_to_correlate_edge:
res[MetaData.EDGES] = self._align(
structural_features,
edge_features,
graphs,
self.features_to_correlate_edge,
self.col_maps_edge,
self.edge_trained_models,
MetaData.EDGES,
)
return res
def _align(
self,
structural_features,
tab_features,
graphs,
features_to_correlate_part,
col_maps,
trained_models: Dict[str, xgboost.Booster],
part: Literal[MetaData.NODES, MetaData.EDGES],
) -> Dict[str, pd.DataFrame]:
result_dict = {}
for part_name, features_to_correlate in features_to_correlate_part.items():
preproc_dict = get_preproc_dict(features_to_correlate)
if part == MetaData.NODES:
split_df = structural_features[part_name]
elif part == MetaData.EDGES:
split_df = graphs[part_name][MetaData.STRUCTURE_DATA]
else:
raise ValueError(f"Only `{MetaData.NODES}` and `{MetaData.EDGES}` parts expected, got ({part})")
topk = min(len(split_df), self.topk)
batch_size = self.batch_size
if len(split_df) // batch_size == 0:
batch_size = len(split_df)
chunks = np.array_split(split_df, len(split_df) // batch_size)
all_preds = []
for chunk in chunks:
if part == MetaData.NODES:
node_feat = chunk.values
X_test = node_feat.astype(float)
dtest = xgboost.DMatrix(X_test)
elif part == MetaData.EDGES:
src_ids = chunk[:, 0]
dst_ids = chunk[:, 1]
src_struct_feat = structural_features[graphs[part_name][MetaData.SRC_NODE_TYPE]].loc[src_ids].values
dst_struct_feat = structural_features[graphs[part_name][MetaData.DST_NODE_TYPE]].loc[dst_ids].values
X_test = np.concatenate([src_struct_feat, dst_struct_feat], axis=1).astype(float)
dtest = xgboost.DMatrix(X_test)
col_preds = []
for col_name, col_type in features_to_correlate.items():
preds = trained_models[part_name][col_name].predict(dtest)
col_preds.append(preds.reshape(-1, 1))
col_preds = np.concatenate(col_preds, axis=1)
all_preds.append(col_preds)
all_preds = np.concatenate(all_preds, axis=0)
all_preds = cupy.asarray(all_preds)
target_cols = list(features_to_correlate.keys())
y_generated = []
for col_name, col_type in features_to_correlate.items():
preproc_fn = None
if preproc_dict:
try:
preproc_fn = get_preproc_fn(
preproc_dict[col_name]["preproc"]
)
                    except KeyError:
                        # no preprocessing registered for this column
                        pass
y = tab_features[part_name][col_name]
if preproc_fn is not None:
y, _ = preproc_fn(y)
if col_type == ColumnType.CATEGORICAL:
y = col_maps[part_name][col_name].inverse_transform(y)
y_generated.append(cudf.Series(y))
y_generated = cudf.concat(y_generated, axis=1).values
ranks = cupy.zeros((len(split_df), 1))
if len(target_cols) == 1:
y_generated = y_generated.reshape(-1)
target_col = target_cols[0]
col_type = features_to_correlate[target_col]
if col_type == ColumnType.CATEGORICAL:
all_preds = col_maps[part_name][target_col].inverse_transform(
cudf.Series(all_preds)
)
all_preds = all_preds.values
unique_preds = cupy.unique(all_preds)
unique_preds = cupy.asnumpy(unique_preds)
unique_generated = cupy.unique(y_generated)
present_unique = [
up for up in unique_preds if up in unique_generated
]
idxs = cupy.arange(0, len(y_generated))
pred_assigned = cupy.zeros(len(all_preds), dtype="bool")
gen_assigned = cupy.zeros(len(y_generated), dtype="bool")
unassigned_idxs_pred = []
for up in present_unique:
sel_idxs = idxs[y_generated == up]
cupy.random.shuffle(sel_idxs)
ups_mask = (all_preds == up).squeeze()
num_ups = cupy.sum(ups_mask)
if len(sel_idxs) > num_ups:
r_idxs = sel_idxs[:num_ups]
ranks[ups_mask] = r_idxs.reshape(-1, 1)
pred_assigned[ups_mask] = True
gen_assigned[sel_idxs[:num_ups]] = True
else:
r_idxs = cupy.where(ups_mask)[0]
ra_idxs = r_idxs[: len(sel_idxs)]
ranks[ra_idxs] = sel_idxs.reshape(-1, 1)
ups_mask[ra_idxs] = False
unassigned_idxs = ra_idxs[len(sel_idxs):]
unassigned_idxs_pred.append(unassigned_idxs)
pred_assigned[ra_idxs] = True
gen_assigned[sel_idxs] = True
ranks[~pred_assigned] = idxs[~gen_assigned][: cupy.sum(~pred_assigned)].reshape(-1, 1)
elif col_type == ColumnType.CONTINUOUS:
y_generated = cupy.ravel(y_generated)
y_idxsort = cupy.argsort(y_generated)
y_generated_sorted = y_generated[y_idxsort]
ranking = cupy.searchsorted(y_generated_sorted, all_preds)
ranks = y_idxsort[ranking]
ranks = spread_ranks(ranks)
elif len(target_cols) > 1:
y_generated = y_generated / (
cupy.linalg.norm(y_generated, ord=2, axis=1).reshape(-1, 1)
)
chunks = cupy.array_split(all_preds, len(all_preds) // batch_size)
for idx, chunk in enumerate(chunks):
idxs = cupy.ones((len(y_generated),), dtype=bool)
chunk = chunk / cupy.linalg.norm(chunk, ord=2, axis=1).reshape(
-1, 1
)
sim = cupy.einsum("ij,kj->ik", chunk, y_generated)
chunk_ranks = cupy.argsort(sim, axis=1)[:, -topk:]
rand_sel = cupy.random.randint(0, topk, len(chunk_ranks))
chunk_ranks = chunk_ranks[
cupy.arange(len(chunk_ranks)), rand_sel
]
cupy.put(idxs, chunk_ranks, False)
y_generated = y_generated[idxs]
ranks[
idx * batch_size: idx * batch_size + len(chunk)
] = chunk_ranks.reshape(-1, 1)
ranks[ranks >= len(tab_features[part_name])] = len(tab_features[part_name]) - 1
ranks = cupy.asnumpy(ranks)
ranks = ranks.squeeze()
features = tab_features[part_name].iloc[ranks].reset_index(drop=True)
result_dict[part_name] = features
return result_dict
def save(self, save_dir: Union[PosixPath, str]):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if self.edge_trained_models:
for edge_name, models in self.edge_trained_models.items():
for col_name, model in models.items():
model.save_model(
os.path.join(save_dir, f"{edge_name}___{col_name}___xgb_aligner_edge.json")
)
if self.node_trained_models:
for node_name, models in self.node_trained_models.items():
for col_name, model in models.items():
model.save_model(
os.path.join(save_dir, f"{node_name}___{col_name}___xgb_aligner_node.json")
)
meta_data = {
"xgboost_params": self.xgboost_params,
"num_boost_round": self.num_boost_round,
"batch_size": self.batch_size,
"topk": self.topk,
"get_features": self.get_features,
"verbose": self.verbose,
"fitted_data": {
"features_to_correlate_edge": self.features_to_correlate_edge,
"features_to_correlate_node": self.features_to_correlate_node,
"col_maps_edge": self.col_maps_edge,
"col_maps_node": self.col_maps_node,
"meta_dict_edge": self.meta_dict_edge,
"meta_dict_node": self.meta_dict_node,
}
}
with open(os.path.join(save_dir, "xgb_aligner_meta.pkl"), "wb") as file_handler:
pickle.dump(meta_data, file_handler, protocol=pickle.HIGHEST_PROTOCOL)
@classmethod
def load(cls, dir_path: Union[PosixPath, str]):
with open(os.path.join(dir_path, "xgb_aligner_meta.pkl"), "rb") as file_handler:
meta_data = pickle.load(file_handler)
fitted_data = meta_data.pop('fitted_data')
instance = cls(**meta_data)
for k, v in fitted_data.items():
setattr(instance, k, v)
files = os.listdir(dir_path)
edge_files = [f for f in files if "xgb_aligner_edge" in f]
instance.edge_trained_models = defaultdict(dict)
for ef in edge_files:
xgb_model = xgboost.Booster()
xgb_model.load_model(os.path.join(dir_path, ef))
edge_name, col_name = ef.split("___")[:2] # - same format as `save`
instance.edge_trained_models[edge_name][col_name] = xgb_model
node_files = [f for f in files if "xgb_aligner_node" in f]
instance.node_trained_models = defaultdict(dict)
for nf in node_files:
xgb_model = xgboost.Booster()
xgb_model.load_model(os.path.join(dir_path, nf))
            node_name, col_name = nf.split("___")[:2] # - same format as `save`
instance.node_trained_models[node_name][col_name] = xgb_model
return instance
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/graph_aligner/xgboost_aligner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
from syngen.graph_aligner.base_graph_aligner import BaseGraphAligner
from syngen.graph_aligner.xgboost_aligner import XGBoostAligner
aligner_classes = {
'xgboost': XGBoostAligner,
}
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/graph_aligner/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
class BaseGraphAligner(abc.ABC):
"""Base class for all graph alignment objects"""
@classmethod
def get_aligners(cls, include_parents=True):
"""Recursively find sublcasses of `BaseGraphAligner`
Args:
include_parents (bool): whether to include parents to other classes.
(default: `True`)
"""
aligners = dict()
for child in cls.__subclasses__():
children = child.get_aligners(include_parents)
aligners.update(children)
if include_parents or not children:
if abc.ABC not in child.__bases__:
aligners[child.__name__] = child
return aligners
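    # Example (sketch): list concrete aligner implementations by class name.
    #   BaseGraphAligner.get_aligners(include_parents=False)
    #   # -> {"XGBoostAligner": <class 'XGBoostAligner'>, ...}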
def fit(self, *args, **kwargs) -> None:
"""function to fit aligner required to be implemented by aligners"""
raise NotImplementedError()
def align(self, *args, **kwargs):
"""align function to align generated graph and generated features,
required to be implemented by aligners
"""
raise NotImplementedError()
def save(self, path):
raise NotImplementedError()
@classmethod
def load(cls, path):
raise NotImplementedError()
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/graph_aligner/base_graph_aligner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path, PosixPath
from typing import List, Union
import cudf
import cupy
import pandas as pd
import torch
from tqdm import tqdm
from syngen.utils.types import ColumnType
from syngen.utils.cugraph import import_cugraph
def get_graph(df: cudf.DataFrame, src="src", dst="dst"):
"""Construct directed graph
Args:
df (DataFrameType): dataframe containing edge info
src (str): source node column name
dst (str): destination node column name
Returns:
`cugraph.DiGraph`
"""
cugraph = import_cugraph()
graph = cugraph.DiGraph()
graph.from_cudf_edgelist(df, source=src, destination=dst)
return graph
def merge_dfs(dfs, **kwargs):
"""merge a list of dataframes on a particular column
Args:
        dfs (list): list of dataframes to merge
kwargs (dict): key-word arguments to pass to DataFrame `merge` function
"""
if "on" not in kwargs:
kwargs["on"] = "vertex"
if "how" not in kwargs:
kwargs["how"] = "outer"
df = dfs[0]
for i in range(1, len(dfs)):
df = df.merge(dfs[i], **kwargs)
return df
def get_features(
df,
G,
src: str = "src",
dst: str = "dst",
pagerank_kwargs: dict = {"tol": 1e-4},
):
"""Extract structural features from graph `G`
    features extracted: pagerank, in-degree, out-degree, katz centrality
Args:
        df (cudf.DataFrame): data containing edge list information
G (cugraph.DiGraph): cuGraph graph descriptor containing connectivity information
from df.
src (str): source node column name.
        dst (str): destination node column name.
pagerank_kwargs (dict): page rank function arguments to pass.
"""
# - pagerank feat
cugraph = import_cugraph()
pr_df = cugraph.pagerank(G, **pagerank_kwargs)
# - out-degree feat
degree_src_df = df.groupby(src).count()
degree_src_df = degree_src_df.reset_index().rename(
columns={src: "vertex", dst: "out_degree"}
)
# - in-degree feat
degree_dst_df = df.groupby(dst).count()
degree_dst_df = degree_dst_df.reset_index().rename(
columns={dst: "vertex", src: "in_degree"}
)
# - katz feat
katz_df = cugraph.katz_centrality(G, tol=1e-2, alpha=1e-3)
return [pr_df, degree_src_df, degree_dst_df, katz_df]
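# Usage sketch (requires a GPU with RAPIDS available; the toy edge list is illustrative):
#   edges = cudf.DataFrame({"src": [0, 1, 2, 2], "dst": [1, 2, 0, 3]})
#   G = get_graph(edges, src="src", dst="dst")
#   feat_dfs = get_features(edges, G, src="src", dst="dst")
#   vertex_feat = merge_dfs(feat_dfs, on="vertex").fillna(0)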
def merge_graph_vertex_feat(old, new):
if old is None:
return new
merged_df = old.merge(new, on=['vertex'], how='outer')
merged_df = merged_df.fillna(0)
return merged_df
def chunk_pd_save(
df: pd.DataFrame,
save_path: Union[str, PosixPath],
chunk_size: Union[int, float],
):
"""Chunks a large dataframe and casts to a cudf for faster save
Args:
df (pdDataFrame): dataframe object to dump data
save_path (str): data path to dump chunks
        chunk_size (int or float): absolute chunk size, or a fraction of the rows when in (0, 1]
"""
save_path = Path(save_path)
num_rows = len(df)
if not save_path.exists():
os.makedirs(save_path)
    if 0.0 < chunk_size <= 1.0:
chunk_size = int(num_rows * chunk_size)
else:
chunk_size = int(chunk_size)
for i in tqdm(range(num_rows // chunk_size - 1)):
chunk_df = df.iloc[i * chunk_size : (i + 1) * chunk_size]
chunk_cudf = cudf.from_pandas(chunk_df)
chunk_cudf.to_parquet(save_path / f"{i}_chunk.parquet", index=False)
def z_norm(series, meta=None, compute=False):
"""applies z-normalization (x - mu) / std"""
if meta:
mean = meta["mean"]
std = meta["std"]
else:
mean = series.mean()
std = series.std()
out = (series - mean) / std
return out, {"mean": mean, "std": std}
def categorify(series, meta=None, compute=False):
"""Converts categorical to ordinal"""
cat_codes = series.astype("category").cat.codes
return cat_codes, {}
def get_preproc_fn(name: str):
"""Preprocessing map function"""
PREPROC_FN_MAP = {"z_norm": z_norm, "categorify": categorify}
return PREPROC_FN_MAP[name]
def get_preproc_dict(feature_types: dict):
"""Apply preprocessing functions to each column type specified in `feature_types` """
preproc_dict = {}
for feat, type_ in feature_types.items():
if type_ == ColumnType.CONTINUOUS:
preproc_dict[feat] = {"type": type_, "preproc": "z_norm"}
elif type_ == ColumnType.CATEGORICAL:
preproc_dict[feat] = {"type": type_, "preproc": "categorify"}
return preproc_dict
def spread_ranks(ranks):
vals = cupy.unique(ranks)
rr = 0
for v in vals:
m = ranks == v
num_v = cupy.sum(m)
idx_range = cupy.arange(0, cupy.sum(m))
ranks[m] = ranks[m] + idx_range + rr
rr += num_v
return ranks
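# Worked example (illustrative): spread_ranks breaks ties by offsetting equal
# ranks with consecutive indices, e.g. cupy.array([0, 0, 1, 1]) -> [0, 1, 3, 4].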
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/graph_aligner/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynvml
import psutil
class MemoryManager(object):
def __init__(self, gpus=None):
pynvml.nvmlInit()
    def __new__(cls, *args, **kwargs):
        # accept the same arguments as __init__ so the singleton can be
        # constructed with keyword arguments without raising a TypeError
        if not hasattr(cls, 'instance'):
            cls.instance = super(MemoryManager, cls).__new__(cls)
        return cls.instance
def get_available_gpus(self):
return pynvml.nvmlDeviceGetCount()
def get_memory_info_on_gpu(self, gpu_id):
h = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
return pynvml.nvmlDeviceGetMemoryInfo(h)
def get_min_available_across_gpus_memory(self, gpus):
total = None
used = 0
for g_id in range(gpus):
info = self.get_memory_info_on_gpu(g_id)
if total is None:
total = info.total
else:
assert total == info.total
used = max(used, info.used)
return total - used
def get_available_virtual_memory(self):
return psutil.virtual_memory().available
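# Illustrative usage sketch (not part of the original module):
#
#   mm = MemoryManager()          # singleton: repeated calls return the same instance
#   n_gpus = mm.get_available_gpus()
#   free_gpu_mem = mm.get_min_available_across_gpus_memory(gpus=n_gpus)
#   free_host_mem = mm.get_available_virtual_memory()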
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils/memory_manager.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
from .utils import *
from .io_utils import *
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import logging
import importlib
from pathlib import PosixPath
from typing import Optional, Union
import cudf
import cupy
import dask.dataframe as dd
import dask_cudf
import cupy as cp
import numpy as np
import pandas as pd
import os
from syngen.utils.types import DataFrameType, NDArray
logger = logging.getLogger(__name__)
log = logger
class CustomTimer:
"""Wraps `time` module and adds tagging for multiple timers
Example:
timer = CustomTimer()
timer.start_counter("tag")
# - do a series of operation
# ...
# - end of operations
timer.end_counter("tag", "tag timer has ended")
Args:
path (Optional[str])
"""
    def __init__(self, path: Optional[Union[PosixPath, str]] = None, verbose: bool = False):
self.path = path
self.verbose = verbose
self.timers = {}
self.f = None
if self.path:
self.f = open(self.path, "w")
def start_counter(self, key: str):
self.timers[key] = time.perf_counter()
def end_counter(self, key: str, msg: str):
end = time.perf_counter()
start = self.timers.get(key, None)
if start is None:
return
message_string = f"{msg}: {end - start:.2f}\n"
if self.f:
self.f.write(message_string)
if self.verbose:
print(message_string, end='')
def maybe_close(self):
if self.f:
self.f.close()
def current_ms_time():
return round(time.time() * 1000)
def to_ndarray(df: DataFrameType) -> NDArray:
""" Returns potentially distributed data frame to its in-memory equivalent array. """
if isinstance(df, (cudf.DataFrame, pd.DataFrame)):
return df.values
elif isinstance(df, (dask_cudf.DataFrame, dd.DataFrame)):
return df.compute().values
else:
raise NotImplementedError(f'Conversion of type {type(df)} is not supported')
def df_to_pandas(df):
""" Converts `DataFrameType` to `pandas.DataFrame`
Args:
df (DataFrameType): the DataFrame to be converted
"""
if isinstance(df, cudf.DataFrame):
pddf = df.to_pandas()
elif isinstance(df, dask_cudf.DataFrame):
pddf = pd.DataFrame(
cupy.asnumpy(df.values.compute()), columns=df.columns
)
elif isinstance(df, pd.DataFrame):
pddf = df
else:
raise ValueError(f"DataFrame type {type(df)} not supported")
return pddf
def df_to_cudf(df: DataFrameType):
""" Converts `DataFrameType` to `cudf.DataFrame`
Args:
df (DataFrameType): the DataFrame to be converted
"""
if isinstance(df, cudf.DataFrame):
pass
elif isinstance(df, dask_cudf.DataFrame):
df = cudf.DataFrame(
cupy.asnumpy(df.values.compute()), columns=df.columns
)
elif isinstance(df, pd.DataFrame):
df = cudf.from_pandas(df)
else:
raise ValueError(f"DataFrameType type {type(df)} not supported")
return df
def df_to_dask_cudf(df: DataFrameType,
chunksize: Optional[int] = None):
""" Converts `DataFrameType` to `dask_cudf.DataFrame`
Args:
df (DataFrameType): the DataFrame to be converted
chunksize (int): dask chunk size. (default: min(1e6, len(df) // num_devices))
"""
if chunksize is None:
chunksize = min(
int(1e6), len(df) // cupy.cuda.runtime.getDeviceCount()
)
if isinstance(df, cudf.DataFrame):
df = dask_cudf.from_cudf(df, chunksize=chunksize)
elif isinstance(df, dask_cudf.DataFrame):
pass
elif isinstance(df, pd.DataFrame):
df = cudf.from_pandas(df)
df = dask_cudf.from_cudf(df, chunksize=chunksize)
else:
raise ValueError(f"DataFrameType type {type(df)} not supported")
return df
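# Illustrative usage sketch (not part of the original module): the converters
# above accept any DataFrameType, so a pandas frame can be round-tripped, e.g.
#
#   pdf = pd.DataFrame({"a": [1, 2, 3]})
#   gdf = df_to_cudf(pdf)          # cudf.DataFrame
#   arr = to_ndarray(gdf)          # cupy-backed values array
#   back = df_to_pandas(gdf)       # pandas.DataFrame again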
def dynamic_import(object_path):
"""Import an object from its full path."""
if isinstance(object_path, str):
parent, obj_name = object_path.rsplit(".", 1)
try:
parent = importlib.import_module(parent)
except ImportError:
raise ImportError(f"Could not import {object_path}")
return getattr(parent, obj_name)
return object_path
def get_object_path(obj):
return obj.__class__.__module__ + '.' + obj.__class__.__name__
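# Illustrative usage sketch (not part of the original module): dynamic_import and
# get_object_path are inverses for class instances. `some_obj` is hypothetical:
#
#   path = get_object_path(some_obj)   # e.g. 'syngen.utils.utils.CustomTimer'
#   cls = dynamic_import(path)         # resolves back to the class object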
def ensure_path(path: Union[str, PosixPath]):
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
return path
def infer_operator(ndarray: NDArray):
""" Returns array backend module (numpy or cupy). """
if isinstance(ndarray, np.ndarray):
return np
elif isinstance(ndarray, cp.ndarray):
return cp
else:
logger.warning(
'Detected array of type %s, while one of (%s) was expected. Defaulting to using numpy',
type(ndarray), 'numpy.ndarray, cupy.ndarray',
)
return np
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from collections import defaultdict
from pathlib import Path, PosixPath
from typing import Optional, Union, List
import numpy as np
import pandas as pd
from tqdm import tqdm
from syngen.utils.utils import infer_operator
from syngen.utils.types import NDArray
from syngen.utils.types import MetaData
logger = logging.getLogger(__name__)
log = logger
def dump_dataframe(data: pd.DataFrame, save_path: Union[PosixPath, str], format: Optional[str] = 'parquet') -> None:
if save_path.endswith('.csv'):
format = 'csv'
if save_path.endswith('.parquet'):
format = 'parquet'
log.info(f"writing to file {save_path} {format}")
if format == 'parquet':
data.to_parquet(save_path, compression=None, index=False)
elif format == 'csv':
data.to_csv(save_path, index=False)
else:
raise ValueError(f'unsupported file_format: {format}, expected `csv` or `parquet`')
def dump_generated_graph(path: Union[PosixPath, str], graph: NDArray, format: str = 'npy') -> None:
operator = infer_operator(graph)
if path.endswith('.npy'):
format = 'npy'
if path.endswith('.csv'):
format = 'csv'
if path.endswith('.parquet'):
format = 'parquet'
    if format is None:
        raise ValueError(f'could not infer the file format from path: {path}')
if format == 'npy':
operator.save(path, graph)
elif format == 'csv':
operator.savetxt(path, graph, fmt='%i', delimiter='\t')
elif format == 'parquet':
dump_dataframe(pd.DataFrame(graph, columns=['src', 'dst'], copy=False), path)
else:
raise ValueError(f'unsupported file_format: {format}, expected `npy`, `parquet` or `csv`')
def merge_dataframe_files(file_paths: List[Union[PosixPath, str]], format='csv') -> pd.DataFrame:
if format == 'parquet':
dfs = [pd.read_parquet(fn) for fn in file_paths]
elif format == 'csv':
dfs = [pd.read_csv(fn) for fn in file_paths]
else:
raise ValueError(f'unsupported file_format: {format}, expected `csv` or `parquet`')
return pd.concat(dfs, axis=0, ignore_index=True)
def load_dataframe(path: Union[PosixPath, str], format: Optional[str] = None, feature_info: Optional[List[dict]] = None) -> pd.DataFrame:
if path.endswith('.parquet'):
format = 'parquet'
elif path.endswith('.csv'):
format = 'csv'
elif path.endswith('.npy'):
format = 'npy'
elif os.path.isdir(path):
format = 'dir'
    if format is None:
        raise ValueError(f'could not infer the file format from path: {path}')
if format == 'parquet':
return pd.read_parquet(path)
if format == 'csv':
return pd.read_csv(path)
assert feature_info is not None, '`npy` and `dir` require specified feature_info'
if format == 'npy':
return pd.DataFrame(np.load(path, mmap_mode='r'), columns=[f[MetaData.NAME] for f in feature_info], copy=False)
if format == 'dir':
file_names_to_features = defaultdict(list)
for fi in feature_info:
file_names_to_features[fi[MetaData.FEATURE_FILE]].append(fi)
return pd.concat(
[load_dataframe(os.path.join(path, fn), feature_info=file_names_to_features[fn])
for fn in os.listdir(path)], axis=1, copy=False)
def load_graph(path: Union[str, PosixPath], format: Optional[str] = None) -> np.ndarray:
if path.endswith('.parquet'):
format = 'parquet'
elif path.endswith('.csv'):
format = 'csv'
elif path.endswith('.npy'):
format = 'npy'
    if format is None:
        raise ValueError(f'could not infer the file format from path: {path}')
if format == 'parquet':
return pd.read_parquet(path).values
if format == 'csv':
return pd.read_csv(path).values
if format == 'npy':
return np.load(path, mmap_mode='c')
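# Illustrative usage sketch (not part of the original module): an edge list can be
# dumped and reloaded through any of the supported formats. The path is hypothetical:
#
#   edges = np.array([[0, 1], [1, 2]])
#   dump_generated_graph('/tmp/graph.parquet', edges)
#   reloaded = load_graph('/tmp/graph.parquet')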
def write_csv_file_listener(save_path: Union[str, PosixPath], save_name: str, queue):
KILL_SIG = "kill"
save_path = Path(save_path) / f"{save_name}.csv"
first_file = True
while True:
# - keep listening until `kill` signal
m = queue.get()
if m == KILL_SIG:
break
elif type(m) == pd.DataFrame:
if first_file:
m.to_csv(save_path, index=False, header=True)
first_file = False
else:
m.to_csv(save_path, mode="append", index=False, header=False)
else:
raise Exception(f"{m} is not supported")
def merge_csv_files(
file_paths: List[Union[str, PosixPath]],
save_path: Union[str, PosixPath],
save_name: str = "samples",
header: bool = True,
remove_original_files: bool = True,
) -> None:
"""
Merges CSV files into a single large CSV file
Args:
        file_paths (list): a list of paths to individual csv files
        save_path (str): a path to directory to save merged csv file
        save_name (str): file name of merged csv file
        header (bool): whether to copy the header row from the first file
        remove_original_files (bool): whether to delete the individual files after merging
Returns:
None
"""
save_path = Path(save_path)
record_header = False
if header:
record_header = True
    with open(save_path / f"{save_name}", "w") as out_file:
        for fp in tqdm(file_paths):
            with open(fp, "r") as csv_file:
                # lines read from the source files already end with a newline
                for line_idx, line in enumerate(csv_file):
                    if line_idx == 0 and record_header:
                        out_file.write(line)
                        record_header = False
                        continue
                    elif line_idx == 0:
                        continue
                    else:
                        out_file.write(line)
if remove_original_files:
for f in file_paths:
os.remove(f)
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils/io_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def import_cugraph():
""" Lazy import of cugraph. """
import cugraph
return cugraph
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils/cugraph.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from .str_enum import StrEnum
class MetaData(StrEnum):
PATH = "path"
EDGES = "edges"
NODES = "nodes"
ALIGNERS = "[gen]aligners"
GRAPHS = "graphs"
NAME = "name"
COUNT = "count"
NODE_DATA = "node_data"
EDGE_DATA = "edge_data"
TYPE = "type"
DTYPE = "dtype"
SRC = "src"
SRC_NAME = "src_name"
SRC_NODE_TYPE = "src_node_type"
DST = "dst"
DST_NAME = "dst_name"
DST_NODE_TYPE = "dst_node_type"
NODE_NAME = "node_name"
NODE_COLUMNS = "node_columns"
EDGE_NAME = "edge_name"
LABELS = "labels"
FEATURES = "features"
FEATURES_PATH = "features_path"
FEATURES_DATA = "features_data"
FEATURE_TYPE = "feature_type"
FEATURE_FILE = "feature_file"
FILENAME_PREFIX = "filename_prefix"
STRUCTURE_PATH = "structure_path"
STRUCTURE_DATA = "structure_data"
NODE_FEAT = "node_feat"
EDGE_FEAT = "edge_feat"
TRAIN_MASK = "train_mask"
VAL_MASK = "val_mask"
TEST_MASK = "test_mask"
CONTINUOUS = "continuous"
CATEGORICAL = "categorical"
CONTINUOUS_COLUMNS = "continuous_columns"
CATEGORICAL_COLUMNS = "categorical_columns"
UNDIRECTED = "undirected"
DIRECTED = "directed"
# generation related keys
STRUCTURE_GENERATOR = "[gen]structure_generator"
TABULAR_GENERATORS = "[gen]tabular_generators"
DATA_SOURCE = "data_source"
FEATURES_LIST = "features_list"
PARAMS = "params"
DUMP_PATH = "dump_path"
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils/types/metadata.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .str_enum import StrEnum
class ColumnType(StrEnum):
CONTINUOUS = "continuous"
CATEGORICAL = "categorical"
MIXED = "mixed"
DISCRETE = "discrete"
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils/types/column_type.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
class DataSourceInputType(Enum):
DATASET = "dataset", "ds"
EDGE_LIST = "edge_list", 'el'
RANDOM = "random", 'rnd'
CONFIGURATION = "configuration", 'cfg'
DATA = "data", 'data'
def __new__(cls, *values):
obj = object.__new__(cls)
obj._value_ = values[0]
for other_value in values[1:]:
cls._value2member_map_[other_value] = obj
obj._all_values = values
return obj
    def __eq__(self, other):
        if isinstance(other, DataSourceInputType):
            return self is other
        return other in self._all_values
    # keep members hashable despite the custom __eq__
    __hash__ = Enum.__hash__
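# Illustrative usage sketch (not part of the original module): each member also
# registers its short alias in _value2member_map_, so both spellings resolve to
# the same member, e.g.
#
#   DataSourceInputType("cfg") is DataSourceInputType.CONFIGURATION   # True
#   DataSourceInputType.CONFIGURATION == "configuration"              # True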
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils/types/data_source_input_type.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
class StrEnum(str, enum.Enum):
def __new__(cls, *args):
for arg in args:
if not isinstance(arg, (str, enum.auto)):
raise TypeError(
"Values of StrEnums must be strings: {} is a {}".format(
repr(arg), type(arg)
)
)
return super().__new__(cls, *args)
def __str__(self):
return self.value
def _generate_next_value_(name, *_):
return name
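# Illustrative usage sketch (not part of the original module): because StrEnum
# members are also plain strings they work directly in string comparisons and as
# dict keys. `Color` is a hypothetical subclass:
#
#   class Color(StrEnum):
#       RED = "red"
#   Color.RED == "red"   # True (members are str instances)
#   str(Color.RED)       # 'red'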
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils/types/str_enum.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
from .array_type import NDArray
from .column_type import ColumnType
from .dataframe_type import DataFrameType
from .metadata import MetaData
from .data_source_input_type import DataSourceInputType
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils/types/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import cudf
import pandas
DataFrameType = Union[
cudf.DataFrame,
pandas.DataFrame,
]
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils/types/dataframe_type.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TypeVar
import cupy as cp
import numpy as np
NDArray = TypeVar('NDArray', np.ndarray, cp.ndarray)
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils/types/array_type.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from syngen.cli.commands.synthesize import SynthesizeCommand
from syngen.cli.commands.preprocess import PreprocessingCommand
from syngen.cli.commands.mimic_dataset import MimicDatasetCommand
from syngen.cli.commands.pretrain import PretrainCommand
def get_parser():
parser = argparse.ArgumentParser(
description="Synthetic Graph Generation Tool",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
command = parser.add_subparsers(title="command")
command.required = True
SynthesizeCommand().init_parser(command)
PreprocessingCommand().init_parser(command)
MimicDatasetCommand().init_parser(command)
PretrainCommand().init_parser(command)
return parser
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/cli/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import json
import logging
from collections import defaultdict
from syngen.cli.commands.base_command import BaseCommand
from syngen.configuration import SynGenDatasetFeatureSpec, SynGenConfiguration
from syngen.generator.tabular import tabular_generators_classes
from syngen.utils.types import MetaData
logger = logging.getLogger(__name__)
log = logger
class MimicDatasetCommand(BaseCommand):
def init_parser(self, base_parser):
mimic_parser = base_parser.add_parser(
"mimic-dataset",
help="Quickly creates a SynGen Configuration for the given dataset",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
mimic_parser.set_defaults(action=self.run)
mimic_parser.add_argument(
"-dp", "--dataset-path", type=str, required=True,
help="Path to the dataset in SynGen format"
)
mimic_parser.add_argument(
"-of", "--output-file", type=str, required=True,
help="Path to the generated SynGen Configuration"
)
mimic_parser.add_argument(
"-tg", "--tab-gen", type=str, choices=list(tabular_generators_classes.keys()), default='kde',
help="Tabular Generator to mimic all tabular features"
)
mimic_parser.add_argument(
"-rsg", "--random-struct-gen", action='store_true',
help="Generates random structure based on Erdos-Renyi model"
)
mimic_parser.add_argument(
"-es", "--edge-scale", type=float, default=None,
help="Multiples the number of edges to generate by the provided number"
)
mimic_parser.add_argument(
"-en", "--node-scale", type=float, default=None,
help="Multiples the number of nodes to generate by the provided number"
)
mimic_parser.add_argument(
"-gdp", "--gen-dump-path", type=str, default=None,
help="Path to store the fitted generators"
)
def run(self, args):
dict_args = vars(args)
feature_spec = SynGenDatasetFeatureSpec.instantiate_from_preprocessed(dict_args['dataset_path'])
scales = {
MetaData.EDGES: dict_args['edge_scale'],
MetaData.NODES: dict_args['node_scale'],
}
for part in [MetaData.NODES, MetaData.EDGES]:
for part_info in feature_spec[part]:
if scales[part]:
part_info[MetaData.COUNT] = int(part_info[MetaData.COUNT] * scales[part])
if MetaData.FEATURES in part_info and len(part_info[MetaData.FEATURES]) > 0:
feature_files_content = defaultdict(list)
for feature in part_info[MetaData.FEATURES]:
if MetaData.FEATURE_FILE in feature:
feature_files_content[feature[MetaData.FEATURE_FILE]].append(feature[MetaData.NAME])
if feature_files_content:
part_info[MetaData.TABULAR_GENERATORS] = [
{
MetaData.TYPE: dict_args['tab_gen'],
MetaData.FEATURES_LIST: feats_list,
MetaData.FEATURE_FILE: ff,
MetaData.DATA_SOURCE: {
MetaData.TYPE: 'rnd',
} if dict_args['tab_gen'] == 'random'
else
{
MetaData.TYPE: 'cfg',
MetaData.PATH: dict_args['dataset_path'],
MetaData.NAME: part_info[MetaData.NAME],
},
MetaData.PARAMS: {},
MetaData.DUMP_PATH: os.path.join(dict_args['gen_dump_path'],
f"{part}_{part_info[MetaData.NAME]}_tab_gen_{idx}.pkl")
if dict_args['gen_dump_path'] else None
}
for idx, (ff, feats_list) in enumerate(feature_files_content.items())
]
else:
part_info[MetaData.TABULAR_GENERATORS] = [
{
MetaData.TYPE: dict_args['tab_gen'],
MetaData.FEATURES_LIST: -1,
MetaData.DATA_SOURCE: {
MetaData.TYPE: 'rnd',
} if dict_args['tab_gen'] == 'random'
else
{
MetaData.TYPE: 'cfg',
MetaData.PATH: dict_args['dataset_path'],
MetaData.NAME: part_info[MetaData.NAME],
},
MetaData.PARAMS: {},
MetaData.DUMP_PATH: os.path.join(dict_args['gen_dump_path'],
f"{part}_{part_info[MetaData.NAME]}_tab_gen_{0}.pkl")
if dict_args['gen_dump_path'] else None
}
]
if part == MetaData.EDGES:
part_info[MetaData.STRUCTURE_GENERATOR] = {
MetaData.TYPE: 'RMAT',
MetaData.DATA_SOURCE: {
MetaData.TYPE: 'rnd',
} if dict_args['random_struct_gen']
else
{
MetaData.TYPE: 'cfg',
MetaData.PATH: dict_args['dataset_path'],
MetaData.NAME: part_info[MetaData.NAME],
},
MetaData.PARAMS: {},
MetaData.DUMP_PATH: os.path.join(dict_args['gen_dump_path'],
f"{part_info[MetaData.NAME]}_struct_gen.pkl")
if dict_args['gen_dump_path'] else None
}
config = SynGenConfiguration(feature_spec)
with open(dict_args['output_file'], 'w') as f:
json.dump(config, f, indent=4)
log.info(f"SynGen Configuration saved into {dict_args['output_file']}")
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/cli/commands/mimic_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from syngen.cli.commands.base_command import BaseCommand
from syngen.benchmark.tasks import train_ec
from syngen.configuration import SynGenDatasetFeatureSpec, SynGenConfiguration
from syngen.generator.tabular import tabular_generators_classes
from syngen.utils.types import MetaData
from syngen.benchmark.models import MODELS
logging.basicConfig()
logging.root.setLevel(logging.NOTSET)
logger = logging.getLogger(__name__)
log = logger
class PretrainCommand(BaseCommand):
def init_parser(self, base_parser):
pretrain_parser = base_parser.add_parser(
"pretrain",
help="Run Synthetic Graph Data Pre-training Tool",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
pretrain_parser.set_defaults(action=self.run)
# global
pretrain_parser.add_argument(
"--task",
type=str,
default="ec",
help=f"now the only available option is ec (edge-classification)",
)
pretrain_parser.add_argument(
"--seed",
type=int,
default=777,
help="Set a seed globally"
)
pretrain_parser.add_argument(
"--timeit",
action="store_true",
help="Measures average training time",
)
pretrain_parser.add_argument(
"--data-path",
type=str,
required=True,
help="Path to dataset in SynGen format to train/finetune on",
)
pretrain_parser.add_argument(
"--edge-name",
type=str,
required=True,
help="Name of the edge to be used during train/finetune",
)
pretrain_parser.add_argument(
"--pretraining-data-path",
type=str,
default=None,
help="Path to dataset in SynGen format to pretrain on",
)
pretrain_parser.add_argument(
"--pretraining-edge-name",
type=str,
default=None,
help="Name of the edge to be used during pretraining",
)
# model
pretrain_parser.add_argument(
"--model",
type=str,
default="gat_ec",
help=f"List of available models: {list(MODELS.keys())}",
)
pretrain_parser.add_argument(
"--hidden-dim",
type=int,
default=128,
help="Hidden feature dimension"
)
pretrain_parser.add_argument(
"--out-dim",
type=int,
default=32,
help="Output feature dimension",
)
pretrain_parser.add_argument(
"--num-classes",
type=int,
required=True,
help="Number of classes in the target column",
)
pretrain_parser.add_argument(
"--n-layers",
type=int,
default=1,
help="Multi-layer full neighborhood sampler layers",
)
for key in MODELS.keys():
MODELS[key].add_args(pretrain_parser)
# dataset
pretrain_parser.add_argument(
"--target-col",
type=str,
required=True,
help="Target column for downstream prediction",
)
pretrain_parser.add_argument(
"--train-ratio",
type=float,
default=0.8,
help="Ratio of data to use as train",
)
pretrain_parser.add_argument(
"--val-ratio",
type=float,
default=0.1,
help="Ratio of data to use as val",
)
pretrain_parser.add_argument(
"--test-ratio",
type=float,
default=0.1,
help="Ratio of data to use as test",
)
# training
pretrain_parser.add_argument(
"--learning-rate",
"--lr",
dest="learning_rate",
type=float,
default=1e-3,
help=f"Initial learning rate for optimizer",
)
pretrain_parser.add_argument(
"--weight-decay",
type=float,
default=0.1,
help=f"Weight decay for optimizer",
)
pretrain_parser.add_argument(
"--batch-size",
type=int,
default=128,
help="Pre-training and Fine-tuning dataloader batch size",
)
pretrain_parser.add_argument(
"--num-workers",
type=int,
default=8,
help="Number of dataloading workers",
)
pretrain_parser.add_argument(
"--shuffle",
action="store_true",
default=False,
help="Shuffles data each epoch"
)
pretrain_parser.add_argument(
"--pretrain-epochs",
type=int,
default=0,
help="Number of pre-training epochs",
)
pretrain_parser.add_argument(
"--finetune-epochs",
type=int,
default=1,
help="Number of finetuning epochs",
)
pretrain_parser.add_argument(
"--log-interval",
type=int,
default=1,
help="logging interval"
)
def run(self, args):
dict_args = vars(args)
finetune_feature_spec = SynGenDatasetFeatureSpec.instantiate_from_preprocessed(
dict_args['data_path']
)
pretrain_feature_spec = None
if dict_args['pretraining_data_path']:
pretrain_feature_spec = SynGenDatasetFeatureSpec.instantiate_from_preprocessed(
dict_args['pretraining_data_path']
)
if args.task == "ec":
out = train_ec(
args,
finetune_feature_spec=finetune_feature_spec,
pretrain_feature_spec=pretrain_feature_spec,
)
else:
raise ValueError("benchmark not supported")
log.info(out)
return out
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/cli/commands/pretrain.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from syngen.cli.commands.base_command import BaseCommand
from syngen.preprocessing.datasets import DATASETS
logger = logging.getLogger(__name__)
log = logger
class PreprocessingCommand(BaseCommand):
def init_parser(self, base_parser):
preprocessing_parser = base_parser.add_parser(
"preprocess",
help="Run Dataset Preprocessing",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
preprocessing_parser.set_defaults(action=self.run)
preprocessing_parser.add_argument(
"--dataset", type=str, default=None, required=True, choices=list(DATASETS.keys()),
help="Dataset to preprocess",
)
preprocessing_parser.add_argument(
"-sp", "--source-path", type=str, default=None, required=True,
help="Path to raw data",
)
preprocessing_parser.add_argument(
"-dp", "--destination-path", type=str, default=None, required=False,
help="Path to store the preprocessed data. Default is $source_path/syngen_preprocessed",
)
preprocessing_parser.add_argument(
"--download",
action='store_true',
help="Downloads the dataset if specified",
)
preprocessing_parser.add_argument(
"--cpu",
action='store_true',
            help='Performs the preprocessing without leveraging GPU'
)
preprocessing_parser.add_argument(
"--use-cache",
action='store_true',
help='Does nothing if the target preprocessed dataset exists'
)
for preprocessing_class in DATASETS.values():
preprocessing_class.add_cli_args(preprocessing_parser)
def run(self, args):
dict_args = vars(args)
dataset_name = dict_args.pop('dataset')
source_path = dict_args.pop('source_path')
destination_path = dict_args.pop('destination_path')
download = dict_args.pop('download')
gpu = not dict_args.pop('cpu')
use_cache = dict_args.pop('use_cache')
preprocessing_class = DATASETS[dataset_name]
if download:
try:
preprocessing_class(source_path=source_path,
destination_path=destination_path,
download=download,
**dict_args)
log.info(f"{dataset_name} successfully downloaded into {source_path}")
except NotImplementedError:
log.info(f"{dataset_name} does not support automatic downloading, please download the dataset manually")
else:
preprocessing = preprocessing_class(source_path=source_path,
destination_path=destination_path,
download=download,
**dict_args)
preprocessing.transform(gpu=gpu, use_cache=use_cache)
log.info(f"{dataset_name} successfully preprocessed into {preprocessing.destination_path}")
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/cli/commands/preprocess.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/cli/commands/__init__.py |