python_code (stringlengths 0-679k) | repo_name (stringlengths 9-41) | file_path (stringlengths 6-149)
---|---|---|
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Models for question-answering on SQuAD (Bert) modified from HuggingFace transformers ."""
import argparse
import logging
import os
import random
import timeit
import numpy as np
import torch
from torch.utils.data import DataLoader, SequentialSampler
from tqdm import tqdm
from transformers import (
BertConfig,
BertTokenizer,
squad_convert_examples_to_features,
)
from utils.modeling_bert import BertForQuestionAnswering
from transformers.data.metrics.squad_metrics import (
compute_predictions_logits,
squad_evaluate,
)
from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def to_list(tensor):
return tensor.detach().cpu().tolist()
def evaluate(args, model, tokenizer, prefix=""):
dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
#if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
# model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
all_results = []
start_time = timeit.default_timer()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
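        # Actual (unpadded) length of each sequence, inferred from the non-zero input ids;
        # used below to trim the padded positions off the start/end logits.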
seq_lens = torch.sum((batch[0] != 0).to(torch.int32), dim=1).numpy()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
# inputs = {
# "input_ids": batch[0],
# "attention_mask": batch[1].half() if args.data_type == 'fp16' else batch[1],
# "token_type_ids": batch[2],
# }
inputs = [batch[0], batch[1].half() if args.data_type == 'fp16' else batch[1], batch[2]]
example_indices = batch[3]
# outputs = model(**inputs)
outputs = model(*inputs)
for i, example_index in enumerate(example_indices):
eval_feature = features[example_index.item()]
unique_id = int(eval_feature.unique_id)
output = [to_list(output[i]) for output in outputs]
# Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler"
# models only use two.
if len(output) >= 5:
start_logits = output[0]
start_top_index = output[1]
end_logits = output[2]
end_top_index = output[3]
cls_logits = output[4]
result = SquadResult(
unique_id,
start_logits,
end_logits,
start_top_index=start_top_index,
end_top_index=end_top_index,
cls_logits=cls_logits,
)
else:
start_logits, end_logits = output
result = SquadResult(unique_id, start_logits[:seq_lens[i]], end_logits[:seq_lens[i]])
all_results.append(result)
evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset))
# Compute predictions
output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix))
output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix))
if args.version_2_with_negative:
output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix))
else:
output_null_log_odds_file = None
predictions = compute_predictions_logits(
examples,
features,
all_results,
args.n_best_size,
args.max_answer_length,
args.do_lower_case,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
args.verbose_logging,
args.version_2_with_negative,
args.null_score_diff_threshold,
tokenizer,
)
# Compute the F1 and exact scores.
results = squad_evaluate(examples, predictions)
return results
def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):
if args.local_rank not in [-1, 0] and not evaluate:
        # Make sure only the first process in distributed training processes the dataset; the others will use the cache
torch.distributed.barrier()
# Load data features from cache or dataset file
input_dir = args.data_dir if args.data_dir else "."
cached_features_file = os.path.join(
input_dir,
"cached_{}_{}_{}".format(
"dev" if evaluate else "train",
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
),
)
# Init features and dataset from cache if it exists
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features_and_dataset = torch.load(cached_features_file)
features, dataset, examples = (
features_and_dataset["features"],
features_and_dataset["dataset"],
features_and_dataset["examples"],
)
else:
logger.info("Creating features from dataset file at %s", input_dir)
if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)):
try:
import tensorflow_datasets as tfds
except ImportError:
raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.")
if args.version_2_with_negative:
logger.warn("tensorflow_datasets does not handle version 2 of SQuAD.")
tfds_examples = tfds.load("squad")
examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)
else:
processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if evaluate:
examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file)
else:
examples = processor.get_train_examples(args.data_dir, filename=args.train_file)
features, dataset = squad_convert_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=not evaluate,
return_dataset="pt",
threads=args.threads,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file)
if args.local_rank == 0 and not evaluate:
        # Make sure only the first process in distributed training processes the dataset; the others will use the cache
torch.distributed.barrier()
if output_examples:
return dataset, examples, features
return dataset
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
help="The input data dir. Should contain the .json files for the task."
+ "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
)
parser.add_argument(
"--train_file",
default=None,
type=str,
help="The input training file. If a data dir is specified, will look for the file there"
+ "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
)
parser.add_argument(
"--predict_file",
default=None,
type=str,
help="The input evaluation file. If a data dir is specified, will look for the file there"
+ "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
)
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.",
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument(
"--max_query_length",
default=64,
type=int,
help="The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length.",
)
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
)
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.",
)
parser.add_argument(
"--verbose_logging",
action="store_true",
help="If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.",
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features")
parser.add_argument("--model_type", type=str, help="ori, ths, thsext")
parser.add_argument("--data_type", type=str, help="fp32, fp16, bf16")
parser.add_argument('--ths_path', type=str, default='./lib/libth_transformer.so',
help='path of the pyt_fastertransformer dynamic lib file')
parser.add_argument('--int8_mode', type=int, default=0, metavar='NUMBER',
help='int8 mode (default: 0)', choices=[0, 1, 2, 3])
parser.add_argument('--remove_padding', action='store_true',
                        help='Remove the padding of the input sentences for the encoder.')
parser.add_argument('--sparse', action='store_true',
help='Sparsity for Ampere.')
args = parser.parse_args()
if args.doc_stride >= args.max_seq_length - args.max_query_length:
logger.warning(
"WARNING - You've set a doc stride which may be superior to the document length in some "
"examples. This could result in errors when building features from the examples. Please reduce the doc "
"stride or increase the maximum length to ensure the features are correctly built."
)
# Setup CUDA, GPU & distributed training
if args.local_rank == -1:
device = torch.device("cuda")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s",
args.local_rank,
device,
args.n_gpu,
)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
config = BertConfig.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = BertTokenizer.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
logger.info("Parameters %s", args)
# Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
logger.info("Loading checkpoint %s for evaluation", args.model_name_or_path)
checkpoints = [args.model_name_or_path]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
# Reload the model
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
use_ths = args.model_type.startswith('ths')
model = BertForQuestionAnswering.from_pretrained(checkpoint, torchscript=use_ths) # , force_download=True)
model.to(args.device)
if args.int8_mode != 0:
logger.info("int8_mode: " + str(args.int8_mode))
model.half()
elif args.data_type == 'fp16':
logger.info("Use fp16")
model.half()
elif args.data_type == 'bf16':
logger.info("Use bf16")
model.bfloat16()
if args.sparse:
logger.info("Sparse mode")
if args.model_type == 'thsext':
logger.info("Use custom BERT encoder for TorchScript")
from utils.encoder import EncoderWeights, CustomEncoder
weights = EncoderWeights(
model.config.num_hidden_layers, model.config.hidden_size,
torch.load(os.path.join(checkpoint, 'pytorch_model.bin'), map_location='cpu'))
if args.int8_mode != 0:
weights.to_int8(args.sparse, args.ths_path)
elif args.data_type == 'fp16':
weights.to_half()
elif args.data_type == 'bf16':
weights.to_bfloat16()
weights.to_cuda()
enc = CustomEncoder(model.config.num_hidden_layers,
model.config.num_attention_heads,
model.config.hidden_size//model.config.num_attention_heads,
weights,
int8_mode=args.int8_mode,
remove_padding=args.remove_padding,
sparse=args.sparse,
path=os.path.abspath(args.ths_path))
enc_ = torch.jit.script(enc)
model.replace_encoder(enc_)
if use_ths:
logger.info("Use TorchScript mode")
fake_input_id = torch.LongTensor(args.per_gpu_eval_batch_size, args.max_seq_length)
fake_input_id.fill_(1)
fake_input_id = fake_input_id.to(args.device)
fake_mask = torch.ones(args.per_gpu_eval_batch_size, args.max_seq_length).to(args.device)
fake_type_id = fake_input_id.clone().detach()
if args.data_type == 'fp16':
fake_mask = fake_mask.half()
elif args.data_type == 'bf16':
fake_mask = fake_mask.bfloat16()
model.eval()
model_ = torch.jit.trace(model, (fake_input_id, fake_mask, fake_type_id))
model = model_
# Evaluate
result = evaluate(args, model, tokenizer, prefix=global_step)
result = dict((k + ("_{}".format(global_step) if global_step else ""), v) for k, v in result.items())
results.update(result)
logger.info("Results: {}".format(results))
return results
if __name__ == "__main__":
main()
| FasterTransformer-main | examples/pytorch/bert/run_squad.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import shutil
import argparse
import tempfile
import urllib.request
import zipfile
MRPC_TRAIN = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt'
MRPC_TEST = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt'
def format_mrpc(mrpc_dir):
print("Processing MRPC...")
mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt")
mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt")
urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file)
urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file)
assert os.path.isfile(mrpc_train_file), "Train data not found at %s" % mrpc_train_file
assert os.path.isfile(mrpc_test_file), "Test data not found at %s" % mrpc_test_file
urllib.request.urlretrieve('https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc', os.path.join(mrpc_dir, "dev_ids.tsv"))
dev_ids = []
with open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding="utf8") as ids_fh:
for row in ids_fh:
dev_ids.append(row.strip().split('\t'))
with open(mrpc_train_file, encoding="utf8") as data_fh, \
open(os.path.join(mrpc_dir, "train.tsv"), 'w', encoding="utf8") as train_fh, \
open(os.path.join(mrpc_dir, "dev.tsv"), 'w', encoding="utf8") as dev_fh:
header = data_fh.readline()
train_fh.write(header)
dev_fh.write(header)
for row in data_fh:
label, id1, id2, s1, s2 = row.strip().split('\t')
if [id1, id2] in dev_ids:
dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
else:
train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
with open(mrpc_test_file, encoding="utf8") as data_fh, \
open(os.path.join(mrpc_dir, "test.tsv"), 'w', encoding="utf8") as test_fh:
header = data_fh.readline()
test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n")
for idx, row in enumerate(data_fh):
label, id1, id2, s1, s2 = row.strip().split('\t')
test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2))
print("\tCompleted!")
def main(arguments):
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', help='directory to save data to', type=str, default='glue_data')
args = parser.parse_args(arguments)
format_mrpc(args.data_dir)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| FasterTransformer-main | examples/pytorch/bert/utils/get_mrpc_data.py |
#!/usr/bin/env python3
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import pathlib
def main():
parser = argparse.ArgumentParser(
description="Script updating GPT config.ini hyper-parameters and requests parameters"
)
# config.ini path
parser.add_argument("--config-ini-path", required=True, help="Path to config.ini file to be updated")
# FT hyperparameters
parser.add_argument("--model-dir", type=str, required=True, help="Model path prefix")
parser.add_argument("--tensor-para-size", type=int, required=True, help="tensor parallelism size")
parser.add_argument("--pipeline-para-size", type=int, required=True, help="layer parallelism size")
parser.add_argument("--data-type", type=str, default="fp32", help="data type", choices=["fp32", "fp16", "bf16"])
# request
parser.add_argument("--request-batch-size", type=int, default=8, help="batch size")
parser.add_argument("--request-seq-len", type=int, default=32, help="output length")
args = parser.parse_args()
config_path = pathlib.Path(args.config_ini_path)
config = configparser.ConfigParser()
config.read(config_path)
config["ft_instance_hyperparameter"] = {
"tensor_para_size": args.tensor_para_size,
"pipeline_para_size": args.pipeline_para_size,
"data_type": args.data_type,
"is_sparse": 0,
"is_remove_padding": 1,
"int8_mode": 0,
"enable_custom_all_reduce": 0,
"model_name": "bert_base",
"model_dir": args.model_dir,
}
config["request"] = {
"request_batch_size": args.request_batch_size,
"request_seq_len": args.request_seq_len,
}
with config_path.open("w") as config_file:
config.write(config_file)
if __name__ == "__main__":
main()
| FasterTransformer-main | examples/pytorch/bert/utils/update_bert_config.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import re
import numpy as np
import torch
ACTIVATION_AMAX_NUM = 72
INT8O_GEMM_NUM = 8
TRT_FUSED_MHA_AMAX_NUM = 3
SCALE_RESERVE_NUM = 21
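# Layout of the per-layer "amaxList" tensor built by checkpoint_quantization() below (H = hidden size):
#   [0, 64)                       16 activation quantizer amaxes, 4 derived values each
#                                 (amax, amax/127, amax/127/127, 127/amax), padded up to ACTIVATION_AMAX_NUM
#   [ACTIVATION_AMAX_NUM, +9*H)   per-output-channel kernel amaxes of the 6 GEMM weights
#                                 (query/key/value/attention.output = H each, intermediate = 4*H, output = H)
#   next INT8O_GEMM_NUM           dequantization scales for the int8-output GEMMs
#   next TRT_FUSED_MHA_AMAX_NUM   QKV add-bias, softmax and bmm2 amaxes for the TRT fused MHA
#   last SCALE_RESERVE_NUM        reserved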
def checkpoint_quantization(init_dict, sparse, ths_path='./lib/libth_transformer.so', verbose=True):
print("Quantizing checkpoint ...")
torch.classes.load_library(ths_path)
weight_quantize = torch.ops.fastertransformer.weight_quantize
def init_graph():
layer_num = 0
        regex = re.compile(r'layer\.\d+')
amaxTotalNum = 0
for name, tensor_value in init_dict.items():
if "intermediate.dense.weight" in name and amaxTotalNum == 0:
amaxTotalNum = ACTIVATION_AMAX_NUM + 9 * tensor_value.size(1) + INT8O_GEMM_NUM + TRT_FUSED_MHA_AMAX_NUM + SCALE_RESERVE_NUM
if verbose:
print("amaxTotalNum", amaxTotalNum)
print("Hidden size:", tensor_value.size(1))
tmp = regex.findall(name)
if len(tmp) < 1:
continue
num_tmp = int(tmp[0].replace("layer.", ""))
if layer_num < num_tmp:
layer_num = num_tmp
layer_num = layer_num + 1
#add new var for amax
for i in range(layer_num):
init_dict["bert.encoder.layer.{}.amaxList".format(i)] = torch.zeros((amaxTotalNum,), dtype=torch.float32)
return layer_num, amaxTotalNum
layer_num, amaxTotalNum = init_graph()
kernel_name_list = ["attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"intermediate.dense",
"output.dense"]
amax_name_list = ["attention.self.query._input_quantizer",
"attention.self.query._aftergemm_quantizer",
"attention.self.matmul_q_input_quantizer",
"attention.self.key._aftergemm_quantizer",
"attention.self.matmul_k_input_quantizer",
"attention.self.value._aftergemm_quantizer",
"attention.self.matmul_v_input_quantizer",
"attention.self.softmax_input_quantizer",
"attention.self.matmul_a_input_quantizer",
"attention.output.dense._input_quantizer",
"attention.output.dense._aftergemm_quantizer",
"intermediate.dense._input_quantizer",
"intermediate.dense._aftergemm_quantizer",
"output.dense._input_quantizer",
"output.dense._aftergemm_quantizer",
"special_F2Bias_scale",
]
int8O_gemm_weight_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
int8O_gemm_weight_list = ["attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.self.matmul_k_input_quantizer",
"attention.self.matmul_v_input_quantizer",
"attention.output.dense",
"intermediate.dense",
"output.dense"]
int8O_gemm_input_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
int8O_gemm_input_list = ["attention.self.query._input_quantizer",
"attention.self.key._input_quantizer",
"attention.self.value._input_quantizer",
"attention.self.matmul_q_input_quantizer",
"attention.self.matmul_a_input_quantizer",
"attention.output.dense._input_quantizer",
"intermediate.dense._input_quantizer",
"output.dense._input_quantizer"]
int8O_gemm_output_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
int8O_gemm_output_list = ["attention.self.query._aftergemm_quantizer",
"attention.self.key._aftergemm_quantizer",
"attention.self.value._aftergemm_quantizer",
"attention.self.softmax_input_quantizer",
"attention.output.dense._input_quantizer",
"attention.output.dense._aftergemm_quantizer",
"intermediate.dense._aftergemm_quantizer",
"output.dense._aftergemm_quantizer"]
same_value_tuple_list = [("attention.self.query._input_quantizer",
"attention.self.key._input_quantizer",
"attention.self.value._input_quantizer",
"attention.output.add_residual_input_quantizer"),
("intermediate.dense._input_quantizer",
"output.add_residual_input_quantizer")]
factor = 1000000.0
for i in range(layer_num):
amaxList = np.zeros([amaxTotalNum]).astype(np.float32)
amax_id = 0
# verify some quantizers have same value. input_quantizer is per-tensor quantization
for same_value_tuple in same_value_tuple_list:
tmp_v = init_dict["bert.encoder.layer.{}.{}._amax".format(i, same_value_tuple[0])].numpy()
for same_value_name in same_value_tuple:
tmp_v_2 = init_dict["bert.encoder.layer.{}.{}._amax".format(i, same_value_name)].numpy()
assert(np.allclose(tmp_v, tmp_v_2))
for amax_name in amax_name_list:
if amax_name == "special_F2Bias_scale":
if i != layer_num - 1:
quant_max = init_dict["bert.encoder.layer.{}.{}._amax".format(i+1, amax_name_list[0])].item()
amax = abs(quant_max)
else:
#not used, placeholder
amax = 1.0
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
continue
quant_max = init_dict["bert.encoder.layer.{}.{}._amax".format(i, amax_name)].item()
amax = abs(quant_max)#round(abs(quant_max)*factor)/factor
if amax_name in int8O_gemm_input_list:
int8O_gemm_input_amax_list[int8O_gemm_input_list.index(amax_name)] = amax
if amax_name == "attention.self.query._input_quantizer":
int8O_gemm_input_amax_list[int8O_gemm_input_list.index("attention.self.key._input_quantizer")] = amax
int8O_gemm_input_amax_list[int8O_gemm_input_list.index("attention.self.value._input_quantizer")] = amax
if amax_name in int8O_gemm_output_list:
int8O_gemm_output_amax_list[int8O_gemm_output_list.index(amax_name)] = amax
if amax_name in int8O_gemm_weight_list:
int8O_gemm_weight_amax_list[int8O_gemm_weight_list.index(amax_name)] = amax
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
# if verbose:
# print(i, amax_name)
# print('quant_max:', quant_max)
# print('amax:', amax)
if verbose:
print("done process layer_{} activation amax".format(i))
#kernel amax starts from ACTIVATION_AMAX_NUM
assert amax_id == 64
amax_id = ACTIVATION_AMAX_NUM
for kernel_id, kernel_name in enumerate(kernel_name_list):
kernel = init_dict["bert.encoder.layer.{}.{}.weight".format(i, kernel_name)].transpose(-1, -2).contiguous()
quant_max2 = init_dict["bert.encoder.layer.{}.{}._weight_quantizer._amax".format(i, kernel_name)]
amax2 = abs(quant_max2)
if (amax2.dim() == 0):
quant_max_processed = torch.full((kernel.size(1),), amax2.item(), dtype=amax2.dtype, device=amax2.device)
else:
quant_max_processed = amax2.view(-1)
kernel_processed = weight_quantize(kernel, quant_max_processed.cuda(), sparse)
init_dict["bert.encoder.layer.{}.{}.weight".format(i, kernel_name)] = kernel_processed
if kernel_name in int8O_gemm_weight_list:
int8O_gemm_weight_amax_list[int8O_gemm_weight_list.index(kernel_name)] = quant_max_processed[0]
for e in quant_max_processed:
amaxList[amax_id] = e
amax_id += 1
# if verbose:
# print(i, kernel_name)
# print('kernel:', kernel)
# print('quant_max2:', quant_max2)
# print('quant_max_processed_:', quant_max_processed)
#for int8O gemm deQuant
for j in range(INT8O_GEMM_NUM):
amaxList[amax_id] = (int8O_gemm_input_amax_list[j]*int8O_gemm_weight_amax_list[j])/(127.0*int8O_gemm_output_amax_list[j])
amax_id += 1
#for trt fused MHA amax
#### QKV_addBias_amax
amaxList[amax_id] = np.maximum(np.maximum(amaxList[8],amaxList[16]), amaxList[24])
amax_id += 1
#### softmax amax
amaxList[amax_id] = amaxList[32]
amax_id += 1
#### bmm2 amax
amaxList[amax_id] = amaxList[36]
amax_id += 1
init_dict["bert.encoder.layer.{}.amaxList".format(i)] = torch.tensor(amaxList, dtype=torch.float32)
if verbose:
print("done process layer_{} kernel weight".format(i))
print("Quantizing checkpoint done.")
return init_dict
| FasterTransformer-main | examples/pytorch/bert/utils/checkpoint_quantization.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import torch
import torch.distributed as dist
from transformers import BertConfig
from transformers.modeling_bert import BertEncoder
from .checkpoint_quantization import checkpoint_quantization
class EncoderWeights(object):
def __init__(self, layer_num, hidden_dim, weights=None, sparse=False, tensor_para_size=1, pipeline_para_size=1):
"""weights need be a state_dict of bert model"""
self.layer_num = layer_num
self.int8 = False
self.hidden_dim = hidden_dim
self.weights = {}
self.tensor_para_size = tensor_para_size
self.pipeline_para_size = pipeline_para_size
self.use_mpi = dist.is_mpi_available()
if self.use_mpi:
try:
dist.init_process_group(backend='mpi')
except:
print("[INFO] WARNING: Exception occurred in dist.init_process_group(backend='mpi'). Maybe the process group has been initialized somewhere else.")
else:
print("[INFO] MPI is not available in this PyTorch build.")
assert tensor_para_size == 1, "[FATAL] MPI is required for tensor_para_size > 1."
assert pipeline_para_size == 1, "[FATAL] MPI is required for pipeline_para_size > 1."
self.rank = dist.get_rank() if self.use_mpi else 0
self.device_count = torch.cuda.device_count()
self.device = self.rank % self.device_count
torch.cuda.set_device(self.device)
world_size = dist.get_world_size() if self.use_mpi else 1
self.tensor_para_rank = self.rank % self.tensor_para_size
self.pipeline_para_rank = self.rank // self.tensor_para_size
start_layer = self.pipeline_para_rank * self.layer_num // self.pipeline_para_size
end_layer = (self.pipeline_para_rank + 1) * self.layer_num // self.pipeline_para_size
if weights is None:
self._generated_weights = True
for i in range(layer_num):
pre = 'bert.encoder.layer.' + str(i) + '.'
self.weights[pre + 'attention.self.query.weight'] = torch.zeros(hidden_dim, hidden_dim)
self.weights[pre + 'attention.self.query.bias'] = torch.zeros(hidden_dim)
self.weights[pre + 'attention.self.key.weight'] = torch.zeros(hidden_dim, hidden_dim)
self.weights[pre + 'attention.self.key.bias'] = torch.zeros(hidden_dim)
self.weights[pre + 'attention.self.value.weight'] = torch.zeros(hidden_dim, hidden_dim)
self.weights[pre + 'attention.self.value.bias'] = torch.zeros(hidden_dim)
self.weights[pre + 'attention.output.dense.weight'] = torch.zeros(hidden_dim, hidden_dim)
self.weights[pre + 'attention.output.dense.bias'] = torch.zeros(hidden_dim)
self.weights[pre + 'attention.output.LayerNorm.weight'] = torch.zeros(hidden_dim)
self.weights[pre + 'attention.output.LayerNorm.bias'] = torch.zeros(hidden_dim)
self.weights[pre + 'intermediate.dense.weight'] = torch.zeros(4 * hidden_dim, hidden_dim)
self.weights[pre + 'intermediate.dense.bias'] = torch.zeros(4 * hidden_dim)
self.weights[pre + 'output.dense.weight'] = torch.zeros(hidden_dim, 4 * hidden_dim)
self.weights[pre + 'output.dense.bias'] = torch.zeros(hidden_dim)
self.weights[pre + 'output.LayerNorm.weight'] = torch.zeros(hidden_dim)
self.weights[pre + 'output.LayerNorm.bias'] = torch.zeros(hidden_dim)
for k, v in self.weights.items():
if not k.endswith('_amax'):
self.weights[k] = torch.nn.init.uniform_(v, -1, 1)
if sparse:
for k, v in self.weights.items():
if 'query.weight' in k or 'key.weight' in k or 'value.weight' in k or 'dense.weight' in k:
v_shape = v.shape
v = v.view(-1, 4)
_, indices = torch.topk(torch.abs(v), 2, dim=-1, largest=False)
v.scatter_(1, indices, 0)
self.weights[k] = v.view(v_shape)
else:
self._generated_weights = False
for k, v in weights.items():
ks = k.split('.')
if ks[-2] == 'LayerNorm':
if ks[-1] == 'gamma':
ks[-1] = 'weight'
elif ks[-1] == 'beta':
ks[-1] = 'bias'
self.weights['.'.join(ks)] = v
def listed_weights(self):
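        # Returns per-layer weights stacked along dim 0, in the fixed order consumed by the FasterTransformer op:
        # query/key/value (weight, bias), attention output dense (weight, bias), attention LayerNorm (weight, bias),
        # intermediate dense (weight, bias), output dense (weight, bias), output LayerNorm (weight, bias),
        # i.e. 16 tensors; the int8 path additionally appends amaxList and h_amaxList (18 in total).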
ret = []
start_layer = self.pipeline_para_rank * self.layer_num // self.pipeline_para_size
end_layer = (self.pipeline_para_rank + 1) * self.layer_num // self.pipeline_para_size
if not self.int8:
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' + 'attention.self.query.weight'].transpose(-1, -2)
for layer_idx in range(start_layer, end_layer)], 0).contiguous()) # 0
ret[-1] = ret[-1].split(ret[-1].shape[-1] // self.tensor_para_size,
dim=-1)[self.tensor_para_rank].contiguous()
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'attention.self.query.bias'] for layer_idx in range(start_layer, end_layer)], 0).contiguous())
ret[-1] = ret[-1].split(ret[-1].shape[-1] // self.tensor_para_size,
dim=-1)[self.tensor_para_rank].contiguous()
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' + 'attention.self.key.weight'].transpose(-1, -2)
for layer_idx in range(start_layer, end_layer)], 0).contiguous()) # 2
ret[-1] = ret[-1].split(ret[-1].shape[-1] // self.tensor_para_size,
dim=-1)[self.tensor_para_rank].contiguous()
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'attention.self.key.bias'] for layer_idx in range(start_layer, end_layer)], 0).contiguous())
ret[-1] = ret[-1].split(ret[-1].shape[-1] // self.tensor_para_size,
dim=-1)[self.tensor_para_rank].contiguous()
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' + 'attention.self.value.weight'].transpose(-1, -2)
for layer_idx in range(start_layer, end_layer)], 0).contiguous()) # 4
ret[-1] = ret[-1].split(ret[-1].shape[-1] // self.tensor_para_size,
dim=-1)[self.tensor_para_rank].contiguous()
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'attention.self.value.bias'] for layer_idx in range(start_layer, end_layer)], 0).contiguous())
ret[-1] = ret[-1].split(ret[-1].shape[-1] // self.tensor_para_size,
dim=-1)[self.tensor_para_rank].contiguous()
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' + 'attention.output.dense.weight'].transpose(-1, -2)
for layer_idx in range(start_layer, end_layer)], 0).contiguous()) # 6
ret[-1] = ret[-1].split(ret[-1].shape[1] // self.tensor_para_size,
dim=1)[self.tensor_para_rank].contiguous()
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'attention.output.dense.bias'] for layer_idx in range(start_layer, end_layer)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'attention.output.LayerNorm.weight'] for layer_idx in range(start_layer, end_layer)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'attention.output.LayerNorm.bias'] for layer_idx in range(start_layer, end_layer)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' + 'intermediate.dense.weight'].transpose(-1, -2)
for layer_idx in range(start_layer, end_layer)], 0).contiguous()) # 10
ret[-1] = ret[-1].split(ret[-1].shape[-1] // self.tensor_para_size,
dim=-1)[self.tensor_para_rank].contiguous()
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'intermediate.dense.bias'] for layer_idx in range(start_layer, end_layer)], 0).contiguous())
ret[-1] = ret[-1].split(ret[-1].shape[-1] // self.tensor_para_size,
dim=-1)[self.tensor_para_rank].contiguous()
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' + 'output.dense.weight'].transpose(-1, -2)
for layer_idx in range(start_layer, end_layer)], 0).contiguous()) # 12
ret[-1] = ret[-1].split(ret[-1].shape[1] // self.tensor_para_size,
dim=1)[self.tensor_para_rank].contiguous()
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'output.dense.bias'] for layer_idx in range(start_layer, end_layer)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'output.LayerNorm.weight'] for layer_idx in range(start_layer, end_layer)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'output.LayerNorm.bias'] for layer_idx in range(start_layer, end_layer)], 0).contiguous())
else:
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'attention.self.query.weight'] for layer_idx in range(self.layer_num)], 0).contiguous()) # 0
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'attention.self.query.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' + 'attention.self.key.weight']
for layer_idx in range(self.layer_num)], 0).contiguous()) # 2
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'attention.self.key.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'attention.self.value.weight'] for layer_idx in range(self.layer_num)], 0).contiguous()) # 4
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'attention.self.value.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'attention.output.dense.weight'] for layer_idx in range(self.layer_num)], 0).contiguous()) # 6
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'attention.output.dense.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'attention.output.LayerNorm.weight'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'attention.output.LayerNorm.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' + 'intermediate.dense.weight']
for layer_idx in range(self.layer_num)], 0).contiguous()) # 10
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'intermediate.dense.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' + 'output.dense.weight']
for layer_idx in range(self.layer_num)], 0).contiguous()) # 12
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'output.dense.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'output.LayerNorm.weight'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'output.LayerNorm.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' + 'amaxList']
for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['bert.encoder.layer.' + str(layer_idx) + '.' +
'h_amaxList'] for layer_idx in range(self.layer_num)], 0).contiguous())
return ret
def to_cuda(self):
if not self.int8:
for k, v in self.weights.items():
self.weights[k] = v.cuda()
else:
h_scale_list = {}
for k, v in self.weights.items():
if "amaxList" in k:
k_h = k.replace("amaxList", "h_amaxList")
h_scale_list[k_h] = v
self.weights[k] = v.cuda()
for k, v in h_scale_list.items():
self.weights[k] = v
def to_half(self):
if self.int8:
raise RuntimeError("Cannot cast to half if the weights have been casted to int8.")
for k, v in self.weights.items():
self.weights[k] = v.half()
def to_bfloat16(self):
if self.int8:
raise RuntimeError("Cannot cast to bfloat16 if the weights have been casted to int8.")
for k, v in self.weights.items():
self.weights[k] = v.bfloat16()
def to_int8(self, sparse=False, ths_path='./lib/libth_transformer.so'):
if self._generated_weights:
amax_tensor_1 = torch.Tensor(self.hidden_dim).fill_(127.)
amax_tensor_2 = torch.Tensor(self.hidden_dim * 4).fill_(127.)
for i in range(self.layer_num):
pre = 'bert.encoder.layer.' + str(i) + '.'
self.weights[pre + 'attention.self.query._input_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'attention.self.query._weight_quantizer._amax'] = amax_tensor_1
self.weights[pre + 'attention.self.query._aftergemm_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'attention.self.key._input_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'attention.self.key._weight_quantizer._amax'] = amax_tensor_1
self.weights[pre + 'attention.self.key._aftergemm_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'attention.self.value._input_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'attention.self.value._weight_quantizer._amax'] = amax_tensor_1
self.weights[pre + 'attention.self.value._aftergemm_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'attention.self.matmul_q_input_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'attention.self.matmul_k_input_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'attention.self.matmul_v_input_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'attention.self.matmul_a_input_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'attention.self.softmax_input_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'attention.output.dense._input_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'attention.output.dense._weight_quantizer._amax'] = amax_tensor_1
self.weights[pre + 'attention.output.dense._aftergemm_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'attention.output.add_local_input_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'attention.output.add_residual_input_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'intermediate.dense._input_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'intermediate.dense._weight_quantizer._amax'] = amax_tensor_2
self.weights[pre + 'intermediate.dense._aftergemm_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'output.dense._input_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'output.dense._weight_quantizer._amax'] = amax_tensor_1
self.weights[pre + 'output.dense._aftergemm_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'output.add_local_input_quantizer._amax'] = torch.tensor(127.)
self.weights[pre + 'output.add_residual_input_quantizer._amax'] = torch.tensor(127.)
if 'bert.encoder.layer.0.attention.self.query._input_quantizer._amax' not in self.weights:
raise RuntimeError("There is no quantization node in the checkpoint, cannot be quantized to int8.")
if self.int8:
return
self.int8 = True
for k, v in self.weights.items():
if k.endswith('bias') or k.endswith('LayerNorm.weight'):
self.weights[k] = v.half()
elif k.endswith('weight'):
self.weights[k] = v.float().cuda()
else:
self.weights[k] = v.float().cpu()
self.weights = checkpoint_quantization(self.weights, sparse, ths_path, verbose=False)
class CustomEncoder(torch.nn.Module):
def __init__(self, layer_num, head_num, head_size, weights,
int8_mode=0, remove_padding=False, sparse=False,
path='./lib/libth_transformer.so', tensor_para_size=1,
pipeline_para_size=1):
super().__init__()
self.layer_num = layer_num
self.remove_padding = remove_padding
self.int8_mode = int8_mode
torch.classes.load_library(path)
weights_ = weights.listed_weights()
self.use_mpi = dist.is_mpi_available()
if self.use_mpi:
try:
dist.init_process_group(backend='mpi')
except:
print("[INFO] WARNING: Exception occurred in dist.init_process_group(backend='mpi'). Maybe the process group has been initialized somewhere else.")
else:
print("[INFO] MPI is not available in this PyTorch build.")
assert tensor_para_size == 1, "[FATAL] MPI is required for tensor_para_size > 1."
assert pipeline_para_size == 1, "[FATAL] MPI is required for pipeline_para_size > 1."
if int8_mode == 0:
assert len(weights_) == 16
try:
self.encoders = torch.classes.FasterTransformer.Bert(
*weights_,
head_num, head_size, 4 * head_num * head_size, remove_padding, layer_num, sparse, 1.0,
tensor_para_size, pipeline_para_size)
except:
# legacy ths for 20.03 image
self.encoders = torch.classes.FasterTransformerBert(
*weights_,
head_num, head_size, 4 * head_num * head_size, remove_padding, layer_num, sparse, 1.0,
tensor_para_size, pipeline_para_size)
else:
assert len(weights_) == 18
assert tensor_para_size == 1, "INT8 BERT still only support tensor_para_size = 1"
assert pipeline_para_size == 1, "INT8 BERT still only support pipeline_para_size = 1"
try:
self.encoders = torch.classes.FasterTransformer.INT8Bert(
*weights_,
head_num, head_size, remove_padding, layer_num, int8_mode, sparse, 1.0)
except:
# legacy ths for 20.03 image
self.encoders = torch.classes.FasterTransformerINT8Bert(
*weights_,
head_num, head_size, remove_padding, layer_num, int8_mode, sparse, 1.0)
def forward(self, hidden_states, attention_mask, sequence_lengths):
hidden_states = self.encoders.forward(hidden_states, sequence_lengths)
return (hidden_states,)
class HuggingFaceEncoder(torch.nn.Module):
def __init__(self, layer_num, head_num, head_size, weights=None):
super().__init__()
hidden_dim = head_num * head_size
# TODO(bhsueh) The implementation of hidden_act='gelu' is different to FT's (and google BERT) implementation
# FT's implementation is equivalent to hidden_act='gelu_new', but there are some issues for int8 sparse under gelu_new
conf = BertConfig(hidden_size=hidden_dim, intermediate_size=4 * hidden_dim,
num_attention_heads=head_num, num_hidden_layers=layer_num, hidden_act='gelu')
self.encoder = BertEncoder(conf)
w = {}
for k, v in weights.weights.items():
if k.startswith('bert.encoder') and not k.endswith('_amax'):
w[k[13:]] = weights.weights[k]
self.encoder.load_state_dict(w)
self.head_mask = [None] * layer_num
def forward(self, hidden_states, attention_mask):
extended_attention_mask = (1.0 - attention_mask) * -10000.0
output = self.encoder(hidden_states, extended_attention_mask, self.head_mask)
return output
| FasterTransformer-main | examples/pytorch/bert/utils/encoder.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Convert a HuggingFace BERT model. Uses https://huggingface.co/bert-base-uncased as the demo.
'''
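# Example (illustrative only; the input model and the output directory are placeholders):
#   python huggingface_bert_convert.py -in_file bert-base-uncased -saved_dir ./ft_bert -infer_tensor_para_size 1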
import argparse
import configparser
import multiprocessing
import numpy as np
import pathlib
import torch
import os
import sys
# __root_package_path__ = pathlib.Path(__file__).parent.parent.parent.parent.parent.absolute().as_posix()
# if __root_package_path__ not in sys.path:
# print(
# f"[ERROR] add project root directory to your PYTHONPATH with "
# f"'export PYTHONPATH={__root_package_path__}:${{PYTHONPATH}}'"
# )
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../../..")
sys.path.append(dir_path)
from examples.pytorch.utils import torch2np, safe_transpose, WEIGHT2DTYPE
from transformers import BertModel # transformers-4.10.0-py3
def split_and_convert_process(i, saved_dir, factor, key, args, val):
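    # Tensor-parallel split strategy: output-projection biases and LayerNorm parameters are replicated
    # (written once, by rank 0 only); the output-projection weights (attention.output.dense, output.dense)
    # are split along axis 0 (row-parallel); query/key/value and intermediate.dense weights and biases
    # are split along the last axis (column-parallel).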
if key.find("attention.output.dense.bias") != -1 or \
key.find("attention.output.LayerNorm.weight") != -1 or \
key.find("attention.output.LayerNorm.bias") != -1 or \
key.find("output.dense.bias") != -1 or \
key.find("output.LayerNorm.weight") != -1 or \
key.find("output.LayerNorm.bias") != -1 :
# shared weights, only need to convert the weights of rank 0
if i == 0:
saved_path = saved_dir + "/model." + key + ".bin"
val.tofile(saved_path)
elif key.find("attention.output.dense.weight") != -1 or key.find("output.dense.weight") != -1:
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = f"{saved_dir}/model.{key}.{i * factor + j}.bin"
split_vals[j].tofile(saved_path)
elif key.find("attention.self.query.weight") != -1 or \
key.find("attention.self.query.bias") != -1 or \
key.find("attention.self.key.weight") != -1 or \
key.find("attention.self.key.bias") != -1 or \
key.find("attention.self.value.weight") != -1 or \
key.find("attention.self.value.bias") != -1 or \
key.find("intermediate.dense.weight") != -1 or \
key.find("intermediate.dense.bias") != -1:
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
else:
print("[WARNING] cannot convert key '{}'".format(key))
def split_and_convert(args):
saved_dir = args.saved_dir + "/%d-gpu/" % args.infer_tensor_para_size
    if not os.path.exists(saved_dir):
os.makedirs(saved_dir)
ckpt_name = args.in_file
t_gpu_num = args.training_tensor_para_size
i_gpu_num = args.infer_tensor_para_size
    assert i_gpu_num % t_gpu_num == 0
    factor = i_gpu_num // t_gpu_num
    # load the HuggingFace BERT model
torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = BertModel.from_pretrained(args.in_file).to(torch_device)
np_weight_data_type = WEIGHT2DTYPE[args.weight_data_type]
hf_config = vars(model.config)
# NOTE: save parameters to config files (loaded by triton backends)
config = configparser.ConfigParser()
config["bert"] = {}
try:
config["bert"]["model_name"] = "bert" if hf_config["model_type"] == '' else hf_config["model_type"]
config["bert"]["position_embedding_type"] = str(hf_config["position_embedding_type"])
config["bert"]["hidden_size"] = str(hf_config["hidden_size"])
config["bert"]["num_layer"] = str(hf_config["num_hidden_layers"])
config["bert"]["head_num"] = str(hf_config["num_attention_heads"])
config["bert"]["size_per_head"] = str(hf_config["hidden_size"] // hf_config["num_attention_heads"])
config["bert"]["activation_type"] = str(hf_config["hidden_act"])
config["bert"]["inter_size"] = str(hf_config["intermediate_size"])
config["bert"]["max_position_embeddings"] = str(hf_config["max_position_embeddings"])
config["bert"]["layer_norm_eps"] = str(hf_config["layer_norm_eps"])
config["bert"]["weight_data_type"] = args.weight_data_type
config["bert"]["tensor_para_size"] = str(args.infer_tensor_para_size)
with open(saved_dir + "/config.ini", 'w') as configfile:
config.write(configfile)
except:
print(f"Fail to save the config in config.ini.")
torch.multiprocessing.set_start_method("spawn")
torch.multiprocessing.set_sharing_strategy("file_system")
pool = multiprocessing.Pool(args.processes)
for name, param in model.named_parameters():
if name.find("weight") == -1 and name.find("bias") == -1:
continue
else:
pool.starmap(split_and_convert_process,
[(0, saved_dir, factor, name, args,
torch2np(safe_transpose(param.detach()), np_weight_data_type))], )
pool.close()
pool.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-saved_dir', '-o', type=str, help='file name of output file', required=True)
parser.add_argument('-in_file', '-i', type=str, help='file name of input checkpoint file', required=True)
parser.add_argument('-training_tensor_para_size', '-t_g', type=int, help='The size of tensor parallelism for training.', default=1)
parser.add_argument('-infer_tensor_para_size', '-i_g', type=int, help='The size of tensor parallelism for inference.', required=True)
parser.add_argument("-processes", "-p", type=int, help="How many processes to spawn for conversion (default: 4)", default=4)
parser.add_argument("-weight_data_type", type=str, default="fp32", choices=["fp32", "fp16"])
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print("{}: {}".format(key, vars(args)[key]))
print("========================================")
split_and_convert(args)
| FasterTransformer-main | examples/pytorch/bert/utils/huggingface_bert_convert.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model modified from HuggingFace transformers. """
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.modeling_bert import BertPreTrainedModel, BertEmbeddings, BertEncoder, BertPooler
class BertModel(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
self.use_ext_encoder = False
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if self.use_ext_encoder:
# if attention_mask.dim() == 3:
# extended_attention_mask = attention_mask
# elif attention_mask.dim() == 2:
# extended_attention_mask = attention_mask[:, None, :].repeat(1, input_shape[1], 1)
# else:
# raise ValueError(
# "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
# input_shape, attention_mask.shape
# )
# )
assert attention_mask.dim() == 2
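            # The external (FasterTransformer) encoder expects a [batch, 1, seq, seq] mask, built here as the
            # outer product of the 2D padding mask with itself, plus per-example sequence lengths
            # (e.g. for the remove-padding path).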
extended_attention_mask = attention_mask.view(-1, 1, 1, attention_mask.size(-1))
m_2 = extended_attention_mask.transpose(-1, -2)
extended_attention_mask = extended_attention_mask * m_2
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
seq_lens = torch.sum(attention_mask, 1, dtype=torch.int32).cuda()
else:
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
if self.use_ext_encoder:
encoder_outputs = self.encoder(embedding_output, extended_attention_mask, seq_lens)
else:
head_mask = [None] * self.config.num_hidden_layers
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
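# Illustration (not part of the original file; helper name is hypothetical): a minimal
# sketch of the mask handling on the use_ext_encoder path above. The 2D padding mask of
# shape [batch, seq_len] is expanded and multiplied by its own transpose, which gives a
# [batch, 1, seq_len, seq_len] mask that is 1 only where both the query and the key
# positions are real tokens; the per-example sequence lengths are the row sums of the
# padding mask.
def _example_ext_encoder_mask(attention_mask):
    # attention_mask: tensor of shape [batch, seq_len], 1 for real tokens, 0 for padding
    extended = attention_mask.view(-1, 1, 1, attention_mask.size(-1))
    extended = extended * extended.transpose(-1, -2)  # outer product of the mask with itself
    seq_lens = torch.sum(attention_mask, 1, dtype=torch.int32)
    return extended, seq_lens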
class BertForQuestionAnswering(BertPreTrainedModel):
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
):
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,)
return outputs # start_logits, end_logits
def replace_encoder(self, encoder):
self.bert.use_ext_encoder = True
self.bert.encoder = encoder
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
labels=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
):
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
def replace_encoder(self, encoder):
self.bert.use_ext_encoder = True
self.bert.encoder = encoder
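# Usage sketch (illustrative, not part of the original file): replace_encoder swaps the
# stock BertEncoder for an external encoder (e.g. a FasterTransformer wrapper) that
# consumes (hidden_states, attention_mask, seq_lens). `custom_encoder` below is a
# hypothetical, already-built encoder module.
#   model = BertForSequenceClassification.from_pretrained("bert-base-uncased")
#   model.replace_encoder(custom_encoder)  # also sets bert.use_ext_encoder = True
#   logits = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)[0]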
| FasterTransformer-main | examples/pytorch/bert/utils/modeling_bert.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import numpy as np
from pathlib import Path
import torch
import os
# import sys
# dir_path = os.path.dirname(os.path.realpath(__file__))
# sys.path.append(dir_path + "/../../../..")
from transformers import BertModel
def get_weight_data_type(data_type):
if data_type == "fp32":
return np.float32
elif data_type == "fp16":
return np.float16
else:
assert False, f"Invalid weight data type {data_type}"
def split_and_convert(args):
    assert args.infer_gpu_num == 1, "only args.infer_gpu_num == 1 is supported for now"
saved_dir = args.saved_dir + "/%d-gpu/" % args.infer_gpu_num
    if not os.path.exists(saved_dir):
        os.makedirs(saved_dir)
ckpt_name = args.in_file
torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
bert_model = BertModel.from_pretrained(args.in_file).to(torch_device)
try:
config = configparser.ConfigParser()
config["bert"] = {}
for key in vars(args):
config["bert"][key] = f"{vars(args)[key]}"
for k, v in vars(bert_model.config).items():
config["bert"][k] = f"{v}"
config["bert"]["weight_data_type"] = args.weight_data_type
with open((Path(saved_dir) / f"config.ini").as_posix(), 'w') as configfile:
config.write(configfile)
except Exception as e:
print(f"Fail to save the config in config.ini. due to {e}")
np_weight_data_type = get_weight_data_type(args.weight_data_type)
'''
huggingface_model_name_pattern = [
"attention.output.add_local_input_quantizer._amax",
"attention.output.add_residual_input_quantizer._amax",
"attention.output.dense.bias",
"attention.output.dense._input_quantizer._amax",
"attention.output.dense.weight",
"attention.output.dense._weight_quantizer._amax",
"attention.output.LayerNorm.bias",
"attention.output.LayerNorm.weight",
"attention.self.key.bias",
"attention.self.key._input_quantizer._amax",
"attention.self.key.weight",
"attention.self.key._weight_quantizer._amax",
"attention.self.matmul_a_input_quantizer._amax",
"attention.self.matmul_k_input_quantizer._amax",
"attention.self.matmul_q_input_quantizer._amax",
"attention.self.matmul_v_input_quantizer._amax",
"attention.self.query.bias",
"attention.self.query._input_quantizer._amax",
"attention.self.query.weight",
"attention.self.query._weight_quantizer._amax",
"attention.self.value.bias",
"attention.self.value._input_quantizer._amax",
"attention.self.value.weight",
"attention.self.value._weight_quantizer._amax",
"intermediate.dense.bias",
"intermediate.dense._input_quantizer._amax",
"intermediate.dense.weight",
"intermediate.dense._weight_quantizer._amax",
"intermediate.intermediate_act_fn_input_quantizer._amax",
"output.add_local_input_quantizer._amax",
"output.add_residual_input_quantizer._amax",
"output.dense.bias",
"output.dense._input_quantizer._amax",
"output.dense.weight",
"output.dense._weight_quantizer._amax",
"output.LayerNorm.bias",
"output.LayerNorm.weight",
]
'''
model = {}
for key, param in bert_model.named_parameters():
model[key] = param
for key in model:
if key == "bert.embeddings.word_embeddings.weight" or \
key == "bert.embeddings.position_embeddings.weight" or \
key == "bert.embeddings.token_type_embeddings.weight":
weight = model[key]
weight.detach().cpu().numpy().astype(np_weight_data_type).tofile(f"{saved_dir}/bert.{key}.bin")
print(f"convert {key}")
elif key.find("self.query") == -1 and key.find("self.key") == -1 and key.find("self.value") == -1:
            # If the weight is not a query, key or value, no concatenation is needed. Convert it directly.
weight = model[key]
if weight.dim() == 2:
weight = weight.transpose(1, 0)
weight.detach().cpu().numpy().astype(np_weight_data_type).tofile(f"{saved_dir}/bert.{key}.bin")
print(f"convert {key}")
elif key.find("self.query.bias") != -1:
q_name = key
k_name = key.replace("query", "key")
v_name = key.replace("query", "value")
q_bias = model[q_name]
k_bias = model[k_name]
v_bias = model[v_name]
qkv_bias = torch.cat([q_bias, k_bias, v_bias])
new_name = key.replace("query", "query_key_value")
qkv_bias.detach().cpu().numpy().astype(np_weight_data_type).tofile(f"{saved_dir}/bert.{new_name}.bin")
print(f"convert {new_name}")
elif key.find("self.query._input_quantizer") != -1:
new_name = key.replace("query", "query_key_value")
model[key].detach().cpu().numpy().astype(np_weight_data_type).tofile(f"{saved_dir}/bert.{new_name}.bin")
print(f"convert {new_name}")
elif key.find("self.query.weight") != -1:
q_name = key
k_name = key.replace("query", "key")
v_name = key.replace("query", "value")
q_weight = model[q_name].transpose(1, 0)
k_weight = model[k_name].transpose(1, 0)
v_weight = model[v_name].transpose(1, 0)
qkv_weight = torch.cat([q_weight, k_weight, v_weight], axis=-1)
new_name = key.replace("query", "query_key_value")
qkv_weight.detach().cpu().numpy().astype(np_weight_data_type).tofile(f"{saved_dir}/bert.{new_name}.bin")
print(f"convert {new_name}")
elif key.find("self.query._weight_quantizer") != -1:
# PER CHANNEL
'''
q_name = key
k_name = key.replace("query", "key")
v_name = key.replace("query", "value")
q_quantizer = model[q_name]
k_quantizer = model[k_name]
v_quantizer = model[v_name]
qkv_quantizer = torch.cat([q_quantizer, k_quantizer, v_quantizer])
new_name = key.replace("query", "query_key_value")
qkv_quantizer.detach().cpu().numpy().astype(np_weight_data_type).tofile(f"{saved_dir}/{new_name}.bin")
# print(f"name: {new_name}, {qkv_quantizer.shape}")
'''
# PER TENSOR, the checkpoint has separate q, k, v quantizers, need to choose max one
q_name = key
k_name = key.replace("query", "key")
v_name = key.replace("query", "value")
q_quantizer = model[q_name].view(1)
k_quantizer = model[k_name].view(1)
v_quantizer = model[v_name].view(1)
qkv_quantizer = torch.max(torch.cat([q_quantizer, k_quantizer, v_quantizer]))
new_name = key.replace("query", "query_key_value")
qkv_quantizer.detach().cpu().numpy().astype(np_weight_data_type).tofile(f"{saved_dir}/bert.{new_name}.bin")
print(f"convert {new_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-saved_dir', '-o', type=str, help='directory to store the converted model files', required=True)
    parser.add_argument('-in_file', '-i', type=str, help='input checkpoint: a HuggingFace model name or path to a local model directory', required=True)
# parser.add_argument('-trained_gpu_num', '-t_g', type=int, help='How many gpus for inference', default=1)
parser.add_argument('-infer_gpu_num', '-i_g', type=int, help='How many gpus for inference', default=1)
# parser.add_argument("-processes", "-p", type=int, help="How many processes to spawn for conversion (default: 4)", default=4)
parser.add_argument("-weight_data_type", type=str, default="fp32", choices=["fp32", "fp16"])
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print("{}: {}".format(key, vars(args)[key]))
print("========================================")
split_and_convert(args)
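    # Example invocation (paths are illustrative, not shipped with this repo):
    #   python huggingface_bert_fp8_convert.py \
    #       -in_file bert-base-uncased \
    #       -saved_dir ./bert-converted \
    #       -infer_gpu_num 1 \
    #       -weight_data_type fp32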
| FasterTransformer-main | examples/pytorch/bert/utils/huggingface_bert_fp8_convert.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract pre-computed feature vectors from a PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import logging
import json
import re
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tokenization import BertTokenizer
from modeling import BertModel
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
def __init__(self, unique_id, text_a, text_b):
self.unique_id = unique_id
self.text_a = text_a
self.text_b = text_b
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
def convert_examples_to_features(examples, seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > seq_length - 2:
tokens_a = tokens_a[0:(seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
input_type_ids.append(1)
tokens.append("[SEP]")
input_type_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == seq_length
assert len(input_mask) == seq_length
assert len(input_type_ids) == seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (example.unique_id))
logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
features.append(
InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def read_examples(input_file):
"""Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
with open(input_file, "r", encoding='utf-8') as reader:
while True:
line = reader.readline()
if not line:
break
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
examples.append(
InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
unique_id += 1
return examples
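# Input format sketch for read_examples (lines are illustrative, not shipped data):
# one example per line, with an optional " ||| " separator between the two text
# segments; a line without the separator becomes a single-segment example (text_b is None).
#   Who was Jim Henson ? ||| Jim Henson was a puppeteer
#   A single sentence without a second segment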
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--input_file", default=None, type=str, required=True)
parser.add_argument("--output_file", default=None, type=str, required=True)
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
## Other parameters
parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
parser.add_argument("--layers", default="-1,-2,-3,-4", type=str)
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences longer "
"than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--batch_size", default=32, type=int, help="Batch size for predictions.")
parser.add_argument("--local_rank",
type=int,
default=-1,
help = "local_rank for distributed training on gpus")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {} distributed training: {}".format(device, n_gpu, bool(args.local_rank != -1)))
layer_indexes = [int(x) for x in args.layers.split(",")]
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
examples = read_examples(args.input_file)
features = convert_examples_to_features(
examples=examples, seq_length=args.max_seq_length, tokenizer=tokenizer)
unique_id_to_feature = {}
for feature in features:
unique_id_to_feature[feature.unique_id] = feature
model = BertModel.from_pretrained(args.bert_model)
model.to(device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index)
if args.local_rank == -1:
eval_sampler = SequentialSampler(eval_data)
else:
eval_sampler = DistributedSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
model.eval()
with open(args.output_file, "w", encoding='utf-8') as writer:
for input_ids, input_mask, example_indices in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
all_encoder_layers, _ = model(input_ids, token_type_ids=None, attention_mask=input_mask)
all_encoder_layers = all_encoder_layers
for b, example_index in enumerate(example_indices):
feature = features[example_index.item()]
unique_id = int(feature.unique_id)
# feature = unique_id_to_feature[unique_id]
output_json = collections.OrderedDict()
output_json["linex_index"] = unique_id
all_out_features = []
for (i, token) in enumerate(feature.tokens):
all_layers = []
for (j, layer_index) in enumerate(layer_indexes):
layer_output = all_encoder_layers[int(layer_index)].detach().cpu().numpy()
layer_output = layer_output[b]
layers = collections.OrderedDict()
layers["index"] = layer_index
layers["values"] = [
round(x.item(), 6) for x in layer_output[i]
]
all_layers.append(layers)
out_features = collections.OrderedDict()
out_features["token"] = token
out_features["layers"] = all_layers
all_out_features.append(out_features)
output_json["features"] = all_out_features
writer.write(json.dumps(output_json) + "\n")
if __name__ == "__main__":
main()
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/extract_features.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function
import pickle
import argparse
import logging
import os
import random
import wget
import json
import time
import dllogger
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from file_utils import PYTORCH_PRETRAINED_BERT_CACHE
import modeling
from tokenization import BertTokenizer
from optimization import BertAdam, warmup_linear
from schedulers import LinearWarmUpScheduler
from apex import amp
from sklearn.metrics import matthews_corrcoef, f1_score
from utils import (is_main_process, mkdir_by_main_process, format_step,
get_world_size)
from processors.glue import PROCESSORS, convert_examples_to_features
import quant_utils
from pytorch_quantization.nn import QuantLinear
from apex_sparsity import ASP
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
class Knowledge_Distillation_Loss(torch.nn.Module):
def __init__(self, T = 3):
super(Knowledge_Distillation_Loss, self).__init__()
self.KLdiv = torch.nn.KLDivLoss()
self.T = T
def get_knowledge_distillation_loss(self, output_student, output_teacher):
loss_kl = self.KLdiv(torch.nn.functional.log_softmax(output_student / self.T, dim=1), torch.nn.functional.softmax(output_teacher / self.T, dim=1))
loss = loss_kl
return loss
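# Minimal usage sketch (shapes are illustrative, not part of the training loop below):
# the distillation loss is the KL divergence between the student's log-softmax and the
# teacher's softmax, both softened by the temperature T.
#   kd_loss_fn = Knowledge_Distillation_Loss(T=3)
#   student_logits = torch.randn(8, 2)
#   teacher_logits = torch.randn(8, 2)
#   kd_loss = kd_loss_fn.get_knowledge_distillation_loss(student_logits, teacher_logits)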
def compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mrpc":
return acc_and_f1(preds, labels)
elif task_name == "sts-b":
return pearson_and_spearman(preds, labels)
elif task_name == "qqp":
return acc_and_f1(preds, labels)
elif task_name == "mnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "rte":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "wnli":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
from apex.multi_tensor_apply import multi_tensor_applier
class GradientClipper:
"""
Clips gradient norm of an iterable of parameters.
"""
def __init__(self, max_grad_norm):
self.max_norm = max_grad_norm
if multi_tensor_applier.available:
import amp_C
self._overflow_buf = torch.cuda.IntTensor([0])
self.multi_tensor_l2norm = amp_C.multi_tensor_l2norm
self.multi_tensor_scale = amp_C.multi_tensor_scale
else:
raise RuntimeError('Gradient clipping requires cuda extensions')
def step(self, parameters):
l = [p.grad for p in parameters if p.grad is not None]
total_norm, _ = multi_tensor_applier(
self.multi_tensor_l2norm,
self._overflow_buf,
[l],
False,
)
total_norm = total_norm.item()
if (total_norm == float('inf')): return
clip_coef = self.max_norm / (total_norm + 1e-6)
if clip_coef < 1:
multi_tensor_applier(
self.multi_tensor_scale,
self._overflow_buf,
[l, l],
clip_coef,
)
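# Usage sketch (illustrative; requires the apex multi-tensor CUDA extensions):
# clip after the backward pass and before the optimizer step.
#   clipper = GradientClipper(max_grad_norm=1.0)
#   loss.backward()
#   clipper.step(model.parameters())
#   optimizer.step()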
def parse_args(parser=argparse.ArgumentParser()):
## Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data "
"files) for the task.",
)
parser.add_argument(
"--bert_model",
default=None,
type=str,
required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, "
"bert-base-multilingual-uncased, bert-base-multilingual-cased, "
"bert-base-chinese.",
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
choices=PROCESSORS.keys(),
help="The name of the task to train.",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints "
"will be written.",
)
parser.add_argument(
"--init_checkpoint",
default=None,
type=str,
required=True,
help="The checkpoint file from pretraining",
)
## Other parameters
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece "
"tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to get model-task performance on the dev"
" set by running eval.")
parser.add_argument("--do_predict",
action='store_true',
help="Whether to output prediction results on the dev "
"set by running eval.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Batch size per GPU for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Batch size per GPU for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps",
default=-1.0,
type=float,
help="Total number of training steps to perform.")
parser.add_argument(
"--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup "
"for. E.g., 0.1 = 10%% of training.",
)
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=1,
help="random seed for initialization")
parser.add_argument(
'--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a "
"backward/update pass.")
parser.add_argument(
'--fp16',
action='store_true',
help="Mixed precision training",
)
parser.add_argument(
'--amp',
action='store_true',
help="Mixed precision training",
)
parser.add_argument(
'--loss_scale',
type=float,
default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when "
"fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n",
)
parser.add_argument('--server_ip',
type=str,
default='',
help="Can be used for distant debugging.")
parser.add_argument('--server_port',
type=str,
default='',
help="Can be used for distant debugging.")
parser.add_argument('--vocab_file',
type=str,
default=None,
required=True,
help="Vocabulary mapping/file BERT was pretrainined on")
parser.add_argument("--config_file",
default=None,
type=str,
required=True,
help="The BERT model config")
parser.add_argument('--skip_checkpoint',
default=False,
action='store_true',
help="Whether to save checkpoints")
parser.add_argument("--save_per_epoch",
default=False,
action='store_true',
help="Whether to save checkpoint after each epoch")
parser.add_argument("--do_calib",
action="store_true",
help="Whether to run calibration of quantization ranges.")
parser.add_argument('--num-calib-batch',
default=4, type=int,
help='Number of batches for calibration. 0 will disable calibration')
quant_utils.add_arguments(parser)
parser.add_argument("--distillation",
action='store_true',
help="Whether or not to use the techer-student model for finetuning (Knowledge distillation)")
parser.add_argument("--teacher",
default=None, type=str,
help="teacher pytorch model file for distillation")
parser.add_argument('--distillation_loss_scale',
type=float, default=10000.,
help="scale applied to distillation component of loss")
parser.add_argument("--sparse",
action='store_true',
help="Whether to sparse train")
parser.add_argument("--recompute_sparse_masks",
action='store_true',
help="Whether or not to recompute sparse masks during sparse training after every epoch")
return parser.parse_args()
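# Example invocation (paths and checkpoint names are illustrative):
#   python run_glue.py \
#       --task_name mrpc \
#       --do_train --do_eval \
#       --data_dir /data/glue/MRPC \
#       --bert_model bert-large-uncased \
#       --init_checkpoint /checkpoints/bert_large_pretrained.pt \
#       --vocab_file /checkpoints/vocab.txt \
#       --config_file /checkpoints/bert_config.json \
#       --output_dir /results/mrpc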
def init_optimizer_and_amp(model, learning_rate, loss_scale, warmup_proportion,
num_train_optimization_steps, use_fp16):
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{
'params': [
p for n, p in param_optimizer
if not any(nd in n for nd in no_decay)
],
'weight_decay': 0.01
},
{
'params': [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
'weight_decay': 0.0
},
]
optimizer, scheduler = None, None
if use_fp16:
logger.info("using fp16")
try:
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from "
"https://www.github.com/nvidia/apex to use "
"distributed and fp16 training.")
if num_train_optimization_steps is not None:
optimizer = FusedAdam(
optimizer_grouped_parameters,
lr=learning_rate,
bias_correction=False,
)
amp_inits = amp.initialize(
model,
optimizers=optimizer,
opt_level="O2",
keep_batchnorm_fp32=False,
loss_scale="dynamic" if loss_scale == 0 else loss_scale,
)
model, optimizer = (amp_inits
if num_train_optimization_steps is not None else
(amp_inits, None))
if num_train_optimization_steps is not None:
scheduler = LinearWarmUpScheduler(
optimizer,
warmup=warmup_proportion,
total_steps=num_train_optimization_steps,
)
else:
logger.info("using fp32")
if num_train_optimization_steps is not None:
optimizer = BertAdam(
optimizer_grouped_parameters,
lr=learning_rate,
warmup=warmup_proportion,
t_total=num_train_optimization_steps,
)
return model, optimizer, scheduler
def gen_tensor_dataset(features):
all_input_ids = torch.tensor(
[f.input_ids for f in features],
dtype=torch.long,
)
all_input_mask = torch.tensor(
[f.input_mask for f in features],
dtype=torch.long,
)
all_segment_ids = torch.tensor(
[f.segment_ids for f in features],
dtype=torch.long,
)
all_label_ids = torch.tensor(
[f.label_id for f in features],
dtype=torch.long,
)
return TensorDataset(
all_input_ids,
all_input_mask,
all_segment_ids,
all_label_ids,
)
def get_train_features(data_dir, bert_model, max_seq_length, do_lower_case,
local_rank, train_batch_size,
gradient_accumulation_steps, num_train_epochs, tokenizer,
processor):
cached_train_features_file = os.path.join(
data_dir,
'{0}_{1}_{2}'.format(
list(filter(None, bert_model.split('/'))).pop(),
str(max_seq_length),
str(do_lower_case),
),
)
train_features = None
try:
with open(cached_train_features_file, "rb") as reader:
train_features = pickle.load(reader)
logger.info("Loaded pre-processed features from {}".format(
cached_train_features_file))
    except Exception:
logger.info("Did not find pre-processed features from {}".format(
cached_train_features_file))
train_examples = processor.get_train_examples(data_dir)
train_features, _ = convert_examples_to_features(
train_examples,
processor.get_labels(),
max_seq_length,
tokenizer,
)
if is_main_process():
logger.info(" Saving train features into cached file %s",
cached_train_features_file)
with open(cached_train_features_file, "wb") as writer:
pickle.dump(train_features, writer)
return train_features
def dump_predictions(path, label_map, preds, examples):
label_rmap = {label_idx: label for label, label_idx in label_map.items()}
predictions = {
example.guid: label_rmap[preds[i]] for i, example in enumerate(examples)
}
with open(path, "w") as writer:
json.dump(predictions, writer)
def main(args):
if args.quant_mode is not None:
args = quant_utils.set_args(args)
args.fp16 = args.fp16 or args.amp
if args.server_ip and args.server_port:
# Distant debugging - see
# https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
logger.info("Waiting for debugger attach")
ptvsd.enable_attach(
address=(args.server_ip, args.server_port),
redirect_output=True,
)
ptvsd.wait_for_attach()
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of
        # synchronizing nodes/GPUs.
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, "
"16-bits training: {}".format(
device,
n_gpu,
bool(args.local_rank != -1),
args.fp16,
))
if not args.do_train and not args.do_eval and not args.do_predict and not args.do_calib:
raise ValueError("At least one of `do_train`, `do_eval` or "
"`do_predict` or `do_calib` must be True.")
if n_gpu > 1 and args.do_calib:
raise NotImplementedError("multi-gpu calibration is not supported")
if is_main_process():
if (os.path.exists(args.output_dir) and os.listdir(args.output_dir) and
args.do_train):
logger.warning("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
mkdir_by_main_process(args.output_dir)
if is_main_process():
dllogger.init(backends=[
dllogger.JSONStreamBackend(
verbosity=dllogger.Verbosity.VERBOSE,
filename=os.path.join(args.output_dir, 'dllogger.json'),
),
dllogger.StdOutBackend(
verbosity=dllogger.Verbosity.VERBOSE,
step_format=format_step,
),
])
else:
dllogger.init(backends=[])
dllogger.log(step="PARAMETER", data={"Config": [str(args)]})
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
args.gradient_accumulation_steps))
if args.gradient_accumulation_steps > args.train_batch_size:
raise ValueError("gradient_accumulation_steps ({}) cannot be larger "
"train_batch_size ({}) - there cannot be a fraction "
"of one sample.".format(
args.gradient_accumulation_steps,
args.train_batch_size,
))
args.train_batch_size = (args.train_batch_size //
args.gradient_accumulation_steps)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
dllogger.log(step="PARAMETER", data={"SEED": args.seed})
processor = PROCESSORS[args.task_name]()
num_labels = len(processor.get_labels())
#tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
tokenizer = BertTokenizer(
args.vocab_file,
do_lower_case=args.do_lower_case,
max_len=512,
) # for bert large
num_train_optimization_steps = None
if args.do_train or args.do_calib:
train_features = get_train_features(
args.data_dir,
args.bert_model,
args.max_seq_length,
args.do_lower_case,
args.local_rank,
args.train_batch_size,
args.gradient_accumulation_steps,
args.num_train_epochs,
tokenizer,
processor,
)
num_train_optimization_steps = int(
len(train_features) / args.train_batch_size /
args.gradient_accumulation_steps) * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = (num_train_optimization_steps //
torch.distributed.get_world_size())
# Prepare model
config = modeling.BertConfig.from_json_file(args.config_file)
# Padding for divisibility by 8
# if config.vocab_size % 8 != 0:
# config.vocab_size += 8 - (config.vocab_size % 8)
quant_utils.set_default_quantizers(args)
modeling.ACT2FN["bias_gelu"] = modeling.bias_gelu_training
model = modeling.BertForSequenceClassification(
config,
num_labels=num_labels,
)
def init_sparse_model(model):
ASP.init_model_for_pruning(model, mask_calculator="m4n2_1d", verbosity=3,
allow_recompute_mask=args.recompute_sparse_masks,
custom_layer_dict={modeling.LinearActivation: ['weight'],
QuantLinear: ['weight']})
logger.info("USING CHECKPOINT from {}".format(args.init_checkpoint))
# model.load_state_dict(
# torch.load(args.init_checkpoint, map_location='cpu')["model"],
# strict=False,
# )
loaded_ckpt_tmp = torch.load(args.init_checkpoint, map_location='cpu')
if "model" in loaded_ckpt_tmp:
loaded_ckpt_tmp = loaded_ckpt_tmp["model"]
loaded_ckpt = {}
for k, v in loaded_ckpt_tmp.items():
ks = k.split('.')
if ks[-2] == 'LayerNorm':
if ks[-1] == 'weight':
ks[-1] = 'gamma'
elif ks[-1] == 'bias':
ks[-1] = 'beta'
loaded_ckpt['.'.join(ks)] = v
model.load_state_dict(loaded_ckpt, strict=False)
logger.info("USED CHECKPOINT from {}".format(args.init_checkpoint))
dllogger.log(
step="PARAMETER",
data={
"num_parameters":
sum([p.numel() for p in model.parameters() if p.requires_grad]),
},
)
model.to(device)
if args.sparse:
print("SPARSE: Computing sparse mask")
init_sparse_model(model)
ASP.compute_sparse_masks()
# Prepare optimizer
model, optimizer, scheduler = init_optimizer_and_amp(
model,
args.learning_rate,
args.loss_scale,
args.warmup_proportion,
num_train_optimization_steps,
args.fp16,
)
if args.do_train and args.distillation:
modeling.ACT2FN["bias_gelu"] = modeling.bias_gelu_training
teacher_model = modeling.BertForSequenceClassification(config, num_labels=num_labels)
print(f"loading teacher model {args.teacher}...")
teacher_ckpt = torch.load(args.teacher, map_location='cpu')
if "model" in teacher_ckpt:
teacher_model.load_state_dict(teacher_ckpt["model"], strict=False)
else:
teacher_model.load_state_dict(teacher_ckpt, strict=False)
distillation_loss = Knowledge_Distillation_Loss().cuda()
teacher_model.to(device)
teacher_model, _ = amp.initialize(teacher_model, [], opt_level="O2", keep_batchnorm_fp32=False)
teacher_model.eval()
quant_utils.set_quantizer_by_name(teacher_model, [''], _disabled=True)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from "
"https://www.github.com/nvidia/apex to use "
"distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
loss_fct = torch.nn.CrossEntropyLoss()
results = {}
if args.do_train or args.do_calib:
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_features))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
train_data = gen_tensor_dataset(train_features)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(
train_data,
sampler=train_sampler,
batch_size=args.train_batch_size,
)
if args.do_calib:
quant_utils.configure_model(model, args, calib=True)
model.eval()
quant_utils.enable_calibration(model)
# run forward passes on a sample of the training set
train_iter = tqdm(train_dataloader, desc="Batches", total=args.num_calib_batch) if is_main_process() else train_dataloader
for step, batch in enumerate(train_iter):
if step > args.num_calib_batch:
break
if n_gpu == 1:
                batch = tuple(t.to(device) for t in batch) # multi-gpu does scattering itself
input_ids, input_mask, segment_ids, label_ids = batch
logits = model(input_ids, segment_ids, input_mask)
quant_utils.finish_calibration(model, args)
if args.do_train:
quant_utils.configure_model(model, args, calib=False)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
latency_train = 0.0
nb_tr_examples = 0
model.train()
tic_train = time.perf_counter()
        for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
if args.sparse and args.recompute_sparse_masks:
ASP.compute_sparse_masks()
tr_loss, nb_tr_steps = 0, 0
train_iter = tqdm(train_dataloader, desc="Iteration") if is_main_process() else train_dataloader
for step, batch in enumerate(train_iter):
if args.max_steps > 0 and global_step > args.max_steps:
break
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
logits = model(input_ids, segment_ids, input_mask)
loss = loss_fct(
logits.view(-1, num_labels),
label_ids.view(-1),
)
if args.distillation:
with torch.no_grad():
teacher_logits = teacher_model(input_ids, segment_ids, input_mask)
dloss = distillation_loss.get_knowledge_distillation_loss(logits, teacher_logits)
loss = loss + args.distillation_loss_scale * dloss
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
# modify learning rate with special warm up for BERT
# which FusedAdam doesn't do
scheduler.step()
optimizer.step()
optimizer.zero_grad()
global_step += 1
if args.save_per_epoch and is_main_process() and not args.skip_checkpoint:
                model_to_save = model.module if hasattr(model, 'module') else model # Only save the model itself
output_model_file = os.path.join(args.output_dir, modeling.WEIGHTS_NAME + '_' + str(epoch))
# torch.save({"model":model_to_save.state_dict()}, output_model_file)
torch.save(model_to_save.state_dict(), output_model_file)
output_config_file = os.path.join(args.output_dir, modeling.CONFIG_NAME)
with open(output_config_file, 'w') as f:
f.write(model_to_save.config.to_json_string())
latency_train = time.perf_counter() - tic_train
tr_loss = tr_loss / nb_tr_steps
results.update({
'global_step':
global_step,
'train:loss':
tr_loss,
'train:latency':
latency_train,
'train:num_samples_per_gpu':
nb_tr_examples,
'train:num_steps':
nb_tr_steps,
'train:throughput':
get_world_size() * nb_tr_examples / latency_train,
})
if args.do_train or args.do_calib:
if is_main_process() and not args.skip_checkpoint:
model_to_save = model.module if hasattr(model, 'module') else model
torch.save(
model_to_save.state_dict(),
os.path.join(args.output_dir, modeling.WEIGHTS_NAME),
)
with open(
os.path.join(args.output_dir, modeling.CONFIG_NAME),
'w',
) as f:
f.write(model_to_save.config.to_json_string())
if (args.do_eval or args.do_predict) and is_main_process():
eval_examples = processor.get_dev_examples(args.data_dir)
eval_features, label_map = convert_examples_to_features(
eval_examples,
processor.get_labels(),
args.max_seq_length,
tokenizer,
)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_data = gen_tensor_dataset(eval_features)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(
eval_data,
sampler=eval_sampler,
batch_size=args.eval_batch_size,
)
quant_utils.configure_model(model, args)
model.eval()
preds = None
out_label_ids = None
eval_loss = 0
nb_eval_steps, nb_eval_examples = 0, 0
cuda_events = [(torch.cuda.Event(enable_timing=True),
torch.cuda.Event(enable_timing=True))
for _ in range(len(eval_dataloader))]
for i, (input_ids, input_mask, segment_ids, label_ids) in tqdm(
enumerate(eval_dataloader),
desc="Evaluating",
):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
cuda_events[i][0].record()
logits = model(input_ids, segment_ids, input_mask)
cuda_events[i][1].record()
if args.do_eval:
eval_loss += loss_fct(
logits.view(-1, num_labels),
label_ids.view(-1),
).mean().item()
nb_eval_steps += 1
nb_eval_examples += input_ids.size(0)
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = label_ids.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(
out_label_ids,
label_ids.detach().cpu().numpy(),
axis=0,
)
torch.cuda.synchronize()
eval_latencies = [
event_start.elapsed_time(event_end)
for event_start, event_end in cuda_events
]
eval_latencies = list(sorted(eval_latencies))
def infer_latency_sli(threshold):
index = int(len(eval_latencies) * threshold) - 1
index = min(max(index, 0), len(eval_latencies) - 1)
return eval_latencies[index]
eval_throughput = (args.eval_batch_size /
(np.mean(eval_latencies) / 1000))
results.update({
'eval:num_samples_per_gpu': nb_eval_examples,
'eval:num_steps': nb_eval_steps,
'infer:latency(ms):50%': infer_latency_sli(0.5),
'infer:latency(ms):90%': infer_latency_sli(0.9),
'infer:latency(ms):95%': infer_latency_sli(0.95),
'infer:latency(ms):99%': infer_latency_sli(0.99),
'infer:latency(ms):100%': infer_latency_sli(1.0),
'infer:latency(ms):avg': np.mean(eval_latencies),
'infer:latency(ms):std': np.std(eval_latencies),
'infer:latency(ms):sum': np.sum(eval_latencies),
'infer:throughput(samples/s):avg': eval_throughput,
})
preds = np.argmax(preds, axis=1)
if args.do_predict:
dump_predictions(
os.path.join(args.output_dir, 'predictions.json'),
label_map,
preds,
eval_examples,
)
if args.do_eval:
results['eval:loss'] = eval_loss / nb_eval_steps
eval_result = compute_metrics(args.task_name, preds, out_label_ids)
results.update(eval_result)
if is_main_process():
logger.info("***** Results *****")
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
with open(os.path.join(args.output_dir, "results.txt"), "w") as writer:
json.dump(results, writer)
dllogger_queries_from_results = {
'exact_match': 'acc',
'F1': 'f1',
'e2e_train_time': 'train:latency',
'training_sequences_per_second': 'train:throughput',
'e2e_inference_time': ('infer:latency(ms):sum', lambda x: x / 1000),
'inference_sequences_per_second': 'infer:throughput(samples/s):avg',
}
for key, query in dllogger_queries_from_results.items():
results_key, convert = (query if isinstance(query, tuple) else
(query, lambda x: x))
if results_key not in results:
continue
dllogger.log(
step=tuple(),
data={key: convert(results[results_key])},
)
dllogger.flush()
return results
if __name__ == "__main__":
main(parse_args())
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/run_glue.py |
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
#from fused_adam_local import FusedAdam
from apex.optimizers import FusedAdam
from apex.multi_tensor_apply import multi_tensor_applier
import amp_C
from utils import is_main_process
multi_tensor_l2norm = amp_C.multi_tensor_l2norm
lamb_compute_update = amp_C.multi_tensor_lamb_stage1_cuda
lamb_apply_update = amp_C.multi_tensor_lamb_stage2_cuda
scale = amp_C.multi_tensor_scale
def warmup_cosine(x, warmup=0.002):
if x < warmup:
return x/warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x/warmup
    return max((x - 1.) / (warmup - 1.), 0.)
def warmup_poly(x, warmup=0.002, degree=0.5):
if x < warmup:
return x/warmup
return (1.0 - x)**degree
SCHEDULES = {
'warmup_cosine':warmup_cosine,
'warmup_constant':warmup_constant,
'warmup_linear':warmup_linear,
'warmup_poly':warmup_poly,
}
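# Worked example for the linear schedule above with warmup=0.002, where
# x = step / t_total is the training progress:
#   warmup_linear(0.001) = 0.001 / 0.002             = 0.5    (ramping up)
#   warmup_linear(0.002) = (0.002 - 1) / (0.002 - 1) = 1.0    (end of warmup)
#   warmup_linear(0.5)   = (0.5 - 1) / (0.002 - 1)   ~= 0.501 (linear decay to 0 at x = 1)
# The returned multiplier scales the base learning rate.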
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
max_grad_norm=1.0):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
return loss
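# The per-parameter update implemented in step() above, written out
# (decoupled weight decay, no bias correction):
#   m <- b1 * m + (1 - b1) * grad
#   v <- b2 * v + (1 - b2) * grad * grad
#   update = m / (sqrt(v) + e) + weight_decay * p
#   p <- p - lr_scheduled * update
# where lr_scheduled = lr * schedule(step / t_total, warmup) when t_total != -1,
# and lr otherwise.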
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/optimization.py |
# coding=utf-8
# Copyright (c) 2019-2022 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD."""
from __future__ import absolute_import, division, print_function
import argparse
import collections
import json
import logging
import math
import os
import random
import sys
from io import open
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from apex import amp
from schedulers import LinearWarmUpScheduler
from file_utils import PYTORCH_PRETRAINED_BERT_CACHE
import modeling
from optimization import BertAdam, warmup_linear
from tokenization import (BasicTokenizer, BertTokenizer, whitespace_tokenize)
from utils import is_main_process, format_step
import dllogger, time
import quant_utils
from pytorch_quantization.nn import QuantLinear
from apex_sparsity import ASP
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class Knowledge_Distillation_Loss(torch.nn.Module):
def __init__(self, T = 3):
super(Knowledge_Distillation_Loss, self).__init__()
self.KLdiv = torch.nn.KLDivLoss()
self.T = T
def get_knowledge_distillation_loss(self, output_student, output_teacher):
loss_kl = self.KLdiv(torch.nn.functional.log_softmax(output_student / self.T, dim=1), torch.nn.functional.softmax(output_teacher / self.T, dim=1))
loss = loss_kl
return loss
class SquadExample(object):
"""
A single training/test example for the Squad dataset.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=None):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (self.qas_id)
s += ", question_text: %s" % (
self.question_text)
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.end_position:
s += ", end_position: %d" % (self.end_position)
if self.is_impossible:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training, version_2_with_negative):
"""Read a SQuAD json file into a list of SquadExample."""
with open(input_file, "r", encoding='utf-8') as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
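# For reference, a minimal sketch of the SQuAD JSON layout read_squad_examples
# expects (field names follow the public SQuAD format; the values are invented):
#
#   {"data": [{"paragraphs": [{
#       "context": "The leader was John Smith.",
#       "qas": [{"id": "q1",
#                "question": "Who was the leader?",
#                "answers": [{"text": "John Smith", "answer_start": 15}],
#                "is_impossible": false}]}]}]}
#
# "is_impossible" is only consulted when --version_2_with_negative is set.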
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
features = []
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
        # To deal with this we use a sliding window approach, where we take chunks
        # of up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible))
unique_id += 1
return features
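# Worked example for the sliding-window span construction above (numbers are
# hypothetical): with len(all_doc_tokens) == 10, max_tokens_for_doc == 6 and
# doc_stride == 3, the loop yields
#   DocSpan(start=0, length=6), DocSpan(start=3, length=6), DocSpan(start=6, length=4)
# so every document token is covered, and tokens that fall into two spans are
# later assigned to their "max context" span by _check_is_max_context.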
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
    # Question: What country is the top exporter of electronics?
    # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
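# Worked example, reusing the case from the comment above (the WordPiece output
# is assumed): if the whitespace-tokenized span covers "(", "1895", "-", "1943",
# ")", "." and orig_answer_text is "1895", the nested loops find the sub-span
# whose joined text equals the tokenized answer "1895" and return that tighter
# (start, end) pair instead of the full "(1895-1943)." span.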
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
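# Worked example for the scoring above, using the spans from the comment
# (token positions index the hypothetical sentence "the man went to the store
# and bought a gallon of milk"):
#   span B = DocSpan(start=3, length=5)   # "to the store and bought"
#   span C = DocSpan(start=6, length=5)   # "and bought a gallon of"
# For "bought" at position 7, span B scores min(4, 0) + 0.01 * 5 = 0.05 while
# span C scores min(1, 3) + 0.01 * 5 = 1.05, so span C is the max-context span.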
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def get_answers(examples, features, results, args):
predictions = collections.defaultdict(list) #it is possible that one example corresponds to multiple features
Prediction = collections.namedtuple('Prediction', ['text', 'start_logit', 'end_logit'])
if args.version_2_with_negative:
null_vals = collections.defaultdict(lambda: (float("inf"),0,0))
for ex, feat, result in match_results(examples, features, results):
start_indices = _get_best_indices(result.start_logits, args.n_best_size)
end_indices = _get_best_indices(result.end_logits, args.n_best_size)
prelim_predictions = get_valid_prelim_predictions(start_indices, end_indices, feat, result, args)
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
if args.version_2_with_negative:
score = result.start_logits[0] + result.end_logits[0]
if score < null_vals[ex.qas_id][0]:
null_vals[ex.qas_id] = (score, result.start_logits[0], result.end_logits[0])
curr_predictions = []
seen_predictions = []
for pred in prelim_predictions:
if len(curr_predictions) == args.n_best_size:
break
if pred.start_index > 0: # this is a non-null prediction TODO: this probably is irrelevant
final_text = get_answer_text(ex, feat, pred, args)
if final_text in seen_predictions:
continue
else:
final_text = ""
seen_predictions.append(final_text)
curr_predictions.append(Prediction(final_text, pred.start_logit, pred.end_logit))
predictions[ex.qas_id] += curr_predictions
#Add empty prediction
if args.version_2_with_negative:
for qas_id in predictions.keys():
predictions[qas_id].append(Prediction('',
                                                  null_vals[qas_id][1],
                                                  null_vals[qas_id][2]))
nbest_answers = collections.defaultdict(list)
answers = {}
for qas_id, preds in predictions.items():
nbest = sorted(
preds,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)[:args.n_best_size]
# In very rare edge cases we could only have single null prediction.
# So we just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(Prediction(text="empty", start_logit=0.0, end_logit=0.0))
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry and entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_answers[qas_id].append(output)
if args.version_2_with_negative:
score_diff = null_vals[qas_id][0] - best_non_null_entry.start_logit - best_non_null_entry.end_logit
if score_diff > args.null_score_diff_threshold:
answers[qas_id] = ""
else:
answers[qas_id] = best_non_null_entry.text
else:
answers[qas_id] = nbest_answers[qas_id][0]['text']
return answers, nbest_answers
def get_answer_text(example, feature, pred, args):
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, args.do_lower_case, args.verbose_logging)
return final_text
def get_valid_prelim_predictions(start_indices, end_indices, feature, result, args):
_PrelimPrediction = collections.namedtuple(
"PrelimPrediction",
["start_index", "end_index", "start_logit", "end_logit"])
prelim_predictions = []
for start_index in start_indices:
for end_index in end_indices:
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > args.max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
return prelim_predictions
def match_results(examples, features, results):
unique_f_ids = set([f.unique_id for f in features])
unique_r_ids = set([r.unique_id for r in results])
matching_ids = unique_f_ids & unique_r_ids
features = [f for f in features if f.unique_id in matching_ids]
results = [r for r in results if r.unique_id in matching_ids]
features.sort(key=lambda x: x.unique_id)
results.sort(key=lambda x: x.unique_id)
for f, r in zip(features, results): #original code assumes strict ordering of examples. TODO: rewrite this
yield examples[f.example_index], f, r
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
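# Worked example for the alignment above (strings from the comment, the rest is
# illustrative and assumes do_lower_case=True): with pred_text = "steve smith"
# and orig_text = "Steve Smith's", re-tokenizing orig_text gives
# "steve smith ' s", which contains pred_text at character 0; after stripping
# spaces both strings have equal length, the start maps back to "S" and the end
# maps to the final "h" of "Smith", so the function returns "Steve Smith"
# without the trailing "'s".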
def _get_best_indices(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indices = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indices.append(index_and_score[i][0])
return best_indices
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
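# Quick numeric check of the max-subtracted softmax above (scores chosen for
# illustration): for scores = [2.0, 1.0, 0.0] the shifted exponentials are
# exp(0), exp(-1), exp(-2), about 1.000, 0.368 and 0.135, giving probabilities
# of roughly [0.665, 0.245, 0.090], which sum to 1.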
from apex.multi_tensor_apply import multi_tensor_applier
class GradientClipper:
"""
Clips gradient norm of an iterable of parameters.
"""
def __init__(self, max_grad_norm):
self.max_norm = max_grad_norm
if multi_tensor_applier.available:
import amp_C
self._overflow_buf = torch.cuda.IntTensor([0])
self.multi_tensor_l2norm = amp_C.multi_tensor_l2norm
self.multi_tensor_scale = amp_C.multi_tensor_scale
else:
raise RuntimeError('Gradient clipping requires cuda extensions')
def step(self, parameters):
l = [p.grad for p in parameters if p.grad is not None]
total_norm, _ = multi_tensor_applier(self.multi_tensor_l2norm, self._overflow_buf, [l], False)
total_norm = total_norm.item()
if (total_norm == float('inf')): return
clip_coef = self.max_norm / (total_norm + 1e-6)
if clip_coef < 1:
multi_tensor_applier(self.multi_tensor_scale, self._overflow_buf, [l, l], clip_coef)
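# Illustrative usage sketch (the model/optimizer names are placeholders): the
# clipper computes the global L2 norm of all gradients with a fused multi-tensor
# kernel and rescales them in place when that norm exceeds max_grad_norm,
# similar in spirit to torch.nn.utils.clip_grad_norm_.
#
#   clipper = GradientClipper(max_grad_norm=1.0)
#   loss.backward()
#   clipper.step(model.parameters())   # the training loop below passes amp.master_params(optimizer)
#   optimizer.step()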
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model checkpoints and predictions will be written.")
parser.add_argument("--init_checkpoint",
default=None,
type=str,
required=True,
help="The checkpoint file from pretraining")
## Other parameters
parser.add_argument("--train_file", default=None, type=str, help="SQuAD json for training. E.g., train-v1.1.json")
parser.add_argument("--predict_file", default=None, type=str,
help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
parser.add_argument("--max_seq_length", default=384, type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--doc_stride", default=128, type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.")
parser.add_argument("--max_query_length", default=64, type=int,
help="The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length.")
parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
parser.add_argument("--do_predict", action='store_true', help="Whether to run eval on the dev set.")
parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.")
parser.add_argument("--predict_batch_size", default=8, type=int, help="Total batch size for predictions.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1.0, type=float,
help="Total number of training steps to perform.")
parser.add_argument("--warmup_proportion", default=0.1, type=float,
help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% "
"of training.")
parser.add_argument("--n_best_size", default=20, type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json "
"output file.")
parser.add_argument("--max_answer_length", default=30, type=int,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.")
parser.add_argument("--verbose_logging", action='store_true',
help="If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument("--local_rank",
type=int,
default=os.getenv('LOCAL_RANK', -1),
help="local_rank for distributed training on gpus")
parser.add_argument('--fp16',
default=False,
action='store_true',
help="Mixed precision training")
parser.add_argument('--amp',
default=False,
action='store_true',
help="Mixed precision training")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.')
parser.add_argument('--null_score_diff_threshold',
type=float, default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.")
parser.add_argument('--vocab_file',
type=str, default=None, required=True,
help="Vocabulary mapping/file BERT was pretrainined on")
parser.add_argument("--config_file",
default=None,
type=str,
required=True,
help="The BERT model config")
parser.add_argument('--log_freq',
type=int, default=50,
help='frequency of logging loss.')
parser.add_argument('--json-summary', type=str, default="results/dllogger.json",
                        help='If provided, the json summary will be written to '
'the specified file.')
parser.add_argument("--eval_script",
help="Script to evaluate squad predictions",
default="evaluate.py",
type=str)
parser.add_argument("--do_eval",
action='store_true',
help="Whether to use evaluate accuracy of predictions")
parser.add_argument("--use_env",
action='store_true',
help="Whether to read local rank from ENVVAR")
parser.add_argument('--skip_checkpoint',
default=False,
action='store_true',
help="Whether to save checkpoints")
parser.add_argument('--disable-progress-bar',
default=False,
action='store_true',
help='Disable tqdm progress bar')
parser.add_argument("--skip_cache",
default=False,
action='store_true',
help="Whether to cache train features")
parser.add_argument("--cache_dir",
default=None,
type=str,
help="Location to cache train feaures. Will default to the dataset directory")
parser.add_argument("--save_per_epoch",
default=False,
action='store_true',
help="Whether to save checkpoint after each epoch")
parser.add_argument("--do_calib",
action="store_true",
help="Whether to run calibration of quantization ranges.")
parser.add_argument('--num-calib-batch',
default=4, type=int,
help='Number of batches for calibration. 0 will disable calibration')
quant_utils.add_arguments(parser)
parser.add_argument("--distillation",
action='store_true',
help="Whether or not to use the techer-student model for finetuning (Knowledge distillation)")
parser.add_argument("--teacher",
default=None, type=str,
help="teacher pytorch model file for distillation")
parser.add_argument('--distillation_loss_scale',
type=float, default=10000.,
help="scale applied to distillation component of loss")
parser.add_argument("--sparse",
action='store_true',
help="Whether to sparse train")
parser.add_argument("--recompute_sparse_masks",
action='store_true',
help="Whether or not to recompute sparse masks during sparse training after every epoch")
args = parser.parse_args()
if args.quant_mode is not None:
args = quant_utils.set_args(args)
args.fp16 = args.fp16 or args.amp
print(args)
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl', init_method='env://')
n_gpu = 1
if is_main_process():
dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=args.json_summary),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE, step_format=format_step)])
else:
dllogger.init(backends=[])
print("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
dllogger.log(step="PARAMETER", data={"Config": [str(args)]})
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
dllogger.log(step="PARAMETER", data={"SEED": args.seed})
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_predict and not args.do_calib:
raise ValueError("At least one of `do_train` or `do_predict` or `do_calib` must be True.")
if args.do_train:
if not args.train_file:
raise ValueError(
"If `do_train` is True, then `train_file` must be specified.")
if args.do_predict:
if not args.predict_file:
raise ValueError(
"If `do_predict` is True, then `predict_file` must be specified.")
if n_gpu > 1 and args.do_calib:
raise NotImplementedError("multi-gpu calibration is not supported")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and os.listdir(args.output_dir)!=['logfile.txt']:
print("WARNING: Output directory {} already exists and is not empty.".format(args.output_dir), os.listdir(args.output_dir))
if not os.path.exists(args.output_dir) and is_main_process():
os.makedirs(args.output_dir)
tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case, max_len=512) # for bert large
# tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
train_examples = None
num_train_optimization_steps = None
if args.do_train or args.do_calib:
train_examples = read_squad_examples(
input_file=args.train_file, is_training=True, version_2_with_negative=args.version_2_with_negative)
num_train_optimization_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
# Prepare model
config = modeling.BertConfig.from_json_file(args.config_file)
# Padding for divisibility by 8
# if config.vocab_size % 8 != 0:
# config.vocab_size += 8 - (config.vocab_size % 8)
quant_utils.set_default_quantizers(args)
modeling.ACT2FN["bias_gelu"] = modeling.bias_gelu_training
model = modeling.BertForQuestionAnswering(config)
# model = modeling.BertForQuestionAnswering.from_pretrained(args.bert_model,
# cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank)))
def init_sparse_model(model):
ASP.init_model_for_pruning(model, mask_calculator="m4n2_1d", verbosity=3,
allow_recompute_mask=args.recompute_sparse_masks,
custom_layer_dict={modeling.LinearActivation: ['weight'],
QuantLinear: ['weight']})
dllogger.log(step="PARAMETER", data={"loading_checkpoint": True})
# model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu')["model"], strict=False)
loaded_ckpt_tmp = torch.load(args.init_checkpoint, map_location='cpu')
if "model" in loaded_ckpt_tmp:
loaded_ckpt_tmp = loaded_ckpt_tmp["model"]
loaded_ckpt = {}
for k, v in loaded_ckpt_tmp.items():
ks = k.split('.')
if ks[-2] == 'LayerNorm':
if ks[-1] == 'weight':
ks[-1] = 'gamma'
elif ks[-1] == 'bias':
ks[-1] = 'beta'
loaded_ckpt['.'.join(ks)] = v
model.load_state_dict(loaded_ckpt, strict=False)
dllogger.log(step="PARAMETER", data={"loaded_checkpoint": True})
model.to(device)
if args.sparse:
print("SPARSE: Computing sparse mask")
init_sparse_model(model)
ASP.compute_sparse_masks()
num_weights = sum([p.numel() for p in model.parameters() if p.requires_grad])
dllogger.log(step="PARAMETER", data={"model_weights_num":num_weights})
# Prepare optimizer
param_optimizer = list(model.named_parameters())
# hack to remove pooler, which is not used
    # thus it produces None grads that break apex
param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.do_train:
if args.fp16:
try:
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False)
if args.loss_scale == 0:
model, optimizer = amp.initialize(model, optimizer, opt_level="O2", keep_batchnorm_fp32=False,
loss_scale="dynamic")
else:
model, optimizer = amp.initialize(model, optimizer, opt_level="O2", keep_batchnorm_fp32=False, loss_scale=args.loss_scale)
if args.do_train:
scheduler = LinearWarmUpScheduler(optimizer, warmup=args.warmup_proportion, total_steps=num_train_optimization_steps)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
if args.distillation:
teacher_model = modeling.BertForQuestionAnswering(config)
print(f"loading teacher model {args.teacher}...")
teacher_ckpt = torch.load(args.teacher, map_location='cpu')
if "model" in teacher_ckpt:
teacher_model.load_state_dict(teacher_ckpt["model"], strict=False)
else:
teacher_model.load_state_dict(teacher_ckpt, strict=False)
distillation_loss = Knowledge_Distillation_Loss().cuda()
teacher_model.to(device)
teacher_model, _ = amp.initialize(teacher_model, [], opt_level="O2", keep_batchnorm_fp32=False)
teacher_model.eval()
quant_utils.set_quantizer_by_name(teacher_model, [''], _disabled=True)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
global_step = 0
if args.do_train or args.do_calib:
if args.cache_dir is None:
cached_train_features_file = args.train_file + '_{0}_{1}_{2}_{3}'.format(
list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride),
str(args.max_query_length))
else:
cached_train_features_file = args.cache_dir.strip('/') + '/' + args.train_file.split('/')[-1] + '_{0}_{1}_{2}_{3}'.format(
list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride),
str(args.max_query_length))
train_features = None
try:
with open(cached_train_features_file, "rb") as reader:
train_features = pickle.load(reader)
except:
train_features = convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=True)
if not args.skip_cache and is_main_process():
dllogger.log(step="PARAMETER", data={"Cached_train features_file": cached_train_features_file})
with open(cached_train_features_file, "wb") as writer:
pickle.dump(train_features, writer)
dllogger.log(step="PARAMETER", data={"train_start": True})
dllogger.log(step="PARAMETER", data={"training_samples": len(train_examples)})
dllogger.log(step="PARAMETER", data={"training_features": len(train_features)})
dllogger.log(step="PARAMETER", data={"train_batch_size":args.train_batch_size})
dllogger.log(step="PARAMETER", data={"steps":num_train_optimization_steps})
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_start_positions, all_end_positions)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size * n_gpu)
if args.do_calib:
quant_utils.configure_model(model, args, calib=True)
model.eval()
quant_utils.enable_calibration(model)
# run forward passes on a sample of the training set
train_iter = tqdm(train_dataloader, desc="Batches", disable=args.disable_progress_bar, total=args.num_calib_batch) if is_main_process() else train_dataloader
for step, batch in enumerate(train_iter):
if step > args.num_calib_batch:
break
if n_gpu == 1:
batch = tuple(t.to(device) for t in batch) # multi-gpu does scattering it-self
input_ids, input_mask, segment_ids, start_positions, end_positions = batch
start_logits, end_logits = model(input_ids, segment_ids, input_mask)
quant_utils.finish_calibration(model, args)
if args.do_train:
quant_utils.configure_model(model, args, calib=False)
model.train()
gradClipper = GradientClipper(max_grad_norm=1.0)
final_loss = None
if args.sparse:
ASP.init_optimizer_for_pruning(optimizer)
train_start = time.time()
for epoch in range(int(args.num_train_epochs)):
if args.sparse and args.recompute_sparse_masks:
ASP.compute_sparse_masks()
train_iter = tqdm(train_dataloader, desc="Iteration", disable=args.disable_progress_bar) if is_main_process() else train_dataloader
for step, batch in enumerate(train_iter):
# Terminate early for benchmarking
if args.max_steps > 0 and global_step > args.max_steps:
break
if n_gpu == 1:
batch = tuple(t.to(device) for t in batch) # multi-gpu does scattering it-self
input_ids, input_mask, segment_ids, start_positions, end_positions = batch
start_logits, end_logits = model(input_ids, segment_ids, input_mask)
                # If we are on multi-GPU, the positions may carry an extra dimension; squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = torch.nn.CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
loss = (start_loss + end_loss) / 2
if args.distillation:
with torch.no_grad():
teacher_start_logits, teacher_end_logits = teacher_model(input_ids, segment_ids, input_mask)
loss_start_pos = distillation_loss.get_knowledge_distillation_loss(start_logits, teacher_start_logits)
loss_end_pos = distillation_loss.get_knowledge_distillation_loss(end_logits, teacher_end_logits)
dloss = loss_start_pos + loss_end_pos
loss = loss + args.distillation_loss_scale * dloss
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# gradient clipping
gradClipper.step(amp.master_params(optimizer))
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16 :
# modify learning rate with special warm up for BERT which FusedAdam doesn't do
scheduler.step()
optimizer.step()
optimizer.zero_grad()
global_step += 1
final_loss = loss.item()
if step % args.log_freq == 0:
dllogger.log(step=(epoch, global_step,), data={"step_loss": final_loss,
"learning_rate": optimizer.param_groups[0]['lr']})
if args.save_per_epoch and is_main_process() and not args.skip_checkpoint:
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(args.output_dir, modeling.WEIGHTS_NAME + '_' + str(epoch))
# torch.save({"model":model_to_save.state_dict()}, output_model_file)
torch.save(model_to_save.state_dict(), output_model_file)
output_config_file = os.path.join(args.output_dir, modeling.CONFIG_NAME)
with open(output_config_file, 'w') as f:
f.write(model_to_save.config.to_json_string())
time_to_train = time.time() - train_start
if (args.do_train or args.do_calib) and is_main_process() and not args.skip_checkpoint:
# Save a trained model and the associated configuration
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(args.output_dir, modeling.WEIGHTS_NAME)
# torch.save({"model":model_to_save.state_dict()}, output_model_file)
torch.save(model_to_save.state_dict(), output_model_file)
output_config_file = os.path.join(args.output_dir, modeling.CONFIG_NAME)
with open(output_config_file, 'w') as f:
f.write(model_to_save.config.to_json_string())
if args.do_predict and (args.local_rank == -1 or is_main_process()):
quant_utils.configure_model(model, args)
if not args.do_train and args.fp16:
model.half()
eval_examples = read_squad_examples(
input_file=args.predict_file, is_training=False, version_2_with_negative=args.version_2_with_negative)
eval_features = convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=False)
dllogger.log(step="PARAMETER", data={"infer_start": True})
dllogger.log(step="PARAMETER", data={"eval_samples": len(eval_examples)})
dllogger.log(step="PARAMETER", data={"eval_features": len(eval_features)})
dllogger.log(step="PARAMETER", data={"predict_batch_size": args.predict_batch_size})
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size)
infer_start = time.time()
model.eval()
all_results = []
dllogger.log(step="PARAMETER", data={"eval_start": True})
for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating", disable=args.disable_progress_bar):
if len(all_results) % 1000 == 0:
dllogger.log(step="PARAMETER", data={"sample_number": len(all_results)})
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
with torch.no_grad():
batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask)
for i, example_index in enumerate(example_indices):
start_logits = batch_start_logits[i].detach().cpu().tolist()
end_logits = batch_end_logits[i].detach().cpu().tolist()
eval_feature = eval_features[example_index.item()]
unique_id = int(eval_feature.unique_id)
all_results.append(RawResult(unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
time_to_infer = time.time() - infer_start
output_prediction_file = os.path.join(args.output_dir, "predictions.json")
output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json")
answers, nbest_answers = get_answers(eval_examples, eval_features, all_results, args)
with open(output_prediction_file, "w") as f:
f.write(json.dumps(answers, indent=4) + "\n")
with open(output_nbest_file, "w") as f:
f.write(json.dumps(nbest_answers, indent=4) + "\n")
# output_null_log_odds_file = os.path.join(args.output_dir, "null_odds.json")
# write_predictions(eval_examples, eval_features, all_results,
# args.n_best_size, args.max_answer_length,
# args.do_lower_case, output_prediction_file,
# output_nbest_file, output_null_log_odds_file, args.verbose_logging,
# args.version_2_with_negative, args.null_score_diff_threshold)
if args.do_eval and is_main_process():
import sys
import subprocess
eval_out = subprocess.check_output([sys.executable, args.eval_script,
args.predict_file, args.output_dir + "/predictions.json"])
scores = str(eval_out).strip()
exact_match = float(scores.split(":")[1].split(",")[0])
f1 = float(scores.split(":")[2].split("}")[0])
if args.do_train:
gpu_count = n_gpu
if torch.distributed.is_initialized():
gpu_count = torch.distributed.get_world_size()
if args.max_steps == -1:
dllogger.log(step=tuple(), data={"e2e_train_time": time_to_train,
"training_sequences_per_second": len(train_features) * args.num_train_epochs / time_to_train,
"final_loss": final_loss})
else:
dllogger.log(step=tuple(), data={"e2e_train_time": time_to_train,
"training_sequences_per_second": args.train_batch_size * args.gradient_accumulation_steps \
* args.max_steps * gpu_count / time_to_train,
"final_loss": final_loss})
if args.do_predict and is_main_process():
dllogger.log(step=tuple(), data={"e2e_inference_time": time_to_infer,
"inference_sequences_per_second": len(eval_features) / time_to_infer})
if args.do_eval and is_main_process():
dllogger.log(step=tuple(), data={"exact_match": exact_match, "F1": f1})
if __name__ == "__main__":
main()
dllogger.flush()
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/run_squad.py |
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
import six
from io import open
from file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
}
VOCAB_NAME = 'vocab.txt'
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True, max_len=None,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
raise ValueError(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
vocab_file = pretrained_model_name_or_path
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
# if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self,
do_lower_case=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in self.never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
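    # Quick illustration (code points checked against the ranges above):
    #   ord(u"中") == 0x4E2D  -> inside 0x4E00-0x9FFF, so True
    #   ord(u"가") == 0xAC00  -> Hangul, outside every listed block, so False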
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
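# Trace of the greedy longest-match loop above for the docstring example,
# assuming "un", "##aff" and "##able" are in the vocab but no longer piece is:
#   start=0: "unaffable", "unaffabl", ... shrink until "un" matches    -> emit "un"
#   start=2: "##affable", "##affabl", ... shrink until "##aff" matches -> emit "##aff"
#   start=5: "##able" matches immediately                              -> emit "##able"
# giving ["un", "##aff", "##able"]; if some remainder has no prefix in the vocab
# at all, the whole token is replaced by "[UNK]".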
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
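# A minimal usage sketch of the greedy longest-match-first algorithm implemented
# by WordpieceTokenizer above. The toy vocabulary is hypothetical and exists only
# for illustration; real runs load a full BERT vocabulary file.
if __name__ == "__main__":
    toy_vocab = {"un", "##aff", "##able", "[UNK]"}
    wp = WordpieceTokenizer(vocab=toy_vocab)
    # The longest matching prefix "un" is consumed first, then "##aff", then "##able".
    print(wp.tokenize("unaffable"))  # ['un', '##aff', '##able']
    # A token with no sub-word cover in the vocabulary maps to the unknown token.
    print(wp.tokenize("xyzzy"))      # ['[UNK]']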
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/tokenization.py |
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# ==================
import csv
import os
import time
import argparse
import random
import h5py
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Dataset
from torch.utils.data.distributed import DistributedSampler
import math
from apex import amp
import multiprocessing
from tokenization import BertTokenizer
import modeling
from apex.optimizers import FusedLAMB
from schedulers import PolyWarmUpScheduler
from file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from utils import is_main_process, format_step, get_world_size, get_rank
from apex.parallel import DistributedDataParallel as DDP
from schedulers import LinearWarmUpScheduler
from apex.parallel.distributed import flat_dist_call
import amp_C
import apex_C
from apex.amp import _amp_state
import dllogger
from concurrent.futures import ProcessPoolExecutor
from apex_sparsity import ASP
import quant_utils
from pytorch_quantization.nn import QuantLinear
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
skipped_steps = 0
# Track whether a SIGTERM (cluster time up) has been handled
timeout_sent = False
import signal
# handle SIGTERM sent from the scheduler and mark so we
# can gracefully save & exit
def signal_handler(sig, frame):
global timeout_sent
timeout_sent = True
signal.signal(signal.SIGTERM, signal_handler)
# Workaround because lambdas and nested functions are not picklable
class WorkerInitObj(object):
def __init__(self, seed):
self.seed = seed
def __call__(self, id):
np.random.seed(seed=self.seed + id)
random.seed(self.seed + id)
def create_pretraining_dataset(input_file, max_pred_length, shared_list, args, worker_init):
train_data = pretraining_dataset(input_file=input_file, max_pred_length=max_pred_length)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler,
batch_size=args.train_batch_size * args.n_gpu,
num_workers=4, worker_init_fn=worker_init,
pin_memory=True)
return train_dataloader, input_file
class pretraining_dataset(Dataset):
def __init__(self, input_file, max_pred_length):
self.input_file = input_file
self.max_pred_length = max_pred_length
f = h5py.File(input_file, "r")
keys = ['input_ids', 'input_mask', 'segment_ids', 'masked_lm_positions', 'masked_lm_ids',
'next_sentence_labels']
self.inputs = [np.asarray(f[key][:]) for key in keys]
f.close()
def __len__(self):
'Denotes the total number of samples'
return len(self.inputs[0])
def __getitem__(self, index):
[input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, next_sentence_labels] = [
torch.from_numpy(input[index].astype(np.int64)) if indice < 5 else torch.from_numpy(
np.asarray(input[index].astype(np.int64))) for indice, input in enumerate(self.inputs)]
masked_lm_labels = torch.ones(input_ids.shape, dtype=torch.long) * -1
index = self.max_pred_length
# store number of masked tokens in index
padded_mask_indices = (masked_lm_positions == 0).nonzero()
if len(padded_mask_indices) != 0:
index = padded_mask_indices[0].item()
masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index]
return [input_ids, segment_ids, input_mask,
masked_lm_labels, next_sentence_labels]
class BertPretrainingCriterion(torch.nn.Module):
def __init__(self, vocab_size):
super(BertPretrainingCriterion, self).__init__()
self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=-1)
self.vocab_size = vocab_size
def forward(self, prediction_scores, seq_relationship_score, masked_lm_labels, next_sentence_labels):
masked_lm_loss = self.loss_fn(prediction_scores.view(-1, self.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = self.loss_fn(seq_relationship_score.view(-1, 2), next_sentence_labels.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return total_loss
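# Shape sketch for the criterion above (hypothetical sizes, not tied to a real config):
#   criterion = BertPretrainingCriterion(vocab_size=30522)
#   prediction_scores = torch.randn(2, 8, 30522)           # [batch, seq_len, vocab]
#   seq_relationship_score = torch.randn(2, 2)              # [batch, 2] NSP logits
#   masked_lm_labels = torch.full((2, 8), -1, dtype=torch.long)
#   masked_lm_labels[0, 3] = 17                             # one masked position
#   next_sentence_labels = torch.tensor([1, 0])
#   loss = criterion(prediction_scores, seq_relationship_score,
#                    masked_lm_labels, next_sentence_labels)
# Positions labelled -1 are skipped by CrossEntropyLoss(ignore_index=-1), so only
# real masked tokens contribute to the masked-LM term.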
def parse_arguments():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--input_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain .hdf5 files for the task.")
parser.add_argument("--config_file",
default=None,
type=str,
required=True,
help="The BERT model config")
parser.add_argument("--bert_model", default="bert-large-uncased", type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints will be written.")
## Other parameters
parser.add_argument("--init_checkpoint",
default=None,
type=str,
help="The initial checkpoint to start training from.")
parser.add_argument("--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--max_predictions_per_seq",
default=80,
type=int,
help="The maximum total of masked tokens in input sequence")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps",
default=1000,
type=float,
help="Total number of training steps to perform.")
parser.add_argument("--warmup_proportion",
default=0.01,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--local_rank",
type=int,
default=os.getenv('LOCAL_RANK', -1),
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
default=False,
action='store_true',
help="Mixed precision training")
parser.add_argument('--amp',
default=False,
action='store_true',
help="Mixed precision training")
parser.add_argument('--loss_scale',
type=float, default=0.0,
help='Loss scaling, positive power of 2 values can improve fp16 convergence.')
parser.add_argument('--log_freq',
type=float, default=1.0,
help='frequency of logging loss.')
parser.add_argument('--checkpoint_activations',
default=False,
action='store_true',
help="Whether to use gradient checkpointing")
parser.add_argument("--resume_from_checkpoint",
default=False,
action='store_true',
help="Whether to resume training from checkpoint.")
parser.add_argument('--resume_step',
type=int,
default=-1,
help="Step to resume training from.")
parser.add_argument('--num_steps_per_checkpoint',
type=int,
default=100,
help="Number of update steps until a model checkpoint is saved to disk.")
parser.add_argument('--skip_checkpoint',
default=False,
action='store_true',
help="Whether to save checkpoints")
parser.add_argument('--phase2',
default=False,
action='store_true',
help="Whether to train with seq len 512")
parser.add_argument('--allreduce_post_accumulation',
default=False,
action='store_true',
help="Whether to do allreduces during gradient accumulation steps.")
parser.add_argument('--allreduce_post_accumulation_fp16',
default=False,
action='store_true',
help="Whether to do fp16 allreduce post accumulation.")
parser.add_argument('--phase1_end_step',
type=int,
default=7038,
help="Number of training steps in Phase1 - seq len 128")
parser.add_argument('--init_loss_scale',
type=int,
default=2**20,
help="Initial loss scaler value")
parser.add_argument("--do_train",
default=False,
action='store_true',
help="Whether to run training.")
parser.add_argument('--json-summary', type=str, default="results/dllogger.json",
help='If provided, the json summary will be written to'
'the specified file.')
parser.add_argument("--use_env",
action='store_true',
help="Whether to read local rank from ENVVAR")
parser.add_argument('--disable_progress_bar',
default=False,
action='store_true',
help='Disable tqdm progress bar')
parser.add_argument('--steps_this_run', type=int, default=-1,
help='If provided, only run this many steps before exiting')
parser.add_argument("--sparse",
action='store_true',
help="Whether to sparse train")
parser.add_argument("--dense_checkpoint",
default=None,
type=str,
help="The initial checkpoint to start sparse training from.")
parser.add_argument("--recompute_sparse_masks",
action='store_true',
help="Whether or not to recompute sparse masks during sparse training after every epoch")
args = parser.parse_args()
args.fp16 = args.fp16 or args.amp
args.do_train = True
print(args)
if args.steps_this_run < 0:
args.steps_this_run = args.max_steps
return args
def setup_training(args):
assert (torch.cuda.is_available())
if args.local_rank == -1:
device = torch.device("cuda")
args.n_gpu = torch.cuda.device_count()
args.allreduce_post_accumulation = False
args.allreduce_post_accumulation_fp16 = False
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.n_gpu = 1
if args.gradient_accumulation_steps == 1:
args.allreduce_post_accumulation = False
args.allreduce_post_accumulation_fp16 = False
if is_main_process():
dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=args.json_summary),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE, step_format=format_step)])
else:
dllogger.init(backends=[])
print("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, args.n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if args.train_batch_size % args.gradient_accumulation_steps != 0:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, batch size {} should be divisible".format(
args.gradient_accumulation_steps, args.train_batch_size))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
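# e.g. a hypothetical --train_batch_size 8192 with --gradient_accumulation_steps 128
# yields a micro-batch of 64 per GPU for each forward/backward pass.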
if not args.do_train:
raise ValueError(" `do_train` must be True.")
if not args.resume_from_checkpoint and os.path.exists(args.output_dir) and (
os.listdir(args.output_dir) and any([i.startswith('ckpt') for i in os.listdir(args.output_dir)])):
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if (not args.resume_from_checkpoint or not os.path.exists(args.output_dir)) and is_main_process():
os.makedirs(args.output_dir, exist_ok=True)
return device, args
def prepare_model_and_optimizer(args, device):
# Prepare model
config = modeling.BertConfig.from_json_file(args.config_file)
# Padding for divisibility by 8
# if config.vocab_size % 8 != 0:
# config.vocab_size += 8 - (config.vocab_size % 8)
modeling.ACT2FN["bias_gelu"] = modeling.bias_gelu_training
model = modeling.BertForPreTraining(config)
checkpoint = None
if not args.resume_from_checkpoint:
global_step = 0
if args.sparse:
dense_ckpt = torch.load(args.dense_checkpoint, map_location="cpu")
if "model" in dense_ckpt:
model.load_state_dict(dense_ckpt["model"], strict=False)
else:
model.load_state_dict(dense_ckpt, strict=False)
else:
if args.resume_step == -1 and not args.init_checkpoint:
model_names = [f for f in os.listdir(args.output_dir) if f.endswith(".pt")]
args.resume_step = max([int(x.split('.pt')[0].split('_')[1].strip()) for x in model_names])
global_step = args.resume_step if not args.init_checkpoint else 0
if not args.init_checkpoint:
checkpoint = torch.load(os.path.join(args.output_dir, "ckpt_{}.pt".format(global_step)), map_location="cpu")
else:
checkpoint = torch.load(args.init_checkpoint, map_location="cpu")
model.load_state_dict(checkpoint['model'], strict=False)
if args.phase2 and not args.init_checkpoint:
global_step -= args.phase1_end_step
if is_main_process():
print("resume step from ", args.resume_step)
model.to(device)
if args.sparse:
ASP.init_model_for_pruning(model, mask_calculator="m4n2_1d", verbosity=3,
allow_recompute_mask=args.recompute_sparse_masks,
custom_layer_dict={modeling.LinearActivation: ['weight'],
QuantLinear: ['weight']})
ASP.compute_sparse_masks()
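# Note (assumption about apex_sparsity semantics): the "m4n2_1d" mask calculator
# selects 2:4 structured sparsity, i.e. it keeps 2 non-zero weights in every
# contiguous group of 4, the pattern accelerated by sparse Tensor Cores.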
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
optimizer = FusedLAMB(optimizer_grouped_parameters,
lr=args.learning_rate)
lr_scheduler = PolyWarmUpScheduler(optimizer,
warmup=args.warmup_proportion,
total_steps=args.max_steps)
if args.fp16:
if args.loss_scale == 0:
model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale="dynamic", cast_model_outputs=torch.float16)
else:
model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale=args.loss_scale, cast_model_outputs=torch.float16)
amp._amp_state.loss_scalers[0]._loss_scale = args.init_loss_scale
model.checkpoint_activations(args.checkpoint_activations)
if args.resume_from_checkpoint:
if args.phase2 or args.init_checkpoint:
keys = list(checkpoint['optimizer']['state'].keys())
#Override hyperparameters from previous checkpoint
for key in keys:
checkpoint['optimizer']['state'][key]['step'] = global_step
for iter, item in enumerate(checkpoint['optimizer']['param_groups']):
checkpoint['optimizer']['param_groups'][iter]['step'] = global_step
checkpoint['optimizer']['param_groups'][iter]['t_total'] = args.max_steps
checkpoint['optimizer']['param_groups'][iter]['warmup'] = args.warmup_proportion
checkpoint['optimizer']['param_groups'][iter]['lr'] = args.learning_rate
optimizer.load_state_dict(checkpoint['optimizer']) # , strict=False)
# Restore AMP master parameters
if args.fp16:
optimizer._lazy_init_maybe_master_weights()
optimizer._amp_stash.lazy_init_called = True
optimizer.load_state_dict(checkpoint['optimizer'])
for param, saved_param in zip(amp.master_params(optimizer), checkpoint['master params']):
param.data.copy_(saved_param.data)
if args.local_rank != -1:
if not args.allreduce_post_accumulation:
model = DDP(model, message_size=250000000, gradient_predivide_factor=get_world_size())
else:
flat_dist_call([param.data for param in model.parameters()], torch.distributed.broadcast, (0,) )
elif args.n_gpu > 1:
model = torch.nn.DataParallel(model)
criterion = BertPretrainingCriterion(config.vocab_size)
return model, optimizer, lr_scheduler, checkpoint, global_step, criterion
def take_optimizer_step(args, optimizer, model, overflow_buf, global_step):
global skipped_steps
if args.allreduce_post_accumulation:
# manually allreduce gradients after all accumulation steps
# check for Inf/NaN
# 1. allocate an uninitialized buffer for flattened gradient
loss_scale = _amp_state.loss_scalers[0].loss_scale() if args.fp16 else 1
master_grads = [p.grad for p in amp.master_params(optimizer) if p.grad is not None]
flat_grad_size = sum(p.numel() for p in master_grads)
allreduce_dtype = torch.float16 if args.allreduce_post_accumulation_fp16 else torch.float32
flat_raw = torch.empty(flat_grad_size, device='cuda', dtype=allreduce_dtype)
# 2. combine unflattening and predivision of unscaled 'raw' gradient
allreduced_views = apex_C.unflatten(flat_raw, master_grads)
overflow_buf.zero_()
amp_C.multi_tensor_scale(65536,
overflow_buf,
[master_grads, allreduced_views],
loss_scale / (get_world_size() * args.gradient_accumulation_steps))
# 3. sum gradient across ranks. Because of the predivision, this averages the gradient
torch.distributed.all_reduce(flat_raw)
# 4. combine unscaling and unflattening of allreduced gradient
overflow_buf.zero_()
amp_C.multi_tensor_scale(65536,
overflow_buf,
[allreduced_views, master_grads],
1./loss_scale)
# 5. update loss scale
if args.fp16:
scaler = _amp_state.loss_scalers[0]
old_overflow_buf = scaler._overflow_buf
scaler._overflow_buf = overflow_buf
had_overflow = scaler.update_scale()
scaler._overflow_buf = old_overflow_buf  # restore the original overflow buffer
else:
had_overflow = 0
# 6. call optimizer step function
if had_overflow == 0:
optimizer.step()
global_step += 1
else:
# Overflow detected, print message and clear gradients
skipped_steps += 1
if is_main_process():
scaler = _amp_state.loss_scalers[0]
dllogger.log(step="PARAMETER", data={"loss_scale": scaler.loss_scale()})
if _amp_state.opt_properties.master_weights:
for param in optimizer._amp_stash.all_fp32_from_fp16_params:
param.grad = None
for param in model.parameters():
param.grad = None
else:
optimizer.step()
#optimizer.zero_grad()
for param in model.parameters():
param.grad = None
global_step += 1
return global_step
def main():
global timeout_sent
args = parse_arguments()
random.seed(args.seed + args.local_rank)
np.random.seed(args.seed + args.local_rank)
torch.manual_seed(args.seed + args.local_rank)
torch.cuda.manual_seed(args.seed + args.local_rank)
worker_init = WorkerInitObj(args.seed + args.local_rank)
device, args = setup_training(args)
dllogger.log(step="PARAMETER", data={"Config": [str(args)]})
# Prepare optimizer
model, optimizer, lr_scheduler, checkpoint, global_step, criterion = prepare_model_and_optimizer(args, device)
if is_main_process():
dllogger.log(step="PARAMETER", data={"SEED": args.seed})
raw_train_start = None
if args.do_train:
if is_main_process():
dllogger.log(step="PARAMETER", data={"train_start": True})
dllogger.log(step="PARAMETER", data={"batch_size_per_gpu": args.train_batch_size})
dllogger.log(step="PARAMETER", data={"learning_rate": args.learning_rate})
quant_utils.set_quantizer_by_name(model, [''], _disabled=True)
model.train()
most_recent_ckpts_paths = []
average_loss = 0.0 # averaged loss every args.log_freq steps
epoch = 0
training_steps = 0
if args.sparse:
ASP.init_optimizer_for_pruning(optimizer)
pool = ProcessPoolExecutor(1)
# Note: We loop indefinitely over epochs; termination is handled via the iteration count
while True:
if args.sparse and args.recompute_sparse_masks:
ASP.compute_sparse_masks()
thread = None
restored_data_loader = None
if not args.resume_from_checkpoint or epoch > 0 or (args.phase2 and global_step < 1) or args.init_checkpoint:
files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if
os.path.isfile(os.path.join(args.input_dir, f)) and 'training' in f]
files.sort()
num_files = len(files)
random.Random(args.seed + epoch).shuffle(files)
f_start_id = 0
else:
f_start_id = checkpoint['files'][0]
files = checkpoint['files'][1:]
args.resume_from_checkpoint = False
num_files = len(files)
# may not exist in all checkpoints
epoch = checkpoint.get('epoch', 0)
restored_data_loader = checkpoint.get('data_loader', None)
shared_file_list = {}
if torch.distributed.is_initialized() and get_world_size() > num_files:
remainder = get_world_size() % num_files
data_file = files[(f_start_id*get_world_size()+get_rank() + remainder*f_start_id)%num_files]
else:
data_file = files[(f_start_id*get_world_size()+get_rank())%num_files]
previous_file = data_file
if restored_data_loader is None:
train_data = pretraining_dataset(data_file, args.max_predictions_per_seq)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler,
batch_size=args.train_batch_size * args.n_gpu,
num_workers=4, worker_init_fn=worker_init,
pin_memory=True)
# shared_file_list["0"] = (train_dataloader, data_file)
else:
train_dataloader = restored_data_loader
restored_data_loader = None
overflow_buf = None
if args.allreduce_post_accumulation:
overflow_buf = torch.cuda.IntTensor([0])
for f_id in range(f_start_id + 1, len(files)):
if get_world_size() > num_files:
data_file = files[(f_id*get_world_size()+get_rank() + remainder*f_id)%num_files]
else:
data_file = files[(f_id*get_world_size()+get_rank())%num_files]
previous_file = data_file
dataset_future = pool.submit(create_pretraining_dataset, data_file, args.max_predictions_per_seq, shared_file_list, args, worker_init)
train_iter = tqdm(train_dataloader, desc="Iteration", disable=args.disable_progress_bar) if is_main_process() else train_dataloader
if raw_train_start is None:
raw_train_start = time.time()
for step, batch in enumerate(train_iter):
training_steps += 1
batch = [t.to(device) for t in batch]
input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch
prediction_scores, seq_relationship_score = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
loss = criterion(prediction_scores, seq_relationship_score, masked_lm_labels, next_sentence_labels)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
divisor = args.gradient_accumulation_steps
if args.gradient_accumulation_steps > 1:
if not args.allreduce_post_accumulation:
# this division was merged into predivision
loss = loss / args.gradient_accumulation_steps
divisor = 1.0
if args.fp16:
with amp.scale_loss(loss, optimizer, delay_overflow_check=args.allreduce_post_accumulation) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
average_loss += loss.item()
if training_steps % args.gradient_accumulation_steps == 0:
lr_scheduler.step() # learning rate warmup
global_step = take_optimizer_step(args, optimizer, model, overflow_buf, global_step)
if global_step >= args.steps_this_run or timeout_sent:
train_time_raw = time.time() - raw_train_start
last_num_steps = int(training_steps / args.gradient_accumulation_steps) % args.log_freq
last_num_steps = args.log_freq if last_num_steps == 0 else last_num_steps
average_loss = torch.tensor(average_loss, dtype=torch.float32).cuda()
average_loss = average_loss / (last_num_steps * divisor)
if (torch.distributed.is_initialized()):
average_loss /= get_world_size()
torch.distributed.all_reduce(average_loss)
final_loss = average_loss.item()
if is_main_process():
dllogger.log(step=(epoch, global_step, ), data={"final_loss": final_loss})
elif training_steps % (args.log_freq * args.gradient_accumulation_steps) == 0:
if is_main_process():
dllogger.log(step=(epoch, global_step, ), data={"average_loss": average_loss / (args.log_freq * divisor),
"step_loss": loss.item() * args.gradient_accumulation_steps / divisor,
"learning_rate": optimizer.param_groups[0]['lr']})
average_loss = 0
if global_step >= args.steps_this_run or training_steps % (
args.num_steps_per_checkpoint * args.gradient_accumulation_steps) == 0 or timeout_sent:
if is_main_process() and not args.skip_checkpoint:
# Save a trained model
dllogger.log(step="PARAMETER", data={"checkpoint_step": global_step})
model_to_save = model.module if hasattr(model,
'module') else model # Only save the model itself
if args.resume_step < 0 or not args.phase2:
output_save_file = os.path.join(args.output_dir, "ckpt_{}.pt".format(global_step))
else:
output_save_file = os.path.join(args.output_dir, "ckpt_{}.pt".format(global_step + args.phase1_end_step))
if args.do_train:
torch.save({'model': model_to_save.state_dict(),
'optimizer': optimizer.state_dict(),
'master params': list(amp.master_params(optimizer)),
'files': [f_id] + files,
'epoch': epoch,
'data_loader': None if global_step >= args.max_steps else train_dataloader}, output_save_file)
most_recent_ckpts_paths.append(output_save_file)
if len(most_recent_ckpts_paths) > 3:
ckpt_to_be_removed = most_recent_ckpts_paths.pop(0)
os.remove(ckpt_to_be_removed)
# Exiting the training due to hitting max steps, or being sent a
# timeout from the cluster scheduler
if global_step >= args.steps_this_run or timeout_sent:
del train_dataloader
# thread.join()
return args, final_loss, train_time_raw, global_step
del train_dataloader
# thread.join()
# Make sure pool has finished and switch train_dataloader
# NOTE: Will block until complete
train_dataloader, data_file = dataset_future.result(timeout=None)
epoch += 1
if __name__ == "__main__":
now = time.time()
args, final_loss, train_time_raw, global_step = main()
gpu_count = args.n_gpu
global_step += args.phase1_end_step if (args.phase2 and args.resume_step > 0) else 0
if args.resume_step == -1:
args.resume_step = 0
if torch.distributed.is_initialized():
gpu_count = get_world_size()
if is_main_process():
e2e_time = time.time() - now
training_perf = args.train_batch_size * args.gradient_accumulation_steps * gpu_count\
* (global_step - args.resume_step + skipped_steps) / train_time_raw
dllogger.log(step=tuple(), data={"e2e_train_time": e2e_time, "training_sequences_per_second": training_perf,
"final_loss": final_loss, "raw_train_time": train_time_raw })
dllogger.flush()
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/run_pretraining.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed as dist
from pathlib import Path
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def is_main_process():
return get_rank() == 0
def barrier():
if dist.is_available() and dist.is_initialized():
dist.barrier()
def format_step(step):
if isinstance(step, str):
return step
s = ""
if len(step) > 0:
s += "Training Epoch: {} ".format(step[0])
if len(step) > 1:
s += "Training Iteration: {} ".format(step[1])
if len(step) > 2:
s += "Validation Iteration: {} ".format(step[2])
return s
def mkdir(path):
Path(path).mkdir(parents=True, exist_ok=True)
def mkdir_by_main_process(path):
if is_main_process():
mkdir(path)
barrier()
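# Quick illustration of format_step on tuple-style steps (hypothetical values):
if __name__ == "__main__":
    print(format_step("PARAMETER"))   # strings pass through unchanged
    print(format_step((2, 150)))      # -> "Training Epoch: 2 Training Iteration: 150 "
    print(format_step((2, 150, 30)))  # also appends "Validation Iteration: 30 "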
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/utils.py |
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create masked LM/next sentence masked_lm TF examples for BERT."""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import os
import random
from io import open
import h5py
import numpy as np
from tqdm import tqdm, trange
from tokenization import BertTokenizer
import tokenization as tokenization
import random
import collections
class TrainingInstance(object):
"""A single training instance (sentence pair)."""
def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels,
is_random_next):
self.tokens = tokens
self.segment_ids = segment_ids
self.is_random_next = is_random_next
self.masked_lm_positions = masked_lm_positions
self.masked_lm_labels = masked_lm_labels
def __str__(self):
s = ""
s += "tokens: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.tokens]))
s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids]))
s += "is_random_next: %s\n" % self.is_random_next
s += "masked_lm_positions: %s\n" % (" ".join(
[str(x) for x in self.masked_lm_positions]))
s += "masked_lm_labels: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.masked_lm_labels]))
s += "\n"
return s
def __repr__(self):
return self.__str__()
def write_instance_to_example_file(instances, tokenizer, max_seq_length,
max_predictions_per_seq, output_file):
"""Create TF example files from `TrainingInstance`s."""
total_written = 0
features = collections.OrderedDict()
num_instances = len(instances)
features["input_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32")
features["input_mask"] = np.zeros([num_instances, max_seq_length], dtype="int32")
features["segment_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32")
features["masked_lm_positions"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32")
features["masked_lm_ids"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32")
features["next_sentence_labels"] = np.zeros(num_instances, dtype="int32")
for inst_index, instance in enumerate(tqdm(instances)):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features["input_ids"][inst_index] = input_ids
features["input_mask"][inst_index] = input_mask
features["segment_ids"][inst_index] = segment_ids
features["masked_lm_positions"][inst_index] = masked_lm_positions
features["masked_lm_ids"][inst_index] = masked_lm_ids
features["next_sentence_labels"][inst_index] = next_sentence_label
total_written += 1
# if inst_index < 20:
# tf.logging.info("*** Example ***")
# tf.logging.info("tokens: %s" % " ".join(
# [tokenization.printable_text(x) for x in instance.tokens]))
# for feature_name in features.keys():
# feature = features[feature_name]
# values = []
# if feature.int64_list.value:
# values = feature.int64_list.value
# elif feature.float_list.value:
# values = feature.float_list.value
# tf.logging.info(
# "%s: %s" % (feature_name, " ".join([str(x) for x in values])))
print("saving data")
f = h5py.File(output_file, 'w')
f.create_dataset("input_ids", data=features["input_ids"], dtype='i4', compression='gzip')
f.create_dataset("input_mask", data=features["input_mask"], dtype='i1', compression='gzip')
f.create_dataset("segment_ids", data=features["segment_ids"], dtype='i1', compression='gzip')
f.create_dataset("masked_lm_positions", data=features["masked_lm_positions"], dtype='i4', compression='gzip')
f.create_dataset("masked_lm_ids", data=features["masked_lm_ids"], dtype='i4', compression='gzip')
f.create_dataset("next_sentence_labels", data=features["next_sentence_labels"], dtype='i1', compression='gzip')
f.flush()
f.close()
def create_training_instances(input_files, tokenizer, max_seq_length,
dupe_factor, short_seq_prob, masked_lm_prob,
max_predictions_per_seq, rng):
"""Create `TrainingInstance`s from raw text."""
all_documents = [[]]
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
for input_file in input_files:
print("creating instance from {}".format(input_file))
with open(input_file, "r") as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
# Remove empty documents
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
vocab_words = list(tokenizer.vocab.keys())
instances = []
for _ in range(dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(
create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng))
rng.shuffle(instances)
return instances
def create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
# Account for [CLS], [SEP], [SEP]
max_num_tokens = max_seq_length - 3
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = rng.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
# Random next
is_random_next = False
if len(current_chunk) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
random_document_index = rng.randint(0, len(all_documents) - 1)
if random_document_index != document_index:
break
# If the picked random document is the same as the current document
if random_document_index == document_index:
is_random_next = False
random_document = all_documents[random_document_index]
random_start = rng.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
# Actual next
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
(tokens, masked_lm_positions,
masked_lm_labels) = create_masked_lm_predictions(
tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
instance = TrainingInstance(
tokens=tokens,
segment_ids=segment_ids,
is_random_next=is_random_next,
masked_lm_positions=masked_lm_positions,
masked_lm_labels=masked_lm_labels)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
["index", "label"])
def create_masked_lm_predictions(tokens, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng):
"""Creates the predictions for the masked LM objective."""
cand_indexes = []
for (i, token) in enumerate(tokens):
if token == "[CLS]" or token == "[SEP]":
continue
cand_indexes.append(i)
rng.shuffle(cand_indexes)
output_tokens = list(tokens)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
masked_lms = []
covered_indexes = set()
for index in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
if index in covered_indexes:
continue
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if rng.random() < 0.8:
masked_token = "[MASK]"
else:
# 10% of the time, keep original
if rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels)
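# Worked example of the masking budget above: for a full 128-token sequence with
# the script defaults (max_seq_length=128, masked_lm_prob=0.15,
# max_predictions_per_seq=20), num_to_predict = min(20, max(1, round(128 * 0.15)))
# = min(20, 19) = 19. Each selected position is replaced with [MASK] ~80% of the
# time, kept unchanged ~10% of the time, or swapped for a random vocabulary word
# ~10% of the time.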
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--vocab_file",
default=None,
type=str,
required=True,
help="The vocabulary the BERT model will train on.")
parser.add_argument("--input_file",
default=None,
type=str,
required=True,
help="The input train corpus. can be directory with .txt files or a path to a single file")
parser.add_argument("--output_file",
default=None,
type=str,
required=True,
help="The output file where the model checkpoints will be written.")
## Other parameters
# str
parser.add_argument("--bert_model", default="bert-large-uncased", type=str, required=False,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
#int
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--dupe_factor",
default=10,
type=int,
help="Number of times to duplicate the input data (with different masks).")
parser.add_argument("--max_predictions_per_seq",
default=20,
type=int,
help="Maximum sequence length.")
# floats
parser.add_argument("--masked_lm_prob",
default=0.15,
type=float,
help="Masked LM probability.")
parser.add_argument("--short_seq_prob",
default=0.1,
type=float,
help="Probability to create a sequence shorter than maximum sequence length")
parser.add_argument("--do_lower_case",
action='store_true',
default=True,
help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument('--random_seed',
type=int,
default=12345,
help="random seed for initialization")
args = parser.parse_args()
tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case, max_len=512)
input_files = []
if os.path.isfile(args.input_file):
input_files.append(args.input_file)
elif os.path.isdir(args.input_file):
input_files = [os.path.join(args.input_file, f) for f in os.listdir(args.input_file) if (os.path.isfile(os.path.join(args.input_file, f)) and f.endswith('.txt') )]
else:
raise ValueError("{} is not a valid path".format(args.input_file))
rng = random.Random(args.random_seed)
instances = create_training_instances(
input_files, tokenizer, args.max_seq_length, args.dupe_factor,
args.short_seq_prob, args.masked_lm_prob, args.max_predictions_per_seq,
rng)
output_file = args.output_file
write_instance_to_example_file(instances, tokenizer, args.max_seq_length,
args.max_predictions_per_seq, output_file)
if __name__ == "__main__":
main()
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/create_pretraining_data.py |
# coding=utf-8
# Copyright (c) 2019-2022 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for training models with pytorch-quantization"""
import pickle
import re
import time
import numpy as np
import torch
import random
import pytorch_quantization as quantization
import pytorch_quantization.nn as quant_nn
from pytorch_quantization.tensor_quant import QuantDescriptor
from pytorch_quantization import calib
class Logger:
def info(self, s):
print("INFO:", s)
def warn(self, s):
print("WARN:", s)
logger = Logger()
name_width = 50 # max width of layer names
qname_width = name_width + 20 # max width of quantizer names
def add_arguments(parser):
"""Add arguments to parser for functions defined in quant_trainer."""
group = parser.add_argument_group('quant_trainer arguments')
group.add_argument('--wprec', type=int, default=8,
help='weight precision')
group.add_argument('--aprec', type=int, default=8,
help='activation precision')
group.add_argument('--quant-per-tensor', action='store_true',
help='per tensor weight scaling')
group.add_argument('--quant-disable', action='store_true',
help='disable all quantizers')
group.add_argument('--quant-disable-keyword', type=str, nargs='+',
help='disable quantizers by keyword')
group.add_argument('--calibrator', default='max',
help='which quantization range calibrator to use')
group.add_argument('--percentile', default=None, type=float,
help='percentile for PercentileCalibrator')
group.add_argument('--fuse-qkv', action='store_true',
help='use the same scale factor for qkv')
group.add_argument('--narrow_range', action='store_true',
help='use [-127, 127] range for activations rather than [-128, 127]')
group.add_argument('--quant_mode', type=str, default=None,
help='predefined quantization mode, choices: ["ft1", "ft2", "ft3", "trt"]')
def set_args(args):
if args.quant_mode == 'ft1':
args.wprec = 8
args.aprec = 8
args.quant_per_tensor = False
args.quant_disable = False
args.quant_disable_keyword = ['final_input', 'layernorm_input', 'softmax_input', 'residual_input', 'local_input', 'aftergemm']
args.fuse_qkv = False
args.narrow_range = False
elif args.quant_mode == 'ft2':
args.wprec = 8
args.aprec = 8
args.quant_per_tensor = True
args.quant_disable = False
args.quant_disable_keyword = ['final_input', 'layernorm_input', 'softmax_input', 'local_input']
args.fuse_qkv = True
args.narrow_range = False
elif args.quant_mode == 'ft3':
args.wprec = 8
args.aprec = 8
args.quant_per_tensor = True
args.quant_disable = False
args.quant_disable_keyword = ['final_input', 'layernorm_input', 'local_input']
args.fuse_qkv = True
args.narrow_range = False
elif args.quant_mode == 'trt':
# for demobert
args.wprec = 8
args.aprec = 8
args.quant_per_tensor = True
args.quant_disable = False
args.quant_disable_keyword = ['layernorm_input', 'softmax_input', 'aftergemm']
args.fuse_qkv = True
args.narrow_range = False
else:
raise ValueError("wrong argument value for 'quant_mode'")
return args
def set_default_quantizers(args):
"""Set default quantizers before creating the model."""
if args.calibrator == 'max':
calib_method = 'max'
elif args.calibrator == 'percentile':
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator')
calib_method = 'histogram'
elif args.calibrator == 'mse':
calib_method = 'histogram'
elif args.calibrator == 'entropy':
calib_method = 'histogram'
else:
raise ValueError(F'Invalid calibrator {args.calibrator}')
input_desc = QuantDescriptor(num_bits=args.aprec,
calib_method=calib_method,
narrow_range=args.narrow_range,
)
weight_desc = QuantDescriptor(num_bits=args.wprec,
axis=(None if args.quant_per_tensor else (0,)),
)
quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
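# What the two descriptors above imply (assuming pytorch-quantization semantics):
# axis=None gives a single per-tensor amax for the whole weight matrix, while
# axis=(0,) keeps a separate amax per output channel (dimension 0 of the weight).
# Activations are always calibrated per tensor here, and narrow_range restricts
# the integer range to [-127, 127] instead of [-128, 127].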
def configure_model(model, args, calib=False):
"""Function called before the training loop."""
logger.info('Configuring Model for Quantization')
logger.info(F'using quantization package {quantization.__file__}')
if not calib:
if args.quant_disable:
set_quantizer_by_name(model, [''], _disabled=True)
if args.quant_disable_keyword:
set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
if args.fuse_qkv:
fuse_qkv(model, args)
if args.local_rank in [-1, 0] and not calib:
print_quant_summary(model)
time.sleep(1) # prevent eval printing overlap
def enable_calibration(model):
"""Enable calibration of all *_input_quantizer modules in model."""
logger.info("Enabling Calibration")
for name, module in model.named_modules():
if name.endswith("_quantizer"):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:{qname_width}}: {module}")
def finish_calibration(model, args):
"""Disable calibration and load amax for all "*_input_quantizer modules in model."""
logger.info("Loading calibrated amax")
for name, module in model.named_modules():
if name.endswith("_quantizer"):
if module._calibrator is not None:
if isinstance(module._calibrator, calib.MaxCalibrator):
module.load_calib_amax()
elif args.calibrator == "percentile":
module.load_calib_amax("percentile", percentile=args.percentile)
else:
module.load_calib_amax(args.calibrator)
module.enable_quant()
module.disable_calib()
else:
module.enable()
if args.fuse_qkv:
fuse_qkv(model, args)
model.cuda()
print_quant_summary(model)
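# Typical post-training calibration flow using the helpers above (sketch only;
# build_model and calib_dataloader are hypothetical placeholders):
#
#   set_default_quantizers(args)            # before the model is constructed
#   model = build_model(args)
#   configure_model(model, args, calib=True)
#   enable_calibration(model)
#   with torch.no_grad():
#       for batch in calib_dataloader:      # a modest number of batches is typical
#           model(*batch)
#   finish_calibration(model, args)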
def fuse_qkv(model, args):
"""Adjust quantization ranges to match an implementation where the QKV projections are implemented with a single GEMM.
Force the weight and output scale factors to match by taking the max of (Q,K,V).
"""
def fuse3(qq, qk, qv):
if not hasattr(qq, '_amax') or not hasattr(qk, '_amax') or not hasattr(qv, '_amax'):
logger.warn('missing amax buffer, unable to fuse')
return
q = qq._amax.detach().item()
k = qk._amax.detach().item()
v = qv._amax.detach().item()
amax = max(q, k, v)
qq._amax.fill_(amax)
qk._amax.fill_(amax)
qv._amax.fill_(amax)
logger.info(f' q={q:7.4f} k={k:7.4f} v={v:7.4f} -> {amax:7.4f}')
for name, mod in model.named_modules():
if name.endswith('.attention.self'):
logger.info(f'FUSE_QKV: {name:{name_width}}')
fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
fuse3(mod.query._aftergemm_quantizer, mod.key._aftergemm_quantizer, mod.value._aftergemm_quantizer)
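# e.g. if the calibrated amax values were q=2.1, k=1.7, v=2.5 (made-up numbers),
# fuse3 would set all three quantizers to amax=2.5 so a fused QKV GEMM can share
# a single scale factor.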
def print_quant_summary(model):
"""Print summary of all quantizer modules in the model."""
counters = {'quantizers': 0, 'enabled_quantizers': 0,
'weights': 0, 'quant_weights': 0, 'sparse_weights': 0,
'params': 0, 'sparse_params': 0}
for name, mod in model.named_modules():
if isinstance(mod, quantization.nn.TensorQuantizer):
print(f'{name:80} {mod}')
counters['quantizers'] += 1
if not mod._disabled:
counters['enabled_quantizers'] += 1
for pname, param in mod.named_parameters():
if '.' in pname:
continue
counters['params'] += param.numel()
# fullname = f'{name}.{pname}'
# print(f'{fullname:80} {param.numel():12}')
weight_quantizer = getattr(mod, '_weight_quantizer', None)
if pname == 'weight':
counters['weights'] += param.numel()
if weight_quantizer is not None and not weight_quantizer._disabled:
counters['quant_weights'] += param.numel()
counters['sparse_weights'] += param.eq(0).sum().item()
counters['sparse_params'] += param.eq(0).sum().item()
def print_fraction(a, b, counters, desc):
va = counters[a]
vb = counters[b]
pct = va/vb * 100 if vb != 0 else float('NaN')
print(f'{counters[a]:12}/{vb:12} ({pct:6.2f}%) {desc}')
print_fraction('enabled_quantizers', 'quantizers', counters, 'TensorQuantizers enabled')
print_fraction('quant_weights', 'weights', counters, 'Quantized weights')
print_fraction('sparse_weights', 'weights', counters, 'Zero weights')
print_fraction('weights', 'params', counters, 'Weight parameters')
print('\n\n')
def set_quantizer(name, mod, quantizer, k, v):
"""Set attributes for mod.quantizer."""
quantizer_mod = getattr(mod, quantizer, None)
if quantizer_mod is not None:
assert hasattr(quantizer_mod, k)
setattr(quantizer_mod, k, v)
else:
logger.warn(f'{name} has no {quantizer}')
def set_quantizers(name, mod, which='both', **kwargs):
"""Set quantizer attributes for mod."""
s = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += (f' {k}={v}')
if which in ['input', 'both']:
set_quantizer(name, mod, '_input_quantizer', k, v)
if which in ['weight', 'both']:
set_quantizer(name, mod, '_weight_quantizer', k, v)
logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
"""Set quantizer attributes for layers where name contains a substring in names."""
for name, mod in model.named_modules():
if hasattr(mod, '_input_quantizer') or hasattr(mod, '_weight_quantizer'):
for n in names:
if re.search(n, name):
set_quantizers(name, mod, **kwargs)
elif name.endswith('_quantizer'):
for n in names:
if re.search(n, name):
s = f'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += (f' {k}={v}')
setattr(mod, k, v)
logger.info(s)
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/quant_utils.py |
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
except AttributeError:
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert'))
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
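# Illustrative example (added): split_s3_path("s3://my-bucket/models/bert.tar.gz")
# returns ("my-bucket", "models/bert.tar.gz") -- the leading "/" of the key is stripped.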
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w', encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
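# Note (added): each cached download produces two files in the cache directory --
# "<hash>" holding the payload and "<hash>.json" holding {"url": ..., "etag": ...},
# which is what filename_to_url() reads back above.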
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/file_utils.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT inference script. Does not depend on dataset. """
from __future__ import absolute_import, division, print_function
import argparse
import collections
import json
import logging
import math
import os
import random
import sys
from io import open
import numpy as np
import torch
from tqdm import tqdm, trange
from types import SimpleNamespace
from file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from modeling import BertForQuestionAnswering, BertConfig, WEIGHTS_NAME, CONFIG_NAME
from tokenization import (BasicTokenizer, BertTokenizer, whitespace_tokenize)
from run_squad import _get_best_indices, _compute_softmax, get_valid_prelim_predictions, get_answer_text
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
import math
import json
import numpy as np
import collections
def preprocess_tokenized_text(doc_tokens, query_tokens, tokenizer,
max_seq_length, max_query_length):
""" converts an example into a feature """
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# truncate if too long
length = len(all_doc_tokens)
length = min(length, max_tokens_for_doc)
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(length):
token_to_orig_map[len(tokens)] = tok_to_orig_index[i]
token_is_max_context[len(tokens)] = True
tokens.append(all_doc_tokens[i])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
tensors_for_inference = {
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids
}
tensors_for_inference = SimpleNamespace(**tensors_for_inference)
tokens_for_postprocessing = {
'tokens': tokens,
'token_to_orig_map': token_to_orig_map,
'token_is_max_context': token_is_max_context
}
tokens_for_postprocessing = SimpleNamespace(**tokens_for_postprocessing)
return tensors_for_inference, tokens_for_postprocessing
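# Illustrative layout (added): for a 3-token query and a 4-token document with
# max_seq_length=12, the feature built above looks like
#   tokens:       [CLS] q1 q2 q3 [SEP] d1 d2 d3 d4 [SEP] <pad> <pad>
#   segment_ids:    0   0  0  0    0   1  1  1  1    1     0     0
#   input_mask:     1   1  1  1    1   1  1  1  1    1     0     0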
RawResult = collections.namedtuple("RawResult", ["start_logits", "end_logits"])
def get_answer(doc_tokens, tokens_for_postprocessing,
start_logits, end_logits, args):
result = RawResult(start_logits=start_logits, end_logits=end_logits)
predictions = []
Prediction = collections.namedtuple('Prediction', ['text', 'start_logit', 'end_logit'])
if args.version_2_with_negative:
null_val = (float("inf"), 0, 0)
start_indices = _get_best_indices(result.start_logits, args.n_best_size)
end_indices = _get_best_indices(result.end_logits, args.n_best_size)
prelim_predictions = get_valid_prelim_predictions(start_indices, end_indices,
tokens_for_postprocessing, result, args)
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True
)
if args.version_2_with_negative:
score = result.start_logits[0] + result.end_logits[0]
if score < null_val[0]:
null_val = (score, result.start_logits[0], result.end_logits[0])
doc_tokens_obj = {
'doc_tokens': doc_tokens,
}
doc_tokens_obj = SimpleNamespace(**doc_tokens_obj)
curr_predictions = []
seen_predictions = []
for pred in prelim_predictions:
if len(curr_predictions) == args.n_best_size:
break
if pred.end_index > 0: # this is a non-null prediction
final_text = get_answer_text(doc_tokens_obj, tokens_for_postprocessing, pred, args)
if final_text in seen_predictions:
continue
else:
final_text = ""
seen_predictions.append(final_text)
curr_predictions.append(Prediction(final_text, pred.start_logit, pred.end_logit))
predictions += curr_predictions
# add empty prediction
if args.version_2_with_negative:
predictions.append(Prediction('', null_val[1], null_val[2]))
nbest_answers = []
answer = None
nbest = sorted(predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)[:args.n_best_size]
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry and entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_answers.append(output)
if args.version_2_with_negative:
score_diff = null_val[0] - best_non_null_entry.start_logit - best_non_null_entry.end_logit
if score_diff > args.null_score_diff_threshold:
answer = ""
else:
answer = best_non_null_entry.text
else:
answer = nbest_answers[0]['text']
return answer, nbest_answers
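# Hedged sketch (added for illustration; the arguments are made-up numbers): mirrors
# the SQuAD v2 "no answer" decision above -- if the null score beats the best span
# by more than null_score_diff_threshold, the empty answer wins.
def _example_null_threshold(null_score, best_start_logit, best_end_logit, threshold=-11.0):
    score_diff = null_score - best_start_logit - best_end_logit
    return "" if score_diff > threshold else "<best non-null span>"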
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--init_checkpoint",
default=None,
type=str,
required=True,
help="The checkpoint file from pretraining")
## Other parameters
parser.add_argument("--verbose_logging", action='store_true',
help="If true, all of the warnings related to data processing will be printed. ")
parser.add_argument("--seed", default=1, type=int)
parser.add_argument("--question", default="Most antibiotics target bacteria and don't affect what class of organisms? ",
type=str, help="question")
parser.add_argument("--context", default="Within the genitourinary and gastrointestinal tracts, commensal flora serve as biological barriers by competing with pathogenic bacteria for food and space and, in some cases, by changing the conditions in their environment, such as pH or available iron. This reduces the probability that pathogens will reach sufficient numbers to cause illness. However, since most antibiotics non-specifically target bacteria and do not affect fungi, oral antibiotics can lead to an overgrowth of fungi and cause conditions such as a vaginal candidiasis (a yeast infection). There is good evidence that re-introduction of probiotic flora, such as pure cultures of the lactobacilli normally found in unpasteurized yogurt, helps restore a healthy balance of microbial populations in intestinal infections in children and encouraging preliminary data in studies on bacterial gastroenteritis, inflammatory bowel diseases, urinary tract infection and post-surgical infections. ",
type=str, help="context")
parser.add_argument("--max_seq_length", default=384, type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--max_query_length", default=64, type=int,
help="The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length.")
parser.add_argument("--n_best_size", default=1, type=int,
help="The total number of n-best predictions to generate. ")
parser.add_argument("--max_answer_length", default=30, type=int,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--do_lower_case",
action='store_true',
help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument('--version_2_with_negative',
action='store_true',
help='If true, then the model can reply with "unknown". ')
parser.add_argument('--null_score_diff_threshold',
type=float, default=-11.0,
help="If null_score - best_non_null is greater than the threshold predict 'unknown'. ")
parser.add_argument('--vocab_file',
type=str, default=None, required=True,
help="Vocabulary mapping/file BERT was pretrainined on")
parser.add_argument("--config_file",
default=None,
type=str,
required=True,
help="The BERT model config")
parser.add_argument('--fp16',
action='store_true',
help="use mixed-precision")
parser.add_argument("--local_rank", default=-1, help="ordinal of the GPU to use")
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case, max_len=512) # for bert large
# Prepare model
config = BertConfig.from_json_file(args.config_file)
# Padding for divisibility by 8
if config.vocab_size % 8 != 0:
config.vocab_size += 8 - (config.vocab_size % 8)
# initialize model
model = BertForQuestionAnswering(config)
model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu')["model"])
model.to(device)
if args.fp16:
model.half()
model.eval()
print("question: ", args.question)
print("context: ", args.context)
print()
# preprocessing
doc_tokens = args.context.split()
query_tokens = tokenizer.tokenize(args.question)
feature = preprocess_tokenized_text(doc_tokens,
query_tokens,
tokenizer,
max_seq_length=args.max_seq_length,
max_query_length=args.max_query_length)
tensors_for_inference, tokens_for_postprocessing = feature
input_ids = torch.tensor(tensors_for_inference.input_ids, dtype=torch.long).unsqueeze(0)
segment_ids = torch.tensor(tensors_for_inference.segment_ids, dtype=torch.long).unsqueeze(0)
input_mask = torch.tensor(tensors_for_inference.input_mask, dtype=torch.long).unsqueeze(0)
# load tensors to device
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
# run prediction
with torch.no_grad():
start_logits, end_logits = model(input_ids, segment_ids, input_mask)
# post-processing
start_logits = start_logits[0].detach().cpu().tolist()
end_logits = end_logits[0].detach().cpu().tolist()
answer, answers = get_answer(doc_tokens, tokens_for_postprocessing,
start_logits, end_logits, args)
# print result
print()
print(answer)
print()
print(json.dumps(answers, indent=4))
if __name__ == "__main__":
main()
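# Example invocation (added; the paths are placeholders, not files shipped with the repo):
#   python inference.py --bert_model bert-large-uncased \
#       --init_checkpoint <checkpoint.pt> --config_file <bert_config.json> \
#       --vocab_file <vocab.txt> --do_lower_case --fp16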
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/inference.py |
# coding=utf-8
# Copyright (c) 2019-2022 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils import checkpoint
sys.path.append('/workspace/bert/')
from file_utils import cached_path
from torch.nn import Module
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.nn.init as init
QUANT = True
if QUANT:
from pytorch_quantization.nn import QuantLinear, TensorQuantizer
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
TF_WEIGHTS_NAME = 'model.ckpt'
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.ascontiguousarray(np.transpose(array))
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
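# Note (added): 1.41421 approximates sqrt(2), so the expression above is the exact
# GELU, x * Phi(x), with Phi the standard normal CDF written via the error function.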
# used only for Triton inference
def bias_gelu(bias, y):
x = bias + y
return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
# used specifically for training since torch.nn.functional.gelu breaks ONNX export
def bias_gelu_training(bias, y):
x = bias + y
return torch.nn.functional.gelu(x) # Breaks ONNX export
def bias_tanh(bias, y):
x = bias + y
return torch.tanh(x)
def swish(x):
return x * torch.sigmoid(x)
def bias_noact(bias, y):
return bias + y
ACT2FN = {"gelu": gelu, "bias_gelu": bias_gelu, "bias_tanh": bias_tanh, "relu": torch.nn.functional.relu, "swish": swish,
"bias_noact": bias_noact}
class LinearActivation(Module):
r"""Fused Linear and activation Module.
"""
__constants__ = ['bias']
def __init__(self, in_features, out_features, act='gelu', bias=True, do_quant=False):
super(LinearActivation, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.act_fn = nn.Identity() #
self.biased_act_fn = None #
self.bias = None #
if isinstance(act, str) or (sys.version_info[0] == 2 and isinstance(act, unicode)): # For TorchScript
if bias and not 'bias' in act: # compatibility
act = 'bias_' + act #
self.biased_act_fn = ACT2FN[act] #
else:
self.act_fn = ACT2FN[act]
else:
self.act_fn = act
self.weight = Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.do_quant = do_quant
if QUANT and do_quant:
self._input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self._weight_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_weight)
if bias:
self._aftergemm_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.reset_parameters()
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
if QUANT and self.do_quant:
input = self._input_quantizer(input)
weight = self._weight_quantizer(self.weight)
else:
weight = self.weight
if not self.bias is None:
if QUANT and self.do_quant:
return self.biased_act_fn(self.bias, self._aftergemm_quantizer(F.linear(input, weight, None)))
else:
return self.biased_act_fn(self.bias, F.linear(input, weight, None))
else:
return self.act_fn(F.linear(input, weight, self.bias))
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
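# Hedged usage sketch (added for illustration; not used by the model code): with
# bias=True the string activation is rewritten to its fused "bias_*" variant, so
# act='gelu' resolves to ACT2FN['bias_gelu'] and act='noact' to a plain x @ W.T + b.
def _example_linear_activation():
    layer = LinearActivation(8, 16, act='gelu', do_quant=False)
    x = torch.randn(2, 8)
    return layer(x).shape  # torch.Size([2, 16])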
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
output_all_encoded_layers=False):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The standard deviation of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.output_all_encoded_layers = output_all_encoded_layers
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
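# Hedged usage sketch (added for illustration; the values are arbitrary): from_dict()
# mirrors what from_json_file() does after reading the config file, and
# to_json_string() round-trips the same fields.
def _example_bert_config_roundtrip():
    cfg = BertConfig.from_dict({
        "vocab_size": 32000, "hidden_size": 768, "num_hidden_layers": 12,
        "num_attention_heads": 12, "intermediate_size": 3072,
    })
    return json.loads(cfg.to_json_string())["hidden_size"]  # 768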
class BertNonFusedLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertNonFusedLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u)
s = s * s
s = s.mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
try:
import apex
#apex.amp.register_half_function(apex.normalization.fused_layer_norm, 'FusedLayerNorm')
import apex.normalization
from apex.normalization.fused_layer_norm import FusedLayerNormAffineFunction
#apex.amp.register_float_function(apex.normalization.FusedLayerNorm, 'forward')
#BertLayerNorm = apex.normalization.FusedLayerNorm
APEX_IS_AVAILABLE = True
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
#BertLayerNorm = BertNonFusedLayerNorm
APEX_IS_AVAILABLE = False
class BertLayerNorm(Module):
def __init__(self, hidden_size, eps=1e-12):
super(BertLayerNorm, self).__init__()
self.shape = torch.Size((hidden_size,))
self.eps = eps
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
self.apex_enabled = APEX_IS_AVAILABLE
@torch.jit.unused
def fused_layer_norm(self, x):
return FusedLayerNormAffineFunction.apply(
x, self.gamma, self.beta, self.shape, self.eps)
def forward(self, x):
if self.apex_enabled and not torch.jit.is_scripting():
x = self.fused_layer_norm(x)
else:
u = x.mean(-1, keepdim=True)
s = (x - u)
s = s * s
s = s.mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.gamma * x + self.beta
return x
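# Hedged sanity check (added for illustration; not part of the model): with the apex
# path disabled, the manual branch above should match torch's built-in layer_norm.
def _example_layernorm_equivalence():
    ln = BertLayerNorm(8)
    ln.apex_enabled = False  # force the non-fused branch
    x = torch.randn(2, 4, 8)
    ref = F.layer_norm(x, (8,), weight=ln.gamma, bias=ln.beta, eps=ln.eps)
    return torch.allclose(ln(x), ref, atol=1e-5)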
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = LinearActivation(config.hidden_size, self.all_head_size, act='noact', do_quant=True)
self.key = LinearActivation(config.hidden_size, self.all_head_size, act='noact', do_quant=True)
self.value = LinearActivation(config.hidden_size, self.all_head_size, act='noact', do_quant=True)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
if QUANT:
self.matmul_q_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.matmul_k_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.matmul_v_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.matmul_a_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.softmax_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = torch.reshape(x, new_x_shape)
return x.permute(0, 2, 1, 3)
def transpose_key_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = torch.reshape(x, new_x_shape)
return x.permute(0, 2, 3, 1)
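    # Shape note (added): transpose_for_scores maps [batch, seq, heads*dim] to
    # [batch, heads, seq, dim], while transpose_key_for_scores maps it to
    # [batch, heads, dim, seq], so the matmul in forward() yields the
    # [batch, heads, seq, seq] attention scores without an explicit transpose.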
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_key_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
if QUANT:
attention_scores = torch.matmul(self.matmul_q_input_quantizer(query_layer),
self.matmul_k_input_quantizer(key_layer))
attention_scores = self.softmax_input_quantizer(attention_scores)
else:
attention_scores = torch.matmul(query_layer, key_layer)
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = F.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
if QUANT:
context_layer = torch.matmul(self.matmul_a_input_quantizer(attention_probs),
self.matmul_v_input_quantizer(value_layer))
else:
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = torch.reshape(context_layer, new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = LinearActivation(config.hidden_size, config.hidden_size, act='noact', do_quant=True)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if QUANT:
self.add_local_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add_residual_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.layernorm_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
if QUANT:
add_local = self.add_local_input_quantizer(hidden_states)
add_residual = self.add_residual_input_quantizer(input_tensor)
lnorm_input = self.layernorm_input_quantizer(add_local + add_residual)
hidden_states = self.LayerNorm(lnorm_input)
else:
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
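# Hedged note (added interpretation): when QUANT is enabled, the local projection
# output, the residual branch, and the LayerNorm input each get their own
# TensorQuantizer above, presumably so calibration collects a separate scale at each
# of those points; BertOutput below follows the same pattern.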
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = LinearActivation(config.hidden_size, config.intermediate_size, act=config.hidden_act, do_quant=True)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = LinearActivation(config.intermediate_size, config.hidden_size, act='noact', do_quant=True)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if QUANT:
self.add_local_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add_residual_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.layernorm_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
if QUANT:
add_local = self.add_local_input_quantizer(hidden_states)
add_residual = self.add_residual_input_quantizer(input_tensor)
lnorm_input = self.layernorm_input_quantizer(add_local + add_residual)
hidden_states = self.LayerNorm(lnorm_input)
else:
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
self.output_all_encoded_layers = config.output_all_encoded_layers
self._checkpoint_activations = False
if QUANT:
self.final_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
@torch.jit.unused
def checkpointed_forward(self, hidden_states, attention_mask):
def custom(start, end):
def custom_forward(*inputs):
layers = self.layer[start:end]
x_ = inputs[0]
for layer in layers:
x_ = layer(x_, inputs[1])
return x_
return custom_forward
l = 0
num_layers = len(self.layer)
chunk_length = math.ceil(math.sqrt(num_layers))
while l < num_layers:
hidden_states = checkpoint.checkpoint(custom(l, l+chunk_length), hidden_states, attention_mask*1)
l += chunk_length
return hidden_states
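    # Note (added): chunk_length = ceil(sqrt(num_layers)) groups the layers into
    # roughly sqrt(N) checkpoint segments (e.g. 24 layers -> 5 segments of at most
    # 5 layers), trading recomputation for activation memory.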
def forward(self, hidden_states, attention_mask):
all_encoder_layers = []
if self._checkpoint_activations:
hidden_states = self.checkpointed_forward(hidden_states, attention_mask)
if QUANT:
hidden_states = self.final_input_quantizer(hidden_states)
else:
for i,layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states, attention_mask)
if QUANT and i == len(self.layer) - 1:
hidden_states = self.final_input_quantizer(hidden_states)
if self.output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not self.output_all_encoded_layers or self._checkpoint_activations:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = LinearActivation(config.hidden_size, config.hidden_size, act="tanh")
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = LinearActivation(config.hidden_size, config.hidden_size, act=config.hidden_act)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.beta.data.zero_()
module.gamma.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def checkpoint_activations(self, val):
def _apply_flag(module):
if hasattr(module, "_checkpoint_activations"):
module._checkpoint_activations=val
self.apply(_apply_flag)
def enable_apex(self, val):
def _apply_flag(module):
if hasattr(module, "apex_enabled"):
module.apex_enabled=val
self.apply(_apply_flag)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None,
from_tf=False, *inputs, **kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
                    . `model.ckpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
archive_file))
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file) or from_tf:
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
def is_within_directory(directory, target):
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
raise Exception("Attempted Path Traversal in Tar File")
tar.extractall(path, members, numeric_owner=numeric_owner)
safe_extract(archive, tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None)
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
if from_tf:
# Directly load from a TensorFlow checkpoint
weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
return load_tf_weights_in_bert(model, weights_path)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
return model
class BertModel(BertPreTrainedModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
Outputs: Tuple of (encoded_layers, pooled_output)
`encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated with the first token of the
            input (`[CLS]`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
self.output_all_encoded_layers = config.output_all_encoded_layers
def forward(self, input_ids, token_type_ids, attention_mask):
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.embeddings.word_embeddings.weight.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output, extended_attention_mask)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not self.output_all_encoded_layers:
encoded_layers = encoded_layers[-1:]
return encoded_layers, pooled_output
class BertForPreTraining(BertPreTrainedModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids, attention_mask):
encoded_layers, pooled_output = self.bert(input_ids, token_type_ids, attention_mask)
sequence_output = encoded_layers[-1]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
return prediction_scores, seq_relationship_score
class BertForMaskedLM(BertPreTrainedModel):
"""BERT model with the masked language modeling head.
This module comprises the BERT model followed by the masked language modeling head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
Outputs:
if `masked_lm_labels` is not `None`:
Outputs the masked language modeling loss.
if `masked_lm_labels` is `None`:
Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForMaskedLM(config)
masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask)
sequence_output = encoded_layers[-1]
prediction_scores = self.cls(sequence_output)
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
return masked_lm_loss
else:
return prediction_scores
class BertForNextSentencePrediction(BertPreTrainedModel):
"""BERT model with next sentence prediction head.
This module comprises the BERT model followed by the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `next_sentence_label` is not `None`:
Outputs the next sentence classification loss.
if `next_sentence_label` is `None`:
Outputs the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForNextSentencePrediction(config)
seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForNextSentencePrediction, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask)
seq_relationship_score = self.cls(pooled_output)
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
return next_sentence_loss
else:
return seq_relationship_score
class BertForSequenceClassification(BertPreTrainedModel):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask)
pooled_output = self.dropout(pooled_output)
return self.classifier(pooled_output)
class BertForMultipleChoice(BertPreTrainedModel):
"""BERT model for multiple choice tasks.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_choices`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_choices].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_choices].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_choices = 2
model = BertForMultipleChoice(config, num_choices)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_choices):
super(BertForMultipleChoice, self).__init__(config)
self.num_choices = num_choices
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
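# collapse the choice dimension: [batch_size, num_choices, seq_len] -> [batch_size * num_choices, seq_len]
# so that BERT scores each (example, choice) pair independently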
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
_, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, self.num_choices)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return loss
else:
return reshaped_logits
class BertForTokenClassification(BertPreTrainedModel):
"""BERT model for token-level classification.
This module is composed of the BERT model with a linear layer on top of
the full hidden state of the last layer.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, sequence_length, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForTokenClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels):
super(BertForTokenClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask)
sequence_output = encoded_layers[-1]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForQuestionAnswering(BertPreTrainedModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
Outputs:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForQuestionAnswering(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.bert = BertModel(config)
# TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids, attention_mask):
encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask)
sequence_output = encoded_layers[-1]
logits = self.qa_outputs(sequence_output)
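# split the two output channels into start/end logits, each of shape [batch_size, seq_len]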
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
return start_logits, end_logits
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/modeling.py |
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
import argparse
import csv
import logging
import os
import random
import sys
from io import open
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from modeling import BertForMultipleChoice, BertConfig, WEIGHTS_NAME, CONFIG_NAME
from optimization import BertAdam, warmup_linear
from tokenization import BertTokenizer
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class SwagExample(object):
"""A single training/test example for the SWAG dataset."""
def __init__(self,
swag_id,
context_sentence,
start_ending,
ending_0,
ending_1,
ending_2,
ending_3,
label = None):
self.swag_id = swag_id
self.context_sentence = context_sentence
self.start_ending = start_ending
self.endings = [
ending_0,
ending_1,
ending_2,
ending_3,
]
self.label = label
def __str__(self):
return self.__repr__()
def __repr__(self):
l = [
"swag_id: {}".format(self.swag_id),
"context_sentence: {}".format(self.context_sentence),
"start_ending: {}".format(self.start_ending),
"ending_0: {}".format(self.endings[0]),
"ending_1: {}".format(self.endings[1]),
"ending_2: {}".format(self.endings[2]),
"ending_3: {}".format(self.endings[3]),
]
if self.label is not None:
l.append("label: {}".format(self.label))
return ", ".join(l)
class InputFeatures(object):
def __init__(self,
example_id,
choices_features,
label
):
self.example_id = example_id
self.choices_features = [
{
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids
}
for _, input_ids, input_mask, segment_ids in choices_features
]
self.label = label
def read_swag_examples(input_file, is_training):
with open(input_file, 'r', encoding='utf-8') as f:
reader = csv.reader(f)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
if is_training and lines[0][-1] != 'label':
raise ValueError(
"For training, the input file must contain a label column."
)
examples = [
SwagExample(
swag_id = line[2],
context_sentence = line[4],
start_ending = line[5], # in the swag dataset, the
# common beginning of each
# choice is stored in "sent2".
ending_0 = line[7],
ending_1 = line[8],
ending_2 = line[9],
ending_3 = line[10],
label = int(line[11]) if is_training else None
) for line in lines[1:] # we skip the line with the column names
]
return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
is_training):
"""Loads a data file into a list of `InputBatch`s."""
# Swag is a multiple choice task. To perform this task using Bert,
# we will use the formatting proposed in "Improving Language
# Understanding by Generative Pre-Training" and suggested by
# @jacobdevlin-google in this issue
# https://github.com/google-research/bert/issues/38.
#
# Each choice will correspond to a sample on which we run the
# inference. For a given Swag example, we will create the 4
# following inputs:
# - [CLS] context [SEP] choice_1 [SEP]
# - [CLS] context [SEP] choice_2 [SEP]
# - [CLS] context [SEP] choice_3 [SEP]
# - [CLS] context [SEP] choice_4 [SEP]
# The model will output a single value for each input. To get the
# final decision of the model, we will run a softmax over these 4
# outputs.
features = []
for example_index, example in enumerate(examples):
context_tokens = tokenizer.tokenize(example.context_sentence)
start_ending_tokens = tokenizer.tokenize(example.start_ending)
choices_features = []
for ending_index, ending in enumerate(example.endings):
# We create a copy of the context tokens in order to be
# able to shrink it according to ending_tokens
context_tokens_choice = context_tokens[:]
ending_tokens = start_ending_tokens + tokenizer.tokenize(ending)
# Modifies `context_tokens_choice` and `ending_tokens` in
# place so that the total length is less than the
# specified length. Account for [CLS], [SEP], [SEP] with
# "- 3"
_truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3)
tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"]
segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
choices_features.append((tokens, input_ids, input_mask, segment_ids))
label = example.label
if example_index < 5:
logger.info("*** Example ***")
logger.info("swag_id: {}".format(example.swag_id))
for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features):
logger.info("choice: {}".format(choice_idx))
logger.info("tokens: {}".format(' '.join(tokens)))
logger.info("input_ids: {}".format(' '.join(map(str, input_ids))))
logger.info("input_mask: {}".format(' '.join(map(str, input_mask))))
logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids))))
if is_training:
logger.info("label: {}".format(label))
features.append(
InputFeatures(
example_id = example.swag_id,
choices_features = choices_features,
label = label
)
)
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
def select_field(features, field):
return [
[
choice[field]
for choice in feature.choices_features
]
for feature in features
]
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .csv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints will be written.")
parser.add_argument("--init_checkpoint",
default=None,
type=str,
required=True,
help="The checkpoint file from pretraining")
## Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1.0, type=float,
help="Total number of training steps to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
default=False,
action='store_true',
help="Mixed precision training")
parser.add_argument('--amp',
default=False,
action='store_true',
help="Mixed precision training")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
args = parser.parse_args()
args.fp16 = args.fp16 or args.amp
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
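# after this division, train_batch_size is the per-forward micro-batch size; gradients are
# accumulated over gradient_accumulation_steps steps to recover the requested batch size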
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
print("WARNING: Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
train_examples = None
num_train_optimization_steps = None
if args.do_train:
train_examples = read_swag_examples(os.path.join(args.data_dir, 'train.csv'), is_training = True)
num_train_optimization_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
# Prepare model
model = BertForMultipleChoice.from_pretrained(args.bert_model,
cache_dir=os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, 'distributed_{}'.format(args.local_rank)),
num_choices=4)
model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu'), strict=False)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
# hack to remove pooler, which is not used
# thus it produces None grads that break apex
param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
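# weight decay is applied to all parameters except biases and LayerNorm parameters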
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.contrib.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
global_step = 0
if args.do_train:
train_features = convert_examples_to_features(
train_examples, tokenizer, args.max_seq_length, True)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
all_input_ids = torch.tensor(select_field(train_features, 'input_ids'), dtype=torch.long)
all_input_mask = torch.tensor(select_field(train_features, 'input_mask'), dtype=torch.long)
all_segment_ids = torch.tensor(select_field(train_features, 'segment_ids'), dtype=torch.long)
all_label = torch.tensor([f.label for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
# Terminate early for benchmarking
if args.max_steps > 0 and global_step > args.max_steps:
break
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
loss = model(input_ids, segment_ids, input_mask, label_ids)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.fp16 and args.loss_scale != 1.0:
# rescale loss for fp16 training
# see https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html
loss = loss * args.loss_scale
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
# modify learning rate with special warm up BERT uses
# if args.fp16 is False, BertAdam is used that handles this automatically
lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
if args.do_train:
# Save a trained model and the associated configuration
model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
with open(output_config_file, 'w') as f:
f.write(model_to_save.config.to_json_string())
# Load a trained model and config that you have fine-tuned
config = BertConfig(output_config_file)
model = BertForMultipleChoice(config, num_choices=4)
model.load_state_dict(torch.load(output_model_file))
else:
model = BertForMultipleChoice.from_pretrained(args.bert_model, num_choices=4)
model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu'), strict=False)
model.to(device)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = read_swag_examples(os.path.join(args.data_dir, 'val.csv'), is_training = True)
eval_features = convert_examples_to_features(
eval_examples, tokenizer, args.max_seq_length, True)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor(select_field(eval_features, 'input_ids'), dtype=torch.long)
all_input_mask = torch.tensor(select_field(eval_features, 'input_mask'), dtype=torch.long)
all_segment_ids = torch.tensor(select_field(eval_features, 'segment_ids'), dtype=torch.long)
all_label = torch.tensor([f.label for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
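# two forward passes: the first (with labels) returns the loss, the second returns the logits used for accuracy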
tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
logits = model(input_ids, segment_ids, input_mask)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
tmp_eval_accuracy = accuracy(logits, label_ids)
eval_loss += tmp_eval_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy,
'global_step': global_step,
'loss': tr_loss / nb_tr_steps if args.do_train else 0.0}
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/run_swag.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
class LRScheduler(_LRScheduler):
def __init__(self, optimizer, last_epoch=-1):
# Check if using mixed precision training
self.mixed_training = False
base_optimizer = optimizer
# Check that optimizer param is valid
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
super(LRScheduler, self).__init__(base_optimizer, last_epoch)
def step(self, epoch=None):
# Set the current training step
# ('epoch' is used to be consistent with _LRScheduler)
if self.mixed_training:
# The assumption is that the step will be constant
state_dict = self.optimizer.state[self.optimizer.param_groups[0]['params'][0]]
if 'step' in state_dict:
self.last_epoch = state_dict['step'] + 1
else:
self.last_epoch = 1
else:
self.last_epoch = epoch if epoch is not None else self.last_epoch + 1
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
class CosineWarmUpScheduler(LRScheduler):
"""
Applies a warm up period to the learning rate.
"""
def __init__(self, optimizer, warmup, total_steps, last_epoch=-1):
self.warmup = warmup
self.total_steps = total_steps
super(CosineWarmUpScheduler, self).__init__(optimizer, last_epoch)
def get_lr(self):
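# linear warmup for the first `warmup` fraction of training, then cosine decay towards zero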
progress = self.last_epoch / self.total_steps
if progress < self.warmup:
return [base_lr * progress / self.warmup for base_lr in self.base_lrs]
else:
return [base_lr * (0.5 * (1.0 + math.cos(math.pi * progress))) for base_lr in self.base_lrs]
class ConstantWarmUpScheduler(LRScheduler):
"""
Applies a warm up period to the learning rate.
"""
def __init__(self, optimizer, warmup, total_steps, last_epoch=-1):
self.warmup = warmup
self.total_steps = total_steps
super(ConstantWarmUpScheduler, self).__init__(optimizer, last_epoch)
def get_lr(self):
progress = self.last_epoch / self.total_steps
if progress < self.warmup:
return [base_lr * progress / self.warmup for base_lr in self.base_lrs]
else:
return self.base_lrs
class LinearWarmUpScheduler(LRScheduler):
"""
Applies a warm up period to the learning rate.
"""
def __init__(self, optimizer, warmup, total_steps, last_epoch=-1):
self.warmup = warmup
self.total_steps = total_steps
super(LinearWarmUpScheduler, self).__init__(optimizer, last_epoch)
def get_lr(self):
progress = self.last_epoch / self.total_steps
if progress < self.warmup:
return [base_lr * progress / self.warmup for base_lr in self.base_lrs]
else:
return [base_lr * max(( progress - 1.0)/(self.warmup - 1.0), 0.) for base_lr in self.base_lrs]
class PolyWarmUpScheduler(LRScheduler):
"""
Applies a warm up period to the learning rate.
"""
def __init__(self, optimizer, warmup, total_steps, degree=0.5, last_epoch=-1):
self.warmup = warmup
self.total_steps = total_steps
self.degree = degree
super(PolyWarmUpScheduler, self).__init__(optimizer, last_epoch)
def step(self, epoch=None):
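# read the global step from the optimizer's param_group when available, ignoring the epoch argument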
param_group = self.optimizer.param_groups[0]
if 'step' in param_group:
self.last_epoch = param_group['step'] + 1
else:
self.last_epoch = 1
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
def get_lr(self):
progress = self.last_epoch / self.total_steps
if progress < self.warmup:
return [base_lr * progress / self.warmup for base_lr in self.base_lrs]
else:
return [base_lr * ((1.0 - progress) ** self.degree) for base_lr in self.base_lrs]
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/schedulers.py |
FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/processors/__init__.py |
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import sys
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For
single sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second
sequence. Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test
examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")),
"train",
)
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev",
)
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(
InputExample(guid=guid,
text_a=text_a,
text_b=text_b,
label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")),
"train",
)
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched",
)
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
examples.append(
InputExample(guid=guid,
text_a=text_a,
text_b=text_b,
label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")),
"train",
)
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev",
)
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None,
label=label))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")),
"train",
)
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev",
)
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None,
label=label))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features, label_map
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
PROCESSORS = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
}
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/processors/glue.py |
from .sparse_masklib import create_mask
from .asp import ASP
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/apex_sparsity/__init__.py |
import types
import torch
from .sparse_masklib import create_mask
torchvision_imported=True
try:
import torchvision
except ImportError:
print("[ASP][Warning] torchvision cannot be imported.")
torchvision_imported=False
def eligible_modules(model, whitelist_layer_types, allowed_layer_names, disallowed_layer_names):
eligible_modules_list = []
for name, mod in model.named_modules():
if isinstance(mod, whitelist_layer_types) and name not in disallowed_layer_names:
if allowed_layer_names is not None and name not in allowed_layer_names:
continue
eligible_modules_list.append((name, mod))
return eligible_modules_list
class ASP:
__model = None
__verbosity = 0
__optimizer = None
__sparse_parameters = []
__calculate_mask = None
@classmethod
def init_model_for_pruning(cls, model, mask_calculator="m4n2_1d",
verbosity=3,
whitelist=[torch.nn.Linear, torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d],
allowed_layer_names=None, disallowed_layer_names=[],
allow_recompute_mask=False, custom_layer_dict={}):
"""Call this method to modify your model to take advantage of sparse matrix multiplication.
Note that this call alone only augments the model with additional buffers needed for sparse MMA,
it does not enable use of sparse MMA.
If you are starting with a fresh model:
model = ...
ASP.init_model_for_pruning(model, mask_calculator, ...)
if (training) ASP.init_optimizer_for_pruning(optimizer)
ASP.compute_sparse_masks() // sparsity is off by default, call when you want to enable it.
If you are starting from a checkpoint:
model = ...
ASP.init_model_for_pruning(model, mask_calculator, ...)
torch.load(...)
if (training) ASP.init_optimizer_for_pruning(optimizer)
Arguments:
model The model
mask_calculator Either callable that computes mask given a tensor OR pattern string for sparse mask lib.
verbosity Integer controlling verbosity level.
0 -> Only errors.
1 -> Errors and warnings.
2 -> Errors, warnings and info.
3 -> Errors, warnings, info and debug.
whitelist Module types approved for sparsity.
allowed_layer_names If not None, only layer names that appear in this list are considered for sparsity.
disallowed_layer_names If not [], only layer names that do not appear in this list are considered for sparsity.
allow_recompute_mask If True, stores pruned values so that dense weights can be restored.
Pruned weights are stored in CPU memory, hence this option does not increase GPU memory usage.
custom_layer_dict Dictionary of additional layer parameters to sparsify. e.g. {CustomLinear: ['weight']}
[Future] Support for allow_recompute_mask can be removed, it is not part of sparse inference recipe -- AKM.
"""
assert (cls.__model is None), "ASP has been initialized already."
cls.__model = model
cls.__verbosity = verbosity
if isinstance(mask_calculator, str):
def create_mask_from_pattern(param):
return create_mask(param, mask_calculator).bool()
cls.__calculate_mask = create_mask_from_pattern
else:
cls.__calculate_mask = mask_calculator #user defined function
# function to extract variables that will be sparsified.
# idea is that you will add one of these functions for each module type that can be sparsified.
if torchvision_imported:
print("[ASP] torchvision is imported, can work with the MaskRCNN/KeypointRCNN from torchvision.")
sparse_parameter_list = {torch.nn.Linear: ['weight'], torch.nn.Conv1d: ['weight'], torch.nn.Conv2d: ['weight'], torch.nn.Conv3d: ['weight'], torchvision.ops.misc.Conv2d: ['weight']}
else:
sparse_parameter_list = {torch.nn.Linear: ['weight'], torch.nn.Conv1d: ['weight'], torch.nn.Conv2d: ['weight'], torch.nn.Conv3d: ['weight']}
if custom_layer_dict: # Update default list to include user supplied custom (layer type : parameter tensor), make sure this tensor type is something ASP knows how to prune
sparse_parameter_list.update(custom_layer_dict)
whitelist += list(custom_layer_dict.keys())
for module_type in whitelist:
assert (module_type in sparse_parameter_list), "Module %s :: Don't know how to sparsify module." % module_type
# find all sparse modules, extract sparse parameters and decorate
def add_sparse_attributes(module_name, module):
sparse_parameters = sparse_parameter_list[type(module)]
for p_name, p in module.named_parameters():
if p_name in sparse_parameters and p.requires_grad:
# check for NVIDIA's TC compatibility: we check along the horizontal direction
if p.dtype == torch.float32 and ((p.size()[0] % 8) != 0 or (p.size()[1] % 16) != 0): #User defines FP32 and APEX internally uses FP16 math
print("[ASP] Auto skipping pruning %s::%s of size=%s and type=%s for sparsity" % (module_name, p_name, str(p.size()), str(p.dtype)))
continue
if p.dtype == torch.float16 and ((p.size()[0] % 8) != 0 or (p.size()[1] % 16) != 0): #For Conv2d dim= K x CRS; we prune along C
print("[ASP] Auto skipping pruning %s::%s of size=%s and type=%s for sparsity" % (module_name, p_name, str(p.size()), str(p.dtype)))
continue
if cls.__verbosity >= 3:
print("[ASP] Sparsifying %s::%s of size=%s and type=%s for sparsity" % (module_name, p_name, str(p.size()), str(p.dtype)))
mask = torch.ones_like(p).bool()
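# register the mask as a buffer so it is saved and loaded with the model's state_dict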
buffname = p_name.split(".")[-1] # buffer names cannot contain "."
module.register_buffer('__%s_mma_mask' % buffname, mask)
if allow_recompute_mask:
pruned = torch.zeros_like(p).cpu()
module.register_buffer('__%s_mma_pruned_p' % buffname, pruned)
else:
pruned = None
cls.__sparse_parameters.append((module_name, module, p_name, p, mask, pruned))
else:
if cls.__verbosity >= 3:
print("[ASP] Not sparsifying %s::%s of size=%s and type=%s" % (module_name, p_name, str(p.size()), str(p.dtype)))
for name, sparse_module in eligible_modules(model, tuple(whitelist), allowed_layer_names, disallowed_layer_names):
add_sparse_attributes(name, sparse_module)
@classmethod
def init_optimizer_for_pruning(cls, optimizer):
"""Call this method to monkey patch optimizer step function so that masks can be applied to
gradients and weights during training.
You must call init_model_for_pruning(...) before calling init_optimizer_for_pruning(...)
"""
assert (cls.__optimizer is None), "ASP has initialized optimizer already."
assert (cls.__calculate_mask is not None), "Called ASP.init_optimizer_for_pruning before ASP.init_model_for_pruning."
# store pointer to original optimizer step method
cls.__optimizer = optimizer
cls.__optimizer.__step = optimizer.step
def __step(opt_self, *args, **kwargs):
# prune gradients before step method
with torch.no_grad():
for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
if p.grad is not None: #thx pjudd
p.grad.mul_(mask)
# call original optimizer step method
rval = opt_self.__step(*args, **kwargs)
# prune parameters after step method
with torch.no_grad():
for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
p.mul_(mask)
return rval
cls.__optimizer.step = types.MethodType(__step, cls.__optimizer)
@classmethod
def compute_sparse_masks(cls):
"""Call this method to enable sparsity.
If init(...) was called with allow_recompute_mask=False AND sparsity is disabled, pruned field can be None.
"""
with torch.no_grad():
for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
if mask.sum() < mask.numel(): # when recalculating masks
# restore dense parameter if allow_recompute_mask is enabled
assert (pruned is not None), "Unable to restore dense parameter because allow_recompute_mask == False"
p.add_(pruned.cuda())
mask.set_(cls.__calculate_mask(p))
if pruned is not None: # stow away pruned weights to cpu
pruned.set_((p * (~mask)).cpu())
p.mul_(mask) # in-place multiplication, so pruned weights are 0-values, hence checkpoint will have 0s for pruned weights
if cls.__verbosity >= 2:
print("[ASP] Enabled %.2f%% sparsity for %s::%s of size=%s and type=%s" % (100.0*mask.sum()/mask.numel(), module_name, p_name, str(p.size()), str(p.dtype)))
@classmethod
def restore_pruned_weights(cls):
"""Call this method to disable sparsity and restore all weights.
This will only work if init(...) was called with allow_recompute=True.
"""
with torch.no_grad():
for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
if mask.sum() < mask.numel():
assert (pruned is not None), "Unable to restore dense parameter because allow_recompute_mask == False"
p.add_(pruned.cuda())
mask.fill_(1)
pruned.zero_()
if cls.__verbosity >= 2:
print("[ASP] Disabled sparsity for %s::%s (dense weights restored)" % (module_name, p_name))
@classmethod
def is_sparsity_enabled(cls):
"""Call this method to determine if sparsity is enabled in the model.
The typical use case is right after checkpoint has been loaded.
"""
total,sp100,sp50 = 0,0,0
for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
total += 1
mask_sum = mask.sum()
mask_numel = mask.numel()
if mask_sum == mask_numel:
sp100 += 1
elif mask_sum*2 == mask_numel:
sp50 += 1
assert (total == sp100 or total == sp50), "Inconsistent model sparsity"
if total == sp100:
return False
elif total == sp50:
return True
@classmethod
def prune_trained_model(cls, model, optimizer):
# add mask buffers to model (init_model_for_pruning), augment optimizer (init_optimizer_for_pruning) and compute masks (compute_sparse_masks)
cls.init_model_for_pruning(model, mask_calculator="m4n2_1d", verbosity=2, whitelist=[torch.nn.Linear, torch.nn.Conv2d], allow_recompute_mask=False)
cls.init_optimizer_for_pruning(optimizer)
cls.compute_sparse_masks()
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/apex_sparsity/asp.py |
import sys
import torch
import numpy as np
import collections
from itertools import permutations
""" compute density (helper fn to compute % NNZs in a tensor) """
def fill(x):
return float(x.nonzero().size(0))/torch.numel(x)
""" reshape matrix into m-dimensional vectors: (h,w) -> (hw/m, m) """
def reshape_1d(matrix, m):
# If not a nice multiple of m, fill with zeroes.
if matrix.shape[1] % m > 0:
mat = torch.cuda.FloatTensor(matrix.shape[0], matrix.shape[1] + (m-matrix.shape[1]%m)).fill_(0)
mat[:, :matrix.shape[1]] = matrix
shape = mat.shape
return mat.view(-1,m),shape
else:
return matrix.view(-1,m), matrix.shape
""" return all possible m:n patterns in a 1d vector """
valid_m4n2_1d_patterns = None
def compute_valid_1d_patterns(m,n):
# Early exit if patterns was already created.
global valid_m4n2_1d_patterns
if m==4 and n==2 and valid_m4n2_1d_patterns is not None: return valid_m4n2_1d_patterns
patterns = torch.zeros(m)
patterns[:n] = 1
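# enumerate every distinct placement of n ones among m positions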
valid_patterns = torch.Tensor(list(set(permutations(patterns.tolist()))))
if m == 4 and n == 2: valid_m4n2_1d_patterns = valid_patterns
return valid_patterns
""" m:n 1d structured best """
def mn_1d_best(matrix, m, n):
# Find all possible patterns.
patterns = compute_valid_1d_patterns(m,n).cuda()
# Find the best m:n pattern (sum of non-masked weights).
mask = torch.cuda.IntTensor(matrix.shape).fill_(1).view(-1,m)
mat,shape = reshape_1d(matrix,m)
pmax = torch.argmax(torch.matmul(mat.abs(),patterns.t()), dim=1)
mask[:] = patterns[pmax[:]]
mask = mask.view(matrix.shape)
return mask
def m4n2_1d(mat, density):
return mn_1d_best(mat, 4, 2)
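# Worked example (editorial sketch): for a 1x8 weight row, m4n2_1d keeps the two largest-magnitude
# entries in every group of four. The values below are illustrative, not from the original source.
#
#   w = torch.tensor([[0.1, -0.8, 0.05, 0.6, 0.9, -0.2, 0.3, -0.7]]).cuda()
#   m4n2_1d(w, density=0.5)
#   # -> [[0, 1, 0, 1, 1, 0, 0, 1]]  (2 of every 4 entries kept)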
"""
Below 2d-masking related code is targeted more for training (from scratch).
2d-pruning of a weight tensor is done to accelerate DGRAD step during backprop
phase of training algorithm. Acceleration comes from using SpMMA instructions in
Tensor Cores of NVIDIA Ampere GPU Architecture
(note: this code does not do the acceleration, GPU kernels are required for this).
1d pruning of weight tensor helps speed up FPROP step by pruning in 2:4 pattern
along the horizontal (logical) direction.
During DGRAD step, weight tensor is transposed. 2d pruning functions below, mask
weight tensor such that their transposed versions are also 2:4 sparse along the
horizontal (logical) direction. Thus, with 2d pruning, weight tensors are
2:4 sparse along row and column directions.
"""
""" m:n 2d structured pruning: greedy method to select mask """
def mn_2d_greedy(matrix, m, n):
# Convert to numpy
mat = matrix.cpu().detach().numpy()
mask = np.ones(mat.shape, dtype=int)
rowCount = int(mat.shape[0]/m) * m
colCount = int(mat.shape[1]/m) * m
for rowStartIdx in range(0, rowCount, m):
rowEndIdx = rowStartIdx + m
for colStartIdx in range(0, colCount, m):
colEndIdx = colStartIdx + m
matrixSub = np.absolute(np.squeeze(mat[rowStartIdx:rowEndIdx, colStartIdx:colEndIdx]))
maskSub = np.squeeze(mask[rowStartIdx:rowEndIdx, colStartIdx:colEndIdx])
maskSub.fill(0.0)
matrixVecView = matrixSub.reshape(-1)
maskVecView = maskSub.reshape(-1)
linearIdx = np.argsort(matrixVecView)
matrixIdx = [(int(x/m), x % m) for x in linearIdx]
rowCounter = collections.Counter()
colCounter = collections.Counter()
for currIdx in range(len(linearIdx) - 1, -1, -1):
currMatrixEntry = matrixIdx[currIdx]
if (rowCounter[currMatrixEntry[0]] == n) or (colCounter[currMatrixEntry[1]] == n):
continue
#end if
maskSub[currMatrixEntry[0], currMatrixEntry[1]] = 1.0
rowCounter[currMatrixEntry[0]] += 1
colCounter[currMatrixEntry[1]] += 1
return torch.tensor(mask).cuda() # mask is a numpy array here, so build the tensor first, then move it to the GPU
def m4n2_2d_greedy(mat, density):
return mn_2d_greedy(mat, 4, 2)
""" return all possible m:n patterns in a mxn block. """
valid_m4n2_2d_patterns = None
def compute_valid_2d_patterns(m,n):
# Early exit if patterns was already created.
global valid_m4n2_2d_patterns
if valid_m4n2_2d_patterns is not None: return valid_m4n2_2d_patterns
patterns = torch.zeros(m)
patterns[:n] = 1
patterns = list(set(permutations(patterns.tolist())))
patterns = patterns + patterns
patterns = torch.Tensor(list(set(permutations(patterns,m))))
valid = ((patterns.sum(dim=1) <= n).sum(dim=1) == m).nonzero().view(-1)
valid_patterns = torch.Tensor(valid.shape[0],m,m)
valid_patterns[:] = patterns[valid[:]]
if m == 4 and n == 2: valid_m4n2_2d_patterns = valid_patterns
return valid_patterns
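# NOTE (editorial sketch): mn_2d_best below calls reshape_2d and reshape_2d_inv, which are not
# defined anywhere in this file as given. The helpers below are an assumed reconstruction: they
# tile an (h, w) matrix into (h/m, w/n) flattened blocks of m*n elements and invert that tiling,
# which is the shape contract mn_2d_best relies on. Matrices whose sides are not multiples of
# m / n are not handled in this sketch.
def reshape_2d(matrix, m, n):
    # (h, w) -> (h/m, w/n, m*n): one flattened m x n block per entry
    h, w = matrix.shape
    return matrix.reshape(h//m, m, w//n, n).permute(0, 2, 1, 3).reshape(h//m, w//n, m*n)

def reshape_2d_inv(blocks):
    # (h/m, w/n, m, n) -> (h, w): inverse of the blocking above
    hb, wb, m, n = blocks.shape
    return blocks.permute(0, 2, 1, 3).reshape(hb*m, wb*n)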
""" m:n 2d structured pruning: exhaustive method to select best mask """
def mn_2d_best(matrix, m, n):
# Find all possible patterns.
patterns = compute_valid_2d_patterns(m,n).cuda()
# Find the best m:n pattern (sum of non-masked weights).
mask = torch.cuda.IntTensor(matrix.shape).fill_(1)
mat = reshape_2d(matrix,m,m).abs()
pmax = torch.argmax(torch.matmul(mat,patterns.view(patterns.shape[0],m*m).t()), dim=2)
# Copy best m:n patterns into mask.
mat = mat.view(mat.shape[0]*mat.shape[1],-1)
pmax = pmax.view(pmax.shape[0]*pmax.shape[1]).unsqueeze(1).expand(-1,mat.shape[1])
patterns = patterns.view(patterns.shape[0],patterns.shape[1]*patterns.shape[2])
mat = torch.gather(patterns,0,pmax)
mat = reshape_2d_inv(mat.view(matrix.shape[0]//m,matrix.shape[1]//m,m,m))
mask.copy_(mat.type(mask.type()))
return mask
def m4n2_2d_best(mat, density):
return mn_2d_best(mat, 4, 2)
""" returns a sparse mask """
def create_mask(tensor, pattern="m4n2_1d", density=0.5):
# Reshape tensor and mask.
shape = tensor.shape
ttype = tensor.type()
t = tensor.float().contiguous()
# 1d-tensor
if len(shape) == 1:
t = t.view(1, shape[0])
func = getattr(sys.modules[__name__], pattern, None)
mask = func(t, density)
return mask.view(shape).type(ttype)
# 2d-tensor (in, out)
elif len(shape) == 2:
t = t.view(shape[0], shape[1])
func = getattr(sys.modules[__name__], pattern, None)
mask = func(t, density)
return mask.view(shape).type(ttype)
# 3d-tensor (batch, in, out)
elif len(shape) == 3:
t = t.view(shape[0]*shape[1], shape[2])
func = getattr(sys.modules[__name__], pattern, None)
mask = func(t, density)
return mask.view(shape).type(ttype)
# 4d-tensor (in, out, h, w)
elif len(shape) == 4:
"""
# transformers (bmm)
t = t.view(shape[0]*shape[1]*shape[2], shape[3])
func = getattr(sys.modules[__name__], pattern, None)
mask = func(t, density)
return mask.view(shape).type(ttype)
"""
# convs
t = t.permute(2,3,0,1).contiguous().view(shape[2]*shape[3]*shape[0], shape[1])
func = getattr(sys.modules[__name__], pattern, None)
mask = func(t, density)
mask = mask.view(shape[2], shape[3], shape[0], shape[1]).permute(2,3,0,1).contiguous()
return mask.view(shape).type(ttype)
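# Example usage (editorial sketch): create_mask is the entry point resolved from the pattern-name
# string (e.g. "m4n2_1d") that ASP passes around; it can also be applied to a tensor directly.
# The layer and shapes below are illustrative assumptions.
#
#   linear = torch.nn.Linear(1024, 1024).cuda()
#   mask = create_mask(linear.weight, pattern="m4n2_1d", density=0.5)
#   linear.weight.data.mul_(mask)   # zero out pruned weights in place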
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/apex_sparsity/sparse_masklib.py |
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
elif i == args.num_layers-1:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.output_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.output_features])
else:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
return torch.nn.Sequential(od)
def train_step(args, model, optimizer, input_batch, target_batch, step):
predicted_target = model(input_batch)
loss = ((predicted_target-target_batch)**2).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
step = step + 1
#print("Step %d :: loss=%e" % (step, loss.item()))
return step
def train_loop(args, model, optimizer, step, num_steps):
for i in range(num_steps):
input_batch = torch.randn([args.batch_size, args.input_features]).cuda()
target_batch = torch.randn([args.batch_size, args.output_features]).cuda()
step = train_step(args, model, optimizer, input_batch, target_batch, step)
return step
def main(args):
model = build_model(args).cuda()
one_ll = next(model.children()).weight
optimizer = FusedAdam(model.parameters())
# only prune linear layers, even though we also support conv1d, conv2d and conv3d
ASP.init_model_for_pruning(model, "m4n2_1d", whitelist=[torch.nn.Linear], allow_recompute_mask=True)
ASP.init_optimizer_for_pruning(optimizer)
step = 0
# train for a few steps with dense weights
print("DENSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_dense_steps)
# simulate sparsity by inserting zeros into existing dense weights
ASP.compute_sparse_masks()
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps)
# recompute sparse masks
ASP.compute_sparse_masks()
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps_2)
# turn off sparsity
print("SPARSE :: ",one_ll)
ASP.restore_pruned_weights()
# train for a few steps with dense weights
print("DENSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_dense_steps_2)
if __name__ == '__main__':
class Args:
batch_size = 32
input_features = 16
output_features = 8
hidden_features = 40
num_layers = 4
num_dense_steps = 2000
num_sparse_steps = 3000
num_sparse_steps_2 = 1000
num_dense_steps_2 = 1500
args = Args()
main(args)
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/apex_sparsity/test/toy_problem.py |
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
elif i == args.num_layers-1:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.output_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.output_features])
else:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
return torch.nn.Sequential(od)
def train_step(args, model, optimizer, input_batch, target_batch, step):
predicted_target = model(input_batch)
loss = ((predicted_target-target_batch)**2).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
step = step + 1
#print("Step %d :: loss=%e" % (step, loss.item()))
return step
def train_loop(args, model, optimizer, step, num_steps):
for i in range(num_steps):
input_batch = torch.randn([args.batch_size, args.input_features]).cuda()
target_batch = torch.randn([args.batch_size, args.output_features]).cuda()
step = train_step(args, model, optimizer, input_batch, target_batch, step)
return step
def main(step, args, model_state_dict, optimizer_state_dict):
#
# PART2
#
model = build_model(args).cuda()
one_ll = next(model.children()).weight
optimizer = FusedAdam(model.parameters())
ASP.init_model_for_pruning(model, args.pattern, verbosity=args.verbosity, whitelist=args.whitelist, allow_recompute_mask=args.allow_recompute_mask)
ASP.init_optimizer_for_pruning(optimizer)
torch.manual_seed(args.seed2)
model.load_state_dict(model_state_dict)
optimizer.load_state_dict(optimizer_state_dict)
print("Model sparsity is %s" % ("enabled" if ASP.sparsity_is_enabled() else "disabled"))
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps_2)
if __name__ == '__main__':
checkpoint = torch.load("part1.chkp")
class Args:
verbosity = checkpoint['verbosity']
seed = 4873
seed2 = checkpoint['seed2']
pattern = checkpoint['pattern']
whitelist = checkpoint['whitelist']
allow_recompute_mask = checkpoint['allow_recompute_mask']
batch_size = 32
input_features = 8
output_features = 8
hidden_features = 32
num_layers = 4
num_dense_steps = 2000
num_sparse_steps = 3000
num_sparse_steps_2 = 1000
checkpoint_path = "part1.chkp"
args = Args()
main(checkpoint['step'], args, checkpoint['model_state_dict'], checkpoint['optimizer_state_dict'])
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/apex_sparsity/test/checkpointing_test_part2.py |
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
elif i == args.num_layers-1:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.output_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.output_features])
else:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
return torch.nn.Sequential(od)
def train_step(args, model, optimizer, input_batch, target_batch, step):
predicted_target = model(input_batch)
loss = ((predicted_target-target_batch)**2).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
step = step + 1
#print("Step %d :: loss=%e" % (step, loss.item()))
return step
def train_loop(args, model, optimizer, step, num_steps):
for i in range(num_steps):
input_batch = torch.randn([args.batch_size, args.input_features]).cuda()
target_batch = torch.randn([args.batch_size, args.output_features]).cuda()
step = train_step(args, model, optimizer, input_batch, target_batch, step)
return step
def main(args):
#
# PART1
#
torch.manual_seed(args.seed)
model = build_model(args).cuda()
one_ll = next(model.children()).weight
optimizer = FusedAdam(model.parameters())
ASP.init_model_for_pruning(model, args.pattern, verbosity=args.verbosity, whitelist=args.whitelist, allow_recompute_mask=args.allow_recompute_mask)
ASP.init_optimizer_for_pruning(optimizer)
step = 0
# train for a few steps with dense weights
print("DENSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_dense_steps)
# simulate sparsity by inserting zeros into existing dense weights
ASP.compute_sparse_masks()
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps)
torch.save({
'step': step,
'verbosity': args.verbosity,
'seed2': args.seed2,
'pattern': args.pattern,
'whitelist': args.whitelist,
'allow_recompute_mask': args.allow_recompute_mask,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, args.checkpoint_path)
if __name__ == '__main__':
class Args:
verbosity=3
seed = 4873
seed2 = 99875
pattern = "m4n2_2d_best"
whitelist = [torch.nn.Linear]
allow_recompute_mask = True
batch_size = 32
input_features = 8
output_features = 8
hidden_features = 32
num_layers = 4
num_dense_steps = 2000
num_sparse_steps = 3000
num_sparse_steps_2 = 1000
checkpoint_path = "part1.chkp"
args = Args()
main(args)
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/apex_sparsity/test/checkpointing_test_part1.py |
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
#
# Reference run for checkpointing test (part1 + part2)
#
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
elif i == args.num_layers-1:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.output_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.output_features])
else:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
return torch.nn.Sequential(od)
def train_step(args, model, optimizer, input_batch, target_batch, step):
predicted_target = model(input_batch)
loss = ((predicted_target-target_batch)**2).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
step = step + 1
#print("Step %d :: loss=%e" % (step, loss.item()))
return step
def train_loop(args, model, optimizer, step, num_steps):
for i in range(num_steps):
input_batch = torch.randn([args.batch_size, args.input_features]).cuda()
target_batch = torch.randn([args.batch_size, args.output_features]).cuda()
step = train_step(args, model, optimizer, input_batch, target_batch, step)
return step
def main(args):
#
# PART1
#
torch.manual_seed(args.seed)
model = build_model(args).cuda()
one_ll = next(model.children()).weight
optimizer = FusedAdam(model.parameters())
ASP.init_model_for_pruning(model, args.pattern, whitelist=args.whitelist, allow_recompute_mask=args.allow_recompute_mask)
ASP.init_optimizer_for_pruning(optimizer)
step = 0
# train for a few steps with dense weights
print("DENSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_dense_steps)
# simulate sparsity by inserting zeros into existing dense weights
ASP.compute_sparse_masks()
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps)
#
# PART 2
#
torch.manual_seed(args.seed2)
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps_2)
if __name__ == '__main__':
class Args:
seed = 4873
seed2 = 99875
pattern = "m4n2_2d_best"
whitelist = [torch.nn.Linear]
allow_recompute_mask = True
batch_size = 32
input_features = 8
output_features = 8
hidden_features = 32
num_layers = 4
num_dense_steps = 2000
num_sparse_steps = 3000
num_sparse_steps_2 = 1000
checkpoint_path = "part1.chkp"
args = Args()
main(args)
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/apex_sparsity/test/checkpointing_test_reference.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bz2
import os
import urllib.request
import sys
class SquadDownloader:
def __init__(self, save_path):
self.save_path = save_path + '/squad'
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
if not os.path.exists(self.save_path + '/v1.1'):
os.makedirs(self.save_path + '/v1.1')
if not os.path.exists(self.save_path + '/v2.0'):
os.makedirs(self.save_path + '/v2.0')
self.download_urls = {
'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json' : 'v1.1/train-v1.1.json',
'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json' : 'v1.1/dev-v1.1.json',
'https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/' : 'v1.1/evaluate-v1.1.py',
'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json' : 'v2.0/train-v2.0.json',
'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json' : 'v2.0/dev-v2.0.json',
'https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/' : 'v2.0/evaluate-v2.0.py',
}
def download(self):
for item in self.download_urls:
url = item
file = self.download_urls[item]
print('Downloading:', url)
if os.path.isfile(self.save_path + '/' + file):
print('** Download file already exists, skipping download')
else:
response = urllib.request.urlopen(url)
with open(self.save_path + '/' + file, "wb") as handle:
handle.write(response.read())
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/data/SquadDownloader.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from GooglePretrainedWeightDownloader import GooglePretrainedWeightDownloader
from NVIDIAPretrainedWeightDownloader import NVIDIAPretrainedWeightDownloader
from WikiDownloader import WikiDownloader
from BooksDownloader import BooksDownloader
from GLUEDownloader import GLUEDownloader
from SquadDownloader import SquadDownloader
class Downloader:
def __init__(self, dataset_name, save_path):
self.dataset_name = dataset_name
self.save_path = save_path
def download(self):
if self.dataset_name == 'bookscorpus':
self.download_bookscorpus()
elif self.dataset_name == 'wikicorpus_en':
self.download_wikicorpus('en')
elif self.dataset_name == 'wikicorpus_zh':
self.download_wikicorpus('zh')
elif self.dataset_name == 'google_pretrained_weights':
self.download_google_pretrained_weights()
elif self.dataset_name == 'nvidia_pretrained_weights':
self.download_nvidia_pretrained_weights()
elif self.dataset_name in {'mrpc', 'sst-2'}:
self.download_glue(self.dataset_name)
elif self.dataset_name == 'squad':
self.download_squad()
elif self.dataset_name == 'all':
self.download_bookscorpus()
self.download_wikicorpus('en')
self.download_wikicorpus('zh')
self.download_google_pretrained_weights()
self.download_nvidia_pretrained_weights()
self.download_glue('mrpc')
self.download_glue('sst-2')
self.download_squad()
else:
print(self.dataset_name)
assert False, 'Unknown dataset_name provided to downloader'
def download_bookscorpus(self):
downloader = BooksDownloader(self.save_path)
downloader.download()
def download_wikicorpus(self, language):
downloader = WikiDownloader(language, self.save_path)
downloader.download()
def download_google_pretrained_weights(self):
downloader = GooglePretrainedWeightDownloader(self.save_path)
downloader.download()
def download_nvidia_pretrained_weights(self):
downloader = NVIDIAPretrainedWeightDownloader(self.save_path)
downloader.download()
def download_glue(self, task_name):
downloader = GLUEDownloader(self.save_path)
downloader.download(task_name)
def download_squad(self):
downloader = SquadDownloader(self.save_path)
downloader.download()
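# Example usage (editorial sketch): the path below is an illustrative assumption; bertPrep.py
# normally drives this class via its --action download flag.
#
#   downloader = Downloader('squad', '/workspace/bert/download')
#   downloader.download()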
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/data/Downloader.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
class BookscorpusTextFormatting:
def __init__(self, books_path, output_filename, recursive = False):
self.books_path = books_path
self.recursive = recursive
self.output_filename = output_filename
# This puts one book per line
def merge(self):
with open(self.output_filename, mode='w', newline='\n') as ofile:
for filename in glob.glob(self.books_path + '/' + '*.txt', recursive=True):
with open(filename, mode='r', encoding='utf-8-sig', newline='\n') as file:
for line in file:
if line.strip() != '':
ofile.write(line.strip() + ' ')
ofile.write("\n\n") | FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/data/BookscorpusTextFormatting.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
class NVIDIAPretrainedWeightDownloader:
def __init__(self, save_path):
self.save_path = save_path + '/nvidia_pretrained_weights'
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
pass
def download(self):
assert False, 'NVIDIAPretrainedWeightDownloader not implemented yet.' | FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/data/NVIDIAPretrainedWeightDownloader.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/data/__init__.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bz2
import os
import urllib.request
import subprocess
import sys
class WikiDownloader:
def __init__(self, language, save_path):
self.save_path = save_path + '/wikicorpus_' + language
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
self.language = language
self.download_urls = {
'en' : 'https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2',
'zh' : 'https://dumps.wikimedia.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2'
}
self.output_files = {
'en' : 'wikicorpus_en.xml.bz2',
'zh' : 'wikicorpus_zh.xml.bz2'
}
def download(self):
if self.language in self.download_urls:
url = self.download_urls[self.language]
filename = self.output_files[self.language]
print('Downloading:', url)
if os.path.isfile(self.save_path + '/' + filename):
print('** Download file already exists, skipping download')
else:
response = urllib.request.urlopen(url)
with open(self.save_path + '/' + filename, "wb") as handle:
handle.write(response.read())
# Always unzipping since this is relatively fast and will overwrite
print('Unzipping:', self.output_files[self.language])
subprocess.run('bzip2 -dk ' + self.save_path + '/' + filename, shell=True, check=True)
else:
assert False, 'WikiDownloader not implemented for this language yet.' | FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/data/WikiDownloader.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
class WikicorpusTextFormatting:
def __init__(self, wiki_path, output_filename, recursive = False):
self.wiki_path = wiki_path
self.recursive = recursive
self.output_filename = output_filename
# This puts one article per line
def merge(self):
with open(self.output_filename, mode='w', newline='\n') as ofile:
for dirname in glob.glob(self.wiki_path + '/*/', recursive=False):
for filename in glob.glob(dirname + 'wiki_*', recursive=self.recursive):
print(filename)
article_lines = []
article_open = False
with open(filename, mode='r', newline='\n') as file:
for line in file:
if '<doc id=' in line:
article_open = True
elif '</doc>' in line:
article_open = False
for oline in article_lines[1:]:
if oline != '\n':
ofile.write(oline.rstrip() + " ")
ofile.write("\n\n")
article_lines = []
else:
if article_open:
article_lines.append(line) | FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/data/WikicorpusTextFormatting.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import wget
from pathlib import Path
def mkdir(path):
Path(path).mkdir(parents=True, exist_ok=True)
class GLUEDownloader:
def __init__(self, save_path):
self.save_path = save_path + '/glue'
def download(self, task_name):
mkdir(self.save_path)
if task_name in {'mrpc', 'mnli'}:
task_name = task_name.upper()
elif task_name == 'cola':
task_name = 'CoLA'
else: # SST-2
assert task_name == 'sst-2'
task_name = 'SST'
wget.download(
'https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/1502038877f6a88c225a34450793fbc3ea87eaba/download_glue_data.py',
out=self.save_path,
)
sys.path.append(self.save_path)
import download_glue_data
download_glue_data.main(
['--data_dir', self.save_path, '--tasks', task_name])
sys.path.pop()
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/data/GLUEDownloader.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import BookscorpusTextFormatting
import Downloader
import TextSharding
import WikicorpusTextFormatting
import argparse
import itertools
import multiprocessing
import os
import pprint
import subprocess
def main(args):
working_dir = os.environ['BERT_PREP_WORKING_DIR']
print('Working Directory:', working_dir)
print('Action:', args.action)
print('Dataset Name:', args.dataset)
if args.input_files:
args.input_files = args.input_files.split(',')
hdf5_tfrecord_folder_prefix = "_lower_case_" + str(args.do_lower_case) + "_seq_len_" + str(args.max_seq_length) \
+ "_max_pred_" + str(args.max_predictions_per_seq) + "_masked_lm_prob_" + str(args.masked_lm_prob) \
+ "_random_seed_" + str(args.random_seed) + "_dupe_factor_" + str(args.dupe_factor)
directory_structure = {
'download' : working_dir + '/download', # Downloaded and decompressed
'extracted' : working_dir +'/extracted', # Extracted from whatever the initial format is (e.g., wikiextractor)
'formatted' : working_dir + '/formatted_one_article_per_line', # This is the level where all sources should look the same
'sharded' : working_dir + '/sharded_' + "training_shards_" + str(args.n_training_shards) + "_test_shards_" + str(args.n_test_shards) + "_fraction_" + str(args.fraction_test_set),
'tfrecord' : working_dir + '/tfrecord'+ hdf5_tfrecord_folder_prefix,
'hdf5': working_dir + '/hdf5' + hdf5_tfrecord_folder_prefix
}
print('\nDirectory Structure:')
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(directory_structure)
print('')
if args.action == 'download':
if not os.path.exists(directory_structure['download']):
os.makedirs(directory_structure['download'])
downloader = Downloader.Downloader(args.dataset, directory_structure['download'])
downloader.download()
elif args.action == 'text_formatting':
assert args.dataset != 'google_pretrained_weights' and args.dataset != 'nvidia_pretrained_weights' and args.dataset != 'squad' and args.dataset != 'mrpc', 'Cannot perform text_formatting on pretrained weights'
if not os.path.exists(directory_structure['extracted']):
os.makedirs(directory_structure['extracted'])
if not os.path.exists(directory_structure['formatted']):
os.makedirs(directory_structure['formatted'])
if args.dataset == 'bookscorpus':
books_path = directory_structure['download'] + '/bookscorpus'
#books_path = directory_structure['download']
output_filename = directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt'
books_formatter = BookscorpusTextFormatting.BookscorpusTextFormatting(books_path, output_filename, recursive=True)
books_formatter.merge()
elif args.dataset == 'wikicorpus_en':
if args.skip_wikiextractor == 0:
path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py'
wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_en.xml ' + '-b 100M --processes ' + str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset
print('WikiExtractor Command:', wikiextractor_command)
wikiextractor_process = subprocess.run(wikiextractor_command, shell=True, check=True)
#wikiextractor_process.communicate()
wiki_path = directory_structure['extracted'] + '/wikicorpus_en'
output_filename = directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt'
wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True)
wiki_formatter.merge()
elif args.dataset == 'wikicorpus_zh':
assert False, 'wikicorpus_zh not fully supported at this time. The simplified/tradition Chinese data needs to be translated and properly segmented still, and should work once this step is added.'
if args.skip_wikiextractor == 0:
path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py'
wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_zh.xml ' + '-b 100M --processes ' + str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset
print('WikiExtractor Command:', wikiextractor_command)
wikiextractor_process = subprocess.run(wikiextractor_command, shell=True, check=True)
#wikiextractor_process.communicate()
wiki_path = directory_structure['extracted'] + '/wikicorpus_zh'
output_filename = directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt'
wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True)
wiki_formatter.merge()
assert os.stat(output_filename).st_size > 0, 'File glob did not pick up extracted wiki files from WikiExtractor.'
elif args.action == 'sharding':
# Note: books+wiki requires user to provide list of input_files (comma-separated with no spaces)
if args.dataset == 'bookscorpus' or 'wikicorpus' in args.dataset or 'books_wiki' in args.dataset:
if args.input_files is None:
if args.dataset == 'bookscorpus':
args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt']
elif args.dataset == 'wikicorpus_en':
args.input_files = [directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt']
elif args.dataset == 'wikicorpus_zh':
args.input_files = [directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt']
elif args.dataset == 'books_wiki_en_corpus':
args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt', directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt']
output_file_prefix = directory_structure['sharded'] + '/' + args.dataset + '/' + args.dataset
if not os.path.exists(directory_structure['sharded']):
os.makedirs(directory_structure['sharded'])
if not os.path.exists(directory_structure['sharded'] + '/' + args.dataset):
os.makedirs(directory_structure['sharded'] + '/' + args.dataset)
# Segmentation is here because all datasets look the same in one article/book/whatever per line format, and
# it seemed unnecessarily complicated to add an additional preprocessing step to call just for this.
# Different languages (e.g., Chinese simplified/traditional) may require translation and
# other packages to be called from here -- just add a conditional branch for those extra steps
segmenter = TextSharding.NLTKSegmenter()
sharding = TextSharding.Sharding(args.input_files, output_file_prefix, args.n_training_shards, args.n_test_shards, args.fraction_test_set)
sharding.load_articles()
sharding.segment_articles_into_sentences(segmenter)
sharding.distribute_articles_over_shards()
sharding.write_shards_to_disk()
else:
assert False, 'Unsupported dataset for sharding'
elif args.action == 'create_tfrecord_files':
assert False, 'TFrecord creation not supported in this PyTorch model example release.'
if not os.path.exists(directory_structure['tfrecord'] + "/" + args.dataset):
os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset)
def create_record_worker(filename_prefix, shard_id, output_format='tfrecord'):
bert_preprocessing_command = 'python /workspace/bert/create_pretraining_data.py'
bert_preprocessing_command += ' --input_file=' + directory_structure['sharded'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.txt'
bert_preprocessing_command += ' --output_file=' + directory_structure['tfrecord'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.' + output_format
bert_preprocessing_command += ' --vocab_file=' + args.vocab_file
bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else ''
bert_preprocessing_command += ' --max_seq_length=' + str(args.max_seq_length)
bert_preprocessing_command += ' --max_predictions_per_seq=' + str(args.max_predictions_per_seq)
bert_preprocessing_command += ' --masked_lm_prob=' + str(args.masked_lm_prob)
bert_preprocessing_command += ' --random_seed=' + str(args.random_seed)
bert_preprocessing_command += ' --dupe_factor=' + str(args.dupe_factor)
bert_preprocessing_process = subprocess.Popen(bert_preprocessing_command, shell=True)
last_process = bert_preprocessing_process
# This could be better optimized (fine if all take equal time)
if shard_id % args.n_processes == 0 and shard_id > 0:
bert_preprocessing_process.wait()
return last_process
output_file_prefix = args.dataset
for i in range(args.n_training_shards):
last_process = create_record_worker(output_file_prefix + '_training', i)
last_process.wait()
for i in range(args.n_test_shards):
last_process = create_record_worker(output_file_prefix + '_test', i)
last_process.wait()
elif args.action == 'create_hdf5_files':
last_process = None
if not os.path.exists(directory_structure['hdf5'] + "/" + args.dataset):
os.makedirs(directory_structure['hdf5'] + "/" + args.dataset)
def create_record_worker(filename_prefix, shard_id, output_format='hdf5'):
bert_preprocessing_command = 'python /workspace/bert/create_pretraining_data.py'
bert_preprocessing_command += ' --input_file=' + directory_structure['sharded'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.txt'
bert_preprocessing_command += ' --output_file=' + directory_structure['hdf5'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.' + output_format
bert_preprocessing_command += ' --vocab_file=' + args.vocab_file
bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else ''
bert_preprocessing_command += ' --max_seq_length=' + str(args.max_seq_length)
bert_preprocessing_command += ' --max_predictions_per_seq=' + str(args.max_predictions_per_seq)
bert_preprocessing_command += ' --masked_lm_prob=' + str(args.masked_lm_prob)
bert_preprocessing_command += ' --random_seed=' + str(args.random_seed)
bert_preprocessing_command += ' --dupe_factor=' + str(args.dupe_factor)
bert_preprocessing_process = subprocess.Popen(bert_preprocessing_command, shell=True)
last_process = bert_preprocessing_process
# This could be better optimized (fine if all take equal time)
if shard_id % args.n_processes == 0 and shard_id > 0:
bert_preprocessing_process.wait()
return last_process
output_file_prefix = args.dataset
for i in range(args.n_training_shards):
last_process = create_record_worker(output_file_prefix + '_training', i)
last_process.wait()
for i in range(args.n_test_shards):
last_process = create_record_worker(output_file_prefix + '_test', i)
last_process.wait()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Preprocessing Application for Everything BERT-related'
)
parser.add_argument(
'--action',
type=str,
help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords',
choices={
'download', # Download and verify md5/sha sums
'text_formatting', # Convert into a file that contains one article/book per line
'sharding', # Convert previous formatted text into shards containing one sentence per line
'create_tfrecord_files', # Turn each shard into a TFrecord with masking and next sentence prediction info
'create_hdf5_files' # Turn each shard into a HDF5 file with masking and next sentence prediction info
}
)
parser.add_argument(
'--dataset',
type=str,
help='Specify the dataset to perform --action on',
choices={
'bookscorpus',
'wikicorpus_en',
'wikicorpus_zh',
'books_wiki_en_corpus',
'google_pretrained_weights',
'nvidia_pretrained_weights',
'mrpc',
'sst-2',
'squad',
'all'
}
)
parser.add_argument(
'--input_files',
type=str,
help='Specify the input files in a comma-separated list (no spaces)'
)
parser.add_argument(
'--n_training_shards',
type=int,
help='Specify the number of training shards to generate',
default=256
)
parser.add_argument(
'--n_test_shards',
type=int,
help='Specify the number of test shards to generate',
default=256
)
parser.add_argument(
'--fraction_test_set',
type=float,
help='Specify the fraction (0..1) of the data to withhold for the test data split (based on number of sequences)',
default=0.1
)
parser.add_argument(
'--segmentation_method',
type=str,
help='Specify your choice of sentence segmentation',
choices={
'nltk'
},
default='nltk'
)
parser.add_argument(
'--n_processes',
type=int,
help='Specify the max number of processes to allow at one time',
default=4
)
parser.add_argument(
'--random_seed',
type=int,
help='Specify the base seed to use for any random number generation',
default=12345
)
parser.add_argument(
'--dupe_factor',
type=int,
help='Specify the duplication factor',
default=5
)
parser.add_argument(
'--masked_lm_prob',
type=float,
help='Specify the probability for masked lm',
default=0.15
)
parser.add_argument(
'--max_seq_length',
type=int,
help='Specify the maximum sequence length',
default=512
)
parser.add_argument(
'--max_predictions_per_seq',
type=int,
help='Specify the maximum number of masked words per sequence',
default=20
)
parser.add_argument(
'--do_lower_case',
type=int,
help='Specify whether it is cased (0) or uncased (1) (any number greater than 0 will be treated as uncased)',
default=1
)
parser.add_argument(
'--vocab_file',
type=str,
help='Specify absolute path to vocab file to use'
)
parser.add_argument(
'--skip_wikiextractor',
type=int,
help='Specify whether to skip wikiextractor step 0=False, 1=True',
default=0
)
parser.add_argument(
'--interactive_json_config_generator',
type=str,
help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords'
)
args = parser.parse_args()
main(args)
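# Example invocations (editorial sketch); BERT_PREP_WORKING_DIR must point at the working
# directory described above before running any action. Paths and $VOCAB are illustrative
# assumptions.
#
#   export BERT_PREP_WORKING_DIR=/workspace/bert/data
#   python bertPrep.py --action download          --dataset squad
#   python bertPrep.py --action text_formatting   --dataset wikicorpus_en
#   python bertPrep.py --action sharding          --dataset wikicorpus_en
#   python bertPrep.py --action create_hdf5_files --dataset wikicorpus_en --vocab_file $VOCAB --do_lower_case 1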
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/data/bertPrep.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
class BooksDownloader:
def __init__(self, save_path):
self.save_path = save_path
pass
def download(self):
bookscorpus_download_command = 'python3 /workspace/bookcorpus/download_files.py --list /workspace/bookcorpus/url_list.jsonl --out'
bookscorpus_download_command += ' ' + self.save_path + '/bookscorpus'
bookscorpus_download_command += ' --trash-bad-count'
bookscorpus_download_process = subprocess.run(bookscorpus_download_command, shell=True, check=True)
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/data/BooksDownloader.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from itertools import islice
import multiprocessing
import statistics
class Sharding:
def __init__(self, input_files, output_name_prefix, n_training_shards, n_test_shards, fraction_test_set):
assert len(input_files) > 0, 'The input file list must contain at least one file.'
assert n_training_shards > 0, 'There must be at least one output shard.'
assert n_test_shards > 0, 'There must be at least one output shard.'
self.n_training_shards = n_training_shards
self.n_test_shards = n_test_shards
self.fraction_test_set = fraction_test_set
self.input_files = input_files
self.output_name_prefix = output_name_prefix
self.output_training_identifier = '_training'
self.output_test_identifier = '_test'
self.output_file_extension = '.txt'
self.articles = {} # key: integer identifier, value: list of articles
self.sentences = {} # key: integer identifier, value: list of sentences
self.output_training_files = {} # key: filename, value: list of articles to go into file
self.output_test_files = {} # key: filename, value: list of articles to go into file
self.init_output_files()
# Remember, the input files contain one article per line (the whitespace check is to skip extraneous blank lines)
def load_articles(self):
print('Start: Loading Articles')
global_article_count = 0
for input_file in self.input_files:
print('input file:', input_file)
with open(input_file, mode='r', newline='\n') as f:
for i, line in enumerate(f):
if line.strip():
self.articles[global_article_count] = line.rstrip()
global_article_count += 1
print('End: Loading Articles: There are', len(self.articles), 'articles.')
def segment_articles_into_sentences(self, segmenter):
print('Start: Sentence Segmentation')
if len(self.articles) == 0:
self.load_articles()
assert len(self.articles) != 0, 'Please check that input files are present and contain data.'
# TODO: WIP: multiprocessing (create independent ranges and spawn processes)
use_multiprocessing = 'serial'
def chunks(data, size=len(self.articles)):
it = iter(data)
for i in range(0, len(data), size):
yield {k: data[k] for k in islice(it, size)}
if use_multiprocessing == 'manager':
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
n_processes = 7 # in addition to the main process, total = n_proc+1
def work(articles, return_dict):
sentences = {}
for i, article in enumerate(articles):
sentences[i] = segmenter.segment_string(articles[article])
if i % 5000 == 0:
print('Segmenting article', i)
return_dict.update(sentences)
for item in chunks(self.articles, len(self.articles)):
p = multiprocessing.Process(target=work, args=(item, return_dict))
# Busy wait
while len(jobs) >= n_processes:
pass
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
elif use_multiprocessing == 'queue':
work_queue = multiprocessing.Queue()
jobs = []
for item in chunks(self.articles, len(self.articles)):
pass
else: # serial option
for i, article in enumerate(self.articles):
self.sentences[i] = segmenter.segment_string(self.articles[article])
if i % 5000 == 0:
print('Segmenting article', i)
print('End: Sentence Segmentation')
def init_output_files(self):
print('Start: Init Output Files')
assert len(self.output_training_files) == 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.'
assert len(self.output_test_files) == 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.'
for i in range(self.n_training_shards):
name = self.output_name_prefix + self.output_training_identifier + '_' + str(i) + self.output_file_extension
self.output_training_files[name] = []
for i in range(self.n_test_shards):
name = self.output_name_prefix + self.output_test_identifier + '_' + str(i) + self.output_file_extension
self.output_test_files[name] = []
print('End: Init Output Files')
def get_sentences_per_shard(self, shard):
result = 0
for article_id in shard:
result += len(self.sentences[article_id])
return result
def distribute_articles_over_shards(self):
print('Start: Distribute Articles Over Shards')
assert len(self.articles) >= self.n_training_shards + self.n_test_shards, 'There are fewer articles than shards. Please add more data or reduce the number of shards requested.'
# Create dictionary with - key: sentence count per article, value: article id number
sentence_counts = defaultdict(lambda: [])
max_sentences = 0
total_sentences = 0
for article_id in self.sentences:
current_length = len(self.sentences[article_id])
sentence_counts[current_length].append(article_id)
max_sentences = max(max_sentences, current_length)
total_sentences += current_length
n_sentences_assigned_to_training = int((1 - self.fraction_test_set) * total_sentences)
nominal_sentences_per_training_shard = n_sentences_assigned_to_training // self.n_training_shards
nominal_sentences_per_test_shard = (total_sentences - n_sentences_assigned_to_training) // self.n_test_shards
consumed_article_set = set({})
unused_article_set = set(self.articles.keys())
# Make first pass and add one article worth of lines per file
for file in self.output_training_files:
current_article_id = sentence_counts[max_sentences][-1]
sentence_counts[max_sentences].pop(-1)
self.output_training_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
# Maintain the max sentence count
while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
if len(self.sentences[current_article_id]) > nominal_sentences_per_training_shard:
nominal_sentences_per_training_shard = len(self.sentences[current_article_id])
print('Warning: A single article contains more than the nominal number of sentences per training shard.')
for file in self.output_test_files:
current_article_id = sentence_counts[max_sentences][-1]
sentence_counts[max_sentences].pop(-1)
self.output_test_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
# Maintain the max sentence count
while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
if len(self.sentences[current_article_id]) > nominal_sentences_per_test_shard:
nominal_sentences_per_test_shard = len(self.sentences[current_article_id])
print('Warning: A single article contains more than the nominal number of sentences per test shard.')
training_counts = []
test_counts = []
for shard in self.output_training_files:
training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard]))
for shard in self.output_test_files:
test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard]))
training_median = statistics.median(training_counts)
test_median = statistics.median(test_counts)
# Make subsequent passes over files to find articles to add without going over limit
history_remaining = []
n_history_remaining = 4
while len(consumed_article_set) < len(self.articles):
for fidx, file in enumerate(self.output_training_files):
nominal_next_article_size = min(nominal_sentences_per_training_shard - training_counts[fidx], max_sentences)
# Maintain the max sentence count
while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0:
nominal_next_article_size -= 1
if nominal_next_article_size not in sentence_counts or nominal_next_article_size == 0 or training_counts[fidx] > training_median:
continue # skip adding to this file, will come back later if no file can accept unused articles
current_article_id = sentence_counts[nominal_next_article_size][-1]
sentence_counts[nominal_next_article_size].pop(-1)
self.output_training_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
for fidx, file in enumerate(self.output_test_files):
nominal_next_article_size = min(nominal_sentences_per_test_shard - test_counts[fidx], max_sentences)
# Maintain the max sentence count
while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0:
nominal_next_article_size -= 1
if nominal_next_article_size not in sentence_counts or nominal_next_article_size == 0 or test_counts[fidx] > test_median:
continue # skip adding to this file, will come back later if no file can accept unused articles
current_article_id = sentence_counts[nominal_next_article_size][-1]
sentence_counts[nominal_next_article_size].pop(-1)
self.output_test_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
# If unable to place articles a few times, bump up nominal sizes by fraction until articles get placed
if len(history_remaining) == n_history_remaining:
history_remaining.pop(0)
history_remaining.append(len(unused_article_set))
history_same = True
for i in range(1, len(history_remaining)):
history_same = history_same and (history_remaining[i-1] == history_remaining[i])
if history_same:
nominal_sentences_per_training_shard += 1
# nominal_sentences_per_test_shard += 1
training_counts = []
test_counts = []
for shard in self.output_training_files:
training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard]))
for shard in self.output_test_files:
test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard]))
training_median = statistics.median(training_counts)
test_median = statistics.median(test_counts)
print('Distributing data over shards:', len(unused_article_set), 'articles remaining.')
if len(unused_article_set) != 0:
print('Warning: Some articles did not make it into output files.')
for shard in self.output_training_files:
print('Training shard:', self.get_sentences_per_shard(self.output_training_files[shard]))
for shard in self.output_test_files:
print('Test shard:', self.get_sentences_per_shard(self.output_test_files[shard]))
print('End: Distribute Articles Over Shards')
def write_shards_to_disk(self):
print('Start: Write Shards to Disk')
for shard in self.output_training_files:
self.write_single_shard(shard, self.output_training_files[shard])
for shard in self.output_test_files:
self.write_single_shard(shard, self.output_test_files[shard])
print('End: Write Shards to Disk')
def write_single_shard(self, shard_name, shard):
with open(shard_name, mode='w', newline='\n') as f:
for article_id in shard:
for line in self.sentences[article_id]:
f.write(line + '\n')
f.write('\n') # Line break between articles
import nltk
nltk.download('punkt')
class NLTKSegmenter:
def __init__(self):
pass
def segment_string(self, article):
return nltk.tokenize.sent_tokenize(article)
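# Example usage (editorial sketch, mirroring how bertPrep.py drives this module); the input file
# name and shard counts are illustrative assumptions.
#
#   segmenter = NLTKSegmenter()
#   sharding = Sharding(['wikicorpus_en_one_article_per_line.txt'], 'wikicorpus_en', 256, 256, 0.1)
#   sharding.load_articles()
#   sharding.segment_articles_into_sentences(segmenter)
#   sharding.distribute_articles_over_shards()
#   sharding.write_shards_to_disk()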
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/data/TextSharding.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import os
import urllib.request
import zipfile
class GooglePretrainedWeightDownloader:
def __init__(self, save_path):
self.save_path = save_path + '/google_pretrained_weights'
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
# Download urls
self.model_urls = {
'bert_base_uncased': ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip', 'uncased_L-12_H-768_A-12.zip'),
'bert_large_uncased': ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip', 'uncased_L-24_H-1024_A-16.zip'),
'bert_base_cased': ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip', 'cased_L-12_H-768_A-12.zip'),
'bert_large_cased': ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-24_H-1024_A-16.zip', 'cased_L-24_H-1024_A-16.zip'),
'bert_base_multilingual_cased': ('https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip', 'multi_cased_L-12_H-768_A-12.zip'),
'bert_large_multilingual_uncased': ('https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip', 'multilingual_L-12_H-768_A-12.zip'),
'bert_base_chinese': ('https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip', 'chinese_L-12_H-768_A-12.zip')
}
# SHA256sum verification for file download integrity (and checking for changes from the download source over time)
self.bert_base_uncased_sha = {
'bert_config.json': '7b4e5f53efbd058c67cda0aacfafb340113ea1b5797d9ce6ee411704ba21fcbc',
'bert_model.ckpt.data-00000-of-00001': '58580dc5e0bf0ae0d2efd51d0e8272b2f808857f0a43a88aaf7549da6d7a8a84',
'bert_model.ckpt.index': '04c1323086e2f1c5b7c0759d8d3e484afbb0ab45f51793daab9f647113a0117b',
'bert_model.ckpt.meta': 'dd5682170a10c3ea0280c2e9b9a45fee894eb62da649bbdea37b38b0ded5f60e',
'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3',
}
self.bert_large_uncased_sha = {
'bert_config.json': 'bfa42236d269e2aeb3a6d30412a33d15dbe8ea597e2b01dc9518c63cc6efafcb',
'bert_model.ckpt.data-00000-of-00001': 'bc6b3363e3be458c99ecf64b7f472d2b7c67534fd8f564c0556a678f90f4eea1',
'bert_model.ckpt.index': '68b52f2205ffc64dc627d1120cf399c1ef1cbc35ea5021d1afc889ffe2ce2093',
'bert_model.ckpt.meta': '6fcce8ff7628f229a885a593625e3d5ff9687542d5ef128d9beb1b0c05edc4a1',
'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3',
}
self.bert_base_cased_sha = {
'bert_config.json': 'f11dfb757bea16339a33e1bf327b0aade6e57fd9c29dc6b84f7ddb20682f48bc',
'bert_model.ckpt.data-00000-of-00001': '734d5a1b68bf98d4e9cb6b6692725d00842a1937af73902e51776905d8f760ea',
'bert_model.ckpt.index': '517d6ef5c41fc2ca1f595276d6fccf5521810d57f5a74e32616151557790f7b1',
'bert_model.ckpt.meta': '5f8a9771ff25dadd61582abb4e3a748215a10a6b55947cbb66d0f0ba1694be98',
'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02',
}
self.bert_large_cased_sha = {
'bert_config.json': '7adb2125c8225da495656c982fd1c5f64ba8f20ad020838571a3f8a954c2df57',
'bert_model.ckpt.data-00000-of-00001': '6ff33640f40d472f7a16af0c17b1179ca9dcc0373155fb05335b6a4dd1657ef0',
'bert_model.ckpt.index': 'ef42a53f577fbe07381f4161b13c7cab4f4fc3b167cec6a9ae382c53d18049cf',
'bert_model.ckpt.meta': 'd2ddff3ed33b80091eac95171e94149736ea74eb645e575d942ec4a5e01a40a1',
'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02',
}
self.bert_base_multilingual_cased_sha = {
'bert_config.json': 'e76c3964bc14a8bb37a5530cdc802699d2f4a6fddfab0611e153aa2528f234f0',
'bert_model.ckpt.data-00000-of-00001': '55b8a2df41f69c60c5180e50a7c31b7cdf6238909390c4ddf05fbc0d37aa1ac5',
'bert_model.ckpt.index': '7d8509c2a62b4e300feb55f8e5f1eef41638f4998dd4d887736f42d4f6a34b37',
'bert_model.ckpt.meta': '95e5f1997e8831f1c31e5cf530f1a2e99f121e9cd20887f2dce6fe9e3343e3fa',
'vocab.txt': 'fe0fda7c425b48c516fc8f160d594c8022a0808447475c1a7c6d6479763f310c',
}
self.bert_large_multilingual_uncased_sha = {
'bert_config.json': '49063bb061390211d2fdd108cada1ed86faa5f90b80c8f6fdddf406afa4c4624',
'bert_model.ckpt.data-00000-of-00001': '3cd83912ebeb0efe2abf35c9f1d5a515d8e80295e61c49b75c8853f756658429',
'bert_model.ckpt.index': '87c372c1a3b1dc7effaaa9103c80a81b3cbab04c7933ced224eec3b8ad2cc8e7',
'bert_model.ckpt.meta': '27f504f34f02acaa6b0f60d65195ec3e3f9505ac14601c6a32b421d0c8413a29',
'vocab.txt': '87b44292b452f6c05afa49b2e488e7eedf79ea4f4c39db6f2f4b37764228ef3f',
}
self.bert_base_chinese_sha = {
'bert_config.json': '7aaad0335058e2640bcb2c2e9a932b1cd9da200c46ea7b8957d54431f201c015',
'bert_model.ckpt.data-00000-of-00001': '756699356b78ad0ef1ca9ba6528297bcb3dd1aef5feadd31f4775d7c7fc989ba',
'bert_model.ckpt.index': '46315546e05ce62327b3e2cd1bed22836adcb2ff29735ec87721396edb21b82e',
'bert_model.ckpt.meta': 'c0f8d51e1ab986604bc2b25d6ec0af7fd21ff94cf67081996ec3f3bf5d823047',
'vocab.txt': '45bbac6b341c319adc98a532532882e91a9cefc0329aa57bac9ae761c27b291c',
}
# Relate SHA to urls for loop below
self.model_sha = {
'bert_base_uncased': self.bert_base_uncased_sha,
'bert_large_uncased': self.bert_large_uncased_sha,
'bert_base_cased': self.bert_base_cased_sha,
'bert_large_cased': self.bert_large_cased_sha,
'bert_base_multilingual_cased': self.bert_base_multilingual_cased_sha,
'bert_large_multilingual_uncased': self.bert_large_multilingual_uncased_sha,
'bert_base_chinese': self.bert_base_chinese_sha
}
# Helper to get sha256sum of a file
def sha256sum(self, filename):
h = hashlib.sha256()
b = bytearray(128*1024)
mv = memoryview(b)
with open(filename, 'rb', buffering=0) as f:
for n in iter(lambda : f.readinto(mv), 0):
h.update(mv[:n])
return h.hexdigest()
def download(self):
# Iterate over urls: download, unzip, verify sha256sum
found_mismatch_sha = False
for model in self.model_urls:
url = self.model_urls[model][0]
file = self.save_path + '/' + self.model_urls[model][1]
print('Downloading', url)
response = urllib.request.urlopen(url)
with open(file, 'wb') as handle:
handle.write(response.read())
print('Unzipping', file)
zip_file = zipfile.ZipFile(file, 'r')
zip_file.extractall(self.save_path)
zip_file.close()
sha_dict = self.model_sha[model]
for extracted_file in sha_dict:
sha = sha_dict[extracted_file]
if sha != self.sha256sum(file[:-4] + '/' + extracted_file):
found_mismatch_sha = True
print('SHA256sum does not match on file:', extracted_file, 'from download url:', url)
else:
print(file[:-4] + '/' + extracted_file, '\t', 'verified')
if not found_mismatch_sha:
print("All downloads pass sha256sum verification.")
def serialize(self):
pass
def deserialize(self):
pass
def listAvailableWeights(self):
print("Available Weight Datasets")
for item in self.model_urls:
print(item)
def listLocallyStoredWeights(self):
pass
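# Illustrative usage (the save path below is only an example):
# downloader = GooglePretrainedWeightDownloader('./data')
# downloader.download()  # fetches, unzips and sha256-verifies every checkpoint listed above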
| FasterTransformer-main | examples/pytorch/bert/bert-quantization-sparsity/data/GooglePretrainedWeightDownloader.py |
from pdb import set_trace
import torch
from transformers.models.longformer.modeling_longformer import LongformerBaseModelOutput
def from_hf_longformer_weight_to_ft(weights_file, layer_num, data_type):
weights = torch.load(weights_file)
all_weights = []
for i in range(0, layer_num):
# Need to transpose the kernel for torch.nn.Linear
# q k v kg vg weights and bias should be continuous, required by the ft longformer encoder.
all_weights.append(weights["longformer.encoder.layer.{}.attention.self.query.weight".format(i)].transpose(0, 1))
all_weights.append(weights["longformer.encoder.layer.{}.attention.self.key.weight".format(i)].transpose(0, 1))
all_weights.append(weights["longformer.encoder.layer.{}.attention.self.value.weight".format(i)].transpose(0, 1))
all_weights.append(
weights["longformer.encoder.layer.{}.attention.self.key_global.weight".format(i)].transpose(0, 1))
all_weights.append(
weights["longformer.encoder.layer.{}.attention.self.value_global.weight".format(i)].transpose(0, 1))
all_weights.append(
weights["longformer.encoder.layer.{}.attention.self.query_global.weight".format(i)].transpose(0, 1))
all_weights.append(weights["longformer.encoder.layer.{}.attention.self.query.bias".format(i)])
all_weights.append(weights["longformer.encoder.layer.{}.attention.self.key.bias".format(i)])
all_weights.append(weights["longformer.encoder.layer.{}.attention.self.value.bias".format(i)])
all_weights.append(weights["longformer.encoder.layer.{}.attention.self.key_global.bias".format(i)])
all_weights.append(weights["longformer.encoder.layer.{}.attention.self.value_global.bias".format(i)])
all_weights.append(weights["longformer.encoder.layer.{}.attention.self.query_global.bias".format(i)])
all_weights.append(
weights["longformer.encoder.layer.{}.attention.output.dense.weight".format(i)].transpose(0, 1))
all_weights.append(weights["longformer.encoder.layer.{}.attention.output.dense.bias".format(i)])
all_weights.append(weights["longformer.encoder.layer.{}.attention.output.LayerNorm.weight".format(i)])
all_weights.append(weights["longformer.encoder.layer.{}.attention.output.LayerNorm.bias".format(i)])
all_weights.append(weights["longformer.encoder.layer.{}.intermediate.dense.weight".format(i)].transpose(0, 1))
all_weights.append(weights["longformer.encoder.layer.{}.intermediate.dense.bias".format(i)])
all_weights.append(weights["longformer.encoder.layer.{}.output.dense.weight".format(i)].transpose(0, 1))
all_weights.append(weights["longformer.encoder.layer.{}.output.dense.bias".format(i)])
all_weights.append(weights["longformer.encoder.layer.{}.output.LayerNorm.weight".format(i)])
all_weights.append(weights["longformer.encoder.layer.{}.output.LayerNorm.bias".format(i)])
for i in range(0, len(all_weights)):
all_weights[i] = all_weights[i].flatten()
if data_type == "fp16":
all_weights = torch.cat(all_weights).type(torch.float16)
elif data_type == "bf16":
all_weights = torch.cat(all_weights).type(torch.bfloat16)
elif data_type == "fp32":
all_weights = torch.cat(all_weights).type(torch.float32)
return all_weights.contiguous()
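# Illustrative usage of the converter above (the checkpoint path is only a placeholder):
# ft_weights = from_hf_longformer_weight_to_ft('pytorch_model.bin', layer_num=12, data_type='fp16')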
class FTLongformerEncoder(torch.nn.Module):
def __init__(self, weights_file, layer_num, head_num, size_per_head,
intermediate_size, local_attn_window_size,
max_global_token_num, batch_size, seq_len,
attn_scaler, ft_longformer_lib, data_type='fp32', hf_plugin_mode=False):
super().__init__()
self.data_type = data_type
assert seq_len % local_attn_window_size == 0 and seq_len / \
local_attn_window_size >= 2, "seq_len needs to be a multiple of local_attn_window_size and at least twice as large."
self.hf_plugin_mode = hf_plugin_mode
all_weight = from_hf_longformer_weight_to_ft(weights_file, layer_num, data_type)
self.all_weight = all_weight.cuda()
torch.classes.load_library(ft_longformer_lib)
self.ft_encoder = torch.classes.FasterTransformer.LongformerEncoder(layer_num, head_num * size_per_head,
head_num, size_per_head,
intermediate_size, local_attn_window_size,
max_global_token_num, batch_size, seq_len,
attn_scaler)
def set_hf_plugin_mode(self, is_plugin):
self.hf_plugin_mode = is_plugin
def forward(self, *args, **kwargs):
encoder_in = args[0]
if self.hf_plugin_mode:
# In this mode, assume that HuggingFace's LongformerModel.encoder has been
# replaced by an instance of this class
extended_attention_mask = kwargs['attention_mask']
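# HF's extended attention mask roughly encodes padded tokens as -10000, locally attended
# tokens as 0 and globally attended tokens as a positive value, so the binary local and
# global masks can be recovered by thresholding it.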
local_attn_mask = torch.zeros_like(extended_attention_mask)
local_attn_mask[extended_attention_mask > -10000.] = 1.0
global_attn_mask = torch.zeros_like(extended_attention_mask)
global_attn_mask[extended_attention_mask > 0.] = 1.0
output = self.ft_encoder.forward(encoder_in, local_attn_mask, global_attn_mask, self.all_weight, 0)
return LongformerBaseModelOutput(
last_hidden_state=output,
hidden_states=None,
attentions=None,
global_attentions=None,
)
else:
local_attn_mask = args[1]
global_attn_mask = args[2]
return self.ft_encoder.forward(encoder_in, local_attn_mask, global_attn_mask, self.all_weight, 0)
| FasterTransformer-main | examples/pytorch/longformer/model.py |
import argparse
import sys
import os
import json
import time
import torch
from transformers import LongformerTokenizer, LongformerForQuestionAnswering
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
sys.path.insert(0, project_root)
from examples.pytorch.longformer.model import from_hf_longformer_weight_to_ft, FTLongformerEncoder
def parse_from_config(model_dir):
with open(os.path.join(model_dir, 'config.json'), 'r') as f:
config = json.load(f)
layer_num = config['num_hidden_layers']
hidden_size = config['hidden_size']
head_num = config['num_attention_heads']
size_per_head = hidden_size // head_num
intermediate_size = config['intermediate_size']
# assume all local attention windows are the same size. TODO: improve later
local_attn_window_size = config['attention_window'][0]
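# standard scaled dot-product attention scaling factor: 1 / sqrt(head_dim)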
attn_scaler = 1.0 / (size_per_head ** 0.5)
return (layer_num, hidden_size, head_num, size_per_head,
intermediate_size, local_attn_window_size, attn_scaler)
def build_ft_longformer(hf_model_dir, layer_num, head_num, size_per_head,
intermediate_size, local_attn_window_size,
max_global_token_num, batch_size, seq_len,
attn_scaler, ft_longformer_lib, data_type):
weights_file = os.path.join(hf_model_dir, 'pytorch_model.bin')
ft_encoder = FTLongformerEncoder(weights_file, layer_num, head_num, size_per_head,
intermediate_size, local_attn_window_size,
max_global_token_num, batch_size, seq_len,
attn_scaler, ft_longformer_lib, data_type)
ft_longformer = build_hf_longformer(hf_model_dir)
if data_type == 'fp16':
ft_longformer = ft_longformer.half()
elif data_type == 'bf16':
ft_longformer = ft_longformer.bfloat16()
ft_longformer.cuda()
ft_longformer.eval()
ft_encoder.set_hf_plugin_mode(True)
ft_longformer.longformer.encoder = ft_encoder
return ft_longformer
def build_hf_longformer(model_dir):
hf_longformer = LongformerForQuestionAnswering.from_pretrained(model_dir)
hf_longformer.cuda()
hf_longformer.eval()
return hf_longformer
def prepare_input(question, passage_text, seq_len, batch_size, model_dir, data_type):
tokenizer = LongformerTokenizer.from_pretrained(model_dir)
encoding = tokenizer(question, passage_text, return_token_type_ids=True)
qa_sep_index = 0
for token_id in encoding['input_ids']:
if token_id == tokenizer.sep_token_id:
break
qa_sep_index += 1
actual_seq_len = len(encoding['input_ids'])
input_ids = torch.ones((seq_len, ), dtype=torch.int32)  # HF uses 1 as the padding token id
input_ids[:actual_seq_len] = torch.tensor(encoding['input_ids'], dtype=torch.int32)
local_attn_mask = torch.zeros((seq_len, ), dtype=torch.float32)
local_attn_mask[:actual_seq_len] = torch.tensor(encoding['attention_mask'], dtype=torch.float32)
global_attn_mask = torch.zeros_like(local_attn_mask, dtype=torch.float32)
# mark all question's token as global attention
global_attn_mask[:qa_sep_index] = 1.0
# make a batch
input_ids_b = torch.stack([input_ids for _ in range(batch_size)], axis=0).contiguous().cuda()
local_attn_mask_b = torch.stack([local_attn_mask for _ in range(batch_size)], axis=0).contiguous()
global_attn_mask_b = torch.stack([global_attn_mask for _ in range(batch_size)], axis=0).contiguous()
if data_type == 'fp16':
local_attn_mask_b = local_attn_mask_b.half()
global_attn_mask_b = global_attn_mask_b.half()
elif data_type == 'bf16':
local_attn_mask_b = local_attn_mask_b.bfloat16()
global_attn_mask_b = global_attn_mask_b.bfloat16()
local_attn_mask_b = local_attn_mask_b.cuda()
global_attn_mask_b = global_attn_mask_b.cuda()
return input_ids_b, local_attn_mask_b, global_attn_mask_b, input_ids, actual_seq_len
def decode_output(outputs, model_dir, input_ids, actual_seq_len):
tokenizer = LongformerTokenizer.from_pretrained(model_dir)
start_logits = outputs.start_logits
end_logits = outputs.end_logits
all_tokens = tokenizer.convert_ids_to_tokens(input_ids.tolist()[:actual_seq_len])
# truncate the logits to the actual (unpadded) sequence length before taking the argmax
start_logits = start_logits[0, :actual_seq_len]
end_logits = end_logits[0, :actual_seq_len]
answer_tokens = all_tokens[torch.argmax(start_logits):torch.argmax(end_logits) + 1]
answer = tokenizer.decode(tokenizer.convert_tokens_to_ids(answer_tokens))  # decoding strips the leading-space marker prepended to subword tokens
return answer
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model-dir', required=True,
help='Path to huggingface model dir where model file and config file is stored')
parser.add_argument('-l', '--ft-longformer-lib', type=str, default=os.path.join(project_root, 'build', 'lib', 'libth_transformer.so'),
help='Path to fastertransformer longformer pytorch op lib')
parser.add_argument('--data_type', type=str, choices=['fp32', 'fp16', 'bf16'], default='fp32')
parser.add_argument('-p', '--passage', type=str, nargs='*', help='Text for paragraph/passage for LongformerBERT QA',
default=None)
parser.add_argument('-pf', '--passage-file', type=str, help='File containing input passage',
default=None)
parser.add_argument('-q', '--question', required=True, type=str, nargs='*', help='Text for query/question for LongformerBERT QA',
default='')
parser.add_argument('-s', '--sequence-length',
help='The sequence length to use. Defaults to 1024',
default=1024, type=int)
parser.add_argument('-b', '--batch-size',
help='Batch size to use. Note: it just copies the single question and passage tokens to form a batch, for performance testing only.',
default=1, type=int)
parser.add_argument("-g", "--max-global-attention-num", default=128,
help="Max global attention token num from start of the sequence to the end.", type=int)
parser.add_argument('-r', '--repeat-test-num',
help='If specified, will run inference several rounds, to test average performance.',
type=int,
default=None)
args, _ = parser.parse_known_args()
print("======== Arguments ========")
print(args)
with open(os.path.join(args.model_dir, 'config.json'), 'r') as f:
config = json.load(f)
# prepare question and passage
question = ' '.join(args.question)
if bool(args.passage) == bool(args.passage_file):
raise RuntimeError("You must specify only one of --passage or --passage-file.")
if args.passage:
passage_text = ' '.join(args.passage)
else:
with open(args.passage_file, 'r', encoding="utf-8") as f:
passage_text = f.read()
# prepare model config and weights
model_dir = args.model_dir
ft_longformer_lib = args.ft_longformer_lib
seq_len = args.sequence_length
batch_size = args.batch_size
repeat_num = args.repeat_test_num if args.repeat_test_num else 0
max_global_token_num = args.max_global_attention_num
(layer_num, hidden_size, head_num, size_per_head,
intermediate_size, local_attn_window_size, attn_scaler) = parse_from_config(model_dir)
# HuggingFace longformer
hf_longformer = build_hf_longformer(model_dir)
if args.data_type == 'fp16':
hf_longformer = hf_longformer.half()
# fastertransformer longformer
ft_longformer = build_ft_longformer(model_dir, layer_num, head_num, size_per_head,
intermediate_size, local_attn_window_size,
max_global_token_num, batch_size, seq_len,
attn_scaler, ft_longformer_lib, args.data_type)
# prepare input
input_ids_b, local_attn_mask_b, global_attn_mask_b, input_ids, actual_seq_len = prepare_input(
question, passage_text, seq_len, batch_size, model_dir, args.data_type)
# 1. Compare the performance between HF and FT, using dummy input
dummy_local_attn_mask_b = torch.ones_like(local_attn_mask_b)
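# build the extended mask HF expects: (local + global) maps to -10000 for padding, 0 for local and +10000 for global tokens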
extended_mask_b = (global_attn_mask_b + dummy_local_attn_mask_b) * 10000. - 10000.
dummy_embedding_out = torch.rand(batch_size, seq_len, hidden_size, dtype=torch.float32)
if args.data_type == 'fp16':
dummy_embedding_out = dummy_embedding_out.half()
elif args.data_type == 'bf16':
dummy_embedding_out = dummy_embedding_out.bfloat16()
dummy_embedding_out = dummy_embedding_out.cuda()
hf_encoder = hf_longformer.longformer.encoder
ft_encoder = ft_longformer.longformer.encoder
if args.data_type == 'bf16':
print("HF longerformer encoder doesn't support BFloat16, FallBack to FP32 !")
with torch.no_grad():
# HuggingFace warmup
for i in range(10):
output = hf_encoder(dummy_embedding_out.float(), attention_mask=extended_mask_b, head_mask=None,
output_attentions=None, output_hidden_states=None, return_dict=True)
start = time.time()
for i in range(repeat_num):
output = hf_encoder(dummy_embedding_out.float(), attention_mask=extended_mask_b, head_mask=None,
output_attentions=None, output_hidden_states=None, return_dict=True)
stop = time.time()
print("HuggingFace Longformer encoder average latency {:.3f} second ({} iterations)".format((stop - start) / repeat_num, repeat_num))
ft_longformer.longformer.encoder.set_hf_plugin_mode(False)
with torch.no_grad():
# FT warmup
for i in range(10):
output = ft_encoder.forward(dummy_embedding_out, dummy_local_attn_mask_b, global_attn_mask_b)
start = time.time()
for i in range(repeat_num):
output = ft_encoder.forward(dummy_embedding_out, dummy_local_attn_mask_b, global_attn_mask_b)
stop = time.time()
print("FasterTransformer Longformer encoder average latency {:.3f} second ({} iterations)".format((stop - start) / repeat_num, repeat_num))
# 2. Verify the correctness
ft_longformer.longformer.encoder.set_hf_plugin_mode(True)
with torch.no_grad():
outputs = ft_longformer(input_ids_b,
attention_mask=local_attn_mask_b,
global_attention_mask=global_attn_mask_b)
ft_answer = decode_output(outputs, model_dir, input_ids, actual_seq_len)
outputs = hf_longformer(input_ids_b,
attention_mask=local_attn_mask_b.float(),
global_attention_mask=global_attn_mask_b.float())
hf_answer = decode_output(outputs, model_dir, input_ids, actual_seq_len)
print("HuggingFace Answer: " + hf_answer)
print("FasterTransformer Answer: " + ft_answer)
if __name__ == '__main__':
main()
| FasterTransformer-main | examples/pytorch/longformer/longformer_qa.py |
import torch
from transformers import GPTNeoForCausalLM, AutoConfig, GPT2Tokenizer
from pathlib import Path
# GPT-J 6B config
config = AutoConfig.from_pretrained("EleutherAI/gpt-neo-2.7B")
config.attention_layers = ["global"] * 28
config.attention_types = [["global"], 28]
config.num_layers = 28
config.num_heads = 16
config.hidden_size = 256 * config.num_heads
config.vocab_size = 50400
config.rotary = True
config.rotary_dim = 64
config.jax = True
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
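# Read-only mapping wrapper around a torch checkpoint so it can be passed as the
# state_dict argument of from_pretrained.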
class Checkpoint(MutableMapping):
def __init__(self, chkpt_path, device="cpu"):
self.device = device
self.chkpt_path = chkpt_path
self.checkpoint = torch.load(chkpt_path)
def __len__(self):
return len(self.checkpoint)
def __getitem__(self, key):
return self.checkpoint[key]
def __setitem__(self, key, value):
return
def __delitem__(self, key):
return
def keys(self):
return self.checkpoint.keys()
def __iter__(self):
for key in self.checkpoint:
yield (key, self.__getitem__(key))
def __copy__(self):
return Checkpoint(self.chkpt_path, device=self.device)
def copy(self):
return Checkpoint(self.chkpt_path, device=self.device)
model = GPTNeoForCausalLM.from_pretrained(
pretrained_model_name_or_path=None,
config=config,
state_dict=Checkpoint("j6b_ckpt.pt")
)
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
input_ids = torch.as_tensor([
[ 818, 198, 464, 464, 818, 464, 198, 464],
[ 262, 464, 968, 968, 257, 968, 198, 717],
[ 938, 968, 1971, 1971, 1445, 1971, 464, 640],
[3155, 8221, 12056, 3782, 326, 12056, 5398, 314],
[ 286, 2732, 423, 468, 481, 6, 4332, 2497],
[1528, 286, 257, 3199, 1884, 5859, 628, 262],
[ 11, 15198, 649, 663, 787, 41683, 628, 3807],
[ 257, 318, 1182, 5079, 340, 423, 198, 11],
]).T.cuda()
output = model.generate(input_ids, max_length=40, k=1)
# print(f"output ids: \n{output}")
for i in range(8):
print(f"[INFO] batch {i}: {tokenizer.decode(output[i][:])}")
| FasterTransformer-main | examples/pytorch/gptj/utils/reference_gptj.py |
from argparse import ArgumentParser
from os import makedirs
import numpy as np
from pathlib import Path
import torch
import configparser
from transformers import PretrainedConfig
torch.set_printoptions(linewidth=130, sci_mode=False)
np.set_printoptions(linewidth=130, suppress=True)
# This converter is used to convert the huggingface gpt-j-6B model
# in https://huggingface.co/EleutherAI/gpt-j-6B/blob/main/pytorch_model.bin.
def savebin(param, save_path):
if isinstance(param, torch.Tensor):
param = param.cpu().float().numpy()
np.squeeze(param).astype(np.float32).tofile(save_path + ".bin")
def param2file(pt_param, layer_id, save_dir, dest_key):
base_n = save_dir + "/model.layers." + str(layer_id) + "."
save_path = base_n + dest_key
savebin(pt_param, save_path)
def param2distributed(
pt_param,
layer_id,
save_dir,
dest_key,
n_inference_gpus,
split_axis,
):
np_param = pt_param.cpu().float().numpy()
base_n = save_dir + "/model.layers." + str(layer_id) + "."
save_path = base_n + dest_key
split_param = np.split(np_param, n_inference_gpus, axis=split_axis)
for i, p in enumerate(split_param):
savebin(p, save_path + f".{i}")
def save(w, save_dir, n_inference_gpus, n_layers, layer_id):
makedirs(save_dir, exist_ok=True)
savebin(w['transformer.wte.weight'], save_dir + "/model.wte")
l = layer_id
print(f"Saving layer {l + 1} / {n_layers}")
base_k = "transformer.h." + str(l) + "."
param2file(
w[base_k + "ln_1.bias"],
l, save_dir, "input_layernorm.bias"
)
param2file(
w[base_k + "ln_1.weight"],
l, save_dir, "input_layernorm.weight"
)
param2distributed(
w[base_k + "mlp.fc_in.weight"].T,
l, save_dir, "mlp.dense_h_to_4h.weight",
n_inference_gpus, split_axis=-1 # split fast indx
)
param2distributed(
w[base_k + "mlp.fc_in.bias"],
l, save_dir, "mlp.dense_h_to_4h.bias",
n_inference_gpus, split_axis=-1 # split fast indx
)
param2distributed(
w[base_k + "mlp.fc_out.weight"].T,
l, save_dir, "mlp.dense_4h_to_h.weight",
n_inference_gpus, split_axis=0 # split slow indx
)
param2file(
w[base_k + "mlp.fc_out.bias"],
l, save_dir, "mlp.dense_4h_to_h.bias"
)
param2distributed(
w[base_k + "attn.out_proj.weight"].T,
l, save_dir, "attention.dense.weight",
n_inference_gpus, split_axis=0 # split slow indx
)
QKV_w = torch.stack([
w[base_k + "attn.q_proj.weight"],
w[base_k + "attn.k_proj.weight"],
w[base_k + "attn.v_proj.weight"],
]) # [qkv, n_heads * dim_head, latent_space]
QKV_w = QKV_w.permute(2, 0, 1)
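# after the permute the layout is [latent_space, qkv, n_heads * dim_head]; flattened row-major this
# keeps the Q/K/V blocks contiguous per input feature, which appears to be the layout the fused
# attention.query_key_value weight expects (it is split over GPUs along the fast head dimension below)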
param2distributed(
QKV_w, l, save_dir, "attention.query_key_value.weight",
n_inference_gpus, split_axis=-1 # split fast indx
)
# Other unneeded per-layer params:
# attn.attention.masked_bias = torch.tensor(-1e9)
# attn.attention.bias = torch.tril(torch.ones(1, 1, 2048, 2048))
if __name__ == "__main__":
parser = ArgumentParser(
description="Convert GPT-J slim checkpoint to FasterTransformer",
)
parser.add_argument(
"--output-dir", help="Folder where binary files are stored", default="gpt-j-6B/c-models/"
)
parser.add_argument(
"--ckpt-dir", help="File of GPT-J huggingface checkpoint", default="gpt-j-6B/"
)
parser.add_argument(
"--n-inference-gpus", help="Number of GPUs used for inference runtime", default=1, type=int
)
parser.add_argument(
"--n-layers", help="Number of GPT-J decoder layer", default=28, type=int
)
args = parser.parse_args()
ckpt_file = args.ckpt_dir + "/pytorch_model.bin"
checkpoint = torch.load(ckpt_file)
print(f"loading from {ckpt_file}")
out_path = args.output_dir
output_dir = out_path + f"/{args.n_inference_gpus}-gpu/"
print(f"saving to {output_dir}")
config_file = args.ckpt_dir + "/config.json"
hf_config = PretrainedConfig.from_json_file(config_file).to_dict()
# NOTE: save parameters to config files (loaded by triton backends)
config = configparser.ConfigParser()
config["gptj"] = {}
try:
config["gptj"]["model_name"] = "gptj" if hf_config["_name_or_path"] == '' else hf_config["_name_or_path"]
config["gptj"]["head_num"] = str(hf_config["n_head"])
n_embd = hf_config["n_embd"]
config["gptj"]["size_per_head"] = str(n_embd // hf_config["n_head"])
config["gptj"]["inter_size"] = str(n_embd * 4)
config["gptj"]["num_layer"] = str(hf_config["n_layer"])
rotary_dim = n_embd // hf_config["n_head"] if hf_config["rotary_dim"] is None else hf_config["rotary_dim"]
config["gptj"]["rotary_embedding"] = str(rotary_dim)
config["gptj"]["vocab_size"] = str(hf_config["vocab_size"])
config["gptj"]["start_id"] = str(hf_config["bos_token_id"])
config["gptj"]["end_id"] = str(hf_config["eos_token_id"])
config["gptj"]["weight_data_type"] = "fp32"
Path(output_dir).mkdir(exist_ok=True, parents=True)
with open(output_dir + "/config.ini", 'w') as configfile:
config.write(configfile)
except Exception:
print("Failed to save the config in config.ini.")
for i in range(args.n_layers):
save(checkpoint, output_dir, args.n_inference_gpus, args.n_layers, i)
savebin(checkpoint['transformer.ln_f.weight'], output_dir + "/model.final_layernorm.weight")
savebin(checkpoint['transformer.ln_f.bias'], output_dir + "/model.final_layernorm.bias")
savebin(checkpoint['lm_head.weight'], output_dir + "/model.lm_head.weight")
savebin(checkpoint['lm_head.bias'], output_dir + "/model.lm_head.bias")
print("done")
| FasterTransformer-main | examples/pytorch/gptj/utils/huggingface_gptj_ckpt_convert.py |
from argparse import ArgumentParser
from io import BytesIO
from os import makedirs
import numpy as np
import torch
import configparser
torch.set_printoptions(linewidth=130, sci_mode=False)
np.set_printoptions(linewidth=130, suppress=True)
def reshard(x, old_shape):
import jax.numpy as jnp
if len(x.shape) == 1:
# print("epoch")
# print(x)
out = x[0:1]
elif len(x.shape) == 2:
#print(f"LN/bias {x.shape}")
#print(x[:, :16])
if (x[1:] == x[-1]).all():
#print("LN")
if (x[1:] == 0).all() or (x[1:] == 1).all():
out = x[0:1]
else:
#print("shard bias")
out = x[0:1] * 8#* x.shape[0] / old_shape[0]
else:
#print("bias")
out = x.reshape(old_shape)
#print(out[:, :16])
elif len(x.shape) == 3:
#print(f"weight {x.shape}")
if x.shape[0] * x.shape[2] == old_shape[2]:
#print("case 1")
out = jnp.transpose(x, (1, 0, 2)).reshape(old_shape)
elif x.shape[0] * x.shape[1] == old_shape[1]:
#print("case 2")
out = x.reshape(old_shape)
else:
raise Exception(f"unimplemented, {x.shape}, {old_shape}")
else:
raise Exception(f"unimplemented, {x}")
#flattened, structure = jax.tree_flatten(out)
#return flattened
return out
def get_old_shape(t, dim=2):
if len(t.shape) == 3:
shard_shape = t.shape
if dim == 1:
return (shard_shape[0] * shard_shape[1], shard_shape[2])
elif dim == 2:
return (shard_shape[1], shard_shape[0] * shard_shape[2])
else:
raise ValueError(f"unsupported dim {dim}")
if len(t.shape) == 2:
return (t.shape[1] * t.shape[0],)
else:
raise ValueError(f"unsupported shape {t.shape}")
def read_shard(ckpt_dir, idx):
out = []
file_path = ckpt_dir + f"{idx}.npz"
#print(f"-- {file_path}")
with open(file_path, "rb") as f:
buf = f.read()
f_io = BytesIO(buf)
deserialized = np.load(f_io)
for i in deserialized:
out.append(deserialized[i])
#print(deserialized[i].shape)
return out
def savebin(param, save_path):
if isinstance(param, torch.Tensor):
param = param.cpu().float().numpy()
np.squeeze(param).astype(np.float32).tofile(save_path + ".bin")
def param2file(pt_param, layer_id, save_dir, dest_key):
base_n = save_dir + "/model.layers." + str(layer_id) + "."
save_path = base_n + dest_key
savebin(pt_param, save_path)
def param2distributed(
pt_param,
layer_id,
save_dir,
dest_key,
n_inference_gpus,
split_axis,
):
np_param = pt_param.cpu().float().numpy()
base_n = save_dir + "/model.layers." + str(layer_id) + "."
save_path = base_n + dest_key
split_param = np.split(np_param, n_inference_gpus, axis=split_axis)
for i, p in enumerate(split_param):
savebin(p, save_path + f".{i}")
def save(w, save_dir, n_inference_gpus, num_layers=28):
makedirs(save_dir, exist_ok=True)
savebin(w['transformer.wte.weight'], save_dir + "/model.wte")
for l in range(num_layers):
print(f"Saving layer {l} / 28")
base_k = "transformer.h." + str(l) + "."
param2file(
w[base_k + "ln_1.bias"],
l, save_dir, "input_layernorm.bias"
)
param2file(
w[base_k + "ln_1.weight"],
l, save_dir, "input_layernorm.weight"
)
param2distributed(
w[base_k + "mlp.c_fc.weight"].T,
l, save_dir, "mlp.dense_h_to_4h.weight",
n_inference_gpus, split_axis=-1 # split fast indx
)
param2distributed(
w[base_k + "mlp.c_fc.bias"],
l, save_dir, "mlp.dense_h_to_4h.bias",
n_inference_gpus, split_axis=-1 # split fast indx
)
param2distributed(
w[base_k + "mlp.c_proj.weight"].T,
l, save_dir, "mlp.dense_4h_to_h.weight",
n_inference_gpus, split_axis=0 # split slow indx
)
param2file(
w[base_k + "mlp.c_proj.bias"],
l, save_dir, "mlp.dense_4h_to_h.bias"
)
param2distributed(
w[base_k + "attn.attention.out_proj.weight"].T,
l, save_dir, "attention.dense.weight",
n_inference_gpus, split_axis=0 # split slow indx
)
QKV_w = torch.stack([
w[base_k + "attn.attention.q_proj.weight"],
w[base_k + "attn.attention.k_proj.weight"],
w[base_k + "attn.attention.v_proj.weight"],
]) # [qkv, n_heads * dim_head, latent_space]
QKV_w = QKV_w.permute(2, 0, 1)
param2distributed(
QKV_w, l, save_dir, "attention.query_key_value.weight",
n_inference_gpus, split_axis=-1 # split fast indx
)
# Other unneeded per-layer params:
# attn.attention.masked_bias = torch.tensor(-1e9)
# attn.attention.bias = torch.tril(torch.ones(1, 1, 2048, 2048))
savebin(w['transformer.ln_f.weight'], save_dir + "/model.final_layernorm.weight")
savebin(w['transformer.ln_f.bias'], save_dir + "/model.final_layernorm.bias")
# lm head fast index should be hidden layer size, not vocab:
savebin(w['lm_head.weight'], save_dir + "/model.lm_head.weight")
savebin(w['lm_head.bias'], save_dir + "/model.lm_head.bias")
def main(ckpt_dir, num_layers=28, total_shards=8):
import jax.numpy as jnp
unshard = None
transforms = [
("transformer.wte.bias", None, None),
("transformer.wte.weight", unshard, 1)
]
checkpoint = {}
layer_names = sorted(map(str, range(num_layers)))
for layer in layer_names:
checkpoint[
f"transformer.h.{layer}.attn.attention.bias"
] = torch.tril(torch.ones(1, 1, 2048, 2048))
checkpoint[
f"transformer.h.{layer}.attn.attention.masked_bias"
] = torch.tensor(-1e9)
transforms.extend([
(f"transformer.h.{layer}.attn.attention.q_proj.weight", unshard, 2),
(f"transformer.h.{layer}.attn.attention.v_proj.weight", unshard, 2),
(f"transformer.h.{layer}.attn.attention.k_proj.weight", unshard, 2),
(f"transformer.h.{layer}.attn.attention.out_proj.weight", unshard, 1),
(f"transformer.h.{layer}.mlp.c_fc.bias", unshard, 1),
(f"transformer.h.{layer}.mlp.c_fc.weight", unshard, 2),
(f"transformer.h.{layer}.mlp.c_proj.bias", None, None),
(f"transformer.h.{layer}.mlp.c_proj.weight", unshard, 1),
(f"transformer.h.{layer}.ln_1.bias", None, None),
(f"transformer.h.{layer}.ln_1.weight", None, None),
])
transforms.extend([
("lm_head.bias", unshard, 1),
("lm_head.weight", unshard, 2),
("transformer.ln_f.bias", None, None),
("transformer.ln_f.weight", None, None),
])
part = 0
element = 0
while len(transforms) > 0:
print(f"loading shards for part {part}")
shards = [
read_shard(f"{ckpt_dir}/shard_{i}/", part) for i in range(total_shards)
]
print(f"read from checkpoint")
unsharded = []
for all_shards in zip(*shards):
x = np.stack(all_shards)
# No idea why this is V2...?
if x.dtype == np.dtype('V2'):
x.dtype = jnp.bfloat16
x = x.astype(np.float32)
unsharded.append(x)
#print(f"unsharded: {x.shape}")
while len(transforms) > 0 and len(unsharded) > 0:
transform = transforms.pop(0)
params = unsharded.pop(0)
if transform[2] is not None:
old_shape = (1,) + get_old_shape(params, transform[2])
else:
old_shape = (params.shape[1],)
print(f"< {params.shape} to {old_shape}")
params = reshard(params, old_shape).squeeze(0).T
params = torch.tensor(np.array(params.copy())).half()
if params.isnan().any() or params.isinf().any():
raise ValueError(f"fp16 over/underflow at {part} {element}")
checkpoint[transform[0]] = params
print(f"> {transform[0]} {params.shape}")
element += 1
part += 1
checkpoint['transformer.wte.weight'] = (
checkpoint['transformer.wte.weight'].T + checkpoint['transformer.wte.bias']
)
del checkpoint['transformer.wte.bias']
print(f"left over: {unsharded}")
return checkpoint
if __name__ == "__main__":
parser = ArgumentParser(
description="Convert GPT-J slim checkpoint to FasterTransformer",
)
parser.add_argument(
"--output-dir", help="Folder where binary files are stored", default="j6b_ckpt/"
)
parser.add_argument(
"--ckpt-dir", help="Folder containing slim GPT-J checkpoint", default="step_383500/"
)
parser.add_argument(
"--n-inference-gpus", help="Number of GPUs used for inference runtime", default=1, type=int
)
args = parser.parse_args()
num_layers = 28
print("loading")
in_path = args.ckpt_dir
if len(in_path) > 3 and in_path[-3:] == ".pt":
checkpoint = torch.load(in_path)
else:
checkpoint = main(in_path, num_layers)
print("saving")
# load as in: https://github.com/finetuneanon/misc/blob/main/SizeTest.ipynb
out_path = args.output_dir
output_dir = out_path + f"/{args.n_inference_gpus}-gpu/"
if len(out_path) > 3 and out_path[-3:] == ".pt":
torch.save(checkpoint, out_path)
else:
save(checkpoint, output_dir, args.n_inference_gpus, num_layers)
# NOTE: hard code for gptj-6B configuration (TODO: make this automatic)
config = configparser.ConfigParser()
config["gptj"] = {}
try:
config["gptj"]["model_name"] = "gptj-6B"
config["gptj"]["head_num"] = "16"
config["gptj"]["size_per_head"] = "256"
config["gptj"]["inter_size"] = "16384"
config["gptj"]["num_layer"] = "28"
config["gptj"]["rotary_embedding"] = "64"
config["gptj"]["vocab_size"] = "50400"
config["gptj"]["start_id"] = "50256"
config["gptj"]["end_id"] = "50256"
config["gptj"]["weight_data_type"] = "fp32"
with open(output_dir + "/config.ini", 'w') as configfile:
config.write(configfile)
except Exception:
print("Failed to save the config in config.ini.")
print("done")
| FasterTransformer-main | examples/pytorch/gptj/utils/gptj_ckpt_convert.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
def generate_gpt_config(args):
config = configparser.ConfigParser()
config["ft_instance_hyperparameter"] = {
"max_batch_size": "{}".format(args['max_batch_size']),
"max_seq_len": "{}".format(args['max_seq_len']),
"beam_width": "{}".format(args['beam_width']),
"top_k": "{}".format(args['sampling_topk']),
"top_p": "{}".format(args['sampling_topp']),
"temperature": "{}".format(args['temperature']),
"tensor_para_size": "{}".format(args['tensor_para_size']),
"pipeline_para_size": "{}".format(args['pipeline_para_size']),
"data_type": "{}".format(args['data_type']),
"sparse": "0",
"int8_mode": "0",
"enable_custom_all_reduce": "0",
"model_name": "tmp_model",
"model_dir": "{}".format(args['model_dir']),
"repetition_penalty": "{}".format(args['repetition_penalty']),
"len_penalty": "{}".format(args['len_penalty']),
"beam_search_diversity_rate": "{}".format(args['beam_search_diversity_rate']),
}
config["request"] = {
"request_batch_size": "{}".format(args['request_batch_size']),
"request_output_len": "{}".format(args['request_output_len']),
"return_log_probs": "false",
"context_log_probs": "false",
}
config["tmp_model"] = {
"head_num": "{}".format(args['head_number']),
"size_per_head": "{}".format(args['size_per_head']),
"inter_size": "{}".format(args['inter_size']),
"vocab_size": "{}".format(args['vocab_size']),
"decoder_layers": "{}".format(args['num_layer']),
"rotary_embedding": f"{args['rotary_embedding']}",
"start_id": "{}".format(args['start_id']),
"end_id": "{}".format(args['end_id']),
}
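# The generated .tmp.config.ini carries runtime hyperparameters (ft_instance_hyperparameter),
# per-request settings (request) and the model architecture (tmp_model).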
with open('.tmp.config.ini', 'w') as configfile:
config.write(configfile)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-max_batch_size', '--max_batch_size', type=int, default=8, metavar='NUMBER',
help='batch size (default: 8)')
parser.add_argument('-max_seq_len', '--max_seq_len', type=int, default=256, metavar='NUMBER',
help='max sequence length (default: 256)')
parser.add_argument('-beam_width', '--beam_width', type=int, default=1, metavar='NUMBER',
help='beam width for beam search (default: 1)')
parser.add_argument('-n', '--head_number', type=int, default=16, metavar='NUMBER',
help='head number (default: 16)')
parser.add_argument('-size', '--size_per_head', type=int, default=256, metavar='NUMBER',
help='size per head (default: 256)')
parser.add_argument('-inter_size', '--inter_size', type=int, default=16384, metavar='NUMBER',
help='inter size for ffn (default: 16384)')
parser.add_argument('-l', '--num_layer', type=int, default=28, metavar='NUMBER',
help='number of layers (default: 28)')
parser.add_argument('-v', '--vocab_size', type=int, default=50400, metavar='NUMBER',
help='vocabulary size (default: 50400)')
parser.add_argument('-d', '--data_type', type=str, default="bf16", metavar='STRING',
help='data type (default: bf16)', choices=['fp32', 'fp16', 'bf16'])
parser.add_argument('-topk', '--sampling_topk', type=int, default=0, metavar='NUMBER',
help='Candidate (k) value of top k sampling in decoding. Default is 0.')
parser.add_argument('-topp', '--sampling_topp', type=float, default=0.5, metavar='NUMBER',
help='Probability (p) value of top p sampling in decoding. Default is 0.5.')
parser.add_argument('-tensor_para_size', '--tensor_para_size', type=int, default=1, metavar='NUMBER',
help='tensor parallelism size. Default is 1.')
parser.add_argument('-pipeline_para_size', '--pipeline_para_size', type=int, default=1, metavar='NUMBER',
help='layer parallelism size. Default is 1.')
parser.add_argument('--model_dir', type=str, default="./models/", metavar='STRING',
help='Model path prfix. Default is "./models".')
parser.add_argument('-temperature', '--temperature', type=float, default=1.0, metavar='NUMBER',
help='temperature of penalty. Default is 1.0.')
parser.add_argument('-request_batch_size', '--request_batch_size', type=int, default=8, metavar='NUMBER',
help='batch size (default: 8)')
parser.add_argument('-request_output_len', '--request_output_len', type=int, default=32, metavar='NUMBER',
help='output length (default: 32)')
parser.add_argument('-start_id', '--start_id', type=int, default=50256, metavar='NUMBER',
help='start id (default: 50256)')
parser.add_argument('-end_id', '--end_id', type=int, default=50256, metavar='NUMBER',
help='end id (default: 50256)')
parser.add_argument('-repetition_penalty', '--repetition_penalty', type=float, default=1.0, metavar='NUMBER',
help='repetition_penalty (default: 1.0)')
parser.add_argument('-len_penalty', '--len_penalty', type=float, default=0.0, metavar='NUMBER',
help='len_penalty (default: 0.0)')
parser.add_argument('-beam_search_diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
help='beam_search_diversity_rate (default: 0.0)')
parser.add_argument('-rotary_embedding', '--rotary_embedding', type=int, default=64, metavar='NUMBER',
help='rotary embedding dimension (default: 64)')
args = parser.parse_args()
generate_gpt_config(vars(args))
| FasterTransformer-main | examples/pytorch/gptj/utils/generate_gptj_config.py |
import collections
import sys
colors = ["gray", "darkorange", "limegreen", "royalblue", "lightcoral", "bisque", "forestgreen", "blueviolet", "red", "gold", "cyan", "purple", "saddlebrown", "yellow", "slategray", "magenta", "peachpuff", "darkkhaki", "teal", "pink"]
profile = {}
filename = "profile.txt"
if len(sys.argv) > 1:
filename = sys.argv[1]
for line in open(filename):
if line.startswith("Tile"):
tile_id = line.split(":")[0].split()[1]
profile[tile_id] = {}
else:
toks = line.strip().split(": ")
if (toks[0] != "sm_id"):
timestamp = int(toks[1])
profile[tile_id][toks[0]] = timestamp
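# Normalize all timestamps so that the earliest scheduler fetch seen on SM 0 becomes time zero.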
min_time = None
for tile in profile:
if profile[tile]["sm_id"] == 0:
start_time = profile[tile]["scheduler_fetch_start"]
if min_time is None or start_time < min_time:
min_time = start_time
for tile in profile:
for key in profile[tile]:
if key != "sm_id":
assert(profile[tile][key] is not None)
profile[tile][key] = profile[tile][key] - min_time
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
fig, ax = plt.subplots()
max_time = 0
tile_offset = 0
totals = collections.defaultdict(float)
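# Draw one row per tile that ran on SM 0: the DMA-side events sit on the lower half of the row
# and the compute-side events sit slightly above them, colored per pipeline phase.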
for tile_id in profile:
entry = profile[tile_id]
if entry["sm_id"] == 0:
ax.add_patch(Rectangle((entry["scheduler_fetch_start"],tile_offset),entry["scheduler_fetch_complete"]-entry["scheduler_fetch_start"],0.4,facecolor="blue"))
ax.add_patch(Rectangle((entry["dma_tile_wait_start"],tile_offset),entry["dma_tile_wait_complete"]-entry["dma_tile_wait_start"],0.4,facecolor="red"))
ax.add_patch(Rectangle((entry["dma_tile_wait_complete"],tile_offset),entry["dma_loads_issued"]-entry["dma_tile_wait_complete"],0.4,facecolor="yellow"))
ax.add_patch(Rectangle((entry["compute_tile_wait_start"],tile_offset+.25),entry["compute_tile_wait_complete"]-entry["compute_tile_wait_start"],0.4,facecolor="orange"))
totals["tile_wait"] += entry["compute_tile_wait_complete"]-entry["compute_tile_wait_start"]
ax.add_patch(Rectangle((entry["compute_tile_wait_complete"],tile_offset+.25),entry["compute_first_data_wait_complete"]-entry["compute_tile_wait_complete"],0.4,facecolor="green"))
totals["first_data_wait"] += entry["compute_first_data_wait_complete"]-entry["compute_tile_wait_complete"]
ax.add_patch(Rectangle((entry["compute_first_data_wait_complete"],tile_offset+.25),entry["epilogue_begin"]-entry["compute_first_data_wait_complete"],0.4,facecolor="pink"))
totals["mainloop"] += entry["epilogue_begin"]-entry["compute_first_data_wait_complete"]
ax.add_patch(Rectangle((entry["epilogue_begin"],tile_offset+.25),entry["epilogue_complete"]-entry["epilogue_begin"],0.4,facecolor="grey"))
totals["epilogue"] += entry["epilogue_complete"]-entry["epilogue_begin"]
if entry["epilogue_complete"] > max_time:
max_time = entry["epilogue_complete"]
tile_offset += 1
for k in ("tile_wait", "first_data_wait", "mainloop", "epilogue"):
mean = float(totals[k])/tile_offset
print("%s mean: %.02f"%(k,mean))
plt.xlim([0,max_time])
plt.ylim([0,tile_offset+1])
plt.show()
| FasterTransformer-main | 3rdparty/fp8_qgmma_1x1/parse_profile.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Diagnostic functions."""
import os
import re
import tempfile
from unittest.mock import patch, Mock
import pytest # pylint: disable=import-error
from cli_test_helpers import ArgvContext, EnvironContext # pylint: disable=import-error
from spark_rapids_pytools import wrapper
from .mock_cluster import mock_live_cluster
@pytest.mark.parametrize('cloud', ['dataproc', 'emr'])
class TestInfoCollect:
"""Test info collect functions."""
def run_tool(self, cloud, args=['--yes', '--verbose'], expected_exception=None): # pylint: disable=dangerous-default-value
with tempfile.TemporaryDirectory() as tmpdir:
key_file = os.path.join(tmpdir, 'test.pem')
if cloud == 'emr':
# create empty ssh key file for EMR test
with open(key_file, 'a', encoding='utf8') as file:
file.close()
with EnvironContext(RAPIDS_USER_TOOLS_KEY_PAIR_PATH=key_file):
with ArgvContext('spark_rapids_user_tools', cloud, 'diagnostic', 'test-cluster',
'--output_folder', tmpdir, *args):
if expected_exception:
with pytest.raises(expected_exception):
wrapper.main()
else:
wrapper.main()
@patch('spark_rapids_pytools.common.utilities.SysCmd.build')
def test_info_collect(self, build_mock, cloud, capsys):
return_values = mock_live_cluster[cloud].copy()
# Mock return values for info collection
return_values += ['done'] * 6
mock = Mock()
mock.exec = Mock(side_effect=return_values)
build_mock.return_value = mock
self.run_tool(cloud)
if cloud == 'dataproc':
assert len(build_mock.call_args_list) == 13
elif cloud == 'emr':
assert len(build_mock.call_args_list) == 12
_, stderr = capsys.readouterr()
assert re.match(r".*Archive '/(tmp|var)/.*/diag_.*\.tar' is successfully created\..*", stderr, re.DOTALL)
@patch('spark_rapids_pytools.common.utilities.SysCmd.build')
def test_thread_num(self, build_mock, cloud, capsys):
return_values = mock_live_cluster[cloud].copy()
# Mock return values for info collection
return_values += ['done'] * 6
mock = Mock()
mock.exec = Mock(side_effect=return_values)
build_mock.return_value = mock
self.run_tool(cloud, ['--thread_num', '7', '--yes', '--verbose'])
if cloud == 'dataproc':
assert len(build_mock.call_args_list) == 13
elif cloud == 'emr':
assert len(build_mock.call_args_list) == 12
_, stderr = capsys.readouterr()
assert 'Set thread number as: 7' in stderr
assert re.match(r".*Archive '/(tmp|var)/.*/diag_.*\.tar' is successfully created\..*", stderr, re.DOTALL)
@patch('spark_rapids_pytools.common.utilities.SysCmd.build')
@pytest.mark.parametrize('thread_num', ['0', '11', '123'])
def test_invalid_thread_num(self, build_mock, cloud, thread_num, capsys):
return_values = mock_live_cluster[cloud].copy()
# Mock return values for info collection
return_values += ['done'] * 6
mock = Mock()
mock.exec = Mock(side_effect=return_values)
build_mock.return_value = mock
self.run_tool(cloud, ['--thread_num', thread_num, '--yes', '--verbose'], SystemExit)
if cloud == 'dataproc':
assert len(build_mock.call_args_list) == 7
elif cloud == 'emr':
assert len(build_mock.call_args_list) == 6
_, stderr = capsys.readouterr()
assert 'Invalid thread number' in stderr
assert 'Raised an error in phase [Process-Arguments]' in stderr
@patch('spark_rapids_pytools.common.utilities.SysCmd.build')
def test_upload_failed(self, build_mock, cloud, capsys):
return_values = mock_live_cluster[cloud].copy()
return_values.reverse()
# Mock failure for upload
def mock_exec():
if return_values:
return return_values.pop()
raise RuntimeError('mock test_upload_failed')
mock = Mock()
mock.exec = mock_exec
build_mock.return_value = mock
self.run_tool(cloud, ['--thread_num', '1', '--yes', '--verbose'], expected_exception=SystemExit)
if cloud == 'dataproc':
assert len(build_mock.call_args_list) >= 8
elif cloud == 'emr':
assert len(build_mock.call_args_list) >= 7
_, stderr = capsys.readouterr()
assert 'Error while uploading script to node' in stderr
assert 'Raised an error in phase [Execution]' in stderr
@patch('spark_rapids_pytools.common.utilities.SysCmd.build')
def test_download_failed(self, build_mock, cloud, capsys):
return_values = mock_live_cluster[cloud].copy()
# Mock return values for info collection
return_values += ['done'] * 4
return_values.reverse()
# Mock failure for download
def mock_exec():
if return_values:
return return_values.pop()
raise RuntimeError('mock test_download_failed')
mock = Mock()
mock.exec = mock_exec
build_mock.return_value = mock
self.run_tool(cloud, ['--thread_num', '1', '--yes', '--verbose'], expected_exception=SystemExit)
if cloud == 'dataproc':
assert len(build_mock.call_args_list) >= 12
elif cloud == 'emr':
assert len(build_mock.call_args_list) >= 11
_, stderr = capsys.readouterr()
assert 'Error while downloading collected info from node' in stderr
assert 'Raised an error in phase [Collecting-Results]' in stderr
@patch('spark_rapids_pytools.common.utilities.SysCmd.build')
@pytest.mark.parametrize('user_input', ['yes', 'YES', 'Yes', 'y', 'Y'])
def test_auto_confirm(self, build_mock, cloud, user_input, capsys):
return_values = mock_live_cluster[cloud].copy()
# Mock return values for info collection
return_values += ['done'] * 6
mock = Mock()
mock.exec = Mock(side_effect=return_values)
build_mock.return_value = mock
with patch('builtins.input', return_value=user_input):
self.run_tool(cloud, ['--verbose'])
if cloud == 'dataproc':
assert len(build_mock.call_args_list) == 13
elif cloud == 'emr':
assert len(build_mock.call_args_list) == 12
_, stderr = capsys.readouterr()
assert re.match(r".*Archive '/(tmp|var)/.*/diag_.*\.tar' is successfully created\..*", stderr, re.DOTALL)
@patch('spark_rapids_pytools.common.utilities.SysCmd.build')
@pytest.mark.parametrize('user_input', ['', 'n', 'no', 'NO', 'nO'])
def test_cancel_confirm(self, build_mock, cloud, user_input, capsys):
return_values = mock_live_cluster[cloud].copy()
mock = Mock()
mock.exec = Mock(side_effect=return_values)
build_mock.return_value = mock
with patch('builtins.input', return_value=user_input):
self.run_tool(cloud, ['--thread_num', '1', '--verbose'], expected_exception=SystemExit)
if cloud == 'dataproc':
assert len(build_mock.call_args_list) >= 7
elif cloud == 'emr':
assert len(build_mock.call_args_list) >= 6
_, stderr = capsys.readouterr()
assert 'User canceled the operation' in stderr
assert 'Raised an error in phase [Process-Arguments]' in stderr
| spark-rapids-tools-dev | user_tools/tests/test_diagnostic.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the testing package"""
| spark-rapids-tools-dev | user_tools/tests/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock cluster configurations for unit testing."""
import json
mock_live_cluster = {
"dataproc": [
"us-central1", # gcloud config get compute/region
"us-central1-a", # gcloud config get compute/zone
"dataproc-project-id", # gcloud config get core/project
# gcloud dataproc clusters describe test-cluster --format json --region us-central1
json.dumps({
"clusterUuid": "11111111-1111-1111-1111-111111111111",
"config": {
"masterConfig": {
"instanceNames": [
"test-master",
],
"machineTypeUri": "https://www.googleapis.com/compute/v1/projects/project-id/zones/us-central1-a/"\
"machineTypes/n1-standard-2",
},
"workerConfig": {
"accelerators": [{
"acceleratorTypeUri": "https://www.googleapis.com/compute/beta/projects/project-id/zones/"\
"us-central1-a/acceleratorTypes/nvidia-tesla-t4"
}],
"instanceNames": [
"test-worker-0",
],
"machineTypeUri": "https://www.googleapis.com/compute/v1/projects/project-id/zones/us-central1-a/"\
"machineTypes/n1-standard-8",
},
},
"status": {
"state": "RUNNING",
},
}),
# gcloud compute machine-types describe n1-standard-8 --format json --zone us-central1-a
json.dumps({
"guestCpus": 8,
"memoryMb": 30720,
}),
# gcloud compute accelerator-types describe nvidia-tesla-t4 --format json --zone us-central1-a
json.dumps({
"description": "NVIDIA T4",
}),
# gcloud compute machine-types describe n1-standard-2 --format json --zone us-central1-a
json.dumps({
"guestCpus": 2,
"memoryMb": 7680,
}),
],
"emr": [
# aws emr list-clusters --query 'Clusters[?Name==`test-cluster`]'
json.dumps([{
"Id": "j-testemr",
}]),
# aws emr describe-cluster --cluster-id j-testemr
json.dumps({
"Cluster": {
"Id": "j-testcluster",
"Status": {
"State": "RUNNING"
},
"Ec2InstanceAttributes": {
"Ec2AvailabilityZone": "us-west-2b",
},
"InstanceGroups": [
{
"Id": "ig-testinstance1",
"Market": "ON_DEMAND",
"InstanceGroupType": "MASTER",
"InstanceType": "m5a.12xlarge",
"RequestedInstanceCount": 1
},
{
"Id": "ig-testinstance2",
"Market": "ON_DEMAND",
"InstanceGroupType": "CORE",
"InstanceType": "g4dn.12xlarge",
"RequestedInstanceCount": 1
}
]
}
}),
# aws emr list-instances --cluster-id j-testcluster --instance-group-id ig-testinstance1
json.dumps({
"Instances": [{
"Id": "ci-testinstance1",
"Ec2InstanceId": "i-testec2id1",
"PublicDnsName": "ec2-123.456.789.us-west-2.compute.amazonaws.com",
"Status": {
"State": "RUNNING",
},
}]
}),
# aws emr list-instances --cluster-id j-testcluster --instance-group-id ig-testinstance2
json.dumps({
"Instances": [{
"Id": "ci-testinstance2",
"Ec2InstanceId": "i-testec2id2",
"PublicDnsName": "ec2-234.567.890.us-west-2.compute.amazonaws.com",
"Status": {
"State": "RUNNING",
},
}]
}),
# aws ec2 describe-instance-types --region us-west-2 --instance-types m5a.12xlarge
json.dumps({
"InstanceTypes": [{
"VCpuInfo": {
"DefaultVCpus": 48,
},
"MemoryInfo": {
"SizeInMiB": 196608,
},
}]
}),
# aws ec2 describe-instance-types --region us-west-2 --instance-types g4dn.12xlarge
json.dumps({
"InstanceTypes": [{
"VCpuInfo": {
"DefaultVCpus": 48,
},
"MemoryInfo": {
"SizeInMiB": 196608,
},
"GpuInfo": {
"Gpus": [{
"Name": "T4",
"Manufacturer": "NVIDIA",
"Count": 4,
"MemoryInfo": {
"SizeInMiB": 16384,
},
}],
"TotalGpuMemoryInMiB": 65536,
},
}]
}),
],
}
| spark-rapids-tools-dev | user_tools/tests/mock_cluster.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add common helpers and utilities for unit-tests"""
import sys
import pytest # pylint: disable=import-error
def get_test_resources_path():
# pylint: disable=import-outside-toplevel
if sys.version_info < (3, 9):
import importlib_resources
else:
import importlib.resources as importlib_resources
pkg = importlib_resources.files('tests.spark_rapids_tools_ut')
return pkg / 'resources'
def gen_cpu_cluster_props():
return [
('dataproc', 'cluster/dataproc/cpu-00.yaml'),
('emr', 'cluster/emr/cpu-00.json'),
('onprem', 'cluster/onprem/cpu-00.yaml'),
('databricks_aws', 'cluster/databricks/aws-cpu-00.json'),
('databricks_azure', 'cluster/databricks/azure-cpu-00.json')
]
all_cpu_cluster_props = gen_cpu_cluster_props()
# all cpu_cluster_props except the onPrem
csp_cpu_cluster_props = [(e_1, e_2) for (e_1, e_2) in all_cpu_cluster_props if e_1 != 'onprem']
# all csps except onprem
csps = ['dataproc', 'emr', 'databricks_aws', 'databricks_azure']
all_csps = csps + ['onprem']
class SparkRapidsToolsUT: # pylint: disable=too-few-public-methods
@pytest.fixture(autouse=True)
def get_ut_data_dir(self):
# TODO: find a dynamic way to load the package name, instead of having it hardcoded
return get_test_resources_path()
| spark-rapids-tools-dev | user_tools/tests/spark_rapids_tools_ut/conftest.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Tool argument validators"""
import dataclasses
from collections import defaultdict
from typing import Dict, Callable, List
import fire
import pytest # pylint: disable=import-error
from spark_rapids_tools import CspEnv
from spark_rapids_tools.cmdli.argprocessor import AbsToolUserArgModel, ArgValueCase
from spark_rapids_tools.enums import QualFilterApp
from .conftest import SparkRapidsToolsUT, all_cpu_cluster_props, csp_cpu_cluster_props, csps
@dataclasses.dataclass
class TripletArgCase:
argv_cases: List[ArgValueCase]
label: str = dataclasses.field(init=False)
tests: List[str] = dataclasses.field(init=False, default_factory=lambda: [])
def __post_init__(self):
self.label = str(self.argv_cases)
# We will use this lookup table to check against coverage of all argument cases
# The way this can be done is to write one last unit-test that loops over the whole dictionary and makes
# sure that all entries have unit tests
triplet_test_registry: Dict[str, TripletArgCase] = defaultdict(TripletArgCase)
def register_triplet_test(argv_cases: list):
def decorator(func_cb: Callable):
obj_k = str(argv_cases)
argv_obj = triplet_test_registry.get(obj_k)
if argv_obj is None:
argv_obj = TripletArgCase(argv_cases)
triplet_test_registry[obj_k] = argv_obj
argv_obj.tests.append(func_cb.__name__)
return func_cb
return decorator
class TestToolArgProcessor(SparkRapidsToolsUT): # pylint: disable=too-few-public-methods
"""
Class testing toolArgProcessor functionalities
"""
@staticmethod
def validate_args_w_savings_enabled(tool_name: str, t_args: dict):
if tool_name == 'qualification':
assert t_args['savingsCalculations']
# filterApps should be set to savings
assert t_args['filterApps'] == QualFilterApp.SAVINGS
@staticmethod
def validate_args_w_savings_disabled(tool_name: str, t_args: dict):
if tool_name == 'qualification':
assert not t_args['savingsCalculations']
# filterApps should be set to speedups
assert t_args['filterApps'] == QualFilterApp.SPEEDUPS
@pytest.mark.parametrize('tool_name', ['qualification', 'profiling', 'bootstrap'])
@register_triplet_test([ArgValueCase.IGNORE, ArgValueCase.UNDEFINED, ArgValueCase.UNDEFINED])
def test_no_args(self, tool_name):
fire.core.Display = lambda lines, out: out.write('\n'.join(lines) + '\n')
with pytest.raises(SystemExit) as pytest_wrapped_e:
AbsToolUserArgModel.create_tool_args(tool_name)
assert pytest_wrapped_e.type == SystemExit
@pytest.mark.parametrize('tool_name', ['qualification', 'profiling', 'bootstrap'])
@register_triplet_test([ArgValueCase.UNDEFINED, ArgValueCase.VALUE_A, ArgValueCase.UNDEFINED])
def test_cluster__name_no_hints(self, tool_name):
fire.core.Display = lambda lines, out: out.write('\n'.join(lines) + '\n')
with pytest.raises(SystemExit) as pytest_wrapped_e:
AbsToolUserArgModel.create_tool_args(tool_name, cluster='mycluster')
assert pytest_wrapped_e.type == SystemExit
@pytest.mark.parametrize('tool_name', ['qualification', 'profiling'])
@pytest.mark.parametrize('csp,prop_path', all_cpu_cluster_props)
@register_triplet_test([ArgValueCase.UNDEFINED, ArgValueCase.VALUE_B, ArgValueCase.VALUE_A])
def test_with_eventlogs(self, get_ut_data_dir, tool_name, csp, prop_path):
cluster_prop_file = f'{get_ut_data_dir}/{prop_path}'
tool_args = AbsToolUserArgModel.create_tool_args(tool_name,
cluster=f'{cluster_prop_file}',
eventlogs=f'{get_ut_data_dir}/eventlogs')
assert tool_args['runtimePlatform'] == CspEnv(csp)
# for qualification, passing the cluster properties should enable cost savings unless it is the
# onprem platform, which requires target_platform
if CspEnv(csp) != CspEnv.ONPREM:
self.validate_args_w_savings_enabled(tool_name, tool_args)
else:
self.validate_args_w_savings_disabled(tool_name, tool_args)
@pytest.mark.parametrize('tool_name', ['qualification', 'profiling'])
@register_triplet_test([ArgValueCase.UNDEFINED, ArgValueCase.UNDEFINED, ArgValueCase.VALUE_A])
def test_no_cluster_props(self, get_ut_data_dir, tool_name):
# all eventlogs are stored on a local path. There is no way to determine which cluster
# they refer to.
tool_args = AbsToolUserArgModel.create_tool_args(tool_name,
eventlogs=f'{get_ut_data_dir}/eventlogs')
assert tool_args['runtimePlatform'] == CspEnv.ONPREM
# for qualification, cost savings should be disabled
self.validate_args_w_savings_disabled(tool_name, tool_args)
@pytest.mark.parametrize('tool_name', ['qualification', 'profiling'])
@register_triplet_test([ArgValueCase.UNDEFINED, ArgValueCase.VALUE_A, ArgValueCase.VALUE_A])
@register_triplet_test([ArgValueCase.VALUE_A, ArgValueCase.VALUE_A, ArgValueCase.IGNORE])
def test_onprem_disallow_cluster_by_name(self, get_ut_data_dir, tool_name):
# onprem platform cannot run when the cluster is by_name
with pytest.raises(SystemExit) as pytest_exit_e:
AbsToolUserArgModel.create_tool_args(tool_name,
cluster='my_cluster',
eventlogs=f'{get_ut_data_dir}/eventlogs')
assert pytest_exit_e.type == SystemExit
with pytest.raises(SystemExit) as pytest_wrapped_e:
AbsToolUserArgModel.create_tool_args(tool_name,
platform='onprem',
cluster='my_cluster')
assert pytest_wrapped_e.type == SystemExit
@pytest.mark.parametrize('tool_name', ['qualification', 'profiling'])
@pytest.mark.parametrize('csp', csps)
@register_triplet_test([ArgValueCase.VALUE_A, ArgValueCase.VALUE_A, ArgValueCase.UNDEFINED])
def test_cluster_name_no_eventlogs(self, tool_name, csp):
# Missing eventlogs should be accepted for all CSPs (except onPrem)
# because the eventlogs can be retrieved from the cluster
tool_args = AbsToolUserArgModel.create_tool_args(tool_name,
platform=csp,
cluster='my_cluster')
assert tool_args['runtimePlatform'] == CspEnv(csp)
self.validate_args_w_savings_enabled(tool_name, tool_args)
@pytest.mark.parametrize('tool_name', ['qualification', 'profiling'])
@pytest.mark.parametrize('csp,prop_path', csp_cpu_cluster_props)
@register_triplet_test([ArgValueCase.UNDEFINED, ArgValueCase.VALUE_B, ArgValueCase.UNDEFINED])
def test_cluster_props_no_eventlogs(self, get_ut_data_dir, tool_name, csp, prop_path):
# Missing eventlogs should be accepted for all CSPs (except onPrem)
# because the eventlogs can be retrieved from the cluster
cluster_prop_file = f'{get_ut_data_dir}/{prop_path}'
tool_args = AbsToolUserArgModel.create_tool_args(tool_name,
cluster=f'{cluster_prop_file}')
assert tool_args['runtimePlatform'] == CspEnv(csp)
self.validate_args_w_savings_enabled(tool_name, tool_args)
@pytest.mark.skip(reason='Unit tests are not completed yet')
def test_arg_cases_coverage(self):
args_keys = [
[ArgValueCase.IGNORE, ArgValueCase.UNDEFINED, ArgValueCase.UNDEFINED],
[ArgValueCase.UNDEFINED, ArgValueCase.VALUE_A, ArgValueCase.UNDEFINED],
[ArgValueCase.VALUE_A, ArgValueCase.VALUE_A, ArgValueCase.IGNORE],
[ArgValueCase.UNDEFINED, ArgValueCase.VALUE_B, ArgValueCase.IGNORE],
[ArgValueCase.UNDEFINED, ArgValueCase.UNDEFINED, ArgValueCase.VALUE_A],
[ArgValueCase.UNDEFINED, ArgValueCase.VALUE_A, ArgValueCase.VALUE_A],
[ArgValueCase.IGNORE, ArgValueCase.UNDEFINED, ArgValueCase.VALUE_A]
]
for arg_key in args_keys:
assert str(arg_key) in triplet_test_registry
| spark-rapids-tools-dev | user_tools/tests/spark_rapids_tools_ut/test_tool_argprocessor.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the unit-tests package"""
| spark-rapids-tools-dev | user_tools/tests/spark_rapids_tools_ut/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Identifying cluster from properties"""
import pytest # pylint: disable=import-error
from spark_rapids_tools import CspPath
from spark_rapids_tools.cloud import ClientCluster
from spark_rapids_tools.exceptions import InvalidPropertiesSchema
from .conftest import SparkRapidsToolsUT, all_cpu_cluster_props
class TestClusterCSP(SparkRapidsToolsUT): # pylint: disable=too-few-public-methods
"""
Class testing identification of the cluster type by comparing the properties to
the defined schema
"""
def test_cluster_invalid_path(self, get_ut_data_dir):
with pytest.raises(InvalidPropertiesSchema) as ex_schema:
ClientCluster(CspPath(f'{get_ut_data_dir}/non_existing_file.json'))
assert 'Incorrect properties files:' in ex_schema.value.message
@pytest.mark.parametrize('csp,prop_path', all_cpu_cluster_props)
def test_define_cluster_type_from_schema(self, csp, prop_path, get_ut_data_dir):
client_cluster = ClientCluster(CspPath(f'{get_ut_data_dir}/{prop_path}'))
assert client_cluster.platform_name == csp
| spark-rapids-tools-dev | user_tools/tests/spark_rapids_tools_ut/test_cluster.py |
#!/usr/bin/env python3
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spark RAPIDS speedup factor generation script"""
import argparse
import os
import pandas as pd
parser = argparse.ArgumentParser(description="Speedup Factor Analysis")
parser.add_argument("--cpu", type=str, help="Directory of CPU profiler logs", required=True)
parser.add_argument("--gpu", type=str, help="Directory of GPU profiler logs", required=True)
parser.add_argument("--output", type=str, help="Filename for custom speedup factors", required=True)
parser.add_argument("--verbose", action="store_true", help="flag to generate full verbose output for logging raw node results")
parser.add_argument("--chdir", action="store_true", help="flag to change to work dir that's the script located")
args = parser.parse_args()
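# Illustrative usage (a sketch; the profiler output directories and output filename below are
# hypothetical and should point at the per-app rapids_4_spark_profile folders produced by the
# Profiling tool; operatorsList.csv and defaultScores.csv are read from the working directory):
#   python generate_speedup_factors.py \
#       --cpu ./cpu_profile/rapids_4_spark_profile \
#       --gpu ./gpu_profile/rapids_4_spark_profile \
#       --output generatedScores.csv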
cpu_dir = args.cpu
gpu_dir = args.gpu
output = args.output
verbose = args.verbose
cpu_stage_log = {}
gpu_stage_log = {}
cpu_duration = 0.0
gpu_duration = 0.0
min_speedup = 1.0
if args.chdir:
# Change to the working directory where the script is located
os.chdir(os.path.dirname(__file__))
# CPU log parsing
for app in os.listdir(cpu_dir):
# - figure out query name from application_information.csv
app_info = pd.read_csv(cpu_dir + "/" + app + "/application_information.csv")
app_name = app_info.loc[0]["appName"]
cpu_duration = cpu_duration + app_info.loc[0]["duration"]
cpu_stage_log[app_name] = {}
# - load wholestagecodegen_mapping.csv into a dictionary for lookups (CPU only)
mapping_info = pd.read_csv(cpu_dir + "/" + app + "/wholestagecodegen_mapping.csv")
mapping_info = mapping_info.groupby(['SQL Node'])['Child Node'].apply(','.join).reset_index()
# - process sql_plan_metrics_for_application.csv
# - load in "duration" (CPU)
# - replace WholeStageCodegen (CPU only) with list of operators from mapping lookup file
# - mapping_info.parent = sql_times.nodeName
cpu_sql_info = pd.read_csv(cpu_dir + "/" + app + "/sql_plan_metrics_for_application.csv")
cpu_sql_times = cpu_sql_info[cpu_sql_info["name"] == "duration"]
cpu_sql_combined = cpu_sql_times.set_index('nodeName').join(mapping_info.set_index('SQL Node'), how='left')
# - parse WholeStageCodegen durations with child node mapping
cpu_sql_times_df = cpu_sql_combined[['Child Node', 'total']]
for index, row in cpu_sql_times_df.iterrows():
operators = str(row['Child Node']).split(',')
duration = row['total']/len(operators)/1000.0
for operator in operators:
if operator in cpu_stage_log[app_name]:
cpu_stage_log[app_name][operator] = cpu_stage_log[app_name][operator] + duration
else:
cpu_stage_log[app_name][operator] = duration
# - parse top-level execs from sql_to_stage_information.csv
cpu_stage_info = pd.read_csv(cpu_dir + "/" + app + "/sql_to_stage_information.csv")
cpu_stage_times = cpu_stage_info[['Stage Duration', 'SQL Nodes(IDs)']]
cpu_stage_times_df = cpu_stage_times.dropna()
for index, row in cpu_stage_times_df.iterrows():
node_list = str(row['SQL Nodes(IDs)'])
operators = node_list.split(',')
duration = row['Stage Duration']/(len(operators)-node_list.count("WholeStageCodegen"))
for operator in operators:
if "WholeStageCodegen" in operator:
continue
op_key = operator.split('(')[0]
if op_key in cpu_stage_log[app_name]:
cpu_stage_log[app_name][op_key] = cpu_stage_log[app_name][op_key] + duration
else:
cpu_stage_log[app_name][op_key] = duration
# GPU log parsing
for app in os.listdir(gpu_dir):
# - figure out query name from application_information.csv
app_info = pd.read_csv(gpu_dir + "/" + app + "/application_information.csv")
app_name = app_info.loc[0]["appName"]
gpu_duration = gpu_duration + app_info.loc[0]["duration"]
gpu_stage_log[app_name] = {}
# - process sql_to_stage_information.csv to get stage durations
# - split up duration by operators listed in each stage
gpu_stage_info = pd.read_csv(gpu_dir + "/" + app + "/sql_to_stage_information.csv")
gpu_stage_times = gpu_stage_info[['Stage Duration', 'SQL Nodes(IDs)']]
for index, row in gpu_stage_times.iterrows():
operators = str(row['SQL Nodes(IDs)']).split(',')
duration = row['Stage Duration']/len(operators)
for operator in operators:
op_key = operator.split('(')[0]
if op_key in gpu_stage_log[app_name]:
gpu_stage_log[app_name][op_key] = gpu_stage_log[app_name][op_key] + duration
else:
gpu_stage_log[app_name][op_key] = duration
cpu_stage_totals = {}
gpu_stage_totals = {}
cpu_stage_total = 0.0
gpu_stage_total = 0.0
# Sum up SQL operators for each operator found in CPU and GPU
for app_key in cpu_stage_log:
for op_key in cpu_stage_log[app_key]:
if op_key not in cpu_stage_totals:
cpu_stage_totals[op_key] = cpu_stage_log[app_key][op_key]
else:
cpu_stage_totals[op_key] = cpu_stage_totals[op_key] + cpu_stage_log[app_key][op_key]
cpu_stage_total = cpu_stage_total + cpu_stage_log[app_key][op_key]
for app_key in gpu_stage_log:
for op_key in gpu_stage_log[app_key]:
if op_key not in gpu_stage_totals:
gpu_stage_totals[op_key] = gpu_stage_log[app_key][op_key]
else:
gpu_stage_totals[op_key] = gpu_stage_totals[op_key] + gpu_stage_log[app_key][op_key]
gpu_stage_total = gpu_stage_total + gpu_stage_log[app_key][op_key]
# Create dictionary of execs where speedup factors can be calculated
scores_dict = {}
# Scan operators
if 'Scan parquet ' in cpu_stage_totals and 'GpuScan parquet ' in gpu_stage_totals:
scores_dict["BatchScanExec"] = str(round(cpu_stage_totals['Scan parquet '] / gpu_stage_totals['GpuScan parquet '], 2))
scores_dict["FileSourceScanExec"] = str(round(cpu_stage_totals['Scan parquet '] / gpu_stage_totals['GpuScan parquet '], 2))
if 'Scan orc ' in cpu_stage_totals and 'GpuScan orc ' in gpu_stage_totals:
scores_dict["BatchScanExec"] = str(round(cpu_stage_totals['Scan orc '] / gpu_stage_totals['GpuScan orc '], 2))
scores_dict["FileSourceScanExec"] = str(round(cpu_stage_totals['Scan orc '] / gpu_stage_totals['GpuScan orc '], 2))
# Other operators
if 'Expand' in cpu_stage_totals and 'GpuExpand' in gpu_stage_totals:
scores_dict["ExpandExec"] = str(round(cpu_stage_totals['Expand'] / gpu_stage_totals['GpuExpand'], 2))
if 'CartesianProduct' in cpu_stage_totals and 'GpuCartesianProduct' in gpu_stage_totals:
scores_dict["CartesianProductExec"] = str(round(cpu_stage_totals['CartesianProduct'] / gpu_stage_totals['GpuCartesianProduct'], 2))
if 'Filter' in cpu_stage_totals and 'GpuFilter' in gpu_stage_totals:
scores_dict["FilterExec"] = str(round(cpu_stage_totals['Filter'] / gpu_stage_totals['GpuFilter'], 2))
if 'SortMergeJoin' in cpu_stage_totals and 'GpuShuffledHashJoin' in gpu_stage_totals:
scores_dict["SortMergeJoinExec"] = str(round(cpu_stage_totals['SortMergeJoin'] / gpu_stage_totals['GpuShuffledHashJoin'], 2))
if 'BroadcastHashJoin' in cpu_stage_totals and 'GpuBroadcastHashJoin' in gpu_stage_totals:
scores_dict["BroadcastHashJoinExec"] = str(round(cpu_stage_totals['BroadcastHashJoin'] / gpu_stage_totals['GpuBroadcastHashJoin'], 2))
if 'Exchange' in cpu_stage_totals and 'GpuColumnarExchange' in gpu_stage_totals:
scores_dict["ShuffleExchangeExec"] = str(round(cpu_stage_totals['Exchange'] / gpu_stage_totals['GpuColumnarExchange'], 2))
if 'HashAggregate' in cpu_stage_totals and 'GpuHashAggregate' in gpu_stage_totals:
scores_dict["HashAggregateExec"] = str(round(cpu_stage_totals['HashAggregate'] / gpu_stage_totals['GpuHashAggregate'], 2))
scores_dict["ObjectHashAggregateExec"] = str(round(cpu_stage_totals['HashAggregate'] / gpu_stage_totals['GpuHashAggregate'], 2))
scores_dict["SortAggregateExec"] = str(round(cpu_stage_totals['HashAggregate'] / gpu_stage_totals['GpuHashAggregate'], 2))
if 'TakeOrderedAndProject' in cpu_stage_totals and 'GpuTopN' in gpu_stage_totals:
scores_dict["TakeOrderedAndProjectExec"] = str(round(cpu_stage_totals['TakeOrderedAndProject'] / gpu_stage_totals['GpuTopN'], 2))
if 'BroadcastNestedLoopJoin' in cpu_stage_totals and 'GpuBroadcastNestedLoopJoin' in gpu_stage_totals:
scores_dict["BroadcastNestedLoopJoinExec"] = str(round(cpu_stage_totals['BroadcastNestedLoopJoin'] / gpu_stage_totals['GpuBroadcastNestedLoopJoin'], 2))
# Set minimum to 1.0 for speedup factors
for key in scores_dict:
if float(scores_dict[key]) < min_speedup:
scores_dict[key] = f"{min_speedup}"
# Set overall speedup as the default value for execs not found in the logs
overall_speedup = str(max(min_speedup, round(cpu_duration/gpu_duration, 2)))
# Print out node metrics (if verbose)
if verbose:
print("# CPU Operator Metrics")
for key in cpu_stage_totals:
print(key + " = " + str(cpu_stage_totals[key]))
print("# GPU Operator Metrics")
for key in gpu_stage_totals:
print(key + " = " + str(gpu_stage_totals[key]))
print("# Summary Metrics")
print("CPU Total = " + str(cpu_stage_total))
print("GPU Total = " + str(gpu_stage_total))
print("Overall speedup = " + overall_speedup)
# Print out individual exec speedup factors
print("# Speedup Factors ")
for key in scores_dict:
print(f"{key} = {scores_dict[key]}")
# Load in list of operators and set initial values to default speedup
scores_df = pd.read_csv("operatorsList.csv")
scores_df["Score"] = overall_speedup
# Update operators that are found in benchmark
for key in scores_dict:
scores_df.loc[scores_df['CPUOperator'] == key, 'Score'] = scores_dict[key]
# Add in hard-coded defaults
defaults_df = pd.read_csv("defaultScores.csv")
# Generate output CSV file
final_df = pd.concat([scores_df, defaults_df])
final_df.to_csv(output, index=False)
| spark-rapids-tools-dev | user_tools/custom_speedup_factors/generate_speedup_factors.py |
#!/usr/bin/env python3
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spark RAPIDS speedup factor validation script"""
import argparse
import os
import glob
import subprocess
import pandas as pd
from tabulate import tabulate
parser = argparse.ArgumentParser(description="Speedup Factor Validation")
parser.add_argument("--cpu_log", type=str, help="Directory of CPU event log(s)", required=True)
parser.add_argument("--gpu_log", type=str, help="Directory of GPU event log(s)", required=True)
parser.add_argument("--output", type=str, help="Output folder for storing logs", required=True)
parser.add_argument("--speedups", type=str, help="Custom speedup factor file")
parser.add_argument("--cpu_profile", type=str, help="Directory of CPU profiler log(s)")
parser.add_argument("--gpu_profile", type=str, help="Directory of GPU profiler log(s)")
parser.add_argument("--jar", type=str, help="Custom tools jar")
parser.add_argument("--verbose", action="store_true", help="flag to generate full verbose output for logging raw node results")
args = parser.parse_args()
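# Illustrative usage (a sketch with hypothetical paths; only --cpu_log, --gpu_log and --output are
# required, the remaining flags override the generated speedup factors, profiler output and tools jar):
#   python validate_qualification_estimates.py \
#       --cpu_log ./eventlogs/cpu \
#       --gpu_log ./eventlogs/gpu \
#       --output ./validation_output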
cpu_log = args.cpu_log
gpu_log = args.gpu_log
cpu_profile = args.cpu_profile
gpu_profile = args.gpu_profile
output = args.output
speedups = args.speedups
jar = args.jar
verbose = args.verbose
print(f"Output folder = {output}")
print(f"CPU event log = {cpu_log}")
print(f"GPU event log = {gpu_log}")
subprocess.run(f"rm -rf {output}", shell=True)
speedups_arg = ""
if speedups is not None:
speedups_arg = f"--speedup-factor-file {speedups}"
else:
speedups_arg = f"--speedup-factor-file {output}/generatedScores.csv"
jar_arg = ""
if jar is not None:
jar_arg = f"--tools_jar {jar}"
# Generate speedup factors
### run GPU profiler if needed
gpu_profile_dir = ""
if gpu_profile is not None:
gpu_profile_dir = gpu_profile
else:
gpu_profile_dir = f"{output}/gpu_profile"
subprocess.run(f"spark_rapids_user_tools onprem profiling --csv {jar_arg} --local_folder {gpu_profile_dir} --eventlogs {gpu_log}", shell=True)
if speedups is None:
### run CPU profiler if needed
cpu_profile_dir = ""
if cpu_profile is not None:
cpu_profile_dir = cpu_profile
else:
cpu_profile_dir = f"{output}/cpu_profile"
subprocess.run(f"spark_rapids_user_tools onprem profiling --csv {jar_arg} --local_folder {cpu_profile_dir} --eventlogs {cpu_log}", shell=True)
### run speedup factor generation
subprocess.run(f"python generate_speedup_factors.py --cpu {cpu_profile_dir}/*/rapids_4_spark_profile --gpu {gpu_profile_dir}/*/rapids_4_spark_profile --output {output}/generatedScores.csv", shell=True)
# Run qualification
### set speedup factors to input or generated
speedups_arg = ""
if speedups is not None:
speedups_arg = f"--speedup-factor-file {speedups}"
else:
speedups_arg = f"--speedup-factor-file {output}/generatedScores.csv"
### run CPU qualification
cpu_tmp_dir = f"{output}/cpu"
subprocess.run(f"spark_rapids_user_tools onprem qualification {speedups_arg} {jar_arg} --local_folder {cpu_tmp_dir} --eventlogs {cpu_log}", shell=True)
# Parse and validate results
### CPU log parsing
cpu_app_info = pd.read_csv(glob.glob(f"{cpu_tmp_dir}/*/rapids_4_spark_qualification_output/rapids_4_spark_qualification_output.csv")[0])
cpu_query_info = cpu_app_info[["App Name", "App Duration", "Estimated GPU Duration", "Estimated GPU Speedup"]]
### GPU log parsing
gpu_query_info = pd.DataFrame(columns = ['App Name', 'GPU Duration'])
counter = 0
for app in glob.glob(f"{gpu_profile_dir}/*/rapids_4_spark_profile/*/application_information.csv"):
app_info = pd.read_csv(app)
new_row = pd.DataFrame({'App Name': app_info.loc[0]["appName"], 'GPU Duration': app_info.loc[0]["duration"]}, index=[counter])
gpu_query_info = pd.concat([gpu_query_info, new_row])
counter = counter+1
merged_info = cpu_query_info.merge(gpu_query_info, left_on='App Name', right_on='App Name')
merged_info["Duration Error (sec)"] = (merged_info["Estimated GPU Duration"] - merged_info["GPU Duration"])/1000.0
merged_info["Duration Error (pct)"] = (100.0*(merged_info["Estimated GPU Duration"] - merged_info["GPU Duration"])/merged_info["Estimated GPU Duration"]).apply(lambda x: round(x,2))
merged_info["GPU Speedup"] = (merged_info["App Duration"]/merged_info["GPU Duration"]).apply(lambda x: round(x,2))
merged_info["Speedup Error (abs)"] = merged_info["Estimated GPU Speedup"] - merged_info["GPU Speedup"]
merged_info["Speedup Error (pct)"] = (100.0*(merged_info["Estimated GPU Speedup"] - merged_info["GPU Speedup"])/merged_info["Estimated GPU Speedup"]).apply(lambda x: round(x,2))
print("==================================================")
print(" Application Details")
print("==================================================")
print(tabulate(merged_info, headers='keys', tablefmt='psql'))
print("==================================================")
print(" Duration Error Metrics ")
print("==================================================")
print("Average duration error (seconds) = " + str(round(merged_info["Duration Error (sec)"].mean(),2)))
print("Median duration error (seconds) = " + str(round(merged_info["Duration Error (sec)"].median(),2)))
print("Min duration error (seconds) = " + str(round(merged_info["Duration Error (sec)"].min(),2)))
print("Max duration error (seconds) = " + str(round(merged_info["Duration Error (sec)"].max(),2)))
print("Average duration error (diff pct) = " + str(round(merged_info["Duration Error (pct)"].mean(),2)))
print("Median duration error (diff pct) = " + str(round(merged_info["Duration Error (pct)"].median(),2)))
print("Max duration error (diff pct) = " + str(round(merged_info["Duration Error (pct)"].max(),2)))
print("Average duration error (diff sec) = " + str(round(merged_info["Duration Error (sec)"].abs().mean(),2)))
print("Median duration error (diff sec) = " + str(round(merged_info["Duration Error (sec)"].abs().median(),2)))
print("Max duration error (diff sec) = " + str(round(merged_info["Duration Error (sec)"].abs().max(),2)))
print("Average duration error (abs pct) = " + str(round(merged_info["Duration Error (pct)"].abs().mean(),2)))
print("Median duration error (abs pct) = " + str(round(merged_info["Duration Error (pct)"].abs().median(),2)))
print("Max duration error (abs pct) = " + str(round(merged_info["Duration Error (pct)"].abs().max(),2)))
print("==================================================")
print(" Speedup Error Metrics ")
print("==================================================")
print("Average speedup error (diff) = " + str(round(merged_info["Speedup Error (abs)"].mean(),2)))
print("Median speedup error (diff) = " + str(round(merged_info["Speedup Error (abs)"].median(),2)))
print("Min speedup error (diff) = " + str(round(merged_info["Speedup Error (abs)"].min(),2)))
print("Max speedup error (diff) = " + str(round(merged_info["Speedup Error (abs)"].max(),2)))
print("Average speedup error (diff pct) = " + str(round(merged_info["Speedup Error (pct)"].mean(),2)))
print("Median speedup error (diff pct = " + str(round(merged_info["Speedup Error (pct)"].median(),2)))
print("Max speedup error (diff pct) = " + str(round(merged_info["Speedup Error (pct)"].max(),2)))
print("Average speedup error (abs diff) = " + str(round(merged_info["Speedup Error (abs)"].abs().mean(),2)))
print("Median speedup error (abs diff) = " + str(round(merged_info["Speedup Error (abs)"].abs().median(),2)))
print("Max speedup error (abs diff) = " + str(round(merged_info["Speedup Error (abs)"].abs().max(),2)))
print("Average speedup error (abs pct) = " + str(round(merged_info["Speedup Error (pct)"].abs().mean(),2)))
print("Median speedup error (abs pct) = " + str(round(merged_info["Speedup Error (pct)"].abs().median(),2)))
print("Max speedup error (abs pct) = " + str(round(merged_info["Speedup Error (pct)"].abs().max(),2)))
| spark-rapids-tools-dev | user_tools/custom_speedup_factors/validate_qualification_estimates.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build helpers."""
import datetime
import os
def get_version(main=None):
if main is None:
# pylint: disable=import-outside-toplevel
from spark_rapids_pytools import VERSION as main
suffix = ''
nightly = os.environ.get('USERTOOLS_NIGHTLY')
if nightly == '1':
suffix = '.dev' + datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
return main + suffix
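# Illustrative behavior (a sketch; the timestamp below is only an example of the UTC build time):
#   get_version('23.08.2')                             -> '23.08.2'
#   get_version('23.08.2') with USERTOOLS_NIGHTLY='1'  -> '23.08.2.dev20230815103000'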
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/build.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper class to run tools associated with RAPIDS Accelerator for Apache Spark plugin."""
import fire
from spark_rapids_pytools.wrappers.databricks_aws_wrapper import DBAWSWrapper
from spark_rapids_pytools.wrappers.databricks_azure_wrapper import DBAzureWrapper
from spark_rapids_pytools.wrappers.dataproc_wrapper import DataprocWrapper
from spark_rapids_pytools.wrappers.emr_wrapper import EMRWrapper
from spark_rapids_pytools.wrappers.onprem_wrapper import OnPremWrapper
def main():
fire.Fire({
'emr': EMRWrapper,
'dataproc': DataprocWrapper,
'databricks-aws': DBAWSWrapper,
'databricks-azure': DBAzureWrapper,
'onprem': OnPremWrapper
})
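# Illustrative CLI invocations routed through this entry point (a sketch; cluster names,
# eventlog paths and bucket names are hypothetical):
#   spark_rapids_user_tools emr qualification --cpu_cluster my-cluster --eventlogs s3://bucket/logs
#   spark_rapids_user_tools onprem profiling --eventlogs /path/to/eventlogs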
if __name__ == '__main__':
main()
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/wrapper.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the spark_rapids_pytools package."""
from spark_rapids_pytools.build import get_version
VERSION = '23.08.2'
__version__ = get_version(VERSION)
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper class to run tools associated with RAPIDS Accelerator for Apache Spark plugin on AWS-EMR."""
from spark_rapids_tools import CspEnv
from spark_rapids_pytools.cloud_api.sp_types import DeployMode
from spark_rapids_pytools.common.utilities import ToolLogging
from spark_rapids_pytools.rapids.bootstrap import Bootstrap
from spark_rapids_pytools.rapids.diagnostic import Diagnostic
from spark_rapids_pytools.rapids.qualification import QualFilterApp, QualificationAsLocal, \
QualGpuClusterReshapeType
from spark_rapids_pytools.rapids.profiling import ProfilingAsLocal
class CliEmrLocalMode: # pylint: disable=too-few-public-methods
"""
A wrapper that runs the RAPIDS Accelerator tools locally on the dev machine.
"""
@staticmethod
def qualification(cpu_cluster: str = None,
eventlogs: str = None,
profile: str = None,
local_folder: str = None,
remote_folder: str = None,
gpu_cluster: str = None,
tools_jar: str = None,
filter_apps: str = QualFilterApp.tostring(QualFilterApp.SAVINGS),
gpu_cluster_recommendation: str = QualGpuClusterReshapeType.tostring(
QualGpuClusterReshapeType.get_default()),
jvm_heap_size: int = 24,
verbose: bool = False,
**rapids_options) -> None:
"""
The Qualification tool analyzes Spark events generated from CPU based Spark applications to
help quantify the expected acceleration and cost savings of migrating a Spark application
or query to GPU. The wrapper downloads dependencies and executes the analysis on the local
dev machine.
:param cpu_cluster: The EMR-cluster on which the Spark applications were executed. The argument
can be an EMR-cluster or a valid path to the cluster's properties file (json format)
generated by the AWS CLI
:param eventlogs: Event log filenames or S3 storage directories
containing event logs (comma separated). If missing, the wrapper reads the Spark
property `spark.eventLog.dir` defined in `cpu_cluster`. This property should be included
in the output of `aws emr describe-cluster`. Note that the wrapper will raise an exception
if the property is not set
:param profile: A named AWS profile to get the settings/credentials of the AWS account
:param local_folder: Local work-directory path to store the output and to be used as root
directory for temporary folders/files. The final output will go into a subdirectory called
${local_folder}/qual-${EXEC_ID} where exec_id is an auto-generated unique identifier of the
execution. If the argument is NONE, the default value is the env variable
RAPIDS_USER_TOOLS_OUTPUT_DIRECTORY if any; or the current working directory.
:param remote_folder: An S3 folder where the output is uploaded at the end of execution.
If no value is provided, the output will be only available on local disk
:param gpu_cluster: The EMR-cluster on which the Spark applications are planned to be migrated.
The argument can be an EMR-cluster or a valid path to the cluster's properties file
(json format) generated by the AWS CLI. If missing, the wrapper maps the EC2 machine
instances of the original cluster into EC2 instances that support GPU acceleration
:param tools_jar: Path to a bundled jar including Rapids tool. The path is a local filesystem,
or remote S3 url. If missing, the wrapper downloads the latest rapids-4-spark-tools_*.jar
from maven repo
:param filter_apps: filtering criteria of the applications listed in the final STDOUT table
is one of the following (ALL, SPEEDUPS, SAVINGS). Default is "SAVINGS".
Note that this filter does not affect the CSV report.
"ALL" means no filter applied. "SPEEDUPS" lists all the apps that are either
'Recommended', or 'Strongly Recommended' based on speedups. "SAVINGS"
lists all the apps that have positive estimated GPU savings except for the apps that
are "Not Applicable"
:param gpu_cluster_recommendation: The type of GPU cluster recommendation to generate.
It accepts one of the following ("CLUSTER", "JOB" and the default value "MATCH").
"MATCH": keep GPU cluster same number of nodes as CPU cluster;
"CLUSTER": recommend optimal GPU cluster by cost for entire cluster;
"JOB": recommend optimal GPU cluster by cost per job
:param jvm_heap_size: The maximum heap size of the JVM in gigabytes
:param verbose: True or False to enable verbosity to the wrapper script
:param rapids_options: A list of valid Qualification tool options.
Note that the wrapper ignores ["output-directory", "platform"] flags, and it does not support
multiple "spark-property" arguments.
For more details on Qualification tool options, please visit
https://nvidia.github.io/spark-rapids/docs/spark-qualification-tool.html#qualification-tool-options
"""
if verbose:
# when debug is set to true set it in the environment.
ToolLogging.enable_debug_mode()
wrapper_qual_options = {
'platformOpts': {
'profile': profile,
'deployMode': DeployMode.LOCAL,
},
'migrationClustersProps': {
'cpuCluster': cpu_cluster,
'gpuCluster': gpu_cluster
},
'jobSubmissionProps': {
'remoteFolder': remote_folder,
'platformArgs': {
'jvmMaxHeapSize': jvm_heap_size
}
},
'eventlogs': eventlogs,
'filterApps': filter_apps,
'toolsJar': tools_jar,
'gpuClusterRecommendation': gpu_cluster_recommendation
}
QualificationAsLocal(platform_type=CspEnv.EMR,
cluster=None,
output_folder=local_folder,
wrapper_options=wrapper_qual_options,
rapids_options=rapids_options).launch()
@staticmethod
def profiling(gpu_cluster: str = None,
worker_info: str = None,
eventlogs: str = None,
profile: str = None,
local_folder: str = None,
remote_folder: str = None,
tools_jar: str = None,
jvm_heap_size: int = 24,
verbose: bool = False,
**rapids_options) -> None:
"""
The Profiling tool analyzes both CPU- and GPU-generated event logs and generates information
which can be used for debugging and profiling Apache Spark applications.
:param gpu_cluster: The EMR-cluster on which the Spark applications were executed. The argument
can be an EMR-cluster or a valid path to the cluster's properties file (json format)
generated by the AWS CLI. If missing, then the argument worker_info has to be provided.
:param worker_info: A path pointing to a yaml file containing the system information of a
worker node. It is assumed that all workers are homogenous.
If missing, the wrapper pulls the worker info from the "gpu_cluster".
:param eventlogs: Event log filenames or S3 storage directories
containing event logs (comma separated). If missing, the wrapper reads the Spark
property `spark.eventLog.dir` defined in `gpu_cluster`. This property should be included
in the output of `aws emr describe-cluster`. Note that the wrapper will raise an exception
if the property is not set.
:param profile: A named AWS profile to get the settings/credentials of the AWS account.
:param local_folder: Local work-directory path to store the output and to be used as root
directory for temporary folders/files. The final output will go into a subdirectory called
${local_folder}/prof-${EXEC_ID} where exec_id is an auto-generated unique identifier of the
execution. If the argument is NONE, the default value is the env variable
RAPIDS_USER_TOOLS_OUTPUT_DIRECTORY if any; or the current working directory.
:param remote_folder: An S3 folder where the output is uploaded at the end of execution.
If no value is provided, the output will be only available on local disk.
:param tools_jar: Path to a bundled jar including Rapids tool. The path is a local filesystem,
or remote S3 url. If missing, the wrapper downloads the latest rapids-4-spark-tools_*.jar
from maven repo.
:param verbose: True or False to enable verbosity to the wrapper script.
:param jvm_heap_size: The maximum heap size of the JVM in gigabytes.
:param rapids_options: A list of valid Profiling tool options.
Note that the wrapper ignores ["output-directory", "worker-info"] flags, and it does not support
multiple "spark-property" arguments.
For more details on Profiling tool options, please visit
https://nvidia.github.io/spark-rapids/docs/spark-profiling-tool.html#profiling-tool-options
"""
if verbose:
# when debug is set to true set it in the environment.
ToolLogging.enable_debug_mode()
wrapper_prof_options = {
'platformOpts': {
'profile': profile,
'deployMode': DeployMode.LOCAL,
},
'migrationClustersProps': {
'gpuCluster': gpu_cluster
},
'jobSubmissionProps': {
'remoteFolder': remote_folder,
'platformArgs': {
'jvmMaxHeapSize': jvm_heap_size
}
},
'eventlogs': eventlogs,
'toolsJar': tools_jar,
'autoTunerFileInput': worker_info
}
ProfilingAsLocal(platform_type=CspEnv.EMR,
output_folder=local_folder,
wrapper_options=wrapper_prof_options,
rapids_options=rapids_options).launch()
@staticmethod
def bootstrap(cluster: str,
profile: str = None,
output_folder: str = None,
dry_run: bool = True,
key_pair_path: str = None,
verbose: bool = False) -> None:
"""
Bootstrap tool analyzes the CPU and GPU configuration of the EMR cluster
and updates the Spark default configuration on the cluster's master nodes.
:param cluster: Name of the EMR cluster running an accelerated computing instance class g4dn.*
:param profile: A named AWS profile to get the settings/credentials of the AWS account.
:param output_folder: Local path where the final recommendations will be saved.
Note that this argument only accepts local filesystem. If the argument is NONE,
the default value is the env variable "RAPIDS_USER_TOOLS_OUTPUT_DIRECTORY" if any;
or the current working directory.
:param dry_run: True or False to update the Spark config settings on EMR master node.
:param key_pair_path: A '.pem' file path that enables connecting to EC2 instances using SSH.
If missing, the wrapper reads the env variable 'RAPIDS_USER_TOOLS_KEY_PAIR_PATH' if any.
For more details on creating key pairs,
visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-key-pairs.html.
:param verbose: True or False to enable verbosity to the wrapper script.
"""
if verbose:
# when debug is set to true set it in the environment.
ToolLogging.enable_debug_mode()
wrapper_boot_options = {
'platformOpts': {
'profile': profile,
'keyPairPath': key_pair_path
},
'dryRun': dry_run
}
bootstrap_tool = Bootstrap(platform_type=CspEnv.EMR,
cluster=cluster,
output_folder=output_folder,
wrapper_options=wrapper_boot_options)
bootstrap_tool.launch()
@staticmethod
def diagnostic(cluster: str,
profile: str = None,
output_folder: str = None,
key_pair_path: str = None,
thread_num: int = 3,
yes: bool = False,
verbose: bool = False) -> None:
"""
Diagnostic tool to collect information from an EMR cluster, such as OS version, number of worker nodes,
Yarn configuration, Spark version, error logs, etc. Please note that some sensitive information might
be collected by this tool, e.g. access secrets configured in configuration files or dumped to log files.
:param cluster: Name of the EMR cluster running an accelerated computing instance class g4dn.*
:param profile: A named AWS profile to get the settings/credentials of the AWS account.
:param output_folder: Local path where the archived result will be saved.
Note that this argument only accepts local filesystem. If the argument is NONE,
the default value is the env variable "RAPIDS_USER_TOOLS_OUTPUT_DIRECTORY" if any;
or the current working directory.
:param key_pair_path: A '.pem' file path that enables connecting to EC2 instances using SSH.
If missing, the wrapper reads the env variable 'RAPIDS_USER_TOOLS_KEY_PAIR_PATH' if any.
For more details on creating key pairs,
visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-key-pairs.html.
:param thread_num: Number of threads to access remote cluster nodes in parallel. The valid value
is 1~10. The default value is 3.
:param yes: auto-confirm interactive questions.
:param verbose: True or False to enable verbosity to the wrapper script.
"""
if verbose:
# when debug is set to true set it in the environment.
ToolLogging.enable_debug_mode()
wrapper_diag_options = {
'platformOpts': {
'profile': profile,
'keyPairPath': key_pair_path,
},
'threadNum': thread_num,
'yes': yes,
}
diag_tool = Diagnostic(platform_type=CspEnv.EMR,
cluster=cluster,
output_folder=output_folder,
wrapper_options=wrapper_diag_options)
diag_tool.launch()
class EMRWrapper: # pylint: disable=too-few-public-methods
"""
A wrapper script to run RAPIDS Accelerator tools (Qualification, Profiling, Bootstrap, and Diagnostic) on Amazon EMR.
"""
def __init__(self):
self.qualification = CliEmrLocalMode.qualification
self.profiling = CliEmrLocalMode.profiling
self.bootstrap = CliEmrLocalMode.bootstrap
self.diagnostic = CliEmrLocalMode.diagnostic
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/wrappers/emr_wrapper.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper class to run tools associated with RAPIDS Accelerator for Apache Spark plugin on DATABRICKS_AZURE."""
from spark_rapids_tools import CspEnv
from spark_rapids_pytools.cloud_api.sp_types import DeployMode
from spark_rapids_pytools.common.utilities import ToolLogging
from spark_rapids_pytools.rapids.profiling import ProfilingAsLocal
from spark_rapids_pytools.rapids.qualification import QualFilterApp, QualificationAsLocal, QualGpuClusterReshapeType
class CliDBAzureLocalMode: # pylint: disable=too-few-public-methods
"""
A wrapper that runs the RAPIDS Accelerator tools locally on the dev machine for DATABRICKS_AZURE.
"""
@staticmethod
def qualification(cpu_cluster: str = None,
eventlogs: str = None,
profile: str = None,
local_folder: str = None,
remote_folder: str = None,
gpu_cluster: str = None,
tools_jar: str = None,
credentials_file: str = None,
filter_apps: str = QualFilterApp.tostring(QualFilterApp.SAVINGS),
gpu_cluster_recommendation: str = QualGpuClusterReshapeType.tostring(
QualGpuClusterReshapeType.get_default()),
jvm_heap_size: int = 24,
verbose: bool = False,
**rapids_options) -> None:
"""
The Qualification tool analyzes Spark events generated from CPU based Spark applications to
help quantify the expected acceleration and cost savings of migrating a Spark application
or query to GPU. The wrapper downloads dependencies and executes the analysis on the local
dev machine.
:param cpu_cluster: The Databricks-cluster on which the Spark applications were executed. The argument
can be a Databricks-cluster or a valid path to the cluster's properties file (json format)
generated by the databricks-CLI.
:param eventlogs: Event log filenames or ABFS (Azure Blob File System) storage directories
containing event logs (comma separated). If missing, the wrapper reads the Spark
property `spark.eventLog.dir` defined in `cpu_cluster`. This property should be included
in the output of `databricks clusters get [--cluster-id CLUSTER_ID| --cluster-name CLUSTER_NAME]`.
Note that the wrapper will raise an exception if the property is not set.
:param profile: A named Databricks profile to get the settings/credentials of the Databricks CLI.
:param local_folder: Local work-directory path to store the output and to be used as root
directory for temporary folders/files. The final output will go into a subdirectory called
${local_folder}/qual-${EXEC_ID} where exec_id is an auto-generated unique identifier of the
execution. If the argument is NONE, the default value is the env variable
RAPIDS_USER_TOOLS_OUTPUT_DIRECTORY if any; or the current working directory.
:param remote_folder: An ABFS (Azure Blob File System) folder where the output is uploaded at the end
of execution. If no value is provided, the output will be only available on local disk.
:param gpu_cluster: The Databricks-cluster on which the Spark applications are planned to be migrated.
The argument can be a Databricks-cluster or a valid path to the cluster's properties file
(json format) generated by the databricks-CLI. If missing, the wrapper maps the databricks machine
instances of the original cluster into databricks instances that support GPU acceleration.
:param tools_jar: Path to a bundled jar including Rapids tool. The path is a local filesystem,
or remote ABFS url. If missing, the wrapper downloads the latest rapids-4-spark-tools_*.jar
from maven repo.
:param credentials_file: The local path of JSON file that contains the application credentials.
If missing, the wrapper looks for "DATABRICKS_CONFIG_FILE" environment variable
to provide the location of a credential file. The default credentials file exists as
"~/.databrickscfg" on Unix, Linux, or macOS
:param filter_apps: filtering criteria of the applications listed in the final STDOUT table
is one of the following (ALL, SPEEDUPS, SAVINGS).
Note that this filter does not affect the CSV report.
"ALL" means no filter applied. "SPEEDUPS" lists all the apps that are either
'Recommended', or 'Strongly Recommended' based on speedups. "SAVINGS"
lists all the apps that have positive estimated GPU savings except for the apps that
are "Not Applicable".
:param gpu_cluster_recommendation: The type of GPU cluster recommendation to generate.
It accepts one of the following ("CLUSTER", "JOB" and the default value "MATCH").
"MATCH": keep GPU cluster same number of nodes as CPU cluster;
"CLUSTER": recommend optimal GPU cluster by cost for entire cluster;
"JOB": recommend optimal GPU cluster by cost per job
:param verbose: True or False to enable verbosity to the wrapper script.
:param jvm_heap_size: The maximum heap size of the JVM in gigabytes.
:param rapids_options: A list of valid Qualification tool options.
Note that the wrapper ignores ["output-directory", "platform"] flags, and it does not support
multiple "spark-property" arguments.
For more details on Qualification tool options, please visit
https://nvidia.github.io/spark-rapids/docs/spark-qualification-tool.html#qualification-tool-options
"""
if verbose:
# when debug is set to true set it in the environment.
ToolLogging.enable_debug_mode()
wrapper_qual_options = {
'platformOpts': {
# the databricks profile
'profile': profile,
'credentialFile': credentials_file,
'deployMode': DeployMode.LOCAL,
},
'migrationClustersProps': {
'cpuCluster': cpu_cluster,
'gpuCluster': gpu_cluster
},
'jobSubmissionProps': {
'remoteFolder': remote_folder,
'platformArgs': {
'jvmMaxHeapSize': jvm_heap_size
}
},
'eventlogs': eventlogs,
'filterApps': filter_apps,
'toolsJar': tools_jar,
'gpuClusterRecommendation': gpu_cluster_recommendation
}
QualificationAsLocal(platform_type=CspEnv.DATABRICKS_AZURE,
cluster=None,
output_folder=local_folder,
wrapper_options=wrapper_qual_options,
rapids_options=rapids_options).launch()
@staticmethod
def profiling(gpu_cluster: str = None,
worker_info: str = None,
eventlogs: str = None,
profile: str = None,
local_folder: str = None,
remote_folder: str = None,
tools_jar: str = None,
credentials_file: str = None,
jvm_heap_size: int = 24,
verbose: bool = False,
**rapids_options) -> None:
"""
The Profiling tool analyzes both CPU- and GPU-generated event logs and generates information
which can be used for debugging and profiling Apache Spark applications.
:param gpu_cluster: The Databricks-cluster on which the Spark applications were executed. The argument
can be a Databricks-cluster or a valid path to the cluster's properties file (json format)
generated by the databricks-CLI. If missing, then the argument worker_info has to be provided.
:param worker_info: A path pointing to a yaml file containing the system information of a
worker node. It is assumed that all workers are homogeneous.
If missing, the wrapper pulls the worker info from the "gpu_cluster".
:param eventlogs: Event log filenames or ABFS (Azure Blob File System) storage directories
containing event logs (comma separated). If missing, the wrapper reads the Spark
property `spark.eventLog.dir` defined in `gpu_cluster`. This property should be included
in the output of `databricks clusters get [--cluster-id CLUSTER_ID| --cluster-name CLUSTER_NAME]`.
Note that the wrapper will raise an exception if the property is not set.
:param profile: A named Databricks profile to get the settings/credentials of the Databricks CLI.
:param local_folder: Local work-directory path to store the output and to be used as root
directory for temporary folders/files. The final output will go into a subdirectory called
${local_folder}/prof-${EXEC_ID} where exec_id is an auto-generated unique identifier of the
execution. If the argument is NONE, the default value is the env variable
RAPIDS_USER_TOOLS_OUTPUT_DIRECTORY if any; or the current working directory.
:param remote_folder: An ABFS (Azure Blob File System) folder where the output is uploaded at the end
of execution. If no value is provided, the output will be only available on local disk.
:param tools_jar: Path to a bundled jar including Rapids tool. The path is a local filesystem,
or remote ABFS url. If missing, the wrapper downloads the latest rapids-4-spark-tools_*.jar
from maven repo.
:param credentials_file: The local path of JSON file that contains the application credentials.
If missing, the wrapper looks for "DATABRICKS_CONFIG_FILE" environment variable
to provide the location of a credential file. The default credentials file exists as
"~/.databrickscfg" on Unix, Linux, or macOS.
:param verbose: True or False to enable verbosity to the wrapper script.
:param jvm_heap_size: The maximum heap size of the JVM in gigabytes.
:param rapids_options: A list of valid Profiling tool options.
Note that the wrapper ignores ["output-directory", "worker-info"] flags, and it does not support
multiple "spark-property" arguments.
For more details on Profiling tool options, please visit
https://nvidia.github.io/spark-rapids/docs/spark-profiling-tool.html#profiling-tool-options
"""
if verbose:
# when debug is set to true set it in the environment.
ToolLogging.enable_debug_mode()
wrapper_prof_options = {
'platformOpts': {
# the databricks profile
'profile': profile,
'credentialFile': credentials_file,
'deployMode': DeployMode.LOCAL,
},
'migrationClustersProps': {
'gpuCluster': gpu_cluster
},
'jobSubmissionProps': {
'remoteFolder': remote_folder,
'platformArgs': {
'jvmMaxHeapSize': jvm_heap_size
}
},
'eventlogs': eventlogs,
'toolsJar': tools_jar,
'autoTunerFileInput': worker_info
}
ProfilingAsLocal(platform_type=CspEnv.DATABRICKS_AZURE,
output_folder=local_folder,
wrapper_options=wrapper_prof_options,
rapids_options=rapids_options).launch()
class DBAzureWrapper: # pylint: disable=too-few-public-methods
"""
A wrapper script to run RAPIDS Accelerator tools (Qualification and Profiling) on Databricks_Azure.
"""
def __init__(self):
self.qualification = CliDBAzureLocalMode.qualification
self.profiling = CliDBAzureLocalMode.profiling
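# --------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module). The cluster name, ABFS urls and folders below are hypothetical,
# and running it requires a configured Databricks CLI profile plus Azure
# storage credentials.
# --------------------------------------------------------------------------
if __name__ == '__main__':
    _wrapper = DBAzureWrapper()
    # Qualify CPU event logs stored on ABFS and write the report locally.
    _wrapper.qualification(eventlogs='abfss://logs@myaccount.dfs.core.windows.net/eventlogs/',
                           local_folder='./qual_output')
    # Profile GPU event logs using the properties of an existing GPU cluster.
    _wrapper.profiling(gpu_cluster='my-gpu-cluster',
                       eventlogs='abfss://logs@myaccount.dfs.core.windows.net/eventlogs/')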
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/wrappers/databricks_azure_wrapper.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the wrappers package."""
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/wrappers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper class to run tools associated with RAPIDS Accelerator for Apache Spark plugin on Dataproc."""
from spark_rapids_tools import CspEnv
from spark_rapids_pytools.cloud_api.sp_types import DeployMode
from spark_rapids_pytools.common.utilities import ToolLogging
from spark_rapids_pytools.rapids.bootstrap import Bootstrap
from spark_rapids_pytools.rapids.diagnostic import Diagnostic
from spark_rapids_pytools.rapids.profiling import ProfilingAsLocal
from spark_rapids_pytools.rapids.qualification import QualFilterApp, QualificationAsLocal, QualGpuClusterReshapeType
class CliDataprocLocalMode: # pylint: disable=too-few-public-methods
"""
A wrapper that runs the RAPIDS Accelerator tools locally on the dev machine for Dataproc.
"""
@staticmethod
def qualification(cpu_cluster: str = None,
eventlogs: str = None,
local_folder: str = None,
remote_folder: str = None,
gpu_cluster: str = None,
tools_jar: str = None,
credentials_file: str = None,
filter_apps: str = QualFilterApp.tostring(QualFilterApp.SAVINGS),
gpu_cluster_recommendation: str = QualGpuClusterReshapeType.tostring(
QualGpuClusterReshapeType.get_default()),
jvm_heap_size: int = 24,
verbose: bool = False,
**rapids_options) -> None:
"""
The Qualification tool analyzes Spark events generated from CPU-based Spark applications to
help quantify the expected acceleration and cost savings of migrating a Spark application
or query to GPU. The wrapper downloads dependencies and executes the analysis on the local
dev machine.
:param cpu_cluster: The Dataproc-cluster on which the Spark applications were executed. The argument
can be a Dataproc-cluster or a valid path to the cluster's properties file (json format)
generated by the gcloud-CLI.
:param eventlogs: Event log filenames or gcs storage directories
containing event logs (comma separated). If missing, the wrapper reads the Spark's
property `spark.eventLog.dir` defined in `cpu_cluster`. This property should be included
in the output of `gcloud dataproc clusters describe`.
Note that the wrapper will raise an exception if the property is not set.
:param local_folder: Local work-directory path to store the output and to be used as root
directory for temporary folders/files. The final output will go into a subdirectory called
${local_folder}/qual-${EXEC_ID} where exec_id is an auto-generated unique identifier of the
execution. If the argument is NONE, the default value is the env variable
RAPIDS_USER_TOOLS_OUTPUT_DIRECTORY if any; or the current working directory
:param remote_folder: A gcs folder where the output is uploaded at the end of execution.
If no value is provided, the output will be only available on local disk
:param gpu_cluster: The Dataproc-cluster to which the Spark applications are planned to be migrated.
The argument can be a Dataproc-cluster or a valid path to the cluster's properties file
(json format) generated by the gcloud-CLI. If missing, the wrapper maps the dataproc machine
instances of the original cluster into dataproc instances that support GPU acceleration
:param tools_jar: Path to a bundled jar including Rapids tool. The path is a local filesystem,
or remote gcs url. If missing, the wrapper downloads the latest rapids-4-spark-tools_*.jar
from maven repo
:param credentials_file: The local path of JSON file that contains the application credentials.
If missing, the wrapper looks for "GOOGLE_APPLICATION_CREDENTIALS" environment variable
to provide the location of a credential JSON file. The default credentials file exists as
"$HOME/.config/gcloud/application_default_credentials.json"
:param filter_apps: Filtering criteria of the applications listed in the final STDOUT table,
one of the following (ALL, SPEEDUPS, SAVINGS).
Note that this filter does not affect the CSV report.
"ALL" means no filter applied. "SPEEDUPS" lists all the apps that are either
'Recommended', or 'Strongly Recommended' based on speedups. "SAVINGS"
lists all the apps that have positive estimated GPU savings except for the apps that
are "Not Applicable"
:param gpu_cluster_recommendation: The type of GPU cluster recommendation to generate.
It accepts one of the following ("CLUSTER", "JOB" and the default value "MATCH").
"MATCH": keep GPU cluster same number of nodes as CPU cluster;
"CLUSTER": recommend optimal GPU cluster by cost for entire cluster;
"JOB": recommend optimal GPU cluster by cost per job
:param jvm_heap_size: The maximum heap size of the JVM in gigabytes
:param verbose: True or False to enable verbosity to the wrapper script
:param rapids_options: A list of valid Qualification tool options.
Note that the wrapper ignores ["output-directory", "platform"] flags, and it does not support
multiple "spark-property" arguments.
For more details on Qualification tool options, please visit
https://nvidia.github.io/spark-rapids/docs/spark-qualification-tool.html#qualification-tool-options
"""
if verbose:
# when debug is set to true set it in the environment.
ToolLogging.enable_debug_mode()
wrapper_qual_options = {
'platformOpts': {
'credentialFile': credentials_file,
'deployMode': DeployMode.LOCAL,
},
'migrationClustersProps': {
'cpuCluster': cpu_cluster,
'gpuCluster': gpu_cluster
},
'jobSubmissionProps': {
'remoteFolder': remote_folder,
'platformArgs': {
'jvmMaxHeapSize': jvm_heap_size
}
},
'eventlogs': eventlogs,
'filterApps': filter_apps,
'toolsJar': tools_jar,
'gpuClusterRecommendation': gpu_cluster_recommendation
}
tool_obj = QualificationAsLocal(platform_type=CspEnv.DATAPROC,
output_folder=local_folder,
wrapper_options=wrapper_qual_options,
rapids_options=rapids_options)
tool_obj.launch()
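# Illustrative example (editor's note): with a gcloud-configured environment, the
# qualification entry point above can be driven directly from Python; the cluster
# name and bucket below are hypothetical placeholders.
#
#     CliDataprocLocalMode.qualification(cpu_cluster='my-cpu-cluster',
#                                        eventlogs='gs://my-bucket/eventlogs/',
#                                        local_folder='./qual_output')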
@staticmethod
def profiling(gpu_cluster: str = None,
worker_info: str = None,
eventlogs: str = None,
local_folder: str = None,
remote_folder: str = None,
tools_jar: str = None,
credentials_file: str = None,
jvm_heap_size: int = 24,
verbose: bool = False,
**rapids_options) -> None:
"""
The Profiling tool analyzes CPU- or GPU-generated event logs and generates information
that can be used for debugging and profiling Apache Spark applications.
:param gpu_cluster: The Dataproc-cluster on which the Spark applications were executed. The argument
can be a Dataproc-cluster or a valid path to the cluster's properties file (json format)
generated by the gcloud-CLI. If missing, then the argument worker_info has to be provided
:param worker_info: A path pointing to a yaml file containing the system information of a
worker node. It is assumed that all workers are homogeneous.
If missing, the wrapper pulls the worker info from the "gpu_cluster"
:param eventlogs: Event log filenames or gcs directories
containing event logs (comma separated). If missing, the wrapper reads the Spark's
property `spark.eventLog.dir` defined in `gpu_cluster`. This property should be included
in the output of `gcloud dataproc clusters describe`.
Note that the wrapper will raise an exception if the property is not set
:param local_folder: Local work-directory path to store the output and to be used as root
directory for temporary folders/files. The final output will go into a subdirectory called
${local_folder}/prof-${EXEC_ID} where exec_id is an auto-generated unique identifier of the
execution. If the argument is NONE, the default value is the env variable
RAPIDS_USER_TOOLS_OUTPUT_DIRECTORY if any; or the current working directory.
:param remote_folder: A gcs folder where the output is uploaded at the end of execution.
If no value is provided, the output will be only available on local disk
:param tools_jar: Path to a bundled jar including Rapids tool. The path is a local filesystem,
or remote gcs url. If missing, the wrapper downloads the latest rapids-4-spark-tools_*.jar
from maven repo
:param credentials_file: The local path of JSON file that contains the application credentials.
If missing, the wrapper looks for "GOOGLE_APPLICATION_CREDENTIALS" environment variable
to provide the location of a credential JSON file. The default credentials file exists as
"$HOME/.config/gcloud/application_default_credentials.json"
:param verbose: True or False to enable verbosity to the wrapper script
:param jvm_heap_size: The maximum heap size of the JVM in gigabytes
:param rapids_options: A list of valid Profiling tool options.
Note that the wrapper ignores ["output-directory", "worker-info"] flags, and it does not support
multiple "spark-property" arguments.
For more details on Profiling tool options, please visit
https://nvidia.github.io/spark-rapids/docs/spark-profiling-tool.html#profiling-tool-options
"""
if verbose:
# when debug is set to true set it in the environment.
ToolLogging.enable_debug_mode()
wrapper_prof_options = {
'platformOpts': {
'credentialFile': credentials_file,
'deployMode': DeployMode.LOCAL,
},
'migrationClustersProps': {
'gpuCluster': gpu_cluster
},
'jobSubmissionProps': {
'remoteFolder': remote_folder,
'platformArgs': {
'jvmMaxHeapSize': jvm_heap_size
}
},
'eventlogs': eventlogs,
'toolsJar': tools_jar,
'autoTunerFileInput': worker_info
}
ProfilingAsLocal(platform_type=CspEnv.DATAPROC,
output_folder=local_folder,
wrapper_options=wrapper_prof_options,
rapids_options=rapids_options).launch()
@staticmethod
def bootstrap(cluster: str,
output_folder: str = None,
dry_run: bool = True,
verbose: bool = False) -> None:
"""
Bootstrap tool analyzes the CPU and GPU configuration of the Dataproc cluster
and updates the Spark default configuration on the cluster's master nodes
:param cluster: Name of the Dataproc cluster running an accelerated computing instance class
:param output_folder: Local path where the final recommendations will be saved.
Note that this argument only accepts local filesystem. If the argument is NONE,
the default value is the env variable "RAPIDS_USER_TOOLS_OUTPUT_DIRECTORY" if any;
or the current working directory
:param dry_run: True or False. When True, the tool only reports the recommended Spark
configurations without applying them to the Dataproc driver node.
:param verbose: True or False to enable verbosity to the wrapper script.
"""
if verbose:
# when debug is set to true set it in the environment.
ToolLogging.enable_debug_mode()
wrapper_boot_options = {
'platformOpts': {},
'dryRun': dry_run
}
bootstrap_tool = Bootstrap(platform_type=CspEnv.DATAPROC,
cluster=cluster,
output_folder=output_folder,
wrapper_options=wrapper_boot_options)
bootstrap_tool.launch()
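# Illustrative example (editor's note): a dry run of the bootstrap entry point above,
# which only reports the recommended Spark settings; the cluster name is hypothetical.
#
#     CliDataprocLocalMode.bootstrap(cluster='my-gpu-cluster', dry_run=True)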
@staticmethod
def diagnostic(cluster: str,
output_folder: str = None,
thread_num: int = 3,
yes: bool = False,
verbose: bool = False) -> None:
"""
Diagnostic tool to collect information from a Dataproc cluster, such as OS version, number of worker nodes,
Yarn configuration, Spark version, and error logs. Please note that some sensitive information might
be collected by this tool, e.g. access secrets configured in configuration files or dumped to log files.
:param cluster: Name of the Dataproc cluster running an accelerated computing instance class
:param output_folder: Local path where the final recommendations will be saved.
Note that this argument only accepts local filesystem. If the argument is NONE,
the default value is the env variable "RAPIDS_USER_TOOLS_OUTPUT_DIRECTORY" if any;
or the current working directory
:param thread_num: Number of threads to access remote cluster nodes in parallel. Valid values
are 1 to 10. The default value is 3.
:param yes: Automatically confirm the interactive prompt.
:param verbose: True or False to enable verbosity to the wrapper script.
"""
if verbose:
# when debug is set to true set it in the environment.
ToolLogging.enable_debug_mode()
wrapper_diag_options = {
'platformOpts': {},
'threadNum': thread_num,
'yes': yes,
}
diag_tool = Diagnostic(platform_type=CspEnv.DATAPROC,
cluster=cluster,
output_folder=output_folder,
wrapper_options=wrapper_diag_options)
diag_tool.launch()
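# Illustrative example (editor's note): collecting diagnostics from a cluster while
# skipping the interactive confirmation; the cluster name is hypothetical.
#
#     CliDataprocLocalMode.diagnostic(cluster='my-cluster', thread_num=5, yes=True)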
class DataprocWrapper: # pylint: disable=too-few-public-methods
"""
A wrapper script to run RAPIDS Accelerator tools (Qualification, Profiling, and Bootstrap) on Gcloud Dataproc.
"""
def __init__(self):
self.qualification = CliDataprocLocalMode.qualification
self.profiling = CliDataprocLocalMode.profiling
self.bootstrap = CliDataprocLocalMode.bootstrap
self.diagnostic = CliDataprocLocalMode.diagnostic
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/wrappers/dataproc_wrapper.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper class to run tools associated with RAPIDS Accelerator for Apache Spark plugin on DATABRICKS_AWS."""
from spark_rapids_tools import CspEnv
from spark_rapids_pytools.cloud_api.sp_types import DeployMode
from spark_rapids_pytools.common.utilities import ToolLogging
from spark_rapids_pytools.rapids.profiling import ProfilingAsLocal
from spark_rapids_pytools.rapids.qualification import QualFilterApp, QualificationAsLocal, QualGpuClusterReshapeType
class CliDBAWSLocalMode: # pylint: disable=too-few-public-methods
"""
A wrapper that runs the RAPIDS Accelerator tools locally on the dev machine for DATABRICKS_AWS.
"""
@staticmethod
def qualification(cpu_cluster: str = None,
eventlogs: str = None,
profile: str = None,
aws_profile: str = None,
local_folder: str = None,
remote_folder: str = None,
gpu_cluster: str = None,
tools_jar: str = None,
credentials_file: str = None,
filter_apps: str = QualFilterApp.tostring(QualFilterApp.SAVINGS),
gpu_cluster_recommendation: str = QualGpuClusterReshapeType.tostring(
QualGpuClusterReshapeType.get_default()),
jvm_heap_size: int = 24,
verbose: bool = False,
**rapids_options) -> None:
"""
The Qualification tool analyzes Spark events generated from CPU-based Spark applications to
help quantify the expected acceleration and cost savings of migrating a Spark application
or query to GPU. The wrapper downloads dependencies and executes the analysis on the local
dev machine.
:param cpu_cluster: The Databricks-cluster on which the Spark applications were executed. The argument
can be a Databricks-cluster or a valid path to the cluster's properties file (json format)
generated by the databricks-CLI.
:param eventlogs: Event log filenames or S3 storage directories
containing event logs (comma separated). If missing, the wrapper reads the Spark's
property `spark.eventLog.dir` defined in `cpu_cluster`. This property should be included
in the output of `databricks clusters get [--cluster-id CLUSTER_ID| --cluster-name CLUSTER_NAME]`.
Note that the wrapper will raise an exception if the property is not set.
:param profile: A named Databricks profile to get the settings/credentials of the Databricks CLI.
:param aws_profile: A named AWS profile to get the settings/credentials of the AWS account.
:param local_folder: Local work-directory path to store the output and to be used as root
directory for temporary folders/files. The final output will go into a subdirectory called
${local_folder}/qual-${EXEC_ID} where exec_id is an auto-generated unique identifier of the
execution. If the argument is NONE, the default value is the env variable
RAPIDS_USER_TOOLS_OUTPUT_DIRECTORY if any; or the current working directory.
:param remote_folder: An S3 folder where the output is uploaded at the end of execution.
If no value is provided, the output will be only available on local disk.
:param gpu_cluster: The Databricks-cluster to which the Spark applications are planned to be migrated.
The argument can be a Databricks-cluster or a valid path to the cluster's properties file
(json format) generated by the databricks-CLI. If missing, the wrapper maps the databricks machine
instances of the original cluster into databricks instances that support GPU acceleration.
:param tools_jar: Path to a bundled jar including Rapids tool. The path is a local filesystem,
or remote S3 url. If missing, the wrapper downloads the latest rapids-4-spark-tools_*.jar
from maven repo.
:param credentials_file: The local path of JSON file that contains the application credentials.
If missing, the wrapper looks for "DATABRICKS_CONFIG_FILE" environment variable
to provide the location of a credential file. The default credentials file exists as
"~/.databrickscfg" on Unix, Linux, or macOS
:param filter_apps: Filtering criteria of the applications listed in the final STDOUT table,
one of the following (ALL, SPEEDUPS, SAVINGS).
Note that this filter does not affect the CSV report.
"ALL" means no filter applied. "SPEEDUPS" lists all the apps that are either
'Recommended', or 'Strongly Recommended' based on speedups. "SAVINGS"
lists all the apps that have positive estimated GPU savings except for the apps that
are "Not Applicable".
:param gpu_cluster_recommendation: The type of GPU cluster recommendation to generate.
It accepts one of the following ("CLUSTER", "JOB" and the default value "MATCH").
"MATCH": keep GPU cluster same number of nodes as CPU cluster;
"CLUSTER": recommend optimal GPU cluster by cost for entire cluster;
"JOB": recommend optimal GPU cluster by cost per job
:param verbose: True or False to enable verbosity to the wrapper script.
:param jvm_heap_size: The maximum heap size of the JVM in gigabytes.
:param rapids_options: A list of valid Qualification tool options.
Note that the wrapper ignores ["output-directory", "platform"] flags, and it does not support
multiple "spark-property" arguments.
For more details on Qualification tool options, please visit
https://nvidia.github.io/spark-rapids/docs/spark-qualification-tool.html#qualification-tool-options
"""
if verbose:
# when debug is set to true set it in the environment.
ToolLogging.enable_debug_mode()
wrapper_qual_options = {
'platformOpts': {
# the databricks profile
'profile': profile,
'awsProfile': aws_profile,
'credentialFile': credentials_file,
'deployMode': DeployMode.LOCAL,
},
'migrationClustersProps': {
'cpuCluster': cpu_cluster,
'gpuCluster': gpu_cluster
},
'jobSubmissionProps': {
'remoteFolder': remote_folder,
'platformArgs': {
'jvmMaxHeapSize': jvm_heap_size
}
},
'eventlogs': eventlogs,
'filterApps': filter_apps,
'toolsJar': tools_jar,
'gpuClusterRecommendation': gpu_cluster_recommendation
}
QualificationAsLocal(platform_type=CspEnv.DATABRICKS_AWS,
cluster=None,
output_folder=local_folder,
wrapper_options=wrapper_qual_options,
rapids_options=rapids_options).launch()
@staticmethod
def profiling(gpu_cluster: str = None,
worker_info: str = None,
eventlogs: str = None,
profile: str = None,
aws_profile: str = None,
local_folder: str = None,
remote_folder: str = None,
tools_jar: str = None,
credentials_file: str = None,
jvm_heap_size: int = 24,
verbose: bool = False,
**rapids_options) -> None:
"""
The Profiling tool analyzes CPU- or GPU-generated event logs and generates information
that can be used for debugging and profiling Apache Spark applications.
:param gpu_cluster: The Databricks-cluster on which the Spark applications were executed. The argument
can be a Databricks-cluster or a valid path to the cluster's properties file (json format)
generated by the databricks-CLI. If missing, then the argument worker_info has to be provided.
:param worker_info: A path pointing to a yaml file containing the system information of a
worker node. It is assumed that all workers are homogeneous.
If missing, the wrapper pulls the worker info from the "gpu_cluster".
:param eventlogs: Event log filenames or S3 storage directories
containing event logs (comma separated). If missing, the wrapper reads the Spark's
property `spark.eventLog.dir` defined in `gpu_cluster`. This property should be included
in the output of `databricks clusters get [--cluster-id CLUSTER_ID| --cluster-name CLUSTER_NAME]`.
Note that the wrapper will raise an exception if the property is not set.
:param profile: A named Databricks profile to get the settings/credentials of the Databricks CLI.
:param aws_profile: A named AWS profile to get the settings/credentials of the AWS account.
:param local_folder: Local work-directory path to store the output and to be used as root
directory for temporary folders/files. The final output will go into a subdirectory called
${local_folder}/prof-${EXEC_ID} where exec_id is an auto-generated unique identifier of the
execution. If the argument is NONE, the default value is the env variable
RAPIDS_USER_TOOLS_OUTPUT_DIRECTORY if any; or the current working directory.
:param remote_folder: An S3 folder where the output is uploaded at the end of execution.
If no value is provided, the output will be only available on local disk.
:param tools_jar: Path to a bundled jar including Rapids tool. The path is a local filesystem,
or remote S3 url. If missing, the wrapper downloads the latest rapids-4-spark-tools_*.jar
from maven repo.
:param credentials_file: The local path of JSON file that contains the application credentials.
If missing, the wrapper looks for "DATABRICKS_CONFIG_FILE" environment variable
to provide the location of a credential file. The default credentials file exists as
"~/.databrickscfg" on Unix, Linux, or macOS.
:param verbose: True or False to enable verbosity to the wrapper script.
:param jvm_heap_size: The maximum heap size of the JVM in gigabytes.
:param rapids_options: A list of valid Profiling tool options.
Note that the wrapper ignores ["output-directory", "worker-info"] flags, and it does not support
multiple "spark-property" arguments.
For more details on Profiling tool options, please visit
https://nvidia.github.io/spark-rapids/docs/spark-profiling-tool.html#profiling-tool-options
"""
if verbose:
# when debug is set to true set it in the environment.
ToolLogging.enable_debug_mode()
wrapper_prof_options = {
'platformOpts': {
# the databricks profile
'profile': profile,
'awsProfile': aws_profile,
'credentialFile': credentials_file,
'deployMode': DeployMode.LOCAL,
},
'migrationClustersProps': {
'gpuCluster': gpu_cluster
},
'jobSubmissionProps': {
'remoteFolder': remote_folder,
'platformArgs': {
'jvmMaxHeapSize': jvm_heap_size
}
},
'eventlogs': eventlogs,
'toolsJar': tools_jar,
'autoTunerFileInput': worker_info
}
ProfilingAsLocal(platform_type=CspEnv.DATABRICKS_AWS,
output_folder=local_folder,
wrapper_options=wrapper_prof_options,
rapids_options=rapids_options).launch()
class DBAWSWrapper: # pylint: disable=too-few-public-methods
"""
A wrapper script to run RAPIDS Accelerator tools (Qualification and Profiling) on Databricks_AWS.
"""
def __init__(self):
self.qualification = CliDBAWSLocalMode.qualification
self.profiling = CliDBAWSLocalMode.profiling
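# --------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module). Cluster names, S3 urls, profiles and folders are hypothetical,
# and running it requires configured Databricks and AWS CLI profiles.
# --------------------------------------------------------------------------
if __name__ == '__main__':
    _wrapper = DBAWSWrapper()
    # Qualify CPU event logs stored on S3 against an existing CPU cluster.
    _wrapper.qualification(cpu_cluster='my-cpu-cluster',
                           eventlogs='s3://my-bucket/eventlogs/',
                           aws_profile='my-aws-profile',
                           local_folder='./qual_output')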
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/wrappers/databricks_aws_wrapper.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper class to run tools associated with RAPIDS Accelerator for Apache Spark plugin on On-Prem cluster."""
from spark_rapids_tools import CspEnv
from spark_rapids_pytools.cloud_api.sp_types import DeployMode
from spark_rapids_pytools.common.utilities import ToolLogging
from spark_rapids_pytools.rapids.profiling import ProfilingAsLocal
from spark_rapids_pytools.rapids.qualification import QualFilterApp, QualificationAsLocal, QualGpuClusterReshapeType
class CliOnpremLocalMode: # pylint: disable=too-few-public-methods
"""
A wrapper that runs the RAPIDS Accelerator tools locally on the dev machine for OnPrem
platform. Apps are qualified based on speedup.
"""
@staticmethod
def qualification(cpu_cluster: str = None,
eventlogs: str = None,
local_folder: str = None,
tools_jar: str = None,
filter_apps: str = QualFilterApp.tostring(QualFilterApp.SPEEDUPS),
target_platform: str = None,
gpu_cluster_recommendation: str = QualGpuClusterReshapeType.tostring(
QualGpuClusterReshapeType.get_default()),
jvm_heap_size: int = 24,
verbose: bool = False,
**rapids_options) -> None:
"""
The Qualification tool analyzes Spark events generated from CPU-based Spark applications to
help quantify the expected acceleration and cost savings of migrating a Spark application
or query to GPU. The wrapper downloads dependencies and executes the analysis on the local
dev machine.
:param cpu_cluster: The on-premises cluster on which the Apache Spark applications were executed.
The accepted value is a valid path to the cluster properties file (json format).
:param eventlogs: A comma-separated list of URLs pointing to event logs in the local directory.
:param local_folder: Local work-directory path to store the output and to be used as root
directory for temporary folders/files. The final output will go into a subdirectory
named `qual-${EXEC_ID}` where `exec_id` is an auto-generated unique identifier of the execution.
:param tools_jar: Path to a bundled jar including RAPIDS tool. The path is a local filesystem path
:param filter_apps: Filtering criteria of the applications listed in the final STDOUT table is one of
the following (`ALL`, `SPEEDUPS`). "`ALL`" means no filter applied. "`SPEEDUPS`" lists all the
apps that are either '_Recommended_', or '_Strongly Recommended_' based on speedups.
:param target_platform: Cost savings and speedup recommendations for a comparable cluster on target_platform,
based on the on-premises cluster configuration. Currently only `dataproc` is supported as
target_platform. If not provided, the final report will be limited to GPU speedups only,
without cost savings.
:param gpu_cluster_recommendation: The type of GPU cluster recommendation to generate.
It accepts one of the following ("CLUSTER", "JOB" and the default value "MATCH").
"MATCH": keep GPU cluster same number of nodes as CPU cluster;
"CLUSTER": recommend optimal GPU cluster by cost for entire cluster;
"JOB": recommend optimal GPU cluster by cost per job
:param jvm_heap_size: The maximum heap size of the JVM in gigabytes
:param verbose: True or False to enable verbosity to the wrapper script
:param rapids_options: A list of valid Qualification tool options.
Note that the wrapper ignores ["output-directory", "platform"] flags, and it does not support
multiple "spark-property" arguments.
For more details on Qualification tool options, please visit
https://nvidia.github.io/spark-rapids/docs/spark-qualification-tool.html#qualification-tool-options
"""
if verbose:
# when debug is set to true set it in the environment.
ToolLogging.enable_debug_mode()
# if target_platform is specified, check if it's valid supported platform and filter the
# apps based on savings
if target_platform is not None:
if CliOnpremLocalMode.is_target_platform_supported(target_platform):
if cpu_cluster is None:
raise RuntimeError('OnPrem\'s cluster property file is required to calculate '
'savings for ' + target_platform + ' platform')
filter_apps: str = QualFilterApp.tostring(QualFilterApp.SAVINGS)
else:
raise RuntimeError(target_platform + ' platform is currently not supported to calculate savings'
' from OnPrem cluster')
wrapper_qual_options = {
'platformOpts': {
'deployMode': DeployMode.LOCAL,
'targetPlatform': target_platform
},
'migrationClustersProps': {
'cpuCluster': cpu_cluster
},
'jobSubmissionProps': {
'platformArgs': {
'jvmMaxHeapSize': jvm_heap_size
}
},
'eventlogs': eventlogs,
'filterApps': filter_apps,
'toolsJar': tools_jar,
'gpuClusterRecommendation': gpu_cluster_recommendation,
'targetPlatform': target_platform
}
tool_obj = QualificationAsLocal(platform_type=CspEnv.ONPREM,
output_folder=local_folder,
wrapper_options=wrapper_qual_options,
rapids_options=rapids_options)
tool_obj.launch()
@staticmethod
def is_target_platform_supported(target_platform: str):
return target_platform == 'dataproc'
@staticmethod
def profiling(worker_info: str = None,
eventlogs: str = None,
local_folder: str = None,
tools_jar: str = None,
jvm_heap_size: int = 24,
verbose: bool = False,
**rapids_options) -> None:
"""
The Profiling tool analyzes CPU- or GPU-generated event logs and generates information
that can be used for debugging and profiling Apache Spark applications.
:param worker_info: A path pointing to a yaml file containing the system information of a
worker node. It is assumed that all workers are homogeneous.
If missing, the wrapper throws an error.
:param eventlogs: Event log filenames or directories containing event logs (comma separated).
:param local_folder: Local work-directory path to store the output and to be used as root
directory for temporary folders/files. The final output will go into a subdirectory called
${local_folder}/prof-${EXEC_ID} where exec_id is an auto-generated unique identifier of the
execution. If the argument is NONE, the default value is the env variable
RAPIDS_USER_TOOLS_OUTPUT_DIRECTORY if any; or the current working directory.
:param tools_jar: Path to a bundled jar including the RAPIDS tool. The path is a local filesystem path.
If missing, the wrapper downloads the latest rapids-4-spark-tools_*.jar from maven repo.
:param verbose: True or False to enable verbosity to the wrapper script
:param jvm_heap_size: The maximum heap size of the JVM in gigabytes
:param rapids_options: A list of valid Profiling tool options.
Note that the wrapper ignores ["output-directory", "worker-info"] flags, and it does not support
multiple "spark-property" arguments.
For more details on Profiling tool options, please visit
https://nvidia.github.io/spark-rapids/docs/spark-profiling-tool.html#profiling-tool-options
"""
if verbose:
# when debug is set to true set it in the environment.
ToolLogging.enable_debug_mode()
wrapper_prof_options = {
'platformOpts': {
'deployMode': DeployMode.LOCAL,
'targetPlatform': CspEnv.ONPREM
},
'jobSubmissionProps': {
'platformArgs': {
'jvmMaxHeapSize': jvm_heap_size
}
},
'eventlogs': eventlogs,
'toolsJar': tools_jar,
'autoTunerFileInput': worker_info
}
ProfilingAsLocal(platform_type=CspEnv.ONPREM,
output_folder=local_folder,
wrapper_options=wrapper_prof_options,
rapids_options=rapids_options).launch()
class OnPremWrapper: # pylint: disable=too-few-public-methods
"""
A wrapper script to run RAPIDS Accelerator tools (Qualification, Profiling) on On-prem cluster.
"""
def __init__(self):
self.qualification = CliOnpremLocalMode.qualification
self.profiling = CliOnpremLocalMode.profiling
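# --------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module). The paths below are hypothetical placeholders.
# --------------------------------------------------------------------------
if __name__ == '__main__':
    _wrapper = OnPremWrapper()
    # Qualify local CPU event logs; passing target_platform='dataproc' together with a
    # cluster properties file also enables the cost-savings estimate.
    _wrapper.qualification(cpu_cluster='/path/to/onprem_cluster_props.json',
                           eventlogs='/path/to/eventlogs',
                           target_platform='dataproc',
                           local_folder='./qual_output')
    # Profile event logs using a worker-node description file.
    _wrapper.profiling(worker_info='/path/to/worker_info.yaml',
                       eventlogs='/path/to/eventlogs',
                       local_folder='./prof_output')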
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/wrappers/onprem_wrapper.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation class representing wrapper around the RAPIDS acceleration Bootstrap tool."""
from dataclasses import dataclass
from spark_rapids_pytools.cloud_api.sp_types import ClusterBase
from spark_rapids_pytools.common.sys_storage import FSUtil
from spark_rapids_pytools.common.utilities import Utils
from spark_rapids_pytools.rapids.rapids_tool import RapidsTool
@dataclass
class Bootstrap(RapidsTool):
"""
Wrapper layer around Bootstrap Tool.
"""
name = 'bootstrap'
def _process_custom_args(self):
# default to the boolean False; a string default such as 'False' would be truthy under bool()
dry_run_opt = self.wrapper_options.get('dryRun', False)
self.ctxt.set_ctxt('dryRunOpt', bool(dry_run_opt))
def requires_cluster_connection(self) -> bool:
return True
def _run_rapids_tool(self):
"""
Run the bootstrap on the driver node
:return:
"""
self.logger.info('Executing Bootstrap commands on remote cluster to calculate default configurations.')
exec_cluster: ClusterBase = self.get_exec_cluster()
worker_hw_info = exec_cluster.get_worker_hw_info()
self.logger.debug('Worker hardware INFO %s', worker_hw_info)
try:
spark_settings = self._calculate_spark_settings(worker_info=worker_hw_info)
self.ctxt.set_ctxt('bootstrap_results', spark_settings)
self.logger.debug('%s Tool finished calculating recommended Apache Spark configurations for cluster %s: %s',
self.pretty_name(),
self.cluster,
str(spark_settings))
except Exception as e:
self.logger.error('Error while calculating spark configurations')
raise e
def _apply_changes_to_remote_cluster(self):
ssh_cmd = "\"sudo bash -c 'cat >> /etc/spark/conf/spark-defaults.conf'\""
cmd_input = self.ctxt.get_ctxt('wrapperOutputContent')
exec_cluster = self.get_exec_cluster()
try:
exec_cluster.run_cmd_driver(ssh_cmd, cmd_input=cmd_input)
except RuntimeError as re:
self.logger.warning('An exception was raised while applying the '
'recommendation to the cluster: %s', re)
def _process_output(self):
self.logger.info('Processing the result of Spark properties')
tool_result = self.ctxt.get_ctxt('bootstrap_results')
exec_cluster = self.get_exec_cluster()
dry_run = self.ctxt.get_ctxt('dryRunOpt')
if tool_result is not None and any(tool_result):
# write the result to log file
# Now create the new folder
FSUtil.make_dirs(self.ctxt.get_output_folder(), exist_ok=True)
wrapper_out_content_arr = [f'##### BEGIN : RAPIDS bootstrap settings for {exec_cluster.name}']
for conf_key, conf_val in tool_result.items():
wrapper_out_content_arr.append(f'{conf_key}={conf_val}')
wrapper_out_content_arr.append(f'##### END : RAPIDS bootstrap settings for {exec_cluster.name}\n')
shuffle_manager_note = 'Note: to turn on the Spark RAPIDS multithreaded shuffle, you will also\n' \
'have to enable this setting based on the Spark version of your cluster:\n' \
'spark.shuffle.manager=com.nvidia.spark.rapids.spark3xx.RapidsShuffleManager.\n'
wrapper_out_content_arr.append(shuffle_manager_note)
wrapper_out_content = Utils.gen_multiline_str(wrapper_out_content_arr)
self.ctxt.set_ctxt('wrapperOutputContent', wrapper_out_content)
if dry_run:
self.logger.info('Skipping applying configurations to remote cluster %s. DRY_RUN is enabled.',
exec_cluster.name)
else:
# apply the changes to remote cluster
try:
self._apply_changes_to_remote_cluster()
except RuntimeError as err:
self.logger.error('Error applying changes to driver node on cluster %s.', exec_cluster.name)
raise err
# write the result to log file
out_file_path = self.ctxt.get_wrapper_summary_file_path()
self.logger.info('Saving configuration to local file %s', out_file_path)
with open(out_file_path, 'w', encoding='utf-8') as wrapper_output:
wrapper_output.write(wrapper_out_content)
else:
# results are empty
self.ctxt.set_ctxt('wrapperOutputContent',
self._report_results_are_empty())
def _delete_remote_dep_folder(self):
self.logger.debug('%s mode skipping deleting the remote workdir', self.pretty_name())
def _download_remote_output_folder(self):
self.logger.debug('%s skipping downloading the remote output workdir', self.pretty_name())
def _report_tool_full_location(self) -> str:
out_file_path = self.ctxt.get_wrapper_summary_file_path()
return Utils.gen_multiline_str(f'{self.pretty_name()} tool output: {out_file_path}')
def _write_summary(self):
wrapper_out_content = self.ctxt.get_ctxt('wrapperOutputContent')
print(Utils.gen_multiline_str(self._report_tool_full_location(),
'Recommended Configurations:',
wrapper_out_content))
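# Editor's note: based on _process_output() above, the generated summary (and the snippet
# appended to /etc/spark/conf/spark-defaults.conf when dry_run is disabled) takes roughly
# the following shape; the property names and values are placeholders, the real ones come
# from _calculate_spark_settings():
#
#     ##### BEGIN : RAPIDS bootstrap settings for my-cluster
#     <conf_key_1>=<conf_value_1>
#     <conf_key_2>=<conf_value_2>
#     ##### END : RAPIDS bootstrap settings for my-cluster
#     Note: to turn on the Spark RAPIDS multithreaded shuffle, ...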
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/rapids/bootstrap.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Declaration and implementation of the RAPIDS plugin accelerator plugin"""
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/rapids/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract representation of a wrapper Job"""
from dataclasses import dataclass, field
from logging import Logger
from typing import List
from spark_rapids_pytools.common.prop_manager import JSONPropertiesContainer
from spark_rapids_pytools.common.utilities import ToolLogging, Utils
from spark_rapids_pytools.rapids.tool_ctxt import ToolContext
@dataclass
class RapidsJobPropContainer(JSONPropertiesContainer):
"""
Container to manage the properties and arguments needed to submit a job running RAPIDS plugin.
"""
def _init_fields(self):
if self.get_value_silent('rapidsArgs') is None:
self.props['rapidsArgs'] = {}
if self.get_value_silent('sparkConfArgs') is None:
self.props['sparkConfArgs'] = {}
if self.get_value_silent('platformArgs') is None:
self.props['platformArgs'] = {}
def get_jar_file(self):
return self.get_value('rapidsArgs', 'jarFile')
def get_jar_main_class(self):
return self.get_value('rapidsArgs', 'className')
def get_rapids_args(self):
return self.get_value('rapidsArgs', 'jarArgs')
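# Editor's note: an illustrative sketch of the JSON layout this container expects,
# reconstructed from the accessors above and from RapidsLocalJob below; keys and values
# are placeholders rather than a definitive schema:
#
#     {
#       "outputDirectory": "/path/to/workdir/output",
#       "rapidsArgs": {"jarFile": "/path/to/rapids-4-spark-tools.jar",
#                      "className": "<jar main class>",
#                      "jarArgs": ["<tool options>"]},
#       "sparkConfArgs": {},
#       "platformArgs": {"jvmArgs": {"Xmx24g": ""},
#                        "dependencies": ["/path/to/extra-dep.jar"],
#                        "envArgs": {"JAVA_HOME": "/usr/lib/jvm/..."}}
#     }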
@dataclass
class RapidsJob:
"""
A wrapper class to represent the actual execution of a RAPIDS plugin job on the cloud platform.
"""
prop_container: RapidsJobPropContainer
exec_ctxt: ToolContext
output_path: str = field(default=None, init=False)
job_label: str = field(default=None, init=False)
logger: Logger = field(default=None, init=False)
def get_platform_name(self):
return self.exec_ctxt.get_platform_name()
def _init_fields(self):
self.logger = ToolLogging.get_and_setup_logger(f'rapids.tools.submit.{self.job_label}')
self.output_path = self.prop_container.get_value_silent('outputDirectory')
def __post_init__(self):
self._init_fields()
def _get_rapids_args_per_platform(self) -> List[str]:
"""Left as placeholder for future use"""
return []
def _get_persistent_rapids_args(self):
rapids_args = self._get_rapids_args_per_platform()[:]
rapids_args.extend(['--output-directory', self.output_path])
return rapids_args
def _build_rapids_args(self):
rapids_arguments = self._get_persistent_rapids_args()
extra_rapids_args = self.prop_container.get_rapids_args()
if extra_rapids_args is None:
return rapids_arguments
rapids_arguments.extend(extra_rapids_args)
return rapids_arguments
def _build_submission_cmd(self) -> list:
raise NotImplementedError
def _submit_job(self, cmd_args: list) -> str:
raise NotImplementedError
def _print_job_output(self, job_output: str):
stdout_splits = job_output.splitlines()
if len(stdout_splits) > 0:
std_out_lines = Utils.gen_multiline_str([f'\t| {line}' for line in stdout_splits])
stdout_str = f'\n\t<STDOUT>\n{std_out_lines}'
self.logger.info('%s job output:%s', self.get_platform_name(), stdout_str)
def run_job(self):
self.logger.info('Prepare job submission command')
cmd_args = self._build_submission_cmd()
self.logger.info('Running the Rapids Job...')
job_output = self._submit_job(cmd_args)
if not ToolLogging.is_debug_mode_enabled():
# we check the debug level because we do not want the output to be displayed twice
self._print_job_output(job_output)
return job_output
@dataclass
class RapidsLocalJob(RapidsJob):
"""
Implementation of a RAPIDS job that runs locally on the local machine.
"""
def _build_classpath(self):
deps_arr = [self.prop_container.get_jar_file()]
dependencies = self.prop_container.get_value_silent('platformArgs', 'dependencies')
if dependencies is not None:
deps_arr.extend(dependencies)
dps_str = Utils.gen_joined_str(':', deps_arr)
return ['-cp', dps_str]
def _build_jvm_args(self):
jvm_args = self.prop_container.get_value_silent('platformArgs', 'jvmArgs')
vm_args = []
if jvm_args is not None:
for jvm_k, jvm_arg in jvm_args.items():
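# system properties (keys starting with 'D') are rendered as -Dkey=value;
# other keys, e.g. 'Xmx24g', are passed through as a bare -<key> flag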
if jvm_k.startswith('D'):
val = f'-{jvm_k}={jvm_arg}'
else:
val = f'-{jvm_k}'
vm_args.append(val)
return vm_args
def _build_submission_cmd(self) -> list:
# env vars are added later as a separate dictionary
classpath_arr = self._build_classpath()
jvm_args_arr = self._build_jvm_args()
cmd_arg = ['java']
cmd_arg.extend(jvm_args_arr)
cmd_arg.extend(classpath_arr)
cmd_arg.append(self.prop_container.get_jar_main_class())
cmd_arg.extend(self._build_rapids_args())
return cmd_arg
def _submit_job(self, cmd_args: list) -> str:
env_args = self.prop_container.get_value_silent('platformArgs', 'envArgs')
out_std = self.exec_ctxt.platform.cli.run_sys_cmd(cmd=cmd_args,
env_vars=env_args)
return out_std
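# Editor's note: putting _build_jvm_args, _build_classpath and _build_rapids_args together,
# the submission command assembled above has roughly this shape (class name, heap size and
# paths are illustrative placeholders):
#
#     java -Xmx24g -Dkey=value \
#          -cp /path/to/rapids-4-spark-tools.jar:/path/to/extra-dep.jar \
#          <jar main class> \
#          --output-directory /path/to/workdir/output <extra tool options>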
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/rapids/rapids_job.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation class representing wrapper around diagnostic tool."""
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from spark_rapids_pytools.cloud_api.sp_types import ClusterBase, SparkNodeType
from spark_rapids_pytools.common.sys_storage import FSUtil
from spark_rapids_pytools.common.utilities import Utils
from spark_rapids_pytools.rapids.rapids_tool import RapidsTool
@dataclass
class Diagnostic(RapidsTool):
"""
Wrapper layer around Diagnostic Tool.
"""
name = 'diagnostic'
exec_cluster: ClusterBase = None
all_nodes: list = None
thread_num: int = 3
def _process_custom_args(self):
thread_num = self.wrapper_options.get('threadNum', 3)
if thread_num < 1 or thread_num > 10:
raise RuntimeError(f'Invalid thread number: {thread_num} (Valid value: 1~10)')
self.thread_num = thread_num
self.logger.debug('Set thread number as: %d', self.thread_num)
self.logger.warning('This operation will collect sensitive information from your cluster, '
'such as OS & HW info, Yarn/Spark configurations and log files etc.')
yes = self.wrapper_options.get('yes', False)
if yes:
self.logger.info('Confirmed by command line option.')
else:
user_input = input('Do you want to continue (yes/no): ')
if user_input.lower() not in ['yes', 'y']:
raise RuntimeError('User canceled the operation.')
def requires_cluster_connection(self) -> bool:
return True
def _connect_to_execution_cluster(self):
super()._connect_to_execution_cluster()
self.exec_cluster = self.get_exec_cluster()
self.all_nodes = self.exec_cluster.get_all_nodes()
def _process_output_args(self):
super()._process_output_args()
# Set remote output folder same as local output folder name
output_path = self.ctxt.get_output_folder()
folder_name = FSUtil.get_resource_name(output_path)
self.ctxt.set_remote('outputFolder', folder_name)
def _upload_scripts(self, node):
"""
Upload scripts to specified node
:return:
"""
script = Utils.resource_path('collect.sh')
try:
self.logger.info('Uploading script to node: %s', node.get_name())
self.exec_cluster.scp_to_node(node, str(script), '/tmp/')
except Exception as e:
self.logger.error('Error while uploading script to node: %s', node.get_name())
raise e
def _collect_info(self, node):
"""
Run task to collect info from specified node
:return:
"""
self._upload_scripts(node)
remote_output_folder = self.ctxt.get_remote('outputFolder')
ssh_cmd = f'"PREFIX={remote_output_folder} /tmp/collect.sh"'
try:
self.logger.info('Collecting info on node: %s', node.get_name())
self.exec_cluster.run_cmd_node(node, ssh_cmd)
except Exception as e:
self.logger.error('Error while collecting info from node: %s', node.get_name())
raise e
def _run_rapids_tool(self):
"""
Run diagnostic tool from both driver & worker nodes to collect info
:return:
"""
with ThreadPoolExecutor(max_workers=self.thread_num) as executor:
for e in executor.map(self._collect_info, self.all_nodes):
# Raise exception if any error occurred
if e:
raise e
def _download_output(self):
self.logger.info('Downloading results from remote nodes:')
output_path = self.ctxt.get_output_folder()
remote_output_folder = self.ctxt.get_remote('outputFolder')
remote_output_result = f'/tmp/{remote_output_folder}*.tgz'
def _download_result(node):
try:
node_output_path = FSUtil.build_path(output_path, node.get_name())
FSUtil.make_dirs(node_output_path, exist_ok=True)
self.logger.info('Downloading results from node: %s', node.get_name())
self.exec_cluster.scp_from_node(node, remote_output_result, node_output_path)
except Exception as e:
self.logger.error('Error while downloading collected info from node: %s', node.get_name())
raise e
with ThreadPoolExecutor(max_workers=self.thread_num) as executor:
for e in executor.map(_download_result, self.all_nodes):
# Raise exception if any error occurred
if e:
raise e
def _process_output(self):
self.logger.info('Processing the collected results.')
output_path = self.ctxt.get_output_folder()
region = self.exec_cluster.get_region()
worker_count = self.exec_cluster.get_nodes_cnt(SparkNodeType.WORKER)
master_type = self.exec_cluster.get_node_instance_type(SparkNodeType.MASTER)
worker_type = self.exec_cluster.get_node_instance_type(SparkNodeType.WORKER)
# Cleanup unused work dir
work_dir = FSUtil.build_path(output_path, self.ctxt.get_local_work_dir())
FSUtil.remove_path(work_dir, fail_ok=True)
# Save cluster info
self.logger.info('Saving cluster info.')
output_file = FSUtil.build_path(output_path, 'cluster.info')
with open(output_file, 'w', encoding='UTF-8') as f:
f.write(f'Region: {region}\n')
f.write(f'Worker count: {worker_count}\n')
f.write(f'Master type: {master_type}\n')
f.write(f'Worker type: {worker_type}\n')
def _archive_results(self):
output_path = self.ctxt.get_output_folder()
Utils.make_archive(output_path, 'tar', output_path)
self.logger.info("Archive '%s.tar' is successfully created.", output_path)
def _finalize(self):
pass
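# Editor's note: based on _process_output() above, the cluster.info file written to the
# local output folder looks like the following (values are hypothetical):
#
#     Region: us-central1
#     Worker count: 2
#     Master type: n1-standard-8
#     Worker type: n1-standard-16
#
# Each node additionally gets a <node-name>/ subfolder holding the *.tgz archive
# downloaded by _download_output().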
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/rapids/diagnostic.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation class representing wrapper around the RAPIDS acceleration Qualification tool."""
import textwrap
from dataclasses import dataclass, field
from math import ceil
from typing import Any, List, Callable
import pandas as pd
from tabulate import tabulate
from spark_rapids_tools.enums import QualFilterApp, QualGpuClusterReshapeType
from spark_rapids_pytools.cloud_api.sp_types import ClusterReshape, NodeHWInfo
from spark_rapids_pytools.common.sys_storage import FSUtil
from spark_rapids_pytools.common.utilities import Utils, TemplateGenerator
from spark_rapids_pytools.pricing.price_provider import SavingsEstimator
from spark_rapids_pytools.rapids.rapids_tool import RapidsJarTool
@dataclass
class QualificationSummary:
"""
Encapsulates the logic to organize Qualification report.
"""
comments: Any = None
all_apps: pd.DataFrame = None
recommended_apps: pd.DataFrame = None
df_result: pd.DataFrame = None
irrelevant_speedups: bool = False
savings_report_flag: bool = False
sections_generators: List[Callable] = field(default_factory=lambda: [])
def _get_total_durations(self) -> int:
if not self.is_empty():
return self.all_apps['App Duration'].sum()
return 0
def _get_total_gpu_durations(self) -> int:
if not self.is_empty():
return self.all_apps['Estimated GPU Duration'].sum()
return 0
def _get_stats_total_cost(self) -> float:
return self.df_result['Estimated App Cost'].sum()
def _get_stats_total_gpu_cost(self) -> float:
return self.df_result['Estimated GPU Cost'].sum()
def _get_stats_total_apps(self) -> int:
if not self.is_empty():
return len(self.all_apps)
return 0
def _get_stats_recommended_apps(self) -> int:
if self.has_gpu_recommendation():
return len(self.recommended_apps)
return 0
def is_empty(self) -> bool:
if self.all_apps is not None:
return self.all_apps.empty
return True
def has_gpu_recommendation(self) -> bool:
if self.recommended_apps is not None:
return not self.recommended_apps.empty
return False
def has_tabular_result(self) -> bool:
if self.df_result is not None:
return not self.df_result.empty
return False
def generate_report(self,
app_name: str,
wrapper_csv_file: str = None,
csp_report_provider: Callable[[], List[str]] = lambda: [],
df_pprinter: Any = None,
output_pprinter: Any = None):
def format_float(x: float) -> str:
return f'{x:.2f}'
report_content = []
if self.is_empty():
# Qualification tool has no output
report_content.append(f'{app_name} tool did not generate any valid rows')
if self.comments:
report_content.append(Utils.gen_multiline_str(self.comments))
return report_content
if output_pprinter is not None:
report_content.append(output_pprinter())
if not self.has_gpu_recommendation():
if not self.irrelevant_speedups:
report_content.append(f'{app_name} tool found no recommendations for GPU.')
if self.has_tabular_result():
if wrapper_csv_file is not None:
abs_path = FSUtil.get_abs_path(wrapper_csv_file)
report_content.append(f' - Full savings and speedups CSV report: {abs_path}')
pretty_df = df_pprinter(self.df_result)
if pretty_df.empty:
# the results were reduced to no rows because of the filters
report_content.append(
f'{app_name} tool found no qualified applications after applying the filters.\n'
f'See the CSV file for full report or disable the filters.')
else:
report_content.append(tabulate(pretty_df, headers='keys', tablefmt='psql', floatfmt='.2f'))
elif not self.savings_report_flag:
report_content.append(f'Pricing information not found for {app_name}')
else:
report_content.append(f'{app_name} tool found no records to show.')
overall_speedup = 0.0
total_apps_durations = 1.0 * self._get_total_durations()
total_gpu_durations = self._get_total_gpu_durations()
if total_gpu_durations > 0:
overall_speedup = total_apps_durations / total_gpu_durations
if not self.savings_report_flag:
report_content.append(Utils.gen_report_sec_header('Report Summary', hrule=False))
report_summary = [['Total applications', self._get_stats_total_apps()],
['Overall estimated speedup', format_float(overall_speedup)]]
else:
total_app_cost = self._get_stats_total_cost()
total_gpu_cost = self._get_stats_total_gpu_cost()
estimated_gpu_savings = 0.0
if total_app_cost > 0.0:
estimated_gpu_savings = 100.0 - (100.0 * total_gpu_cost / total_app_cost)
report_content.append(Utils.gen_report_sec_header('Report Summary', hrule=False))
report_summary = [['Total applications', self._get_stats_total_apps()],
['Overall estimated speedup', format_float(overall_speedup)],
['Overall estimated cost savings', f'{format_float(estimated_gpu_savings)}%']]
if not self.irrelevant_speedups:
# do not display speedup stats if the speedup is being overridden by the shape recommendations
report_summary.insert(1, ['RAPIDS candidates', self._get_stats_recommended_apps()])
report_content.append(tabulate(report_summary, colalign=('left', 'right')))
if self.comments:
report_content.append(Utils.gen_report_sec_header('Notes'))
report_content.extend(f' - {line}' for line in self.comments)
if self.sections_generators:
for section_generator in self.sections_generators:
if section_generator:
report_content.append(Utils.gen_multiline_str(section_generator()))
if self.has_gpu_recommendation():
csp_report = csp_report_provider()
if csp_report:
report_content.extend(csp_report)
# append an empty line at the end of the report
report_content.append('')
return report_content
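# Editor's note: an illustrative sketch of how the summary above might be assembled;
# the DataFrame columns 'App Duration' and 'Estimated GPU Duration' match the ones used
# by the duration helpers, while the identity df_pprinter is only a stand-in for the
# real pretty-printer:
#
#     summary = QualificationSummary(all_apps=qual_df,
#                                    recommended_apps=recommended_df,
#                                    df_result=savings_df,
#                                    comments=['example note'])
#     report_lines = summary.generate_report(app_name='Qualification',
#                                            df_pprinter=lambda df: df)
#     print(Utils.gen_multiline_str(report_lines))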
@dataclass
class Qualification(RapidsJarTool):
"""
Wrapper layer around Qualification Tool.
"""
name = 'qualification'
def _process_rapids_args(self):
"""
Qualification tool processes extra arguments:
1. filter out applications.
"""
self.logger.info('Qualification tool processing the arguments')
super()._process_rapids_args()
def _process_cpu_cluster_args(self, offline_cluster_opts: dict = None):
# get the name of the cpu_cluster
cpu_cluster_arg = offline_cluster_opts.get('cpuCluster')
if cpu_cluster_arg is not None:
cpu_cluster_obj = self._create_migration_cluster('CPU', cpu_cluster_arg)
self.ctxt.set_ctxt('cpuClusterProxy', cpu_cluster_obj)
def _process_gpu_cluster_args(self, offline_cluster_opts: dict = None) -> bool:
def _process_gpu_cluster_worker_node():
try:
worker_node = gpu_cluster_obj.get_worker_node()
worker_node._pull_and_set_mc_props(cli=self.ctxt.platform.cli) # pylint: disable=protected-access
sys_info = worker_node._pull_sys_info(cli=self.ctxt.platform.cli) # pylint: disable=protected-access
gpu_info = worker_node._pull_gpu_hw_info(cli=self.ctxt.platform.cli) # pylint: disable=protected-access
worker_node.hw_info = NodeHWInfo(sys_info=sys_info, gpu_info=gpu_info)
except Exception: # pylint: disable=broad-except
return
gpu_cluster_arg = offline_cluster_opts.get('gpuCluster')
if gpu_cluster_arg:
gpu_cluster_obj = self._create_migration_cluster('GPU', gpu_cluster_arg)
else:
orig_cluster = self.ctxt.get_ctxt('cpuClusterProxy')
gpu_cluster_obj = None
if orig_cluster:
# Convert the CPU instances to support gpu. Otherwise, gpuCluster is not set
self.logger.info('Creating GPU cluster by converting the CPU cluster instances to GPU supported types')
gpu_cluster_obj = self.ctxt.platform.migrate_cluster_to_gpu(orig_cluster)
self.ctxt.set_ctxt('gpuClusterProxy', gpu_cluster_obj)
_process_gpu_cluster_worker_node()
if gpu_cluster_obj:
# only pull the worker hardware info when a GPU cluster object could be created
worker_node_hw_info = gpu_cluster_obj.get_worker_hw_info()
self.ctxt.set_ctxt('recommendedConfigs', self._calculate_spark_settings(worker_node_hw_info))
return gpu_cluster_obj is not None
def _process_offline_cluster_args(self):
offline_cluster_opts = self.wrapper_options.get('migrationClustersProps', {})
self._process_cpu_cluster_args(offline_cluster_opts)
if self.ctxt.get_ctxt('cpuClusterProxy') is None:
# if no cpu-cluster is defined, then we are not supposed to run cost calculations
enable_savings_flag = False
else:
# if no gpu-cluster is defined, then we are not supposed to run cost calculations
enable_savings_flag = self._process_gpu_cluster_args(offline_cluster_opts)
self._set_savings_calculations_flag(enable_savings_flag)
def _set_savings_calculations_flag(self, enable_flag: bool):
self.ctxt.set_ctxt('enableSavingsCalculations', enable_flag)
if not enable_flag:
self.logger.info('Savings estimates are disabled because the cluster-information is '
'not provided.')
# revisit the filtering-apps flag
if self.ctxt.get_ctxt('filterApps') == QualFilterApp.SAVINGS:
# When no cost calculations, the filters should be revisited
# set it to none
new_filter = QualFilterApp.ALL
self.logger.info('Filtering criteria `filter_apps` will be reset to %s because savings '
'estimates are disabled', QualFilterApp.tostring(new_filter))
self.ctxt.set_ctxt('filterApps', new_filter)
def __process_gpu_cluster_recommendation(self, arg_val: str):
available_types = [filter_enum.value for filter_enum in QualGpuClusterReshapeType]
default_recommendation_txt = self.ctxt.get_value('sparkRapids', 'cli', 'defaults',
'gpuClusterRecommendation',
'defaultRecommendation')
if arg_val:
try:
selected_recommendation = QualGpuClusterReshapeType.fromstring(arg_val)
except Exception: # pylint: disable=broad-except
selected_recommendation = QualGpuClusterReshapeType.fromstring(default_recommendation_txt)
self.logger.warning(
'Invalid argument gpu_cluster_recommendation=%s.\n\t'
'Accepted options are: [%s].\n\t'
'Falling-back to default filter: %s',
arg_val, Utils.gen_joined_str(' | ', available_types), default_recommendation_txt)
else:
selected_recommendation = QualGpuClusterReshapeType.fromstring(default_recommendation_txt)
self.ctxt.set_ctxt('gpuClusterShapeRecommendation', selected_recommendation)
def __process_filter_args(self, arg_val: str):
available_filters = [filter_enum.value for filter_enum in QualFilterApp]
default_filter_txt = self.ctxt.get_value('sparkRapids', 'cli', 'defaults', 'filters',
'defaultFilter')
if arg_val is not None:
try:
selected_filter = QualFilterApp.fromstring(arg_val)
except Exception: # pylint: disable=broad-except
selected_filter = QualFilterApp.fromstring(default_filter_txt)
self.logger.warning(
'Invalid argument filter_apps=%s.\n\t'
'Accepted options are: [%s].\n\t'
'Falling-back to default filter: %s',
arg_val, Utils.gen_joined_str(' | ', available_filters), default_filter_txt)
else:
selected_filter = QualFilterApp.fromstring(default_filter_txt)
if self.__recommendation_is_non_standard():
# SpeedupFilter cannot be applied with the current cluster_gpu_recommendation
if selected_filter == QualFilterApp.SPEEDUPS:
self.logger.info('Cannot apply Filter argument filter_apps=%s with the selected '
'gpu_cluster_shape recommendation. Setting the filter to %s',
QualFilterApp.tostring(selected_filter),
default_filter_txt)
selected_filter = QualFilterApp.fromstring(default_filter_txt)
self.ctxt.set_ctxt('filterApps', selected_filter)
def _process_custom_args(self):
"""
Qualification tool processes extra arguments:
1. filter out applications.
2. gpu-device type to be used for the cost estimation.
3. gpu_per_machine: number of gpu installed on a worker node.
4. cuda version
"""
gpu_device = self.ctxt.get_value('sparkRapids', 'gpu', 'device')
gpu_device_arg = self.wrapper_options.get('gpuDevice')
if gpu_device_arg is not None:
gpu_device = gpu_device_arg
gpu_per_machine = int(self.ctxt.get_value('sparkRapids', 'gpu', 'workersPerNode'))
gpu_per_machine_arg = self.wrapper_options.get('gpuPerMachine')
if gpu_per_machine_arg is not None:
gpu_per_machine = gpu_per_machine_arg
cuda = self.ctxt.get_value('sparkRapids', 'gpu', 'cudaVersion')
cuda_arg = self.wrapper_options.get('cuda')
if cuda_arg is not None:
cuda = cuda_arg
target_platform = self.wrapper_options.get('targetPlatform')
self.ctxt.set_ctxt('targetPlatform', target_platform)
self.ctxt.set_ctxt('gpuPerMachine', gpu_per_machine)
self.ctxt.set_ctxt('gpuDevice', gpu_device)
self.ctxt.set_ctxt('cuda', cuda)
# we need to process each argument to verify it is valid. Otherwise, we may fail late during execution
self.__process_gpu_cluster_recommendation(self.wrapper_options.get('gpuClusterRecommendation'))
self.__process_filter_args(self.wrapper_options.get('filterApps'))
self._process_offline_cluster_args()
self._process_eventlogs_args()
# This is noise to dump everything
# self.logger.debug('%s custom arguments = %s', self.pretty_name(), self.ctxt.props['wrapperCtx'])
def __is_savings_calc_enabled(self):
return self.ctxt.get_ctxt('enableSavingsCalculations')
def __get_recommended_apps(self, all_rows, selected_cols=None) -> pd.DataFrame:
speed_up_col = self.ctxt.get_value('toolOutput', 'csv', 'summaryReport',
'recommendations', 'speedUp', 'columnName')
recommended_vals = self.ctxt.get_value('toolOutput', 'csv', 'summaryReport',
'recommendations', 'speedUp', 'selectedRecommendations')
mask = all_rows[speed_up_col].isin(recommended_vals)
if selected_cols is None:
return all_rows.loc[mask]
return all_rows.loc[mask, selected_cols]
def __remap_columns_and_prune(self, all_rows) -> pd.DataFrame:
cols_subset = self.ctxt.get_value('toolOutput', 'csv', 'summaryReport', 'columns')
# for backward compatibility, filter out non-existing columns
existing_cols_subset = [col for col in cols_subset if col in all_rows.columns]
cols_map = self.ctxt.get_value('toolOutput', 'csv', 'summaryReport', 'mapColumns')
subset_data = all_rows.loc[:, existing_cols_subset]
if cols_map:
for col_rename in cols_map:
subset_data.columns = subset_data.columns.str.replace(col_rename,
cols_map.get(col_rename),
regex=False)
# for TCO, group by app name and average durations, then recalculate Estimated GPU Speedup
group_map = self.ctxt.get_value('toolOutput', 'csv', 'summaryReport', 'groupColumns')
if group_map:
for group_key, group_value in group_map.items():
subset_data[group_key] = subset_data.groupby(group_value)[group_key].transform('mean')
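# Example (hypothetical data): two runs of an app named 'etl-job' with App Duration
# values of 100 and 200 are both replaced by the group mean 150 before duplicates
# are dropped below.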
drop_arr = self.ctxt.get_value('toolOutput', 'csv', 'summaryReport', 'dropDuplicates')
subset_data = subset_data.drop_duplicates(subset=drop_arr)
notes = []
if len(subset_data) != len(all_rows):
notes = 'Apps with the same name are grouped together and their metrics are averaged'
subset_data['Estimated GPU Speedup'] = subset_data['App Duration'] / subset_data['Estimated GPU Duration']
return subset_data, notes
def __remap_cols_for_shape_type(self,
data_set: pd.DataFrame,
initial_cols_set: List[str],
reshape_type: QualGpuClusterReshapeType) -> pd.DataFrame:
cols_conf = self.ctxt.get_value('local', 'output', 'processDFProps',
'clusterShapeCols', 'colsPerShapeType',
QualGpuClusterReshapeType.tostring(reshape_type))
deleted_cols = cols_conf.get('excludeColumns')
cols_map = cols_conf.get('mapColumns')
appended_cols = cols_conf.get('appendColumns')
if deleted_cols:
new_cols = [col for col in initial_cols_set if col not in deleted_cols]
else:
new_cols = initial_cols_set[:]
if appended_cols:
for col_conf in appended_cols:
col_name = col_conf.get('columnName')
col_ind = col_conf.get('index')
if col_ind < 0 or col_ind >= len(new_cols):
new_cols.append(col_name)
else:
new_cols.insert(col_ind, col_name)
subset_data = data_set.loc[:, new_cols]
if cols_map:
for col_rename in cols_map:
subset_data.columns = subset_data.columns.str.replace(col_rename,
cols_map.get(col_rename),
regex=False)
return subset_data
def __generate_mc_types_conversion_report(self):
report_content = []
if bool(self.ctxt.platform.ctxt['notes']):
# get the converted instance types
node_conversions = self.ctxt.platform.ctxt['notes'].get('nodeConversions')
if node_conversions is not None:
report_content = [
Utils.gen_report_sec_header('Instance types conversions', hrule=False),
]
conversion_items = []
for mc_src, mc_target in node_conversions.items():
conversion_items.append([mc_src, 'to', mc_target])
report_content.append(tabulate(conversion_items))
report_content.append(self.ctxt.platform.get_footer_message())
return report_content
def __generate_recommended_configs_report(self) -> list:
report_content = []
if self.ctxt.get_ctxt('recommendedConfigs'):
conversion_items = []
recommended_configs = self.ctxt.get_ctxt('recommendedConfigs')
for config in recommended_configs:
conversion_items.append([config, recommended_configs[config]])
report_content.append(tabulate(conversion_items))
# the report should be appended to the log_summary file
rapids_output_dir = self.ctxt.get_rapids_output_folder()
rapids_log_file = FSUtil.build_path(rapids_output_dir,
self.ctxt.get_value('toolOutput', 'textFormat', 'summaryLog',
'fileName'))
with open(rapids_log_file, 'a', encoding='UTF-8') as summary_log_file:
log_report = [Utils.gen_report_sec_header('Recommended Spark configurations for running on GPUs',
hrule=False)]
log_report.extend(report_content)
summary_log_file.write(Utils.gen_multiline_str(log_report))
return report_content
def __generate_cluster_shape_report(self) -> str:
if bool(self.ctxt.platform.ctxt['notes']):
return Utils.gen_multiline_str(self.ctxt.platform.ctxt['notes'].get('clusterShape'))
return None
def __recommendation_is_non_standard(self):
cluster_shape_type = self.ctxt.get_ctxt('gpuClusterShapeRecommendation')
if cluster_shape_type:
return cluster_shape_type != QualGpuClusterReshapeType.get_default()
return False
def __apply_non_standard_gpu_shape(self,
all_apps: pd.DataFrame,
cluster_workers_cnt: int,
cluster_shape_t: QualGpuClusterReshapeType):
min_w_cnt_from_conf = self.ctxt.platform.configs.get_value_silent('clusterSpecs',
'minWorkerNodes')
scale_factor_from_conf = self.ctxt.platform.configs.get_value_silent('clusterSpecs',
'gpuScaleFactor')
# get the min_worker_cnt from the qualification config in case it is not defined for the platform
default_min_w_cnt = self.ctxt.get_value('local', 'output', 'processDFProps',
'minimumWorkerCount')
# get the scale factor from the qualification config in case it is not defined for the platform
default_scale_factor = self.ctxt.get_value('local', 'output', 'processDFProps', 'gpuScaleFactor')
# As you reduce nodes, performance will be slightly better than linear based on benchmarks
scale_f = scale_factor_from_conf if scale_factor_from_conf else default_scale_factor
min_w_cnt = min_w_cnt_from_conf if min_w_cnt_from_conf else default_min_w_cnt
# calculate the reshape_cluster_column
reshape_col = self.ctxt.get_value('local', 'output', 'processDFProps',
'clusterShapeCols', 'columnName')
speedup_col = 'Estimated GPU Speedup'
gpu_dur_col = 'Estimated GPU Duration'
cpu_dur_col = 'App Duration'
def f_cell(x):
return ceil(x * 100) / 100
def calc_cluster_shape_col(df_row, min_worker_cnt: int, old_workers_cnt: int) -> pd.Series:
gpu_speedup = df_row[speedup_col]
# We should not worry about division by 0 because speedup is always >= 1.0
cluster_shape = max(min_worker_cnt, ceil(scale_f * old_workers_cnt / gpu_speedup))
return pd.Series([cluster_shape])
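# Worked example (assumed values): scale_f=0.8, old_workers_cnt=10 and a 4.0x speedup
# give ceil(0.8 * 10 / 4.0) = 2 workers, bounded below by min_worker_cnt.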
def update_cols_with_new_shape(apps_df: pd.DataFrame,
old_workers_cnt: int) -> pd.DataFrame:
apps_df[gpu_dur_col] = apps_df.apply(lambda row: f_cell(
(old_workers_cnt / row[reshape_col]) * scale_f * row[cpu_dur_col] / row[speedup_col]), axis=1)
apps_df[speedup_col] = apps_df.apply(
lambda row: f_cell(row[cpu_dur_col] / row[gpu_dur_col]), axis=1
)
return apps_df
all_apps[[reshape_col]] = all_apps.apply(
lambda row: calc_cluster_shape_col(row, min_w_cnt, cluster_workers_cnt), axis=1)
recalc_speedups_flag = True
if cluster_shape_t == QualGpuClusterReshapeType.CLUSTER:
# the column value should be reset to the maximum of all the rows
max_workers_cnt = all_apps[reshape_col].max()
all_apps[reshape_col] = max_workers_cnt
# Append a node to be part of the summary report
reshape_msg_plain = self.ctxt.get_value('local', 'output', 'processDFProps',
'clusterShapeCols', 'noteMsg')
self.ctxt.platform.update_ctxt_notes('clusterShape',
reshape_msg_plain.format(max_workers_cnt))
# If max_workers_cnt equals the gpu_cluster node count, then there is no need to recalculate the columns
recalc_speedups_flag = max_workers_cnt != cluster_workers_cnt
# check if we need to recalculate the flags
if not recalc_speedups_flag:
return all_apps, False
return update_cols_with_new_shape(all_apps, cluster_workers_cnt), True
def __apply_gpu_cluster_reshape(self, all_apps: pd.DataFrame) -> (pd.DataFrame, bool):
gpu_reshape_type = self.ctxt.get_ctxt('gpuClusterShapeRecommendation')
gpu_cluster = ClusterReshape(self.ctxt.get_ctxt('gpuClusterProxy'))
per_row_flag = False
if self.__recommendation_is_non_standard():
apps_df, per_row_flag = self.__apply_non_standard_gpu_shape(all_apps,
gpu_cluster.get_workers_count(),
gpu_reshape_type)
else:
apps_df = all_apps
return apps_df, per_row_flag
def __calc_apps_cost(self,
app_df_set: pd.DataFrame,
shape_col: str,
speedup_rec_col: str,
cost_per_row: bool = False):
# used for the caching of the per-row estimator for optimizations
saving_estimator_cache = {}
savings_ranges = self.ctxt.get_value('local', 'output', 'processDFProps',
'savingRecommendationsRanges')
def get_costs_for_single_app(df_row, estimator: SavingsEstimator) -> pd.Series:
cpu_cost, gpu_cost, est_savings = estimator.get_costs_and_savings(df_row['App Duration'],
df_row['Estimated GPU Duration'])
# We do not want to mistakenly mark a Not-applicable app as Recommended in the savings column
if df_row[speedup_rec_col] == 'Not Applicable':
savings_recommendations = 'Not Applicable'
else:
for s_range in savings_ranges.values():
if s_range.get('lowerBound') <= est_savings < s_range.get('upperBound'):
savings_recommendations = s_range.get('title')
break
# For TCO, calculating annual cost savings based on job frequency
job_frequency = 30 # default frequency is daily
if 'Estimated Job Frequency (monthly)' in df_row:
job_frequency = df_row['Estimated Job Frequency (monthly)']
annual_cost_savings = job_frequency * 12 * (cpu_cost - gpu_cost)
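# Worked example (assumed numbers): 30 runs/month, cpu_cost=10 and gpu_cost=4 per run
# give annual_cost_savings = 30 * 12 * (10 - 4) = 2160.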
return pd.Series([savings_recommendations, cpu_cost, gpu_cost,
est_savings, job_frequency, annual_cost_savings])
def get_cost_per_row(df_row, reshape_col: str) -> pd.Series:
nonlocal saving_estimator_cache
workers_cnt = df_row[reshape_col]
estimator_obj = saving_estimator_cache.get(workers_cnt)
if not estimator_obj:
# create the object and add it to the caching dict
reshaped_cluster = ClusterReshape(self.ctxt.get_ctxt('gpuClusterProxy'),
reshape_workers_cnt=lambda x: workers_cnt)
estimator_obj = self.ctxt.platform.create_saving_estimator(self.ctxt.get_ctxt('cpuClusterProxy'),
reshaped_cluster)
saving_estimator_cache.setdefault(workers_cnt, estimator_obj)
cost_pd_series = get_costs_for_single_app(df_row, estimator_obj)
return cost_pd_series
cost_cols = self.ctxt.get_value('local', 'output', 'costColumns')
if not cost_per_row:
# initialize the savings estimator only once
reshaped_gpu_cluster = ClusterReshape(self.ctxt.get_ctxt('gpuClusterProxy'))
savings_estimator = self.ctxt.platform.create_saving_estimator(self.ctxt.get_ctxt('cpuClusterProxy'),
reshaped_gpu_cluster)
app_df_set[cost_cols] = app_df_set.apply(
lambda row: get_costs_for_single_app(row, estimator=savings_estimator), axis=1)
else:
# this is per row calculation and saving estimator should be created for each row
app_df_set[cost_cols] = app_df_set.apply(
lambda row: get_cost_per_row(row, shape_col), axis=1)
return app_df_set
def __build_global_report_summary(self,
all_apps: pd.DataFrame,
csv_out: str) -> QualificationSummary:
if all_apps.empty:
# No need to run saving estimator or process the data frames.
return QualificationSummary(comments=self.__generate_mc_types_conversion_report())
apps_pruned_df, prune_notes = self.__remap_columns_and_prune(all_apps)
recommended_apps = self.__get_recommended_apps(apps_pruned_df)
# if the gpu_reshape_type is set to JOB, then we should ignore recommended apps
speedups_irrelevant_flag = self.__recommendation_is_non_standard()
reshaped_notes = self.__generate_cluster_shape_report()
report_comments = [prune_notes] if prune_notes else []
if reshaped_notes:
report_comments.append(reshaped_notes)
pricing_config = self.ctxt.platform.configs.get_value_silent('pricing')
target_platform = self.ctxt.get_ctxt('targetPlatform')
if target_platform is not None:
pricing_config = self.ctxt.platform.configs.get_value_silent('csp_pricing')
if pricing_config is None:
# OnPrem platform doesn't have pricing information. We do not calculate cost savings for
# OnPrem platform if the target_platform is not specified.
self.logger.warning('The pricing configuration for the given platform is not defined.\n\t'
'Savings estimates cannot be generated.')
# enable savings report only if the price_config exists and the estimates are enabled
launch_savings_calc = self.__is_savings_calc_enabled() and (pricing_config is not None)
reshape_col = self.ctxt.get_value('local', 'output', 'processDFProps',
'clusterShapeCols', 'columnName')
speed_recommendation_col = self.ctxt.get_value('local', 'output', 'speedupRecommendColumn')
apps_reshaped_df, per_row_flag = self.__apply_gpu_cluster_reshape(apps_pruned_df)
if launch_savings_calc:
# Now, the dataframe is ready to calculate the cost and the savings
apps_working_set = self.__calc_apps_cost(apps_reshaped_df,
reshape_col,
speed_recommendation_col,
per_row_flag)
df_final_result = apps_working_set
if not apps_working_set.empty:
self.logger.info('Generating GPU Estimated Speedup and Savings as: %s', csv_out)
# we can use the general format as well but this will transform numbers to E+. So, stick with %f
apps_working_set.to_csv(csv_out, float_format='%.2f')
else:
df_final_result = apps_reshaped_df
if not apps_reshaped_df.empty:
# Do not include estimated job frequency in csv file
apps_reshaped_df = apps_reshaped_df.drop(columns=['Estimated Job Frequency (monthly)'])
self.logger.info('Generating GPU Estimated Speedup as: %s', csv_out)
apps_reshaped_df.to_csv(csv_out, float_format='%.2f')
return QualificationSummary(comments=report_comments,
all_apps=apps_pruned_df,
recommended_apps=recommended_apps,
savings_report_flag=launch_savings_calc,
df_result=df_final_result,
irrelevant_speedups=speedups_irrelevant_flag,
sections_generators=[self.__generate_mc_types_conversion_report])
def _process_output(self):
def process_df_for_stdout(raw_df):
"""
process the dataframe to be more readable on the stdout
1- convert time durations to seconds
2- shorten headers
"""
savings_report_enabled = self.__is_savings_calc_enabled()
# summary columns depend on the type of the generated report
selected_cols = self.ctxt.get_value('local', 'output', 'summaryColumns',
f'savingsReportEnabled{str(savings_report_enabled)}')
# check if any filters apply
filter_recommendation_enabled = self.ctxt.get_ctxt('filterApps') == QualFilterApp.SPEEDUPS
filter_pos_enabled = self.ctxt.get_ctxt('filterApps') == QualFilterApp.SAVINGS
if self.__recommendation_is_non_standard():
# During processing of arguments phase, we verified that the filter does not conflict
# with the shape recommendation
raw_df = self.__remap_cols_for_shape_type(raw_df,
selected_cols,
self.ctxt.get_ctxt('gpuClusterShapeRecommendation'))
# update the selected columns
selected_cols = list(raw_df.columns)
# filter by recommendations if enabled
if filter_recommendation_enabled:
df_row = self.__get_recommended_apps(raw_df, selected_cols)
else:
df_row = raw_df.loc[:, selected_cols]
if df_row.empty:
return df_row
# filter by savings if enabled
if filter_pos_enabled:
saving_cost_col = self.ctxt.get_value('local', 'output', 'savingRecommendColumn')
recommended_vals = self.ctxt.get_value('toolOutput', 'csv', 'summaryReport',
'recommendations', 'speedUp',
'selectedRecommendations')
cost_mask = df_row[saving_cost_col].isin(recommended_vals)
df_row = df_row.loc[cost_mask, selected_cols]
if df_row.empty:
self.ctxt.set_ctxt('wrapperOutputContent',
'Found no qualified apps for cost savings.')
return df_row
time_unit = '(ms)'
time_from_conf = self.ctxt.get_value('toolOutput', 'stdout', 'summaryReport', 'timeUnits')
if time_from_conf == 's':
time_unit = '(s)'
# convert to seconds
for column in df_row[[col for col in df_row.columns if 'Duration' in col]]:
df_row[column] = df_row[column].div(1000).round(2)
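# e.g. a value of 125000 ms becomes 125.0 s after the division and rounding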
# change the header to include time unit
df_row.columns = df_row.columns.str.replace('Duration',
f'Duration{time_unit}', regex=False)
# squeeze the header titles if enabled
if self.ctxt.get_value('toolOutput', 'stdout', 'summaryReport', 'compactWidth'):
col_w_conf = self.ctxt.get_value('toolOutput', 'stdout', 'summaryReport', 'columnWidth')
for column in df_row.columns:
if len(column) > col_w_conf:
new_column_name = textwrap.fill(column, col_w_conf, break_long_words=False)
if new_column_name != column:
df_row.columns = df_row.columns.str.replace(column,
new_column_name, regex=False)
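# For instance, with a configured columnWidth of 14 (illustrative), a header such as
# 'Estimated GPU Speedup' is wrapped onto multiple lines without breaking words.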
return df_row
if not self._evaluate_rapids_jar_tool_output_exist():
return
rapids_output_dir = self.ctxt.get_rapids_output_folder()
rapids_summary_file = FSUtil.build_path(rapids_output_dir,
self.ctxt.get_value('toolOutput', 'csv', 'summaryReport', 'fileName'))
self.ctxt.logger.debug('Rapids CSV summary file is located as: %s', rapids_summary_file)
df = pd.read_csv(rapids_summary_file)
csv_file_name = self.ctxt.get_value('local', 'output', 'fileName')
csv_summary_file = FSUtil.build_path(self.ctxt.get_output_folder(), csv_file_name)
report_gen = self.__build_global_report_summary(df, csv_summary_file)
summary_report = report_gen.generate_report(app_name=self.pretty_name(),
wrapper_csv_file=csv_summary_file,
csp_report_provider=self._generate_platform_report_sections,
df_pprinter=process_df_for_stdout,
output_pprinter=self._report_tool_full_location)
self.ctxt.set_ctxt('wrapperOutputContent', summary_report)
def _write_summary(self):
wrapper_out_content = self.ctxt.get_ctxt('wrapperOutputContent')
if wrapper_out_content is not None:
print(Utils.gen_multiline_str(wrapper_out_content))
def _init_rapids_arg_list(self) -> List[str]:
# TODO: Make sure we add this argument only for jar versions 23.02+
return ['--platform', self.ctxt.platform.get_platform_name().replace('_', '-')]
def _generate_section_lines(self, sec_conf: dict) -> List[str]:
# TODO: we may like to show the scripts even when the gpu-cluster is not defined
# this requires that we allow to generate the script without the gpu-cluster
if sec_conf.get('sectionID') == 'initializationScript':
# format the initialization scripts
reshaped_gpu_cluster = ClusterReshape(self.ctxt.get_ctxt('gpuClusterProxy'))
gpu_per_machine, gpu_device = reshaped_gpu_cluster.get_gpu_per_worker()
fill_map = {
0: self.ctxt.platform.cli.get_region(),
1: [gpu_device.lower(), gpu_per_machine]
}
res = []
for ind, l_str in enumerate(sec_conf['content'].get('lines')):
if ind in fill_map:
rep_var = fill_map.get(ind)
new_value = l_str.format(*rep_var) if isinstance(rep_var, list) else l_str.format(rep_var)
res.append(new_value)
else:
res.append(l_str)
return res
if sec_conf.get('sectionID') == 'gpuClusterCreationScript':
gpu_cluster = self.ctxt.get_ctxt('gpuClusterProxy')
script_content = gpu_cluster.generate_create_script()
highlighted_code = TemplateGenerator.highlight_bash_code(script_content)
return ['```bash', highlighted_code, '```']
if sec_conf.get('sectionID') == 'runUserToolsBootstrap':
gpu_cluster = self.ctxt.get_ctxt('gpuClusterProxy')
override_args = {'CLUSTER_NAME': '$CLUSTER_NAME'}
script_content = gpu_cluster.generate_bootstrap_script(overridden_args=override_args)
highlighted_code = TemplateGenerator.highlight_bash_code(script_content)
return ['```bash', highlighted_code, '```', '']
if sec_conf.get('sectionID') == 'gpuBootstrapRecommendedConfigs':
return self.__generate_recommended_configs_report()
return super()._generate_section_content(sec_conf)
@dataclass
class QualificationAsLocal(Qualification):
"""
Qualification tool running on local development.
"""
description: str = 'This is the localQualification'
def _copy_dependencies_to_remote(self):
self.logger.info('Skipping preparing remote dependency folder')
def _process_job_submission_args(self):
self._process_local_job_submission_args()
def _prepare_job_arguments(self):
self._prepare_local_job_arguments()
def _delete_remote_dep_folder(self):
self.logger.debug('Local mode skipping deleting the remote workdir')
def _download_remote_output_folder(self):
self.logger.debug('Local mode skipping downloading the remote output workdir')
def _archive_results(self):
self._archive_local_results()
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/rapids/qualification.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of class holding the execution context of a rapids tool"""
import os
import tarfile
from glob import glob
from dataclasses import dataclass, field
from logging import Logger
from typing import Type, Any, ClassVar, List
from spark_rapids_tools import CspEnv
from spark_rapids_pytools.cloud_api.sp_types import PlatformBase
from spark_rapids_pytools.common.prop_manager import YAMLPropertiesContainer
from spark_rapids_pytools.common.sys_storage import FSUtil
from spark_rapids_pytools.common.utilities import ToolLogging, Utils
@dataclass
class ToolContext(YAMLPropertiesContainer):
"""
A container that holds properties and characteristics of a given execution.
"""
name: str = None
platform_cls: Type[PlatformBase] = None
platform_opts: dict = field(default_factory=dict)
logger: Logger = field(default=None, init=False)
platform: PlatformBase = field(default=None, init=False)
uuid: str = field(default=None, init=False)
prepackage_paths: ClassVar[List[str]] = [
Utils.resource_path('csp-resources.tgz'),
Utils.resource_path('csp-resources')
]
@classmethod
def are_resources_prepackaged(cls) -> bool:
return any(os.path.exists(f) for f in cls.prepackage_paths)
def __connect_to_platform(self):
self.logger.info('Start connecting to the platform')
self.platform = self.platform_cls(ctxt_args=self.platform_opts)
def __create_and_set_uuid(self):
self.uuid = Utils.gen_uuid_with_ts(suffix_len=8)
def __create_and_set_cache_folder(self):
# get the cache folder from environment variables or set it to default
cache_folder = Utils.get_rapids_tools_env('CACHE_FOLDER', '/var/tmp/spark_rapids_user_tools_cache')
# make sure the environment is set
Utils.set_rapids_tools_env('CACHE_FOLDER', cache_folder)
FSUtil.make_dirs(cache_folder)
self.set_local('cacheFolder', cache_folder)
def get_cache_folder(self) -> str:
return self.get_local('cacheFolder')
def _init_fields(self):
self.logger = ToolLogging.get_and_setup_logger(f'rapids.tools.{self.name}.ctxt')
self.__connect_to_platform()
self.__create_and_set_uuid()
self.props['localCtx'] = {}
self.props['remoteCtx'] = {}
self.props['wrapperCtx'] = {}
# add a dictionary that holds all the rapids plugin args
self.props['wrapperCtx']['rapidsArgs'] = {}
# add a dictionary that holds arguments to be passed to the plugin args
self.props['wrapperCtx']['jobArgs'] = {}
# create cache_folder that will be used to hold large downloaded files
self.__create_and_set_cache_folder()
def get_deploy_mode(self) -> Any:
return self.platform_opts.get('deployMode')
def is_fatwheel_mode(self) -> bool:
return self.get_ctxt('fatwheelModeEnabled')
def set_ctxt(self, key: str, val: Any):
self.props['wrapperCtx'][key] = val
def add_rapids_args(self, key: str, val: Any):
self.props['wrapperCtx']['rapidsArgs'][key] = val
def add_job_args(self, key: str, val: Any):
self.props['wrapperCtx']['jobArgs'][key] = val
def update_job_args(self, extra_args: dict):
self.props['wrapperCtx']['jobArgs'].update(extra_args)
def get_ctxt(self, key: str):
return self.props['wrapperCtx'].get(key)
def set_remote(self, key: str, val: Any):
self.props['remoteCtx'][key] = val
def set_local(self, key: str, val: Any):
self.props['localCtx'][key] = val
def get_local(self, key: str):
return self.props['localCtx'].get(key)
def get_remote(self, key: str):
return self.props['remoteCtx'].get(key)
def set_local_workdir(self, parent: str):
short_name = self.get_value('platform', 'shortName')
exec_dir_name = f'{short_name}_{self.uuid}'
self.set_ctxt('execFullName', exec_dir_name)
exec_root_dir = FSUtil.build_path(parent, exec_dir_name)
self.logger.info('Local workdir root folder is set as %s', exec_root_dir)
# It should never happen that the exec_root_dir exists
FSUtil.make_dirs(exec_root_dir, exist_ok=False)
# Create the dependency folder. It is a subdirectory in the output folder
# because we want that same name to appear on the remote storage when copying
dep_folder_name = 'work_dir'
self.set_ctxt('depFolderName', dep_folder_name)
dep_folder = FSUtil.build_path(exec_root_dir, dep_folder_name)
FSUtil.make_dirs(dep_folder, exist_ok=False)
self.set_local('outputFolder', exec_root_dir)
self.set_local('depFolder', dep_folder)
self.logger.info('Dependencies are generated locally in local disk as: %s', dep_folder)
self.logger.info('Local output folder is set as: %s', exec_root_dir)
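# Resulting layout (illustrative): <parent>/<shortName>_<uuid>/ is the output folder,
# with a nested work_dir/ holding the downloaded dependencies.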
def load_prepackaged_resources(self):
"""
Checks if the packaging includes the CSP dependencies. If so, it moves the dependencies
into the tmp folder. This allows the tool to pick the resources from cache folder.
"""
if not self.are_resources_prepackaged():
return
self.set_ctxt('fatwheelModeEnabled', True)
self.logger.info(Utils.gen_str_header('Fat Wheel Mode Is Enabled',
ruler='_', line_width=50))
for res_path in self.prepackage_paths:
if os.path.exists(res_path):
if os.path.isdir(res_path):
# this is a directory, copy all the contents to the tmp
FSUtil.copy_resource(res_path, self.get_cache_folder())
else:
# this is an archived file
with tarfile.open(res_path, mode='r:*') as tar_file:
tar_file.extractall(self.get_cache_folder())
def get_output_folder(self) -> str:
return self.get_local('outputFolder')
def get_wrapper_summary_file_path(self) -> str:
summary_file_name = self.get_value('local', 'output', 'fileName')
summary_path = FSUtil.build_path(self.get_output_folder(), summary_file_name)
return summary_path
def get_local_work_dir(self) -> str:
return self.get_local('depFolder')
def get_rapids_jar_url(self) -> str:
# get the version from the package, instead of the yaml file
# jar_version = self.get_value('sparkRapids', 'version')
if self.is_fatwheel_mode():
offline_path_regex = FSUtil.build_path(self.get_cache_folder(), 'rapids-4-spark-tools_*.jar')
matching_files = glob(offline_path_regex)
if not matching_files:
raise FileNotFoundError('In Fat Mode. No matching JAR files found.')
return matching_files[0]
mvn_base_url = self.get_value('sparkRapids', 'mvnUrl')
jar_version = Utils.get_latest_available_jar_version(mvn_base_url, Utils.get_base_release())
rapids_url = self.get_value('sparkRapids', 'repoUrl').format(mvn_base_url, jar_version, jar_version)
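# Note: the 'repoUrl' template from the tool config is expected to contain three
# placeholders, filled here with the Maven base URL and the resolved jar version.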
return rapids_url
def get_tool_main_class(self) -> str:
return self.get_value('sparkRapids', 'mainClass')
def get_rapids_output_folder(self) -> str:
root_dir = self.get_local('outputFolder')
rapids_subfolder = self.get_value_silent('toolOutput', 'subFolder')
if rapids_subfolder is None:
return root_dir
return FSUtil.build_path(root_dir, rapids_subfolder)
def get_platform_name(self) -> str:
"""
Get the lower-case name of the platform of the runtime.
:return: the name of the platform of the runtime in lower_case.
"""
return CspEnv.pretty_print(self.platform.type_id)
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/rapids/tool_ctxt.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class representing wrapper around the RAPIDS acceleration tools."""
import concurrent
import logging
import os
import re
import sys
import tarfile
import time
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from logging import Logger
from typing import Any, Callable, Dict, List
from spark_rapids_tools import CspEnv
from spark_rapids_pytools.cloud_api.sp_types import get_platform, \
ClusterBase, DeployMode, NodeHWInfo
from spark_rapids_pytools.common.prop_manager import YAMLPropertiesContainer
from spark_rapids_pytools.common.sys_storage import FSUtil, FileVerifier
from spark_rapids_pytools.common.utilities import ToolLogging, Utils
from spark_rapids_pytools.rapids.rapids_job import RapidsJobPropContainer
from spark_rapids_pytools.rapids.tool_ctxt import ToolContext
@dataclass
class RapidsTool(object):
"""
A generic class that represents a RAPIDS plugin tool.
:param platform_type: the type of platform associated with the current execution.
:param cluster: name of the cluster on which the application will be running
:param output_folder: location to store the output of the execution
:param config_path: location of the configuration file of the current tool
:param wrapper_options: dictionary containing options specific to the wrapper tool execution.
:param rapids_options: dictionary containing the options to be passed as CLI arguments to the RAPIDS Accelerator.
:param name: the name of the tool
:param ctxt: context manager for the current tool execution.
:param logger: the logger instant associated to the current tool.
"""
platform_type: CspEnv
cluster: str = None
output_folder: str = None
config_path: str = None
wrapper_options: dict = field(default_factory=dict)
rapids_options: dict = field(default_factory=dict)
name: str = field(default=None, init=False)
ctxt: ToolContext = field(default=None, init=False)
logger: Logger = field(default=None, init=False)
def pretty_name(self):
return self.name.capitalize()
def get_exec_cluster(self) -> ClusterBase:
return self.ctxt.get_ctxt('execCluster')
def is_remote_cluster_execution(self) -> bool:
"""
used to verify whether a rapids tool runs on a remote cluster submission.
This does not include the serverlessMode
:return: True when the tool needs to have a remote cluster established
"""
return self.ctxt.get_deploy_mode() == DeployMode.REMOTE_CLUSTER
def requires_remote_folder(self) -> bool:
"""
used to verify whether a rapids tool running remotely has a defined remote path to generate the
rapids tool output.
:return: True when the tool needs to have a remote cluster folder
"""
deploy_mode: DeployMode = self.ctxt.get_deploy_mode()
return deploy_mode.requires_remote_storage()
def requires_cluster_connection(self) -> bool:
return self.is_remote_cluster_execution()
def timeit(timed_item: str): # pylint: disable=no-self-argument
def decorator(func_cb: Callable):
def wrapper(self, *args, **kwargs):
start_time = time.monotonic()
func_cb(self, *args, **kwargs) # pylint: disable=not-callable
end_time = time.monotonic()
self.logger.info('Total Execution Time: %s => %s seconds', timed_item,
f'{(end_time-start_time):,.3f}')
return wrapper
return decorator
def phase_banner(phase_name: str, # pylint: disable=no-self-argument
enable_prologue: bool = True,
enable_epilogue: bool = True):
def decorator(func_cb: Callable):
def wrapper(self, *args, **kwargs):
try:
if enable_prologue:
self.logger.info('******* [%s]: Starting *******', phase_name)
func_cb(self, *args, **kwargs) # pylint: disable=not-callable
if enable_epilogue:
self.logger.info('======= [%s]: Finished =======', phase_name)
except Exception: # pylint: disable=broad-except
logging.exception('%s. Raised an error in phase [%s]\n',
self.pretty_name(),
phase_name)
sys.exit(1)
return wrapper
return decorator
def __post_init__(self):
# when debug is set to true set it in the environment.
self.logger = ToolLogging.get_and_setup_logger(f'rapids.tools.{self.name}')
def _check_environment(self) -> None:
self.ctxt.platform.setup_and_validate_env()
def _process_output_args(self):
self.logger.debug('Processing Output Arguments')
# make sure that output_folder is being absolute
if self.output_folder is None:
self.output_folder = Utils.get_rapids_tools_env('OUTPUT_DIRECTORY', os.getcwd())
self.output_folder = FSUtil.get_abs_path(self.output_folder)
self.logger.debug('Root directory of local storage is set as: %s', self.output_folder)
self.ctxt.set_local_workdir(self.output_folder)
self.ctxt.load_prepackaged_resources()
def _process_rapids_args(self):
pass
def _process_custom_args(self):
pass
def _process_job_submission_args(self):
pass
@phase_banner('Process-Arguments')
def _process_arguments(self):
# 0- process the output location
self._process_output_args()
# 1- process any arguments to be passed to the RAPIDS tool
self._process_rapids_args()
# 2- we need to process the arguments of the CLI
self._process_custom_args()
# 3- process submission arguments
self._process_job_submission_args()
@phase_banner('Initialization')
def _init_tool(self):
self._init_ctxt()
self._check_environment()
def _init_ctxt(self):
if self.config_path is None:
self.config_path = Utils.resource_path(f'{self.name}-conf.yaml')
self.ctxt = ToolContext(platform_cls=get_platform(self.platform_type),
platform_opts=self.wrapper_options.get('platformOpts'),
prop_arg=self.config_path,
name=self.name)
def _run_rapids_tool(self):
# 1- copy dependencies to remote server
# 2- prepare the arguments
# 3- create a submission job
# 4- execute
pass
@phase_banner('Execution')
def _execute(self):
"""
Phase representing actual execution of the wrapper command.
"""
self._run_rapids_tool()
def _process_output(self):
pass
def _delete_local_dep_folder(self):
# clean_up the local dependency folder
local_dep_folder = self.ctxt.get_local_work_dir()
if self.ctxt.platform.storage.resource_exists(local_dep_folder):
self.ctxt.platform.storage.remove_resource(local_dep_folder)
def _delete_remote_dep_folder(self):
# clean up the remote dep folder first
remote_dep_folder = self.ctxt.get_remote('depFolder')
if self.ctxt.platform.storage.resource_exists(remote_dep_folder):
# delete the folder. Note that for dataproc this will also delete the parent directory when it is empty
self.ctxt.platform.storage.remove_resource(remote_dep_folder)
def _download_remote_output_folder(self):
# download the output folder in to the local one with overriding
remote_output_folder = self.ctxt.get_remote('workDir')
# for dataproc it is possible that the entire directory has been deleted when it is empty
if self.ctxt.platform.storage.resource_exists(remote_output_folder):
local_folder = self.ctxt.get_local('outputFolder')
self.ctxt.platform.storage.download_resource(remote_output_folder, local_folder)
def _download_output(self):
self._delete_local_dep_folder()
# clean up the remote dep folder first
self._delete_remote_dep_folder()
# download the output folder in to the local one with overriding
self._download_remote_output_folder()
@phase_banner('Generating Report Summary',
enable_epilogue=False)
def _finalize(self):
print(Utils.gen_str_header(f'{self.pretty_name().upper()} Report',
ruler='_',
line_width=100))
self._write_summary()
def _write_summary(self):
pass
@phase_banner('Archiving Tool Output')
def _archive_phase(self):
self._archive_results()
def _archive_results(self):
pass
@phase_banner('Collecting-Results')
def _collect_result(self):
"""
Following a successful run, collect and process data as needed
:return:
"""
self._download_output()
self._process_output()
@phase_banner('Connecting to Execution Cluster')
def _connect_to_execution_cluster(self):
"""
Connecting to execution cluster
:return:
"""
if self.requires_cluster_connection():
self.logger.info('%s requires the execution cluster %s to be running. '
'Establishing connection to cluster',
self.pretty_name(),
self.cluster)
exec_cluster = self.ctxt.platform.connect_cluster_by_name(self.cluster)
self.ctxt.set_ctxt('execCluster', exec_cluster)
self._verify_exec_cluster()
else:
self.logger.info('%s requires no execution cluster. Skipping phase', self.pretty_name())
def _handle_non_running_exec_cluster(self, err_msg: str) -> None:
self.logger.warning(err_msg)
def _verify_exec_cluster(self):
# For remote job we should fail once we find that the cluster is not actually running
exec_cluster = self.get_exec_cluster()
if exec_cluster and exec_cluster.is_cluster_running():
return
# For remote cluster mode, the execution cluster must be running
if not exec_cluster:
msg = 'An error initializing the execution cluster'
else:
msg = f'Remote execution Cluster [{exec_cluster.get_name()}] is not active. ' \
f'The execution cluster should be in RUNNING state'
self._handle_non_running_exec_cluster(msg)
def launch(self):
self._init_tool()
self._connect_to_execution_cluster()
self._process_arguments()
self._execute()
self._collect_result()
self._archive_phase()
self._finalize()
def _report_tool_full_location(self) -> str:
pass
def _report_results_are_empty(self):
return [f'The {self.pretty_name()} tool did not generate any output. Nothing to display.']
def _generate_section_lines(self, sec_conf: dict) -> List[str]:
all_lines = sec_conf['content'].get('lines')
if all_lines:
return all_lines
return None
def _generate_section_content(self, sec_conf: dict) -> List[str]:
sec_title = sec_conf.get('sectionName')
rep_lines = []
if sec_title:
rep_lines.append(Utils.gen_report_sec_header(sec_title, title_width=20))
if sec_conf.get('content'):
headers = sec_conf['content'].get('header')
if headers:
rep_lines.extend(headers)
all_lines = self._generate_section_lines(sec_conf)
if all_lines:
rep_lines.extend(all_lines)
return rep_lines
def _generate_platform_report_sections(self) -> List[str]:
section_arr = self.ctxt.platform.configs.get_value_silent('wrapperReporting',
self.name,
'sections')
if section_arr:
rep_lines = []
for curr_sec in section_arr:
required_flag = curr_sec.get('requiresBoolFlag')
# if section requires a condition that was not enabled the section is skipped
if not required_flag or self.ctxt.get_ctxt(required_flag):
rep_lines.extend(self._generate_section_content(curr_sec))
return rep_lines
return None
def _calculate_spark_settings(self, worker_info: NodeHWInfo) -> dict:
"""
Calculate the cluster properties that we need to append to the /etc/defaults of the spark
if necessary.
:param worker_info: the hardware info as extracted from the worker. Note that we assume
that all the workers have the same configurations.
:return: dictionary containing 7 spark properties to be set by default on the cluster.
"""
num_gpus = worker_info.gpu_info.num_gpus
gpu_mem = worker_info.gpu_info.gpu_mem
num_cpus = worker_info.sys_info.num_cpus
cpu_mem = worker_info.sys_info.cpu_mem
config_path = Utils.resource_path('cluster-configs.yaml')
constants = YAMLPropertiesContainer(prop_arg=config_path).get_value('clusterConfigs', 'constants')
executors_per_node = num_gpus
num_executor_cores = max(1, num_cpus // executors_per_node)
gpu_concurrent_tasks = min(constants.get('maxGpuConcurrent'), gpu_mem // constants.get('gpuMemPerTaskMB'))
# account for system overhead
usable_worker_mem = max(0, cpu_mem - constants.get('systemReserveMB'))
executor_container_mem = usable_worker_mem // executors_per_node
# reserve 10% of heap as memory overhead
max_executor_heap = max(0, int(executor_container_mem * (1 - constants.get('heapOverheadFraction'))))
# give up to 2GB of heap to each executor core
executor_heap = min(max_executor_heap, constants.get('heapPerCoreMB') * num_executor_cores)
executor_mem_overhead = int(executor_heap * constants.get('heapOverheadFraction'))
# use default for pageable_pool to add to memory overhead
pageable_pool = constants.get('defaultPageablePoolMB')
# pinned memory uses any unused space up to 4GB
pinned_mem = min(constants.get('maxPinnedMemoryMB'),
executor_container_mem - executor_heap - executor_mem_overhead - pageable_pool)
executor_mem_overhead += pinned_mem + pageable_pool
res = {
'spark.executor.cores': num_executor_cores,
'spark.executor.memory': f'{executor_heap}m',
'spark.executor.memoryOverhead': f'{executor_mem_overhead}m',
'spark.rapids.sql.concurrentGpuTasks': gpu_concurrent_tasks,
'spark.rapids.memory.pinnedPool.size': f'{pinned_mem}m',
'spark.sql.files.maxPartitionBytes': f'{constants.get("maxSqlFilesPartitionsMB")}m',
'spark.task.resource.gpu.amount': 1 / num_executor_cores,
'spark.rapids.shuffle.multiThreaded.reader.threads': num_executor_cores,
'spark.rapids.shuffle.multiThreaded.writer.threads': num_executor_cores,
'spark.rapids.sql.multiThreadedRead.numThreads': max(20, num_executor_cores)
}
return res
@dataclass
class RapidsJarTool(RapidsTool):
"""
A wrapper class to represent wrapper commands that require RAPIDS jar file.
"""
def _process_jar_arg(self):
tools_jar_url = self.wrapper_options.get('toolsJar')
if tools_jar_url is None:
tools_jar_url = self.ctxt.get_rapids_jar_url()
# download the jar
jar_path = self.ctxt.platform.storage.download_resource(tools_jar_url,
self.ctxt.get_local_work_dir(),
fail_ok=False,
create_dir=True)
self.logger.info('RAPIDS accelerator jar is downloaded to work_dir %s', jar_path)
# get the jar file name and add it to the tool args
jar_file_name = FSUtil.get_resource_name(jar_path)
self.ctxt.add_rapids_args('jarFileName', jar_file_name)
self.ctxt.add_rapids_args('jarFilePath', jar_path)
def __accept_tool_option(self, option_key: str) -> bool:
defined_tool_options = self.ctxt.get_value_silent('sparkRapids', 'cli', 'toolOptions')
if defined_tool_options is not None:
if option_key not in defined_tool_options:
self.logger.warning('Ignoring tool option [%s]. Invalid option.', option_key)
return False
return True
def _process_tool_args_from_input(self) -> list:
"""
Process the arguments passed from the CLI if any and return a list of strings representing
the arguments to be passed to the final command running the job. This needs processing
because we need to verify the arguments and handle hyphens
:return: list of the rapids arguments added by the user
"""
arguments_list = []
self.logger.debug('Processing Rapids plugin Arguments %s', self.rapids_options)
raw_tool_opts: Dict[str, Any] = {}
for key, value in self.rapids_options.items():
if not isinstance(value, bool):
# non-boolean arguments carry a value that needs to be added to the list
if isinstance(value, str):
# if the argument is multiple word, then protect it with single quotes.
if re.search(r'\s|\(|\)|,', value):
value = f"'{value}'"
raw_tool_opts.setdefault(key, []).append(value)
else:
if value:
raw_tool_opts.setdefault(key, [])
else:
# argument parser removes the "no-" prefix and sets the value to false.
# we need to restore the original key
raw_tool_opts.setdefault(f'no{key}', [])
for key, value in raw_tool_opts.items():
self.logger.debug('Processing tool CLI argument.. %s:%s', key, value)
if len(key) > 1:
# python forces "_" to "-". we need to reverse that back.
fixed_key = key.replace('_', '-')
prefix = '--'
else:
# shortcut argument
fixed_key = key
prefix = '-'
if self.__accept_tool_option(fixed_key):
k_arg = f'{prefix}{fixed_key}'
if len(value) >= 1:
# handle list options
for value_entry in value[0:]:
arguments_list.append(f'{k_arg}')
arguments_list.append(f'{value_entry}')
else:
# this could be a boolean type flag that has no arguments
arguments_list.append(f'{k_arg}')
return arguments_list
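# Illustrative example (hypothetical options, assuming both pass __accept_tool_option):
# rapids_options={'per_sql': True, 'a': 'x'} would produce ['--per-sql', '-a', 'x'].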
def _process_tool_args(self):
"""
Process the arguments passed from the CLI if any and return a string representing the
arguments to be passed to the final command running the job.
:return:
"""
self.ctxt.add_rapids_args('rapidsOpts', self._process_tool_args_from_input())
def _process_dependencies(self):
"""
For local deployment mode, we need to process the extra dependencies specific to the platform
:return:
"""
if 'deployMode' in self.ctxt.platform_opts:
# process the deployment
# we need to download the dependencies locally if necessary
self._download_dependencies()
def timeit(timed_item: str): # pylint: disable=no-self-argument
def decorator(func_cb: Callable):
def wrapper(self, *args, **kwargs):
start_time = time.monotonic()
func_cb(self, *args, **kwargs) # pylint: disable=not-callable
end_time = time.monotonic()
self.logger.info('Total Execution Time: %s => %s seconds', timed_item,
f'{(end_time-start_time):,.3f}')
return wrapper
return decorator
@timeit('Downloading dependencies for local Mode') # pylint: disable=too-many-function-args
def _download_dependencies(self):
def exception_handler(future):
# Handle any exceptions raised by the task
exception = future.exception()
if exception:
self.logger.error('Error while downloading dependency: %s', exception)
def cache_single_dependency(dep: dict) -> str:
"""
Downloads the specified URL and saves it to disk
"""
start_time = time.monotonic()
self.logger.info('Checking dependency %s', dep['name'])
dest_folder = self.ctxt.get_cache_folder()
resource_file_name = FSUtil.get_resource_name(dep['uri'])
resource_file = FSUtil.build_path(dest_folder, resource_file_name)
file_check_dict = {'size': dep['size']}
signature_file = FileVerifier.get_signature_file(dep['uri'], dest_folder)
if signature_file is not None:
file_check_dict['signatureFile'] = signature_file
algorithm = FileVerifier.get_integrity_algorithm(dep)
if algorithm is not None:
file_check_dict['hashlib'] = {
'algorithm': algorithm,
'hash': dep[algorithm]
}
is_created = FSUtil.cache_from_url(dep['uri'], resource_file, file_checks=file_check_dict)
if is_created:
self.logger.info('The dependency %s has been downloaded into %s', dep['uri'],
resource_file)
# check if we need to decompress files
if dep['type'] == 'archive':
destination_path = self.ctxt.get_local_work_dir()
with tarfile.open(resource_file, mode='r:*') as tar:
tar.extractall(destination_path)
dep_item = FSUtil.remove_ext(resource_file_name)
if dep.get('relativePath') is not None:
dep_item = FSUtil.build_path(dep_item, dep.get('relativePath'))
dep_item = FSUtil.build_path(destination_path, dep_item)
else:
# copy the jar into dependency folder
dep_item = self.ctxt.platform.storage.download_resource(resource_file,
self.ctxt.get_local_work_dir())
end_time = time.monotonic()
self.logger.info('Completed downloading of dependency [%s] => %s seconds',
dep['name'],
f'{(end_time-start_time):,.3f}')
return dep_item
def cache_all_dependencies(dep_arr: List[dict]):
"""
Create a thread pool and download specified urls
"""
futures_list = []
results = []
with ThreadPoolExecutor(max_workers=4) as executor:
for dep in dep_arr:
futures = executor.submit(cache_single_dependency, dep)
futures.add_done_callback(exception_handler)
futures_list.append(futures)
try:
# set the timeout to 30 minutes.
for future in concurrent.futures.as_completed(futures_list, timeout=1800):
result = future.result()
results.append(result)
except Exception as ex: # pylint: disable=broad-except
self.logger.error('Failed to download dependencies %s', ex)
raise ex
return results
# TODO: Verify the downloaded file by checking their MD5
deploy_mode = DeployMode.tostring(self.ctxt.get_deploy_mode())
depend_arr = self.ctxt.platform.configs.get_value_silent('dependencies',
'deployMode',
deploy_mode)
if depend_arr:
dep_list = cache_all_dependencies(depend_arr)
if any(dep_item is None for dep_item in dep_list):
raise RuntimeError('Could not download all dependencies. Aborting Executions.')
self.logger.info('Dependencies are processed as: %s',
Utils.gen_joined_str(join_elem='; ',
items=dep_list))
self.ctxt.add_rapids_args('javaDependencies', dep_list)
def _process_rapids_args(self):
# add a dictionary to hold the rapids arguments
self._process_jar_arg()
self._process_dependencies()
self._process_tool_args()
def _process_offline_cluster_args(self):
pass
def _process_gpu_cluster_args(self, offline_cluster_opts: dict = None) -> bool:
pass
def _copy_dependencies_to_remote(self):
self.logger.info('Skipping preparing remote dependency folder')
def _prepare_job_arguments(self):
self._prepare_local_job_arguments()
def _run_rapids_tool(self):
# 1- copy dependencies to remote server
self._copy_dependencies_to_remote()
# 2- prepare the arguments
# 2.a -check if the app_id is not none
self._prepare_job_arguments()
#
# 3- create a submission job
# 4- execute
def _get_main_cluster_obj(self):
return self.ctxt.get_ctxt('cpuClusterProxy')
def _process_eventlogs_args(self):
eventlog_arg = self.wrapper_options.get('eventlogs')
if eventlog_arg is None:
# get the eventlogs from spark properties
cpu_cluster_obj = self._get_main_cluster_obj()
if cpu_cluster_obj:
spark_event_logs = cpu_cluster_obj.get_eventlogs_from_config()
else:
self.logger.warning('Eventlogs is not set properly. The property cannot be pulled '
'from cluster because it is not defined')
spark_event_logs = []
else:
if isinstance(eventlog_arg, tuple):
spark_event_logs = list(eventlog_arg)
elif isinstance(eventlog_arg, str):
spark_event_logs = eventlog_arg.split(',')
else:
spark_event_logs = eventlog_arg
if len(spark_event_logs) < 1:
self.logger.error('Eventlogs list is empty. '
'The cluster Spark properties may be missing "spark.eventLog.dir". '
'Re-run the command passing "--eventlogs" flag to the wrapper.')
raise RuntimeError('Invalid arguments. The list of Apache Spark event logs is empty.')
self.ctxt.set_ctxt('eventLogs', spark_event_logs)
def _create_migration_cluster(self, cluster_type: str, cluster_arg: str) -> ClusterBase:
if cluster_arg is None:
raise RuntimeError(f'The {cluster_type} cluster argument is not set.')
arg_is_file = self.ctxt.platform.storage.is_file_path(cluster_arg)
if not arg_is_file:
self.logger.info('Loading %s cluster properties by name %s. Note that this will fail '
'if the cluster was permanently deleted.',
cluster_type,
cluster_arg)
# create a cluster by name
cluster_obj = self.ctxt.platform.connect_cluster_by_name(cluster_arg)
else:
self.logger.info('Loading %s cluster properties from file %s',
cluster_type,
cluster_arg)
# create cluster by loading properties files
# download the file to the working directory
cluster_conf_path = self.ctxt.platform.storage.download_resource(cluster_arg,
self.ctxt.get_local_work_dir())
cluster_obj = self.ctxt.platform.load_cluster_by_prop_file(cluster_conf_path)
return cluster_obj
def _gen_output_tree(self) -> List[str]:
tree_conf = self.ctxt.get_value('local', 'output', 'treeDirectory')
if tree_conf and tree_conf.get('enabled'):
level = tree_conf.get('depthLevel')
indentation = tree_conf.get('indentation', '\t')
ex_patterns = tree_conf.get('excludedPatterns', {})
exc_dirs = ex_patterns.get('directories')
exc_files = ex_patterns.get('files')
out_folder_path = self.ctxt.get_local('outputFolder')
out_tree_list = FSUtil.gen_dir_tree(out_folder_path,
depth_limit=level,
indent=indentation,
exec_dirs=exc_dirs,
exec_files=exc_files)
doc_url = self.ctxt.get_value('sparkRapids', 'outputDocURL')
out_tree_list.append(f'{indentation}- To learn more about the output details, visit {doc_url}')
return out_tree_list
return None
def _report_tool_full_location(self) -> str:
if not self._rapids_jar_tool_has_output():
return None
out_folder_path = self.ctxt.get_rapids_output_folder()
res_arr = [Utils.gen_report_sec_header('Output'),
f'{self.pretty_name()} tool output: {out_folder_path}']
out_tree_list = self._gen_output_tree()
return Utils.gen_multiline_str(res_arr, out_tree_list)
def _evaluate_rapids_jar_tool_output_exist(self) -> bool:
"""
Used as a subtask of self._process_output(). This method has the responsibility of
checking whether the tool produced any output and taking the necessary action.
:return: True if the tool has generated an output
"""
rapids_output_dir = self.ctxt.get_rapids_output_folder()
res = True
if not self.ctxt.platform.storage.resource_exists(rapids_output_dir):
res = False
self.ctxt.set_ctxt('wrapperOutputContent',
self._report_results_are_empty())
self.logger.info('The Rapids jar tool did not generate an output directory')
self.ctxt.set_ctxt('rapidsOutputIsGenerated', res)
return res
def _rapids_jar_tool_has_output(self) -> bool:
return self.ctxt.get_ctxt('rapidsOutputIsGenerated')
@timeit('Processing job submission arguments') # pylint: disable=too-many-function-args
def _process_job_submission_args(self):
self._process_local_job_submission_args()
def _set_remote_folder_for_submission(self, requires_remote_storage: bool) -> dict:
res = {}
submission_args = self.wrapper_options.get('jobSubmissionProps')
# get the root remote folder and make sure it exists
remote_folder = submission_args.get('remoteFolder')
# If remote_folder is not specified, then ignore it
if requires_remote_storage:
            # if remote storage is required and no remote folder is specified, then try to use
            # the tmp storage of the exec_cluster for storage
archive_enabled = True
if not remote_folder:
# get the execCluster
exec_cluster = self.get_exec_cluster()
if exec_cluster:
remote_folder = exec_cluster.get_tmp_storage()
if remote_folder:
archive_enabled = False
self.ctxt.set_ctxt('archiveToRemote', archive_enabled)
if remote_folder is None:
# the output is only for local machine
self.logger.info('No remote output folder specified.')
if requires_remote_storage:
raise RuntimeError(f'Remote folder [{remote_folder}] is invalid.')
else:
if not self.ctxt.platform.storage.resource_exists(remote_folder):
raise RuntimeError(f'Remote folder invalid path. [{remote_folder}] does not exist.')
# now we should make the subdirectory to indicate the output folder,
# by appending the name of the execution folder
exec_full_name = self.ctxt.get_ctxt('execFullName')
remote_workdir = FSUtil.build_url_from_parts(remote_folder, exec_full_name)
self.ctxt.set_remote('rootFolder', remote_folder)
self.ctxt.set_remote('workDir', remote_workdir)
self.logger.info('Remote workdir is set as %s', remote_workdir)
remote_dep_folder = FSUtil.build_url_from_parts(remote_workdir,
self.ctxt.get_ctxt('depFolderName'))
self.ctxt.set_remote('depFolder', remote_dep_folder)
self.logger.info('Remote dependency folder is set as %s', remote_dep_folder)
if requires_remote_storage:
res.update({'outputDirectory': self.ctxt.get_remote('workDir')})
else:
# the output folder has to be set any way
res.update({'outputDirectory': self.ctxt.get_output_folder()})
return res
def _process_local_job_submission_args(self):
job_args = {}
submission_args = self.wrapper_options.get('jobSubmissionProps')
job_args.update(self._set_remote_folder_for_submission(self.requires_remote_folder()))
platform_args = submission_args.get('platformArgs')
if platform_args is not None:
processed_platform_args = self.ctxt.platform.cli.build_local_job_arguments(platform_args)
ctxt_rapids_args = self.ctxt.get_ctxt('rapidsArgs')
dependencies = ctxt_rapids_args.get('javaDependencies')
processed_platform_args.update({'dependencies': dependencies})
job_args['platformArgs'] = processed_platform_args
self.ctxt.update_job_args(job_args)
def _init_rapids_arg_list(self) -> List[str]:
return []
@timeit('Building Job Arguments and Executing Job CMD') # pylint: disable=too-many-function-args
def _prepare_local_job_arguments(self):
job_args = self.ctxt.get_ctxt('jobArgs')
# now we can create the job object
# Todo: For dataproc, this can be autogenerated from cluster name
rapids_arg_list = self._init_rapids_arg_list()
ctxt_rapids_args = self.ctxt.get_ctxt('rapidsArgs')
jar_file_path = ctxt_rapids_args.get('jarFilePath')
rapids_opts = ctxt_rapids_args.get('rapidsOpts')
if rapids_opts:
rapids_arg_list.extend(rapids_opts)
# add the eventlogs at the end of all the tool options
rapids_arg_list.extend(self.ctxt.get_ctxt('eventLogs'))
class_name = self.ctxt.get_value('sparkRapids', 'mainClass')
rapids_arg_obj = {
'jarFile': jar_file_path,
'jarArgs': rapids_arg_list,
'className': class_name
}
platform_args = job_args.get('platformArgs')
spark_conf_args = {}
job_properties_json = {
'outputDirectory': job_args.get('outputDirectory'),
'rapidsArgs': rapids_arg_obj,
'sparkConfArgs': spark_conf_args,
'platformArgs': platform_args
}
job_properties = RapidsJobPropContainer(prop_arg=job_properties_json,
file_load=False)
job_obj = self.ctxt.platform.create_local_submission_job(job_prop=job_properties,
ctxt=self.ctxt)
job_obj.run_job()
def _archive_results(self):
self._archive_local_results()
def _archive_local_results(self):
remote_work_dir = self.ctxt.get_remote('workDir')
if remote_work_dir and self._rapids_jar_tool_has_output():
local_folder = self.ctxt.get_output_folder()
            # TODO: make sure it is worth issuing the command
self.ctxt.platform.storage.upload_resource(local_folder, remote_work_dir)
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/rapids/rapids_tool.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation class representing wrapper around the RAPIDS acceleration Profiling tool."""
import re
from copy import deepcopy
from dataclasses import dataclass
from itertools import chain
from typing import List
import yaml
from tabulate import tabulate
from spark_rapids_pytools.cloud_api.sp_types import ClusterBase
from spark_rapids_pytools.common.sys_storage import FSUtil
from spark_rapids_pytools.common.utilities import Utils, TemplateGenerator
from spark_rapids_pytools.rapids.rapids_tool import RapidsJarTool
@dataclass
class Profiling(RapidsJarTool):
"""
Wrapper layer around Profiling Tool.
"""
name = 'profiling'
def _process_worker_info_arg(self):
worker_info_arg = self.wrapper_options.get('autoTunerFileInput')
if not worker_info_arg:
return
self.logger.info('Processing WorkerInfo argument [%s]', worker_info_arg)
# download the worker_file into the work dir
input_path = self.ctxt.platform.storage.download_resource(worker_info_arg,
self.ctxt.get_local_work_dir(),
fail_ok=False,
create_dir=True)
self.ctxt.set_ctxt('autoTunerFilePath', input_path)
self.ctxt.set_ctxt('autoTunerFileName', FSUtil.get_resource_name(input_path))
self.logger.info('WorkerInfo successfully processed into workDir [%s]', input_path)
def _process_custom_args(self):
"""
Profiling tool processes extra arguments:
1. the worker_info argument
2. the clusters
"""
self._process_worker_info_arg()
# if the workerInfo is not set, then we need to use the gpu_cluster
if not self.ctxt.get_ctxt('autoTunerFilePath'):
self._process_offline_cluster_args()
else:
self.logger.info('Skipping building of GPU_CLUSTER because WorkerInfo is defined')
self._process_eventlogs_args()
def _process_offline_cluster_args(self):
offline_cluster_opts = self.wrapper_options.get('migrationClustersProps', {})
if self._process_gpu_cluster_args(offline_cluster_opts):
# only if we succeed to get the GPU cluster, we can generate auto-tuner-input
self._generate_autotuner_input()
def __load_disabled_recommendation_report(self) -> str:
template_file_name = self.ctxt.get_value('toolOutput', 'recommendations', 'disabledInfoMsgTemplate')
template_path = Utils.resource_path(f'templates/{template_file_name}')
return TemplateGenerator.render_template_file(template_path, {'CLUSTER_ARG': 'cluster'})
def _process_gpu_cluster_args(self, offline_cluster_opts: dict = None):
gpu_cluster_arg = offline_cluster_opts.get('gpuCluster')
if gpu_cluster_arg:
gpu_cluster_obj = self._create_migration_cluster('GPU', gpu_cluster_arg)
self.ctxt.set_ctxt('gpuClusterProxy', gpu_cluster_obj)
return True
        # If we are here, we know that the workerInfoPath was not set either.
# Then we can remind the user that recommendations won't be calculated
disabled_recommendations_msg = self.__load_disabled_recommendation_report()
self.ctxt.set_ctxt('disabledRecommendationsMsg', disabled_recommendations_msg)
self.logger.info(disabled_recommendations_msg)
return False
def _generate_autotuner_file_for_cluster(self, file_path: str, cluster_ob: ClusterBase):
"""
Given file path and the cluster object, it will generate the formatted input file in yaml
that can be used by the autotuner to run the profiling tool.
        :param file_path: local path where the file should be stored
:param cluster_ob: the object representing the cluster proxy.
:return:
"""
self.logger.info('Generating input file for Auto-tuner')
worker_hw_info = cluster_ob.get_worker_hw_info()
worker_info = {
'system': {
'numCores': worker_hw_info.sys_info.num_cpus,
'memory': f'{worker_hw_info.sys_info.cpu_mem}MiB',
'numWorkers': cluster_ob.get_workers_count()
},
'gpu': {
# the scala code expects a unit
'memory': f'{worker_hw_info.gpu_info.gpu_mem}MiB',
'count': worker_hw_info.gpu_info.num_gpus,
'name': worker_hw_info.gpu_info.get_gpu_device_name()
},
'softwareProperties': cluster_ob.get_all_spark_properties()
}
worker_info_redacted = deepcopy(worker_info)
if worker_info_redacted['softwareProperties']:
for key in worker_info_redacted['softwareProperties']:
if 's3a.secret.key' in key:
worker_info_redacted['softwareProperties'][key] = 'MY_S3A_SECRET_KEY'
elif 's3a.access.key' in key:
worker_info_redacted['softwareProperties'][key] = 'MY_S3A_ACCESS_KEY'
self.logger.debug('Auto-tuner worker info: %s', worker_info_redacted)
with open(file_path, 'w', encoding='utf-8') as worker_info_file:
self.logger.debug('Opening file %s to write worker info', file_path)
yaml.dump(worker_info, worker_info_file, sort_keys=False)
def _generate_autotuner_input(self):
gpu_cluster_obj = self.ctxt.get_ctxt('gpuClusterProxy')
input_file_name = 'worker_info.yaml'
self.ctxt.set_ctxt('autoTunerFileName', input_file_name)
autotuner_input_path = FSUtil.build_path(self.ctxt.get_local_work_dir(), 'worker_info.yaml')
self._generate_autotuner_file_for_cluster(file_path=autotuner_input_path,
cluster_ob=gpu_cluster_obj)
self.logger.info('Generated autotuner worker info: %s', autotuner_input_path)
self.ctxt.set_ctxt('autoTunerFilePath', autotuner_input_path)
def _create_autotuner_rapids_args(self) -> list:
# Add the autotuner argument if the autotunerPath exists
autotuner_path = self.ctxt.get_ctxt('autoTunerFilePath')
if autotuner_path is None:
return []
return ['--auto-tuner', '--worker-info', autotuner_path]
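    # For illustration, when an auto-tuner input file has been generated, the returned list looks like
    # (the path below is hypothetical):
    #   ['--auto-tuner', '--worker-info', '/tmp/wrapper-workdir/worker_info.yaml']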
def __read_single_app_output(self, file_path: str) -> (str, List[str], List[str]):
def split_list_str_by_pattern(input_seq: List[str], pattern: str) -> int:
ind = 0
while ind < len(input_seq):
if input_seq[ind].find(pattern) != -1:
return ind
ind += 1
return -1
try:
props_list = []
comments_list = []
app_name: str = ''
with open(file_path, 'rt', encoding='utf-8') as app_profiler:
raw_lines = [line.strip() for line in app_profiler.readlines() if line.strip()]
# find the app_name
app_name_candidates = re.findall(r'(\|spark\.app\.name\s+\|)(.+)\|',
Utils.gen_multiline_str(raw_lines),
flags=re.MULTILINE)
if len(app_name_candidates) > 0:
_, grp_2 = app_name_candidates[0]
app_name = grp_2.strip()
header_pattern = self.ctxt.get_value('toolOutput', 'recommendations', 'headers',
'section')
spark_pattern = self.ctxt.get_value('toolOutput', 'recommendations', 'headers',
'sparkProperties')
comments_pattern = self.ctxt.get_value('toolOutput', 'recommendations', 'headers',
'comments')
begin_props_ind = -1
last_props_ind = -1
begin_comm_ind = -1
last_comm_ind = -1
section_ind = split_list_str_by_pattern(raw_lines, header_pattern)
if section_ind != -1:
recom_section = raw_lines[section_ind:]
recom_properties_ind = split_list_str_by_pattern(recom_section,
spark_pattern)
if recom_properties_ind not in (-1, len(recom_section) - 1):
begin_props_ind = recom_properties_ind + 1
recom_comments_ind = split_list_str_by_pattern(recom_section, comments_pattern)
if recom_comments_ind != -1:
last_props_ind = recom_comments_ind
begin_comm_ind = recom_comments_ind + 1
last_comm_ind = len(recom_section)
else:
last_props_ind = len(recom_section)
last_comm_ind = len(recom_section)
if begin_props_ind != -1:
props_list = recom_section[begin_props_ind: last_props_ind]
if begin_comm_ind != -1:
comments_list = recom_section[begin_comm_ind: last_comm_ind]
except OSError:
self.logger.error('Could not open output of profiler %s', file_path)
if len(props_list) == 0:
props_list = ['- No recommendations']
if len(comments_list) == 0:
comments_list = ['- No comments']
# Note that sorting the comments is disabled because it will change the order
# of multiline entries
# Recommendations can be sorted so that the two values are aligned
# comments_list.sort()
props_list.sort()
return app_name, props_list, comments_list
def _write_summary(self):
print(Utils.gen_multiline_str(self._report_tool_full_location(),
self.ctxt.get_ctxt('wrapperOutputContent')))
def __generate_report_no_recommendations(self):
prof_app_dirs = FSUtil.get_subdirectories(self.ctxt.get_rapids_output_folder())
wrapper_content = [Utils.gen_report_sec_header('Recommendations'),
self.ctxt.get_ctxt('disabledRecommendationsMsg'),
Utils.gen_report_sec_header('Profiling status'),
                           f'Total applications profiled: {len(prof_app_dirs)}']
self.ctxt.set_ctxt('wrapperOutputContent', wrapper_content)
def __generate_report_with_recommendations(self):
prof_app_dirs = FSUtil.get_subdirectories(self.ctxt.get_rapids_output_folder())
profiling_log = self.ctxt.get_value('toolOutput', 'recommendations', 'fileName')
recommendations_table = []
log_lines = []
header_str = '### Recommended configurations ###'
sec_props_head = ['\tSpark Properties:']
sec_comments_head = ['\tComments:']
log_lines.append(header_str)
headers = self.ctxt.get_value('local', 'output', 'summaryColumns')
for app_folder in prof_app_dirs:
app_id = FSUtil.get_resource_name(app_folder)
app_name, recommendations, comments = self.__read_single_app_output(f'{app_folder}/{profiling_log}')
row = [app_id,
app_name,
Utils.gen_multiline_str(recommendations),
Utils.gen_multiline_str(comments)]
log_lines.append(app_id)
sec_props = Utils.gen_joined_str(join_elem='\n\t',
items=list(chain(sec_props_head, recommendations)))
sec_comments = Utils.gen_joined_str(join_elem='\n\t',
items=list(chain(sec_comments_head, comments)))
log_lines.append(f'{sec_props}')
log_lines.append(f'{sec_comments}')
recommendations_table.append(row)
log_file_name = self.ctxt.get_value('local', 'output', 'fileName')
summary_file = FSUtil.build_path(self.ctxt.get_output_folder(), log_file_name)
self.logger.info('Writing recommendations into local file %s', summary_file)
log_file_lines_str = Utils.gen_multiline_str(log_lines)
with open(summary_file, 'w', encoding='utf-8') as wrapper_summary:
wrapper_summary.write(log_file_lines_str)
self.logger.info('Generating Full STDOUT summary report')
# wrapper STDOUT report contains both tabular and plain text format of recommendations
wrapper_content = [Utils.gen_report_sec_header('Recommendations'),
log_file_lines_str,
'### Recommendations Table Summary ###',
tabulate(recommendations_table, headers, tablefmt='grid')]
self.ctxt.set_ctxt('wrapperOutputContent', wrapper_content)
def _process_output(self):
if not self._evaluate_rapids_jar_tool_output_exist():
return
if self.ctxt.get_ctxt('autoTunerFilePath'):
# if autotuner is enabled, generate full recommendations summary
self.__generate_report_with_recommendations()
else:
# generate a brief summary
self.__generate_report_no_recommendations()
def _init_rapids_arg_list(self) -> List[str]:
return self._create_autotuner_rapids_args()
@dataclass
class ProfilingAsLocal(Profiling):
"""
Profiling tool running on local development.
"""
description: str = 'This is the localProfiling'
def _get_main_cluster_obj(self):
return self.ctxt.get_ctxt('gpuClusterProxy')
def _download_remote_output_folder(self):
self.logger.debug('Local mode skipping downloading the remote output workdir')
def _delete_remote_dep_folder(self):
self.logger.debug('Local mode skipping deleting the remote workdir')
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/rapids/profiling.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines the logic to fetch all dependencies and resources required to run the user-tools
without needing to access the web during runtime.
"""
import os
import shutil
import tarfile
from concurrent.futures import ThreadPoolExecutor
from typing import Optional
import fire
from spark_rapids_tools import CspEnv
from spark_rapids_pytools.common.prop_manager import JSONPropertiesContainer
from spark_rapids_pytools.common.sys_storage import FSUtil
from spark_rapids_pytools.common.utilities import Utils
# Defines the constants and static configurations
prepackage_conf = {
'_supported_platforms': [csp.value for csp in CspEnv if csp != CspEnv.NONE],
'_configs_suffix': '-configs.json',
'_mvn_base_url': 'https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark-tools_2.12',
'_folder_name': 'csp-resources'
}
class PrepackageMgr: # pylint: disable=too-few-public-methods
""" Class that handles downloading dependencies to be pre-packaged ahead of runtime.
    The script can be triggered by passing the arguments and the properties of the class.
For example:
$> python prepackage_mgr.py --resource_dir=RESOURCE_DIR --tools_jar=TOOLS_JAR run
$> python prepackage_mgr.py run --resource_dir=RESOURCE_DIR --tools_jar=TOOLS_JAR
For more information:
$> python prepackage_mgr.py --help
:param resource_dir: Root folder where the configuration files are located.
:param dest_dir: Directory in which the resources are downloaded and stored.
If missing, the tool creates a subfolder '$resource_dir/csp-resources'.
Warning: the 'dest_dir' may be deleted when 'archive_enabled' is set to True.
:param tools_jar: Path of the spark-rapids-user-tools jar file. Typically, this is the snapshot
jar file of the current build.
:param archive_enabled: A flag to enable/disable compressing the resources.
        Note that a wheel package with compressed prepackaged resources is about 30% smaller
        than the non-compressed one (~600 MB vs. 900 MB).
When enabled, the prepackaged-resources are stored in '$resource_dir/csp-resources.tgz'.
If the 'dest_dir' is provided, then the output is stored as '$dest_dir/../csp-resources.tgz'
"""
def __init__(self,
resource_dir: str,
dest_dir: str = None,
tools_jar: str = None,
archive_enabled: bool = True):
for field_name in prepackage_conf:
setattr(self, field_name, prepackage_conf.get(field_name))
self.resource_dir = resource_dir
self.dest_dir = dest_dir
self.tools_jar = tools_jar
self.archive_enabled = archive_enabled
# process the arguments for default values
print(f'Resource directory is: {self.resource_dir}')
print(f'tools_jar = {tools_jar}')
self.resource_dir = FSUtil.get_abs_path(self.resource_dir)
if self.dest_dir is None:
self.dest_dir = FSUtil.build_full_path(self.resource_dir, self._folder_name) # pylint: disable=no-member
else:
self.dest_dir = FSUtil.get_abs_path(self.dest_dir)
def _get_spark_rapids_jar_url(self) -> str:
jar_version = Utils.get_latest_available_jar_version(self._mvn_base_url, # pylint: disable=no-member
Utils.get_base_release())
return (f'{self._mvn_base_url}/' # pylint: disable=no-member
f'{jar_version}/rapids-4-spark-tools_2.12-{jar_version}.jar')
def _fetch_resources(self) -> dict:
"""
Fetches the resource information from configuration files for each supported platform.
Returns a dictionary of resource details.
"""
resource_uris = {}
# Add RAPIDS JAR as dependency
if self.tools_jar:
# copy from existing file. replace snapshot
jar_file_name = FSUtil.get_resource_name(self.tools_jar)
FSUtil.make_dirs(self.dest_dir)
dest_file = FSUtil.build_path(self.dest_dir, jar_file_name)
shutil.copy2(self.tools_jar, dest_file)
else:
# get the latest tools_jar from mvn
rapids_url = self._get_spark_rapids_jar_url()
rapids_name = FSUtil.get_resource_name(rapids_url)
resource_uris[rapids_url] = {'name': rapids_name, 'pbar_enabled': True}
for platform in self._supported_platforms: # pylint: disable=no-member
config_file = FSUtil.build_full_path(self.resource_dir,
f'{platform}{self._configs_suffix}') # pylint: disable=no-member
platform_conf = JSONPropertiesContainer(config_file)
for dependency in platform_conf.get_value('dependencies', 'deployMode', 'LOCAL'):
uri = dependency.get('uri')
                if uri:
                    name = FSUtil.get_resource_name(uri)
                    resource_uris[uri] = {'name': name, 'pbar_enabled': True}
                    resource_uris[uri + '.asc'] = {'name': name + '.asc', 'pbar_enabled': False}
# Add pricing files as resources
if platform_conf.get_value_silent('pricing'):
for pricing_entry in platform_conf.get_value('pricing', 'catalog', 'onlineResources'):
uri = pricing_entry.get('onlineURL')
name = pricing_entry.get('localFile')
if uri and name:
resource_uris[uri] = {'name': name, 'pbar_enabled': False}
return resource_uris
def _download_resources(self, resource_uris: dict):
resource_uris_list = list(resource_uris.items())
def download_task(resource_uri, resource_info):
resource_name = resource_info['name']
pbar_enabled = resource_info['pbar_enabled']
resource_file_path = FSUtil.build_full_path(self.dest_dir, resource_name)
print(f'Downloading {resource_name}')
FSUtil.fast_download_url(resource_uri, resource_file_path, pbar_enabled=pbar_enabled)
with ThreadPoolExecutor() as executor:
executor.map(lambda x: download_task(x[0], x[1]), resource_uris_list)
def _compress_resources(self) -> Optional[str]:
if not self.archive_enabled:
return self.dest_dir
root_dir = os.path.dirname(self.dest_dir)
tar_file = FSUtil.build_full_path(root_dir, f'{self._folder_name}.tgz') # pylint: disable=no-member
print('Creating archive.....')
with tarfile.open(tar_file, 'w:gz') as tarhandle:
tarhandle.add(self.dest_dir, arcname='.')
print('Created archived resources successfully')
# delete the csp-resources folder
FSUtil.remove_path(self.dest_dir)
return tar_file
def run(self):
"""
Main method to fetch and download dependencies.
"""
resources_to_download = self._fetch_resources()
self._download_resources(resources_to_download)
output_res = self._compress_resources()
print(f'CSP-prepackaged resources stored as {output_res}')
if __name__ == '__main__':
fire.Fire(PrepackageMgr)
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/resources/dev/prepackage_mgr.py |
"""Python script used to process Databricks Azure instance pricing information."""
import json
# We use this Python script to process Databricks Azure instance pricing information
# and store the results in a file `premium-databricks-azure-catalog.json`, used by the
# Databricks Azure user tools.
# Follow the instructions below:
# 1. Go to https://azure.microsoft.com/en-us/pricing/details/databricks/ and select
# the prices for `Jobs Compute` Workload, `Premium` Tier, `West US 2` Region,
# `United States -Dollar ($) USD` Currency, and Display pricing by `Hour`.
# 2. Copy the prices into `databricks-azure-price-jobs-compute-premium-westus2-raw.txt`,
# under the same directory as this script, the file is already included under `/dev`
# 3. Run the script:
# `python process_databricks_azure_pricing.py`
# 4. The output is a file named `premium-databricks-azure-catalog.json` under the same
# directory
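# Illustrative example of how a raw line is parsed (the line below is hypothetical; the actual
# page layout may differ slightly):
#   "D3 v2 4 14 GiB 0.75 $0.281/hour $0.75/hour"
# would be stored as:
#   {'Instance': 'D3_v2', 'vCPUs': 4, 'RAMinMB': 14336, 'DBUCount': 0.75,
#    'DBUPricePerHour': 0.281, 'TotalPricePerHour': 0.75}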
with open('databricks-azure-price-jobs-compute-premium-westus2-raw.txt', 'r', encoding='utf-8') as f:
all_lines = f.read().splitlines()
instances_dict = {}
for line in all_lines:
cur_dict = {}
cur_line = line.split()
gb_idx = cur_line.index('GiB')
cur_dict['RAMinMB'] = 1024 * int(float(cur_line[gb_idx - 1]))
vCPUs = cur_line[gb_idx - 2]
cur_dict['vCPUs'] = int(vCPUs)
instance_name = '_'.join(cur_line[:gb_idx - 2])
cur_dict['Instance'] = instance_name
cur_dict['DBUCount'] = float(cur_line[gb_idx + 1])
DBU_price_per_hour = cur_line[gb_idx + 2].split('$')[1].split('/')[0]
cur_dict['DBUPricePerHour'] = float(DBU_price_per_hour)
try:
total_price_per_hour = cur_line[gb_idx + 3].split('$')[1].split('/')[0]
total_price_per_hour_float = float(total_price_per_hour)
    except (IndexError, ValueError):  # price could be 'N/A'
total_price_per_hour_float = -1.0
cur_dict['TotalPricePerHour'] = total_price_per_hour_float
instances_dict[instance_name] = cur_dict
final_dict = {'Jobs Compute': {'Instances': instances_dict}}
with open('./premium-databricks-azure-catalog.json', 'w', encoding='utf-8') as output_file:
json.dump(final_dict, output_file, indent=2)
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/resources/dev/process_databricks_azure_pricing.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of storage related functionalities."""
import datetime
import glob
import hashlib
import os
import pathlib
import re
import shutil
import ssl
import subprocess
import urllib
from dataclasses import dataclass
from functools import partial
from itertools import islice
from shutil import rmtree
from typing import List
import certifi
from fastcore.all import urlsave
from fastprogress.fastprogress import progress_bar
from spark_rapids_pytools.common.exceptions import StorageException
from spark_rapids_pytools.common.utilities import Utils, SysCmd
class FSUtil:
"""Implementation of storage functionality for local disk."""
@classmethod
def remove_ext(cls, file_path) -> str:
return os.path.splitext(file_path)[0]
@classmethod
def get_all_files(cls, curr_path) -> list:
return glob.glob(f'{curr_path}/*', recursive=False)
@classmethod
def get_abs_path(cls, curr_path) -> str:
return os.path.abspath(curr_path)
@classmethod
def get_resource_name(cls, full_path: str) -> str:
url_parts = full_path.split('/')[-1:]
return url_parts[0]
@classmethod
def build_full_path(cls, parent_path, item) -> str:
full_path = os.path.abspath(parent_path)
return os.path.join(full_path, item)
@classmethod
def build_path(cls, path, item) -> str:
return os.path.join(path, item)
@classmethod
def build_url_from_parts(cls, *parts) -> str:
url_parts = [part.strip('/') for part in parts[:-1]]
# we do not want to remove the rightmost slash if any
url_parts.append(parts[-1].lstrip('/'))
return Utils.gen_joined_str('/', url_parts)
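    # For illustration (hypothetical values): trailing slashes of intermediate parts are stripped,
    # while the last part keeps its own trailing slash:
    #   build_url_from_parts('gs://bucket/', '/work/', 'deps/')  ->  'gs://bucket/work/deps/'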
@classmethod
def remove_path(cls, file_path: str, fail_ok: bool = False):
try:
if os.path.isdir(file_path):
rmtree(file_path, ignore_errors=fail_ok)
else:
os.remove(file_path)
except OSError as err:
if not fail_ok:
raise StorageException(f'Could not remove directory {file_path}') from err
@classmethod
def make_dirs(cls, dir_path: str, exist_ok: bool = True):
try:
os.makedirs(dir_path, exist_ok=exist_ok)
except OSError as err:
raise StorageException(f'Error Creating directories {dir_path}') from err
@classmethod
def copy_resource(cls, src: str, dest: str) -> str:
abs_src = os.path.abspath(src)
abs_dest = os.path.abspath(dest)
# check if path exists
if not os.path.exists(abs_src):
raise StorageException('Error copying resource on local disk. '
f'Resource {abs_src} does not exist')
# Determine whether to copy a single resource or a directory
if os.path.isfile(abs_src):
return shutil.copy2(abs_src, abs_dest)
return shutil.copytree(abs_src, abs_dest, dirs_exist_ok=True)
@classmethod
def cache_resource(cls, src: str, dest: str):
abs_src = os.path.abspath(src)
abs_dest = os.path.abspath(dest)
# check if path exists
if not os.path.exists(abs_src):
raise StorageException('Error copying resource on local disk. '
f'Resource {abs_src} does not exist')
with open(abs_src, 'rb') as s:
with open(abs_dest, 'wb') as d:
shutil.copyfileobj(s, d)
@classmethod
def download_from_url(cls,
src_url: str,
dest: str) -> str:
resource_name = cls.get_resource_name(src_url)
        # We create an SSL context here to fix an issue with urllib requests
dest_file = cls.build_path(dest, resource_name)
context = ssl.create_default_context(cafile=certifi.where())
with urllib.request.urlopen(src_url, context=context) as resp:
with open(dest_file, 'wb') as f:
shutil.copyfileobj(resp, f)
return dest_file
@classmethod
def fast_download_url(cls, url: str, fpath: str, timeout=None, pbar_enabled=True) -> str:
"""
Download the given url and display a progress bar
"""
pbar = progress_bar([])
def progress_bar_cb(count=1, bsize=1, total_size=None):
pbar.total = total_size
pbar.update(count * bsize)
return urlsave(url, fpath, reporthook=progress_bar_cb if pbar_enabled else None, timeout=timeout)
@classmethod
def verify_file(cls, fpath: str, file_checks: dict) -> bool:
if not os.path.exists(fpath):
return False
if not file_checks:
return True
expiration_time_s = file_checks.get('cacheExpirationSecs')
if expiration_time_s:
modified_time = os.path.getmtime(fpath)
diff_time = int(datetime.datetime.now().timestamp() - modified_time)
if diff_time > expiration_time_s:
return False
return FileVerifier.check_integrity(fpath, file_checks)
@classmethod
def cache_from_url(cls,
src_url: str,
cache_file: str,
file_checks: dict = None) -> bool:
"""
download a resource from given URL as a destination cache_file
:param src_url: HTTP url containing the resource
        :param cache_file: the file where the resource is saved. It is assumed that this is a file path.
:param file_checks: a dictionary that contains the criteria to check that the file is the
same.
:return: true if the file is re-downloaded. False, if the cached file is not modified.
"""
curr_time_stamp = datetime.datetime.now().timestamp()
if cls.verify_file(cache_file, file_checks):
            # the file already exists and matches the validation criteria,
            # so there is no need to re-download it
            return False
# download the file
cls.fast_download_url(src_url, cache_file)
# update modified time and access time
os.utime(cache_file, times=(curr_time_stamp, curr_time_stamp))
if not cls.verify_file(cache_file, file_checks):
raise RuntimeError(f'Failed downloading resource {src_url}')
return True
@classmethod
def get_home_directory(cls) -> str:
return os.path.expanduser('~')
@classmethod
def expand_path(cls, file_path) -> str:
"""
        Used to expand file paths starting with the home directory. This is needed because some
        libraries, like configparser, do not expand them correctly.
:param file_path: the file path
:return: the expanded file path
"""
if file_path.startswith('~'):
new_path = pathlib.PosixPath(file_path)
return str(new_path.expanduser())
return file_path
@classmethod
def get_subdirectories(cls, dir_path) -> list:
"""
Given a directory, list all the subdirectories without recursion
:param dir_path: the directory parent that we are interested in
:return: a list of subdirectories with full path
"""
res = []
subfolders = glob.glob(f'{dir_path}/*', recursive=False)
for subfolder in subfolders:
if os.path.isdir(subfolder):
res.append(subfolder)
return res
@classmethod
def gen_dir_tree(cls,
dir_path: pathlib.Path,
depth_limit: int = -1,
population_limit: int = 1024,
limit_to_directories: bool = False,
exec_dirs: List[str] = None,
exec_files: List[str] = None,
indent=''):
# the implementation is based on the answer posted on stackoverflow
# https://stackoverflow.com/a/59109706
dir_patterns = [re.compile(rf'{p}') for p in exec_dirs] if exec_dirs else []
file_patterns = [re.compile(rf'{p}') for p in exec_files] if exec_files else []
res_arr = []
dir_path = pathlib.Path(dir_path)
token_ws = ' '
token_child = '│ '
token_sibling = '├── '
token_leaf = '└── '
files_count = 0
dir_count = 0
def inner(dir_p: pathlib.Path, prefix: str = '', level=-1):
nonlocal files_count, dir_count, dir_patterns, file_patterns
if not level:
return # 0, stop iterating
sub_items = []
for f in dir_p.iterdir():
if f.is_dir():
is_excluded = any(p.match(f.name) for p in dir_patterns)
else:
is_excluded = limit_to_directories
if not is_excluded:
is_excluded = any(p.match(f.name) for p in file_patterns)
if not is_excluded:
sub_items.append(f)
pointers = [token_sibling] * (len(sub_items) - 1) + [token_leaf]
for pointer, path in zip(pointers, sub_items):
if path.is_dir():
yield prefix + pointer + path.name
dir_count += 1
extension = token_child if pointer == token_sibling else token_ws
yield from inner(path, prefix=prefix + extension, level=level - 1)
elif not limit_to_directories:
yield prefix + pointer + path.name
files_count += 1
res_arr.append(f'{indent}{dir_path.name}')
iterator = inner(dir_path, level=depth_limit)
for line in islice(iterator, population_limit):
res_arr.append(f'{indent}{line}')
if next(iterator, None):
            res_arr.append(f'{indent}... population_limit, {population_limit}, reached, counted:')
        res_arr.append(f'{indent}{dir_count} directories'
                       + (f', {files_count} files' if files_count else ''))
return res_arr
@dataclass
class StorageDriver:
"""
Wrapper to interface with archiving command, such as copying/moving/listing files.
"""
def resource_exists(self, src) -> bool:
return os.path.exists(src)
def resource_is_dir(self, src) -> bool:
return os.path.isdir(src)
def _download_remote_resource(self, src: str, dest: str) -> str:
"""
        Given a path or URL of a file, downloads the resource to local disk.
        Note that dest needs to be an absolute path. src can be either a folder or a single file.
:param src: url/local path of the resource
:param dest: directory folder where the resource is downloaded
:return: the full path of the target
"""
if src.startswith('http'):
# this is url resource
return FSUtil.download_from_url(src, dest)
# this is a folder-to-folder download
return FSUtil.copy_resource(src, dest)
def download_resource(self,
src: str,
dest: str,
fail_ok: bool = False,
create_dir: bool = True) -> str:
"""
Copy a resource from remote storage or from external local storage into the dest directory
:param src: the path/url of the resource to be copied. It can be a single file or a directory
:param dest: the directory where the resource is being copied
:param fail_ok: whether to raise an exception on failure
:param create_dir: create the directories of the destination if they do not exist
:return: full path of the destination resource dest/resource_name
"""
try:
abs_dest = FSUtil.get_abs_path(dest)
if create_dir:
FSUtil.make_dirs(abs_dest)
return self._download_remote_resource(src, abs_dest)
except StorageException as store_ex:
if not fail_ok:
raise store_ex
return None
def _upload_remote_dest(self, src: str, dest: str, exclude_pattern: str = None):
del exclude_pattern
return FSUtil.copy_resource(src, dest)
def upload_resource(self,
src: str,
dest: str,
fail_ok: bool = False,
exclude_pattern: str = None) -> str:
try:
abs_src = FSUtil.get_abs_path(src)
if not self.resource_exists(abs_src):
raise StorageException(f'Resource {abs_src} cannot be copied to {dest}. '
f'{abs_src} does not exist')
return self._upload_remote_dest(abs_src, dest, exclude_pattern=exclude_pattern)
except StorageException as store_ex:
if not fail_ok:
raise store_ex
return None
def _delete_path(self, src, fail_ok: bool = False):
FSUtil.remove_path(src, fail_ok=fail_ok)
def remove_resource(self,
src: str,
fail_ok: bool = False):
"""
Given a path delete it permanently and all its contents recursively
:param src: the path of the resource to be removed
:param fail_ok: raise exception
:return:
"""
try:
self._delete_path(src, fail_ok=fail_ok)
except StorageException as store_ex:
if not fail_ok:
raise store_ex
def is_file_path(self, value: str):
"""
given a string value, check whether this is a valid file path or url
:param value: the string to be evaluated
:return: True if formatting wise, it matches file path
"""
if value is None:
return False
if value.startswith('http'):
return True
if '/' in value:
# slash means this is a file
return True
# check if the file ends with common extension
return value.endswith('.json') or value.endswith('.yaml') or value.endswith('.yml')
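    # A rough illustration of the heuristic above (values are hypothetical):
    #   is_file_path('my-cluster')               -> False  (treated as a cluster name)
    #   is_file_path('conf/cluster_props.yaml')  -> True
    #   is_file_path('https://host/props.json')  -> True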
class FileVerifier:
"""
Utility class to verify the integrity of a downloaded file using hash algorithms.
Supported hash algorithms: md5, sha1, sha256, sha512.
"""
SUPPORTED_ALGORITHMS = {
'md5': hashlib.md5,
'sha1': hashlib.sha1,
'sha256': hashlib.sha256,
'sha512': hashlib.sha512
}
GPG_TIMEOUT_SEC = 60 # Timeout for GPG process
READ_CHUNK_SIZE = 8192 # Size of chunk in bytes
GPG_SIGNATURE_ENABLED = False # enable/disable gpg-signature usage
@classmethod
def get_signature_file(cls, file_url: str, dest_folder: str):
try:
return FSUtil.download_from_url(file_url + '.asc', dest_folder)
except urllib.error.URLError:
return None
@classmethod
def get_integrity_algorithm(cls, hash_info: dict):
for algorithm in cls.SUPPORTED_ALGORITHMS:
if algorithm in hash_info:
return algorithm
return None
@classmethod
def _gpg_prerequisites_satisfied(cls) -> bool:
return cls.GPG_SIGNATURE_ENABLED and Utils.is_system_tool('gpg')
@classmethod
def _check_integrity_using_gpg(cls, file_path: str, signature_file_path: str) -> bool:
"""
Verify file integrity using GPG and its corresponding .asc signature file.
Note - The verification has a timeout of `GPG_TIMEOUT_SEC`
:param file_path: Path to the file to be verified.
:param signature_file_path: Path to the .asc signature file.
:return: True if the file integrity is valid and the signature is verified, False otherwise.
"""
if not (os.path.isfile(file_path) and os.path.isfile(signature_file_path)):
return False
assert cls._gpg_prerequisites_satisfied()
gpg_command = [
'gpg',
'--auto-key-locate keyserver',
'--keyserver pgp.mit.edu',
'--keyserver-options auto-key-retrieve',
'--verify',
signature_file_path,
file_path
]
gpg_cmd_args = {
'cmd': gpg_command,
'timeout_secs': cls.GPG_TIMEOUT_SEC
}
try:
gpg_cmd_obj = SysCmd().build(gpg_cmd_args)
result = gpg_cmd_obj.exec()
return 'Good signature' in result
except (subprocess.TimeoutExpired, subprocess.CalledProcessError):
return False
@classmethod
def _check_integrity_using_algorithm(cls, file_path: str, algorithm: str, expected_hash: str) -> bool:
"""
Checks the integrity of a downloaded file by calculating its hash and comparing it with the expected hash.
:param file_path: Path of the downloaded file.
:param algorithm: Name of the hash algorithm to use for calculating
:param expected_hash: Expected hash value for the file
:return: True if the calculated hash matches the expected hash, False otherwise.
"""
if not os.path.isfile(file_path):
# Cannot verify file
return False
# Helper function to calculate the hash of the file using the specified algorithm
def calculate_hash(hash_algorithm):
hash_function = cls.SUPPORTED_ALGORITHMS[hash_algorithm]()
with open(file_path, 'rb') as file:
while chunk := file.read(cls.READ_CHUNK_SIZE):
hash_function.update(chunk)
return hash_function.hexdigest()
calculated_hash = calculate_hash(algorithm)
return calculated_hash == expected_hash
@classmethod
def check_integrity(cls, file_path: str, check_args: dict) -> bool:
"""
        Check the integrity of a downloaded file. This method verifies the file using
        hash algorithms or GPG verification, if available.
:param file_path: Path of the downloaded file.
:param check_args: Dictionary containing the hash algorithm, expected hash value and/or the signature file path.
:return:
"""
        # Short-circuit: if the size is available, verify that it is correct.
expected_size = check_args.get('size')
if expected_size:
if expected_size != os.path.getsize(file_path):
return False
# Return True if no verification can be performed.
        # Otherwise, the verification would fail every single time a file is downloaded,
        # especially if gpg is not installed
result = True
cb = None
if 'signatureFile' in check_args and cls._gpg_prerequisites_satisfied():
# try to use gpg if available
signature_file_path = check_args.get('signatureFile')
if signature_file_path is not None:
cb = partial(cls._check_integrity_using_gpg, file_path, signature_file_path)
if cb is None:
# if cb is still None, Verify integrity using hashing algorithm
hashlib_args = check_args.get('hashlib')
if hashlib_args:
algo = hashlib_args['algorithm']
hash_value = hashlib_args['hash']
cb = partial(cls._check_integrity_using_algorithm, file_path, algo, hash_value)
if cb is not None:
# the call back is set, then we can run the verification
result = cb()
return result
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/common/sys_storage.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Declaration and implementation of common helpers and utilities"""
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/common/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of helpers and utilities related to manage the properties and dictionaries."""
import json
from dataclasses import field, dataclass
from json import JSONDecodeError
from pathlib import Path
from typing import Any, Callable
import yaml
from spark_rapids_tools import get_elem_from_dict, get_elem_non_safe
def convert_dict_to_camel_case(dic: dict):
"""
Given a dictionary with underscore keys. This method converts the keys to a camelcase.
Example, gce_cluster_config -> gceClusterConfig
:param dic: the dictionary to be converted
:return: a dictionary where all the keys are camelcase.
"""
def to_camel_case(word: str) -> str:
return word.split('_')[0] + ''.join(x.capitalize() or '_' for x in word.split('_')[1:])
if isinstance(dic, list):
return [convert_dict_to_camel_case(i) if isinstance(i, (dict, list)) else i for i in dic]
res = {}
for key, value in dic.items():
if isinstance(value, (dict, list)):
res[to_camel_case(key)] = convert_dict_to_camel_case(value)
else:
res[to_camel_case(key)] = value
return res
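# Illustrative example (hypothetical input) of the conversion performed above:
#   convert_dict_to_camel_case({'gce_cluster_config': {'zone_uri': 'us-central1-a'}})
#   -> {'gceClusterConfig': {'zoneUri': 'us-central1-a'}}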
@dataclass
class AbstractPropertiesContainer(object):
"""
An abstract class that loads properties (dictionary).
"""
prop_arg: str
file_load: bool = True
props: Any = field(default=None, init=False)
def apply_conversion(self, func_cb: Callable):
self.props = func_cb(self.props)
def get_value(self, *key_strs):
return get_elem_from_dict(self.props, key_strs)
def get_value_silent(self, *key_strs):
return get_elem_non_safe(self.props, key_strs)
def _init_fields(self):
pass
def _load_properties_from_file(self):
"""
        In some cases, we want to be able to accept both JSON and YAML formats when the properties are saved as a file.
:return:
"""
file_suffix = Path(self.prop_arg).suffix
if file_suffix in ('.yaml', '.yml'):
# this is a yaml property
self.__open_yaml_file()
else:
            # this is a json file
self.__open_json_file()
def __open_json_file(self):
try:
with open(self.prop_arg, 'r', encoding='utf-8') as json_file:
try:
self.props = json.load(json_file)
except JSONDecodeError as e:
raise RuntimeError('Incorrect format of JSON File') from e
except TypeError as e:
raise RuntimeError('Incorrect Type of JSON content') from e
except OSError as err:
raise RuntimeError('Please ensure the json file exists '
'and you have the required access privileges.') from err
def __open_yaml_file(self):
try:
with open(self.prop_arg, 'r', encoding='utf-8') as yaml_file:
try:
self.props = yaml.safe_load(yaml_file)
except yaml.YAMLError as e:
raise RuntimeError('Incorrect format of Yaml File') from e
except OSError as err:
raise RuntimeError('Please ensure the properties file exists '
'and you have the required access privileges.') from err
def _load_as_yaml(self):
if self.file_load:
# this is a file argument
self._load_properties_from_file()
else:
try:
self.props = yaml.safe_load(self.prop_arg)
except yaml.YAMLError as e:
raise RuntimeError('Incorrect format of Yaml File') from e
def _load_as_json(self):
if self.file_load:
# this is a file argument
self._load_properties_from_file()
else:
try:
if isinstance(self.prop_arg, str):
self.props = json.loads(self.prop_arg)
else:
self.props = self.prop_arg
except JSONDecodeError as e:
raise RuntimeError('Incorrect format of JSON File') from e
except TypeError as e:
raise RuntimeError('Incorrect Type of JSON content') from e
@dataclass
class YAMLPropertiesContainer(AbstractPropertiesContainer):
def __post_init__(self):
self._load_as_yaml()
self._init_fields()
@dataclass
class JSONPropertiesContainer(AbstractPropertiesContainer):
def __post_init__(self):
self._load_as_json()
self._init_fields()
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/common/prop_manager.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of custom Exceptions"""
class StorageException(Exception):
"""Exception raised for errors in the storage layer"""
def __init__(self,
message: str = 'Exception in storage operation',
chained_err: OSError = None):
self.msg = message if chained_err is None else f'{message}: {chained_err}'
super().__init__(self.msg)
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/common/exceptions.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of global utilities and helpers methods."""
import datetime
import logging.config
import os
import re
import secrets
import ssl
import string
import subprocess
import sys
import urllib
from dataclasses import dataclass, field
from logging import Logger
from shutil import which, make_archive
from typing import Callable, Any
import certifi
import chevron
from bs4 import BeautifulSoup
from packaging.version import Version
from pygments import highlight
from pygments.formatters import get_formatter_by_name
from pygments.lexers import get_lexer_by_name
from spark_rapids_pytools import get_version
class Utils:
"""Utility class used to enclose common helpers and utilities."""
@classmethod
def gen_random_string(cls, str_length: int) -> str:
return ''.join(secrets.choice(string.hexdigits) for _ in range(str_length))
@classmethod
def gen_uuid_with_ts(cls, pref: str = None, suffix_len: int = 0) -> str:
"""
Generate uuid in the form of YYYYmmddHHmmss
:param pref:
:param suffix_len:
:return:
"""
ts = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
uuid_parts = [] if pref is None else [pref]
uuid_parts.append(ts)
if suffix_len > 0:
uuid_parts.append(cls.gen_random_string(suffix_len))
return Utils.gen_joined_str('_', uuid_parts)
@classmethod
def resource_path(cls, resource_name: str) -> str:
# pylint: disable=import-outside-toplevel
if sys.version_info < (3, 9):
import importlib_resources
else:
import importlib.resources as importlib_resources
pkg = importlib_resources.files('spark_rapids_pytools')
return pkg / 'resources' / resource_name
@classmethod
def reformat_release_version(cls, defined_version: Version) -> str:
# get the release from version
version_tuple = defined_version.release
version_comp = list(version_tuple)
# release format is under url YY.MM.MICRO where MM is 02, 04, 06, 08, 10, and 12
res = f'{version_comp[0]}.{version_comp[1]:02}.{version_comp[2]}'
return res
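    # For example (illustrative): reformat_release_version(Version('23.4.1')) -> '23.04.1'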
@classmethod
def get_latest_available_jar_version(cls, url_base: str, loaded_version: str) -> str:
"""
Given the defined version in the python tools build, we want to be able to get the highest
version number of the jar available for download from the mvn repo.
The returned version is guaranteed to be LEQ to the defined version. For example, it is not
allowed to use jar version higher than the python tool itself.
:param url_base: the base url from which the jar file is downloaded. It can be mvn repo.
:param loaded_version: the version from the python tools in string format
:return: the string value of the jar that should be downloaded.
"""
context = ssl.create_default_context(cafile=certifi.where())
defined_version = Version(loaded_version)
jar_version = Version(loaded_version)
version_regex = r'\d{2}\.\d{2}\.\d+'
version_pattern = re.compile(version_regex)
with urllib.request.urlopen(url_base, context=context) as resp:
html_content = resp.read()
# Parse the HTML content using BeautifulSoup
soup = BeautifulSoup(html_content, 'html.parser')
# Find all the links with title in the format of "xx.xx.xx"
links = soup.find_all('a', {'title': version_pattern})
# Get the link with the highest value
for link in links:
curr_title = re.search(version_regex, link.get('title'))
if curr_title:
curr_version = Version(curr_title.group())
if curr_version <= defined_version:
jar_version = curr_version
# get formatted string
return cls.reformat_release_version(jar_version)
@classmethod
def get_base_release(cls) -> str:
"""
        For now the tools_jar version always has the form major.minor.0.
        This method makes sure that even if the package version is incremented, we will still
        get the correct url.
        :return: a string containing the release number, e.g. 22.12.0, 23.02.0, 23.04.0, etc.
"""
defined_version = Version(get_version(main=None))
# get the release from version
return cls.reformat_release_version(defined_version)
@classmethod
def is_system_tool(cls, tool_name: str) -> bool:
"""
check whether a tool is installed on the system.
:param tool_name: name of the tool to check
:return: True or False
"""
return which(tool_name) is not None
@classmethod
def make_archive(cls, base_name, fmt, root_dir) -> None:
"""
        Create an archive file (such as zip or tar) from the contents of a directory.
        :param base_name: the name of the file to create
        :param fmt: the archive format: "zip", "tar", "gztar"
:param root_dir: the root directory of the archive
:return:
"""
return make_archive(base_name=base_name, format=fmt, root_dir=root_dir)
@classmethod
def find_full_rapids_tools_env_key(cls, actual_key: str) -> str:
return f'RAPIDS_USER_TOOLS_{actual_key}'
@classmethod
def get_sys_env_var(cls, k: str, def_val=None):
return os.environ.get(k, def_val)
@classmethod
def get_rapids_tools_env(cls, k: str, def_val=None):
val = cls.get_sys_env_var(cls.find_full_rapids_tools_env_key(k), def_val)
return val
@classmethod
def set_rapids_tools_env(cls, k: str, val):
os.environ[cls.find_full_rapids_tools_env_key(k)] = str(val)
@classmethod
def gen_str_header(cls, title: str, ruler='-', line_width: int = 40) -> str:
dash = ruler * line_width
return cls.gen_multiline_str('', dash, f'{title:^{line_width}}', dash)
@classmethod
def gen_report_sec_header(cls,
title: str,
ruler='-',
title_width: int = 20,
hrule: bool = True) -> str:
line_width = max(title_width, len(title) + 1)
if hrule:
dash = ruler * line_width
return cls.gen_multiline_str('', f'{title}:', dash)
return cls.gen_multiline_str('', f'{title}:')
@classmethod
def gen_joined_str(cls, join_elem: str, items) -> str:
"""
Given a variable length of String arguments (or list), returns a single string
:param items: the items to be concatenated together. it could be a hybrid of str and lists
:param join_elem: the character to use as separator of the join
:return: a single string joining the items
"""
res_arr = []
for item in list(filter(lambda i: i is not None, items)):
if isinstance(item, list):
# that's an array
res_arr.extend(list(filter(lambda i: i is not None, item)))
else:
res_arr.append(item)
return join_elem.join(res_arr)
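    # For illustration (hypothetical items): None entries are dropped and nested lists are flattened:
    #   Utils.gen_joined_str(' ', ['spark-submit', ['--class', None, 'Main'], 'app.jar'])
    #   -> 'spark-submit --class Main app.jar'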
@classmethod
def gen_multiline_str(cls, *items) -> str:
return cls.gen_joined_str(join_elem='\n', items=items)
@classmethod
def get_os_name(cls) -> str:
return os.uname().sysname
class ToolLogging:
"""Holds global utilities used for logging."""
@classmethod
def get_log_dict(cls, args):
return {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '{asctime} {levelname} {name}: {message}',
'style': '{',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'root': {
'handlers': ['console'],
'level': 'DEBUG' if args.get('debug') else 'INFO',
},
}
@classmethod
def enable_debug_mode(cls):
Utils.set_rapids_tools_env('LOG_DEBUG', 'True')
@classmethod
def is_debug_mode_enabled(cls):
return Utils.get_rapids_tools_env('LOG_DEBUG')
@classmethod
def get_and_setup_logger(cls, type_label: str, debug_mode: bool = False):
debug_enabled = bool(Utils.get_rapids_tools_env('LOG_DEBUG', debug_mode))
logging.config.dictConfig(cls.get_log_dict({'debug': debug_enabled}))
logger = logging.getLogger(type_label)
log_file = Utils.get_rapids_tools_env('LOG_FILE')
if log_file:
# create file handler which logs even debug messages
fh = logging.FileHandler(log_file)
# TODO: set the formatter and handler for file logging
# fh.setLevel(log_level)
# fh.setFormatter(ExtraLogFormatter())
logger.addHandler(fh)
return logger
class TemplateGenerator:
"""A class to manage templates and content generation"""
@classmethod
    def render_template_file(cls, fpath: str, template_args: dict) -> str:
with open(fpath, 'r', encoding='UTF-8') as f:
return chevron.render(f, data=template_args)
@classmethod
def highlight_bash_code(cls, bash_script: str) -> str:
return highlight(bash_script, get_lexer_by_name('Bash'), get_formatter_by_name('terminal'))
@dataclass
class SysCmd:
"""
Run command and check return code, capture output etc.
"""
cmd: Any = None
cmd_input: str = None
env_vars: dict = None
expected: int = 0
fail_ok: bool = False
process_streams_cb: Callable = None
logger: Logger = ToolLogging.get_and_setup_logger('rapids.tools.cmd')
res: int = field(default=0, init=False)
out_std: str = field(default=None, init=False)
err_std: str = field(default=None, init=False)
timeout_secs: float = None
def has_failed(self):
return self.expected != self.res and not self.fail_ok
def build(self, field_values: dict = None):
if field_values is not None:
for field_name in field_values:
setattr(self, field_name, field_values.get(field_name))
return self
def _process_env_vars(self):
sys_env_vars = []
if self.env_vars is not None:
for env_k, env_arg in self.env_vars.items():
val = f'{env_k}={env_arg}'
sys_env_vars.append(val)
return sys_env_vars
def exec(self) -> str:
def process_credentials_option(cmd: list):
res = []
for i, arg in enumerate(cmd):
                if i > 0 and 'account-key' in cmd[i - 1]:
arg = 'MY_ACCESS_KEY'
elif 'fs.azure.account.key' in arg:
arg = arg.split('=')[0] + '=MY_ACCESS_KEY'
res.append(arg)
return res
# pylint: disable=subprocess-run-check
if isinstance(self.cmd, str):
cmd_args = [self.cmd]
else:
cmd_args = self.cmd[:]
if ToolLogging.is_debug_mode_enabled():
# do not dump the entire command to debugging to avoid exposing the env-variables
self.logger.debug('submitting system command: <%s>',
Utils.gen_joined_str(' ', process_credentials_option(cmd_args)))
full_cmd = self._process_env_vars()
full_cmd.extend(cmd_args)
actual_cmd = Utils.gen_joined_str(' ', full_cmd)
stdout = subprocess.PIPE
stderr = subprocess.PIPE
# pylint: disable=subprocess-run-check
if self.cmd_input is None:
c = subprocess.run(actual_cmd,
executable='/bin/bash',
shell=True,
timeout=self.timeout_secs,
stdout=stdout,
stderr=stderr)
else:
# apply input to the command
c = subprocess.run(actual_cmd,
executable='/bin/bash',
shell=True,
input=self.cmd_input,
text=True,
timeout=self.timeout_secs,
stdout=stdout,
stderr=stderr)
self.res = c.returncode
# pylint: enable=subprocess-run-check
self.err_std = c.stderr if isinstance(c.stderr, str) else c.stderr.decode('utf-8', errors='ignore')
if self.has_failed():
std_error_lines = [f'\t| {line}' for line in self.err_std.splitlines()]
stderr_str = ''
if len(std_error_lines) > 0:
error_lines = Utils.gen_multiline_str(std_error_lines)
stderr_str = f'\n{error_lines}'
processed_cmd_args = process_credentials_option(cmd_args)
cmd_err_msg = f'Error invoking CMD <{Utils.gen_joined_str(" ", processed_cmd_args)}>: {stderr_str}'
raise RuntimeError(f'{cmd_err_msg}')
self.out_std = c.stdout if isinstance(c.stdout, str) else c.stdout.decode('utf-8', errors='ignore')
if self.process_streams_cb is not None:
self.process_streams_cb(self.out_std, self.err_std)
if self.out_std:
return self.out_std.strip()
return self.out_std
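# Illustrative sketch (not part of the original module): building and executing a SysCmd.
# The command, environment variable, and timeout values below are made up.
def _example_run_sys_cmd() -> str:
    cmd_obj = SysCmd().build({
        'cmd': ['echo', 'hello'],
        'env_vars': {'MY_VAR': 'some_value'},
        'timeout_secs': 30.0,
        'fail_ok': True,
    })
    # exec() joins the env-vars and the command into a single bash invocation and returns
    # the stripped stdout (or raises RuntimeError on an unexpected return code).
    return cmd_obj.exec()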
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/common/utilities.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Job submissions on Databricks Azure"""
from dataclasses import dataclass
from spark_rapids_pytools.common.prop_manager import JSONPropertiesContainer
from spark_rapids_pytools.rapids.rapids_job import RapidsLocalJob
@dataclass
class DBAzureLocalRapidsJob(RapidsLocalJob):
"""
Implementation of a RAPIDS job that runs on the local machine.
"""
job_label = 'DBAzureLocal'
@classmethod
def get_account_name(cls, eventlogs: list):
if not eventlogs:
return ''
for path in eventlogs:
if path.startswith('abfss://'):
# assume all eventlogs are under the same storage account
return path.split('@')[1].split('.')[0]
return ''
def _build_jvm_args(self):
vm_args = super()._build_jvm_args()
eventlogs = self.exec_ctxt.get_value('wrapperCtx', 'eventLogs')
if not eventlogs:
self.logger.info('The list of Apache Spark event logs is empty.')
key = ''
account_name = self.get_account_name(eventlogs)
if account_name:
try:
cmd_args = ['az storage account show-connection-string', '--name', account_name]
std_out = self.exec_ctxt.platform.cli.run_sys_cmd(cmd_args)
conn_str = JSONPropertiesContainer(prop_arg=std_out, file_load=False).get_value('connectionString')
key = conn_str.split('AccountKey=')[1].split(';')[0]
except Exception as ex: # pylint: disable=broad-except
self.logger.info('Error retrieving access key for storage account %s: %s', account_name, ex)
key = ''
if key:
vm_args.append(f'-Drapids.tools.hadoop.fs.azure.account.key.{account_name}.dfs.core.windows.net={key}')
return vm_args
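# Illustrative sketch (not part of the original module): how the extra JVM argument is
# composed from an abfss:// event-log path. The path and the access key below are made up;
# in practice the key is retrieved through the Azure CLI as shown in _build_jvm_args().
def _example_build_account_key_arg() -> str:
    eventlogs = ['abfss://logs@mystorageacct.dfs.core.windows.net/eventlogs/app-1']
    account_name = DBAzureLocalRapidsJob.get_account_name(eventlogs)  # -> 'mystorageacct'
    hypothetical_key = 'MY_ACCESS_KEY'
    return (f'-Drapids.tools.hadoop.fs.azure.account.key.'
            f'{account_name}.dfs.core.windows.net={hypothetical_key}')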
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/cloud_api/databricks_azure_job.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation specific to EMR"""
import json
import os
from dataclasses import field, dataclass
from typing import Any, List
from spark_rapids_tools import CspEnv
from spark_rapids_pytools.cloud_api.emr_job import EmrLocalRapidsJob
from spark_rapids_pytools.cloud_api.s3storage import S3StorageDriver
from spark_rapids_pytools.cloud_api.sp_types import PlatformBase, ClusterBase, CMDDriverBase, \
ClusterState, SparkNodeType, ClusterNode, GpuHWInfo, SysInfo, GpuDevice, \
ClusterGetAccessor
from spark_rapids_pytools.common.prop_manager import JSONPropertiesContainer, \
AbstractPropertiesContainer
from spark_rapids_pytools.common.utilities import Utils
from spark_rapids_pytools.pricing.emr_pricing import EMREc2PriceProvider
from spark_rapids_pytools.pricing.price_provider import SavingsEstimator
@dataclass
class EMRPlatform(PlatformBase):
"""
Represents the interface and utilities required by AWS EMR.
Prerequisites:
- install aws command lines (aws cli)
- configure the aws
- this may be done by region
- aws has no staging available in the cluster properties.
- gsutil is used to move data from/to storage
"""
def set_offline_cluster(self, cluster_args: dict = None):
pass
@classmethod
def get_spark_node_type_fromstring(cls, value) -> SparkNodeType:
if value.upper() in ['TASK', 'CORE']:
return SparkNodeType.WORKER
return SparkNodeType.fromstring(value)
@classmethod
def process_raw_cluster_prop(cls, prop_container: AbstractPropertiesContainer) -> str:
if prop_container.get_value_silent('Cluster'):
_, prop_container.props = prop_container.props.popitem()
return json.dumps(prop_container.props)
def __post_init__(self):
self.type_id = CspEnv.EMR
super().__post_init__()
def _construct_cli_object(self) -> CMDDriverBase:
return EMRCMDDriver(timeout=0, cloud_ctxt=self.ctxt)
def _install_storage_driver(self):
self.storage = S3StorageDriver(self.cli)
def _construct_cluster_from_props(self,
cluster: str,
props: str = None):
return EMRCluster(self).set_connection(cluster_id=cluster, props=props)
def migrate_cluster_to_gpu(self, orig_cluster):
"""
given a cluster, convert it to run NVIDIA Gpu based on mapping instance types
:param orig_cluster: the original cluster to migrate from
:return: a new object cluster that supports GPU
"""
gpu_cluster_ob = EMRCluster(self)
gpu_cluster_ob.migrate_from_cluster(orig_cluster)
return gpu_cluster_ob
def validate_job_submission_args(self, submission_args: dict) -> dict:
"""
process the job submission and return the final arguments to be used for the execution.
:param submission_args: dictionary containing the job submission arguments
:return: a dictionary with the processed arguments.
"""
# TODO: verify that all arguments are valid
return submission_args
def create_saving_estimator(self,
source_cluster: ClusterGetAccessor,
reshaped_cluster: ClusterGetAccessor):
raw_pricing_config = self.configs.get_value_silent('pricing')
if raw_pricing_config:
pricing_config = JSONPropertiesContainer(prop_arg=raw_pricing_config, file_load=False)
else:
pricing_config: JSONPropertiesContainer = None
emr_price_provider = EMREc2PriceProvider(region=self.cli.get_region(),
pricing_configs={'emr': pricing_config})
saving_estimator = EmrSavingsEstimator(price_provider=emr_price_provider,
reshaped_cluster=reshaped_cluster,
source_cluster=source_cluster)
return saving_estimator
def create_local_submission_job(self, job_prop, ctxt) -> Any:
return EmrLocalRapidsJob(prop_container=job_prop, exec_ctxt=ctxt)
@dataclass
class EMRCMDDriver(CMDDriverBase):
"""Represents the command interface that will be used by EMR"""
def _list_inconsistent_configurations(self) -> list:
incorrect_envs = super()._list_inconsistent_configurations()
# check that private key file path is correct
emr_pem_path = self.env_vars.get('keyPairPath')
if emr_pem_path is not None:
if not os.path.exists(emr_pem_path):
incorrect_envs.append(f'Private key file path [{emr_pem_path}] does not exist. '
'It is required to SSH on driver node.')
else:
# check valid extension
if not (emr_pem_path.endswith('.pem') or emr_pem_path.endswith('.ppk')):
incorrect_envs.append(f'Private key file path [{emr_pem_path}] should be ppk or pem format')
else:
incorrect_envs.append(
f'Private key file path is not set. It is required to SSH on driver node. '
f'Set {Utils.find_full_rapids_tools_env_key("KEY_PAIR_PATH")}')
return incorrect_envs
def pull_cluster_props_by_args(self, args: dict) -> str:
aws_cluster_id = args.get('Id')
cluster_name = args.get('cluster')
if args.get('Id') is None:
# use cluster name to get the cluster values
# we need to get the cluster_id from the list command first.
list_cmd_res = self.exec_platform_list_cluster_by_name(cluster_name)
error_msg = f'Could not find EMR cluster {cluster_name} by name'
if not list_cmd_res:
raise RuntimeError(error_msg)
# listed_cluster is json formatted string of array, but we need only the first entry
# to read the clusterID
cluster_headers: list = json.loads(list_cmd_res)
if len(cluster_headers) == 0:
raise RuntimeError(error_msg)
existing_cluster = cluster_headers[0]
aws_cluster_id = existing_cluster['Id']
self.logger.debug('Cluster %s has an Id %s', cluster_name, aws_cluster_id)
cluster_described = self.exec_platform_describe_cluster_by_id(aws_cluster_id)
if cluster_described is not None:
raw_prop_container = JSONPropertiesContainer(prop_arg=cluster_described, file_load=False)
return EMRPlatform.process_raw_cluster_prop(raw_prop_container)
return cluster_described
def _build_ssh_cmd_prefix_for_node(self, node: ClusterNode) -> str:
# get the pem file
pem_file_path = self.env_vars.get('keyPairPath')
prefix_args = ['ssh',
'-o StrictHostKeyChecking=no',
f'-i {pem_file_path}',
f'hadoop@{node.name}']
return Utils.gen_joined_str(' ', prefix_args)
def _build_cmd_scp_to_node(self, node: ClusterNode, src: str, dest: str) -> str:
# get the pem file
pem_file_path = self.env_vars.get('keyPairPath')
prefix_args = ['scp',
'-o StrictHostKeyChecking=no',
f'-i {pem_file_path}',
src,
f'hadoop@{node.name}:{dest}']
return Utils.gen_joined_str(' ', prefix_args)
def _build_cmd_scp_from_node(self, node: ClusterNode, src: str, dest: str) -> str:
# get the pem file
pem_file_path = self.env_vars.get('keyPairPath')
prefix_args = ['scp',
'-o StrictHostKeyChecking=no',
f'-i {pem_file_path}',
f'hadoop@{node.name}:{src}',
dest]
return Utils.gen_joined_str(' ', prefix_args)
def _build_platform_describe_node_instance(self, node: ClusterNode) -> list:
cmd_params = ['aws ec2 describe-instance-types',
'--region', f'{self.get_region()}',
'--instance-types', f'{node.instance_type}']
return cmd_params
def _build_platform_list_cluster(self,
cluster,
query_args: dict = None) -> list:
# aws emr list-instances --cluster-id j-2DDF0Q87QOXON
cmd_params = ['aws emr list-instances',
'--cluster-id',
f'{cluster.uuid}']
if query_args is not None:
for q_key in query_args:
cmd_params.append(f'--{q_key}')
cmd_params.append(f'{query_args.get(q_key)}')
return cmd_params
def exec_platform_list_cluster_by_name(self,
cluster_name: str):
list_cmd = f"aws emr list-clusters --query 'Clusters[?Name==`{cluster_name}`]'"
return self.run_sys_cmd(list_cmd)
def exec_platform_describe_cluster_by_id(self,
cluster_id: str):
describe_cmd = f'aws emr describe-cluster --cluster-id {cluster_id}'
return self.run_sys_cmd(describe_cmd)
def get_submit_spark_job_cmd_for_cluster(self, cluster_name: str, submit_args: dict) -> List[str]:
raise NotImplementedError
@dataclass
class InstanceGroup:
"""
Holds information about instance groups
"""
id: str # group ID
instance_type: str # the machine type
count: int # Number of requested instances associated to that group
market: str # ON_DEMAND or SPOT
group_type: str # MASTER, TASK, or CORE
spark_grp_type: SparkNodeType = field(default=None, init=False) # map the group_type to Spark type.
def __post_init__(self):
self.spark_grp_type = EMRPlatform.get_spark_node_type_fromstring(self.group_type)
@dataclass
class Ec2Instance:
"""
Holds information about a single EC2 instance.
"""
id: str
ec2_instance_id: str
dns_name: str
group: InstanceGroup
state: ClusterState # RUNNING, TERMINATED, etc.
@dataclass
class EMRNode(ClusterNode):
"""
Represents EMR cluster Node.
We assume that all nodes are running on EC2 instances.
"""
ec2_instance: Ec2Instance = field(default=None, init=False)
def _pull_and_set_mc_props(self, cli=None):
instance_description = cli.exec_platform_describe_node_instance(self)
mc_description = json.loads(instance_description)['InstanceTypes'][0]
self.mc_props = JSONPropertiesContainer(prop_arg=mc_description, file_load=False)
def _set_fields_from_props(self):
self.name = self.ec2_instance.dns_name
self.instance_type = self.ec2_instance.group.instance_type
def _pull_sys_info(self, cli=None) -> SysInfo:
cpu_mem = self.mc_props.get_value('MemoryInfo', 'SizeInMiB')
# TODO: should we use DefaultVCpus or DefaultCores
num_cpus = self.mc_props.get_value('VCpuInfo', 'DefaultVCpus')
return SysInfo(num_cpus=num_cpus, cpu_mem=cpu_mem)
def _pull_gpu_hw_info(self, cli=None) -> GpuHWInfo or None:
raw_gpus = self.mc_props.get_value_silent('GpuInfo')
if raw_gpus is None:
return None
# TODO: we assume all gpus of the same type
raw_gpu_arr = raw_gpus.get('Gpus')
if raw_gpu_arr is None:
return None
raw_gpu = raw_gpu_arr[0]
gpu_device = GpuDevice.fromstring(raw_gpu['Name'])
gpu_cnt = raw_gpu['Count']
gpu_mem = raw_gpu['MemoryInfo']['SizeInMiB']
return GpuHWInfo(num_gpus=gpu_cnt,
gpu_device=gpu_device,
gpu_mem=gpu_mem)
@dataclass
class EMRCluster(ClusterBase):
"""
Represents an instance of running cluster on EMR.
"""
instance_groups: list = field(default=None, init=False)
ec2_instances: list = field(default=None, init=False)
def _process_loaded_props(self) -> None:
"""
After loading the raw properties, perform any necessary processing to clean up the
properties. We want to get rid of the top-level 'Cluster' key.
"""
if self.props.get_value_silent('Cluster') is not None:
_, new_props = self.props.props.popitem()
self.props.props = new_props
def __create_ec2_list_by_group(self, group_arg):
if isinstance(group_arg, InstanceGroup):
group_obj = group_arg
group_id = group_arg.id
else:
group_id = group_arg
group_obj = None
query_args = {'instance-group-id': group_id}
raw_instance_list = self.cli.exec_platform_list_cluster_instances(self, query_args=query_args)
instances_list = json.loads(raw_instance_list).get('Instances')
ec2_instances = []
for raw_inst in instances_list:
parsed_state = raw_inst['Status']['State']
ec2_instance = Ec2Instance(
id=raw_inst['Id'],
ec2_instance_id=raw_inst['Ec2InstanceId'],
dns_name=raw_inst['PublicDnsName'],
group=group_obj,
state=ClusterState.fromstring(parsed_state)
)
ec2_instances.append(ec2_instance)
return ec2_instances
def _build_migrated_cluster(self, orig_cluster):
"""
specific to the platform on how to build a cluster based on migration
:param orig_cluster:
"""
group_cache = {}
self.instance_groups = []
self.ec2_instances = []
# get the map of the instance types
mc_type_map, _ = orig_cluster.find_matches_for_node()
# convert instances and groups
# master groups should stay the same
for curr_group in orig_cluster.instance_groups:
if curr_group.spark_grp_type == SparkNodeType.MASTER:
new_inst_grp = curr_group
else:
# convert the instance_type
new_instance_type = mc_type_map.get(curr_group.instance_type, curr_group.instance_type)
if new_instance_type == curr_group.instance_type:
new_inst_grp = curr_group
else:
new_inst_grp = InstanceGroup(
id=curr_group.id,
instance_type=new_instance_type,
count=curr_group.count,
market=curr_group.market,
group_type=curr_group.group_type)
group_cache.update({new_inst_grp.id: new_inst_grp})
self.instance_groups.append(new_inst_grp)
# convert the instances
for ec2_inst in orig_cluster.ec2_instances:
if ec2_inst.group.spark_grp_type == SparkNodeType.MASTER:
new_group_obj = ec2_inst.group
else:
# get the new group
new_group_obj = group_cache.get(ec2_inst.group.id)
new_inst = Ec2Instance(
id=ec2_inst.id,
ec2_instance_id=ec2_inst.ec2_instance_id,
dns_name=None,
group=new_group_obj,
state=ec2_inst.state)
self.ec2_instances.append(new_inst)
self.nodes = self.__create_node_from_instances()
if bool(mc_type_map):
# update the platform notes
self.platform.update_ctxt_notes('nodeConversions', mc_type_map)
def __create_node_from_instances(self):
worker_nodes = []
master_nodes = []
for ec2_inst in self.ec2_instances:
node_props = {
'ec2_instance': ec2_inst
}
c_node = EMRNode.create_node(ec2_inst.group.spark_grp_type).set_fields_from_dict(node_props)
c_node.fetch_and_set_hw_info(self.cli)
if c_node.node_type == SparkNodeType.WORKER:
worker_nodes.append(c_node)
else:
master_nodes.append(c_node)
return {
SparkNodeType.WORKER: worker_nodes,
SparkNodeType.MASTER: master_nodes[0]
}
def _init_nodes(self):
def process_cluster_group_list(inst_groups: list) -> list:
instance_group_list = []
for inst_grp in inst_groups:
inst_group = InstanceGroup(
id=inst_grp['Id'],
instance_type=inst_grp['InstanceType'],
count=inst_grp['RequestedInstanceCount'],
market=inst_grp['Market'],
group_type=inst_grp['InstanceGroupType'],
)
instance_group_list.append(inst_group)
return instance_group_list
# get instance_groups from the cluster props.
inst_grps = self.props.get_value('InstanceGroups')
self.instance_groups = process_cluster_group_list(inst_grps)
self.ec2_instances = []
for curr_group in self.instance_groups:
instances_list = self.__create_ec2_list_by_group(curr_group)
self.ec2_instances.extend(instances_list)
self.nodes = self.__create_node_from_instances()
def _set_fields_from_props(self):
super()._set_fields_from_props()
self.uuid = self.props.get_value('Id')
self.state = ClusterState.fromstring(self.props.get_value('Status', 'State'))
self.zone = self.props.get_value('Ec2InstanceAttributes',
'Ec2AvailabilityZone')
def _set_name_from_props(self) -> None:
self.name = self.props.get_value('Name')
def is_cluster_running(self) -> bool:
acceptable_init_states = [
ClusterState.RUNNING,
ClusterState.STARTING,
ClusterState.BOOTSTRAPPING,
ClusterState.WAITING
]
return self.state in acceptable_init_states
def get_all_spark_properties(self) -> dict:
res = {}
configs_list = self.props.get_value_silent('Configurations')
for conf_item in configs_list:
if conf_item['Classification'].startswith('spark'):
curr_spark_props = conf_item['Properties']
res.update(curr_spark_props)
return res
def get_tmp_storage(self) -> str:
raise NotImplementedError
def get_image_version(self) -> str:
return self.props.get_value('ReleaseLabel')
def _set_render_args_create_template(self) -> dict:
worker_node = self.get_worker_node()
return {
'CLUSTER_NAME': self.get_name(),
'ZONE': self.zone,
'IMAGE': self.get_image_version(),
'MASTER_MACHINE': self.get_master_node().instance_type,
'WORKERS_COUNT': self.get_workers_count(),
'WORKERS_MACHINE': worker_node.instance_type
}
@dataclass
class EmrSavingsEstimator(SavingsEstimator):
"""
A class that calculates the savings based on an EMR price provider
"""
def _calculate_ec2_cost(self,
cluster_inst: ClusterGetAccessor,
node_type: SparkNodeType) -> float:
nodes_cnt = cluster_inst.get_nodes_cnt(node_type)
node_mc_type = cluster_inst.get_node_instance_type(node_type)
ec2_unit_cost = self.price_provider.catalogs['aws'].get_value('ec2', node_mc_type)
ec2_cost = ec2_unit_cost * nodes_cnt
return ec2_cost
def _calculate_emr_cost(self,
cluster_inst: ClusterGetAccessor,
node_type: SparkNodeType) -> float:
nodes_cnt = cluster_inst.get_nodes_cnt(node_type)
node_mc_type = cluster_inst.get_node_instance_type(node_type)
emr_unit_cost = self.price_provider.catalogs['aws'].get_value('emr', node_mc_type)
emr_cost = emr_unit_cost * nodes_cnt
return emr_cost
def _get_cost_per_cluster(self, cluster: ClusterGetAccessor):
total_cost = 0.0
for node_type in [SparkNodeType.MASTER, SparkNodeType.WORKER]:
total_cost += self._calculate_ec2_cost(cluster, node_type)
total_cost += self._calculate_emr_cost(cluster, node_type)
return total_cost
def _setup_costs(self):
# calculate target_cost
self.target_cost = self._get_cost_per_cluster(self.reshaped_cluster)
self.source_cost = self._get_cost_per_cluster(self.source_cluster)
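# Illustrative sketch (not part of the original module): the per-cluster cost above sums the
# EC2 hourly price plus the EMR surcharge over both node types. All prices and node counts
# below are hypothetical.
def _example_emr_cluster_cost() -> float:
    # (ec2_unit_cost, emr_unit_cost, nodes_cnt) per node type: master first, then workers
    node_costs = [(0.192, 0.048, 1), (0.384, 0.096, 4)]
    total_cost = 0.0
    for ec2_unit_cost, emr_unit_cost, nodes_cnt in node_costs:
        total_cost += (ec2_unit_cost + emr_unit_cost) * nodes_cnt
    return total_cost  # 0.24 + 1.92 = 2.16 in this made-up example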
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/cloud_api/emr.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of AWS storage related functionalities."""
import re
from dataclasses import dataclass
from spark_rapids_pytools.cloud_api.sp_types import CMDDriverBase
from spark_rapids_pytools.common.sys_storage import StorageDriver, FSUtil
@dataclass
class S3StorageDriver(StorageDriver):
"""
Wrapper around aws-s3 commands such as copying/moving/listing files.
"""
cli: CMDDriverBase
@classmethod
def get_cmd_prefix(cls):
pref_arr = ['aws', 's3']
return pref_arr[:]
def resource_is_dir(self, src: str) -> bool:
# if the resource is a directory, the S3 ls command would return PRE dir_name/
if not src.startswith('s3://'):
return super().resource_is_dir(src)
# we do not want the resource name to be followed by a slash when we check whether
# it is a directory.
full_src = src if not src.endswith('/') else src[:-1]
cmd_args = self.get_cmd_prefix()
cmd_args.extend(['ls', full_src])
# run command and make sure we return 0.
res = False
try:
ls_out = self.cli.run_sys_cmd(cmd_args)
folder_name = FSUtil.get_resource_name(src)
matched_lines = re.findall(rf'(PRE)\s+({folder_name})/', ls_out)
if len(matched_lines) > 0:
res = True
except RuntimeError:
res = False
return res
def resource_exists(self, src) -> bool:
if not src.startswith('s3://'):
return super().resource_exists(src)
# run 's3 ls src'; if the return code is 0, then the resource exists
cmd_args = self.get_cmd_prefix()
cmd_args.extend(['ls', src])
# run command and make sure we return 0.
try:
self.cli.run_sys_cmd(cmd_args)
res = True
except RuntimeError:
res = False
return res
def _download_remote_resource(self, src: str, dest: str) -> str:
if not src.startswith('s3://'):
return super()._download_remote_resource(src, dest)
# this is s3 storage
cmd_args = self.get_cmd_prefix()
cmd_args.extend(['cp', src, dest])
if self.resource_is_dir(src):
cmd_args.append('--recursive')
self.cli.run_sys_cmd(cmd_args)
return FSUtil.build_full_path(dest, FSUtil.get_resource_name(src))
def _upload_remote_dest(self, src: str, dest: str, exclude_pattern: str = None) -> str:
if not dest.startswith('s3://'):
return super()._upload_remote_dest(src, dest)
# this is s3 storage
cmd_args = self.get_cmd_prefix()
cmd_args.extend(['cp', src, dest])
if self.resource_is_dir(src):
cmd_args.append('--recursive')
if exclude_pattern:
cmd_args.extend(['--exclude', exclude_pattern])
self.cli.run_sys_cmd(cmd_args)
return FSUtil.build_path(dest, FSUtil.get_resource_name(src))
def is_file_path(self, value: str):
if value.startswith('s3://'):
return True
return super().is_file_path(value)
def _delete_path(self, src, fail_ok: bool = False):
if not src.startswith('s3://'):
super()._delete_path(src)
else:
cmd_args = self.get_cmd_prefix()
cmd_args.extend(['rm', src])
if self.resource_is_dir(src):
cmd_args.append('--recursive')
self.cli.run_sys_cmd(cmd_args)
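# Illustrative sketch (not part of the original module): the argument list assembled for a
# download, mirroring _download_remote_resource() above. The URIs below are made up.
def _example_s3_cp_args(src: str = 's3://my-bucket/logs', dest: str = '/tmp/work',
                        is_dir: bool = True) -> list:
    cmd_args = S3StorageDriver.get_cmd_prefix()  # ['aws', 's3']
    cmd_args.extend(['cp', src, dest])
    if is_dir:
        cmd_args.append('--recursive')
    return cmd_args  # ['aws', 's3', 'cp', 's3://my-bucket/logs', '/tmp/work', '--recursive']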
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/cloud_api/s3storage.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Azure Data Lake Storage with ABFS (Azure Blob File System) related functionalities."""
from dataclasses import dataclass, field
from logging import Logger
from spark_rapids_pytools.cloud_api.sp_types import CMDDriverBase
from spark_rapids_pytools.common.prop_manager import JSONPropertiesContainer
from spark_rapids_pytools.common.sys_storage import StorageDriver, FSUtil
from spark_rapids_pytools.common.utilities import ToolLogging
@dataclass
class AzureStorageDriver(StorageDriver):
"""
Wrapper around azure commands such as copying/moving/listing files.
"""
cli: CMDDriverBase
account_keys: dict = field(default_factory=dict, init=False)
logger: Logger = field(default=ToolLogging.get_and_setup_logger('rapids.tools.azurestoragedriver'), init=False)
@classmethod
def get_cmd_prefix(cls):
pref_arr = ['az storage fs']
return pref_arr[:]
@classmethod
def get_file_system(cls, url: str):
return url.split('@')[0].split('://')[1]
@classmethod
def get_account_name(cls, url: str):
return url.split('@')[1].split('.')[0]
@classmethod
def get_path(cls, url: str):
return url.split('dfs.core.windows.net')[1]
def get_account_key(self, account_name: str):
if account_name in self.account_keys:
return self.account_keys[account_name]
try:
cmd_args = ['az storage account show-connection-string', '--name', account_name]
std_out = self.cli.run_sys_cmd(cmd_args)
conn_str = JSONPropertiesContainer(prop_arg=std_out, file_load=False).get_value('connectionString')
key = conn_str.split('AccountKey=')[1].split(';')[0]
self.account_keys[account_name] = key
except Exception as ex: # pylint: disable=broad-except
self.logger.info('Error retrieving access key for storage account %s: %s', account_name, ex)
key = ''
return key
def resource_is_dir(self, src: str) -> bool:
if not src.startswith('abfss://'):
return super().resource_is_dir(src)
try:
file_system = self.get_file_system(src)
account_name = self.get_account_name(src)
path = self.get_path(src)
cmd_args = self.get_cmd_prefix()
cmd_args.extend(['file list', '-f', file_system, '--account-name', account_name])
if path:
cmd_args.extend(['--path', path])
account_key = self.get_account_key(account_name)
if account_key:
cmd_args.extend(['--account-key', account_key])
std_out = self.cli.run_sys_cmd(cmd_args)
stdout_info = JSONPropertiesContainer(prop_arg=std_out, file_load=False)
path = path.lstrip('/')
if not (len(stdout_info.props) == 1 and stdout_info.props[0]['name'] == path): # not a file
return True
except RuntimeError:
self.cli.logger.debug('Error in checking resource [%s] is directory', src)
return False
def resource_exists(self, src) -> bool:
if not src.startswith('abfss://'):
return super().resource_exists(src)
# run 'az storage fs file list'; if the return code is 0, then the resource exists.
try:
file_system = self.get_file_system(src)
account_name = self.get_account_name(src)
path = self.get_path(src)
cmd_args = self.get_cmd_prefix()
cmd_args.extend(['file list', '-f', file_system, '--account-name', account_name])
if path:
cmd_args.extend(['--path', path])
account_key = self.get_account_key(account_name)
if account_key:
cmd_args.extend(['--account-key', account_key])
self.cli.run_sys_cmd(cmd_args)
res = True
except RuntimeError:
res = False
return res
def _download_remote_resource(self, src: str, dest: str) -> str:
if not src.startswith('abfss://'):
return super()._download_remote_resource(src, dest)
# this is azure data lake storage
file_system = self.get_file_system(src)
account_name = self.get_account_name(src)
path = self.get_path(src)
cmd_args = self.get_cmd_prefix()
if self.resource_is_dir(src):
cmd_args.extend(['directory download', '-f', file_system, '--account-name', account_name])
cmd_args.extend(['-s', path, '-d', dest, '--recursive'])
else:
cmd_args.extend(['file download', '-f', file_system, '--account-name', account_name])
cmd_args.extend(['-p', path, '-d', dest])
account_key = self.get_account_key(account_name)
if account_key:
cmd_args.extend(['--account-key', account_key])
self.cli.run_sys_cmd(cmd_args)
return FSUtil.build_full_path(dest, FSUtil.get_resource_name(src))
def _upload_remote_dest(self, src: str, dest: str, exclude_pattern: str = None) -> str:
if not dest.startswith('abfss://'):
return super()._upload_remote_dest(src, dest)
# this is azure data lake storage
file_system = self.get_file_system(dest)
account_name = self.get_account_name(dest)
dest_path = self.get_path(dest)
src_resource_name = FSUtil.get_resource_name(src)
dest_resource_name = FSUtil.get_resource_name(dest)
cmd_args = self.get_cmd_prefix()
# source is a directory
if self.resource_is_dir(src):
# for azure cli, specifying a directory to copy will result in a duplicate; so we double-check
# that if the dest already has the name of the src, then we move one level up.
if src_resource_name == dest_resource_name:
# go to the parent level for destination
dest_path = dest_path.split(src_resource_name)[0].rstrip('/')
cmd_args.extend(['directory upload', '-f', file_system, '--account-name', account_name])
cmd_args.extend(['-s', src, '-d', dest_path, '--recursive'])
else: # source is a file
cmd_args.extend(['file upload', '-f', file_system, '--account-name', account_name])
# dest is a directory, we will append the source resource name to it
if self.resource_is_dir(dest):
dest_path = dest_path if dest_path[-1] == '/' else dest_path + '/'
dest_path = dest_path + src_resource_name
cmd_args.extend(['-s', src, '-p', dest_path])
account_key = self.get_account_key(account_name)
if account_key:
cmd_args.extend(['--account-key', account_key])
self.cli.run_sys_cmd(cmd_args)
return FSUtil.build_path(dest, FSUtil.get_resource_name(src))
def is_file_path(self, value: str):
if value.startswith('https://'):
return True
return super().is_file_path(value)
def _delete_path(self, src, fail_ok: bool = False):
if not src.startswith('abfss://'):
super()._delete_path(src)
return
# this is azure data lake storage
file_system = self.get_file_system(src)
account_name = self.get_account_name(src)
path = self.get_path(src)
cmd_args = self.get_cmd_prefix()
if self.resource_is_dir(src):
cmd_args.extend(['directory delete', '-f', file_system, '--account-name', account_name])
cmd_args.extend(['-n', path, '-y'])
else:
cmd_args.extend(['file delete', '-f', file_system, '--account-name', account_name])
cmd_args.extend(['-p', path, '-y'])
account_key = self.get_account_key(account_name)
if account_key:
cmd_args.extend(['--account-key', account_key])
self.cli.run_sys_cmd(cmd_args)
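# Illustrative sketch (not part of the original module): how the URL helpers above split an
# abfss:// location into its parts. The URL below is made up.
def _example_parse_abfss_url() -> tuple:
    url = 'abfss://mycontainer@mystorageacct.dfs.core.windows.net/eventlogs/app-1'
    file_system = AzureStorageDriver.get_file_system(url)    # 'mycontainer'
    account_name = AzureStorageDriver.get_account_name(url)  # 'mystorageacct'
    path = AzureStorageDriver.get_path(url)                  # '/eventlogs/app-1'
    return file_system, account_name, path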
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/cloud_api/azurestorage.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""provides interface to the cloud service providers."""
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/cloud_api/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation specific to DATABRICKS_AZURE"""
import datetime
import json
import os
from dataclasses import dataclass, field
from typing import Any, List
from spark_rapids_tools import CspEnv
from spark_rapids_pytools.cloud_api.azurestorage import AzureStorageDriver
from spark_rapids_pytools.cloud_api.databricks_azure_job import DBAzureLocalRapidsJob
from spark_rapids_pytools.cloud_api.sp_types import CMDDriverBase, ClusterBase, ClusterNode, \
PlatformBase, SysInfo, GpuHWInfo, ClusterState, SparkNodeType, ClusterGetAccessor, NodeHWInfo, GpuDevice
from spark_rapids_pytools.common.prop_manager import JSONPropertiesContainer
from spark_rapids_pytools.common.sys_storage import FSUtil
from spark_rapids_pytools.common.utilities import Utils
from spark_rapids_pytools.pricing.databricks_azure_pricing import DatabricksAzurePriceProvider
from spark_rapids_pytools.pricing.price_provider import SavingsEstimator
@dataclass
class DBAzurePlatform(PlatformBase):
"""
Represents the interface and utilities required by DATABRICKS_AZURE.
Prerequisites:
- install databricks, azure command lines (databricks cli, azure cli)
- configure the databricks cli (token, workspace, profile)
- configure the azure cli
"""
def __post_init__(self):
self.type_id = CspEnv.DATABRICKS_AZURE
super().__post_init__()
def _construct_cli_object(self) -> CMDDriverBase:
return DBAzureCMDDriver(configs=self.configs, timeout=0, cloud_ctxt=self.ctxt)
def _install_storage_driver(self):
self.storage = AzureStorageDriver(self.cli)
def _construct_cluster_from_props(self, cluster: str, props: str = None):
return DatabricksAzureCluster(self).set_connection(cluster_id=cluster, props=props)
def set_offline_cluster(self, cluster_args: dict = None):
pass
def migrate_cluster_to_gpu(self, orig_cluster):
"""
given a cluster, convert it to run NVIDIA Gpu based on mapping instance types
:param orig_cluster: the original cluster to migrate from
:return: a new object cluster that supports GPU
"""
gpu_cluster_ob = DatabricksAzureCluster(self)
gpu_cluster_ob.migrate_from_cluster(orig_cluster)
return gpu_cluster_ob
def create_saving_estimator(self,
source_cluster: ClusterGetAccessor,
reshaped_cluster: ClusterGetAccessor):
raw_pricing_config = self.configs.get_value_silent('pricing')
if raw_pricing_config:
pricing_config = JSONPropertiesContainer(prop_arg=raw_pricing_config, file_load=False)
else:
pricing_config: JSONPropertiesContainer = None
db_azure_price_provider = DatabricksAzurePriceProvider(region=self.cli.get_region(),
pricing_configs={'databricks-azure': pricing_config})
saving_estimator = DBAzureSavingsEstimator(price_provider=db_azure_price_provider,
reshaped_cluster=reshaped_cluster,
source_cluster=source_cluster)
return saving_estimator
def create_local_submission_job(self, job_prop, ctxt) -> Any:
return DBAzureLocalRapidsJob(prop_container=job_prop, exec_ctxt=ctxt)
def validate_job_submission_args(self, submission_args: dict) -> dict:
pass
def get_supported_gpus(self) -> dict:
gpus_from_configs = self.configs.get_value('gpuConfigs', 'user-tools', 'supportedGpuInstances')
gpu_scopes = {}
for mc_prof, mc_info in gpus_from_configs.items():
hw_info_json = mc_info['SysInfo']
hw_info_ob = SysInfo(num_cpus=hw_info_json['num_cpus'], cpu_mem=hw_info_json['cpu_mem'])
gpu_info_json = mc_info['GpuInfo']['GPUs'][0]
gpu_info_obj = GpuHWInfo(num_gpus=gpu_info_json['Count'], gpu_mem=gpu_info_json['MemoryInfo']['SizeInMiB'])
gpu_scopes[mc_prof] = NodeHWInfo(sys_info=hw_info_ob, gpu_info=gpu_info_obj)
return gpu_scopes
@dataclass
class DBAzureCMDDriver(CMDDriverBase):
"""Represents the command interface that will be used by DATABRICKS_AZURE"""
configs: JSONPropertiesContainer = None
cache_expiration_secs: int = field(default=604800, init=False) # update the file once a week
# logger: Logger = field(default=ToolLogging.get_and_setup_logger('rapids.tools.databricks.azure'), init=False)
def _list_inconsistent_configurations(self) -> list:
incorrect_envs = super()._list_inconsistent_configurations()
return incorrect_envs
def _build_platform_list_cluster(self, cluster, query_args: dict = None) -> list:
pass
def pull_cluster_props_by_args(self, args: dict) -> str:
get_cluster_cmd = ['databricks', 'clusters', 'get']
if 'Id' in args:
get_cluster_cmd.extend(['--cluster-id', args.get('Id')])
elif 'cluster' in args:
get_cluster_cmd.extend(['--cluster-name', args.get('cluster')])
else:
self.logger.error('Invalid arguments to pull the cluster properties')
return self.run_sys_cmd(get_cluster_cmd)
def process_instances_description(self, raw_instances_description: str) -> dict:
processed_instances_description = {}
instances_description = JSONPropertiesContainer(prop_arg=raw_instances_description, file_load=False)
for instance in instances_description.props:
instance_dict = {}
v_cpus = 0
memory_gb = 0
gpus = 0
if not instance['capabilities']:
continue
for item in instance['capabilities']:
if item['name'] == 'vCPUs':
v_cpus = int(item['value'])
elif item['name'] == 'MemoryGB':
memory_gb = int(float(item['value']) * 1024)
elif item['name'] == 'GPUs':
gpus = int(item['value'])
instance_dict['VCpuInfo'] = {'DefaultVCpus': v_cpus}
instance_dict['MemoryInfo'] = {'SizeInMiB': memory_gb}
if gpus > 0:
gpu_list = [{'Name': '', 'Manufacturer': '', 'Count': gpus, 'MemoryInfo': {'SizeInMiB': 0}}]
instance_dict['GpuInfo'] = {'GPUs': gpu_list}
processed_instances_description[instance['name']] = instance_dict
return processed_instances_description
def generate_instances_description(self, fpath: str):
cmd_params = ['az vm list-skus',
'--location', f'{self.get_region()}']
raw_instances_description = self.run_sys_cmd(cmd_params)
json_instances_description = self.process_instances_description(raw_instances_description)
with open(fpath, 'w', encoding='UTF-8') as output_file:
json.dump(json_instances_description, output_file, indent=2)
def _build_platform_describe_node_instance(self, node: ClusterNode) -> list:
pass
def _caches_expired(self, cache_file) -> bool:
if not os.path.exists(cache_file):
return True
modified_time = os.path.getmtime(cache_file)
diff_time = int(datetime.datetime.now().timestamp() - modified_time)
if diff_time > self.cache_expiration_secs:
return True
return False
def init_instances_description(self) -> str:
cache_dir = Utils.get_rapids_tools_env('CACHE_FOLDER')
fpath = FSUtil.build_path(cache_dir, 'azure-instances-catalog.json')
if self._caches_expired(fpath):
self.logger.info('Downloading the Azure instance type descriptions catalog')
self.generate_instances_description(fpath)
else:
self.logger.info('The Azure instance type descriptions catalog is loaded from the cache')
return fpath
def get_submit_spark_job_cmd_for_cluster(self, cluster_name: str, submit_args: dict) -> List[str]:
raise NotImplementedError
def get_region(self) -> str:
if self.env_vars.get('location'):
return self.env_vars.get('location')
return self.env_vars.get('region')
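# Illustrative sketch (not part of the original module): the shape of the mapping performed
# by process_instances_description() above. The instance name and values below are made up.
# input entry (one element of the `az vm list-skus` output):
#   {'name': 'Standard_Example_v3',
#    'capabilities': [{'name': 'vCPUs', 'value': '6'},
#                     {'name': 'MemoryGB', 'value': '112'},
#                     {'name': 'GPUs', 'value': '1'}]}
# resulting entry:
#   {'Standard_Example_v3': {'VCpuInfo': {'DefaultVCpus': 6},
#                            'MemoryInfo': {'SizeInMiB': 114688},
#                            'GpuInfo': {'GPUs': [{'Name': '', 'Manufacturer': '',
#                                                  'Count': 1,
#                                                  'MemoryInfo': {'SizeInMiB': 0}}]}}}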
@dataclass
class DatabricksAzureNode(ClusterNode):
"""Implementation of Databricks Azure cluster node."""
region: str = field(default=None, init=False)
def _pull_and_set_mc_props(self, cli=None):
instances_description_path = cli.init_instances_description()
self.mc_props = JSONPropertiesContainer(prop_arg=instances_description_path)
def _set_fields_from_props(self):
self.name = self.props.get_value_silent('public_dns')
def _pull_sys_info(self, cli=None) -> SysInfo:
cpu_mem = self.mc_props.get_value(self.instance_type, 'MemoryInfo', 'SizeInMiB')
# TODO: should we use DefaultVCpus or DefaultCores
num_cpus = self.mc_props.get_value(self.instance_type, 'VCpuInfo', 'DefaultVCpus')
return SysInfo(num_cpus=num_cpus, cpu_mem=cpu_mem)
def _pull_gpu_hw_info(self, cli=None) -> GpuHWInfo or None:
gpu_info = cli.configs.get_value('gpuConfigs', 'user-tools', 'supportedGpuInstances')
if gpu_info is None:
return None
if self.instance_type not in gpu_info:
return None
gpu_instance = gpu_info[self.instance_type]['GpuInfo']['GPUs'][0]
gpu_device = GpuDevice.fromstring(gpu_instance['Name'])
return GpuHWInfo(num_gpus=gpu_instance['Count'],
gpu_device=gpu_device,
gpu_mem=gpu_instance['MemoryInfo']['SizeInMiB'])
@dataclass
class DatabricksAzureCluster(ClusterBase):
"""
Represents an instance of a running cluster on Databricks Azure.
"""
def _set_fields_from_props(self):
super()._set_fields_from_props()
self.uuid = self.props.get_value('cluster_id')
self.state = ClusterState.fromstring(self.props.get_value('state'))
def _set_name_from_props(self) -> None:
self.name = self.props.get_value('cluster_name')
def _init_nodes(self):
# assume that only one driver node
driver_nodes_from_conf = self.props.get_value_silent('driver')
worker_nodes_from_conf = self.props.get_value_silent('executors')
num_workers = self.props.get_value_silent('num_workers')
if num_workers is None:
num_workers = 0
# construct driver node info when cluster is inactive
if driver_nodes_from_conf is None:
driver_node_type_id = self.props.get_value('driver_node_type_id')
if driver_node_type_id is None:
raise RuntimeError('Failed to find driver node information from cluster properties')
driver_nodes_from_conf = {'node_id': None}
# construct worker nodes info when cluster is inactive
if worker_nodes_from_conf is None:
worker_node_type_id = self.props.get_value('node_type_id')
if worker_node_type_id is None:
raise RuntimeError('Failed to find worker node information from cluster properties')
worker_nodes_from_conf = [{'node_id': None} for i in range(num_workers)]
# create workers array
worker_nodes: list = []
for worker_node in worker_nodes_from_conf:
worker_props = {
'Id': worker_node['node_id'],
'props': JSONPropertiesContainer(prop_arg=worker_node, file_load=False),
# set the node region based on the wrapper defined region
'region': self.region,
'instance_type': self.props.get_value('node_type_id')
}
worker = DatabricksAzureNode.create_worker_node().set_fields_from_dict(worker_props)
worker.fetch_and_set_hw_info(self.cli)
worker_nodes.append(worker)
driver_props = {
'Id': driver_nodes_from_conf['node_id'],
'props': JSONPropertiesContainer(prop_arg=driver_nodes_from_conf, file_load=False),
# set the node region based on the wrapper defined region
'region': self.region,
'instance_type': self.props.get_value('driver_node_type_id')
}
driver_node = DatabricksAzureNode.create_master_node().set_fields_from_dict(driver_props)
driver_node.fetch_and_set_hw_info(self.cli)
self.nodes = {
SparkNodeType.WORKER: worker_nodes,
SparkNodeType.MASTER: driver_node
}
def _init_connection(self, cluster_id: str = None,
props: str = None) -> dict:
cluster_args = super()._init_connection(cluster_id=cluster_id, props=props)
# propagate region to the cluster
cluster_args.setdefault('region', self.cli.get_env_var('region'))
return cluster_args
def get_all_spark_properties(self) -> dict:
return self.props.get_value_silent('spark_conf')
def _build_migrated_cluster(self, orig_cluster):
"""
specific to the platform on how to build a cluster based on migration
:param orig_cluster: the cpu_cluster that does not support the GPU devices.
"""
# get the map of the instance types
mc_type_map, _ = orig_cluster.find_matches_for_node()
new_worker_nodes: list = []
for anode in orig_cluster.nodes.get(SparkNodeType.WORKER):
# loop on all worker nodes.
# even if the node is the same type, we still need to set the hardware
if anode.instance_type not in mc_type_map:
# the node stays the same
# skip converting the node
new_instance_type = anode.instance_type
self.logger.info('Node with %s supports GPU devices.',
anode.instance_type)
else:
new_instance_type = mc_type_map.get(anode.instance_type)
self.logger.info('Converting node %s into GPU supported instance-type %s',
anode.instance_type,
new_instance_type)
worker_props = {
'instance_type': new_instance_type,
'name': anode.name,
'Id': anode.Id,
'region': anode.region,
'props': anode.props,
}
new_node = DatabricksAzureNode.create_worker_node().set_fields_from_dict(worker_props)
new_worker_nodes.append(new_node)
self.nodes = {
SparkNodeType.WORKER: new_worker_nodes,
SparkNodeType.MASTER: orig_cluster.nodes.get(SparkNodeType.MASTER)
}
if bool(mc_type_map):
# update the platform notes
self.platform.update_ctxt_notes('nodeConversions', mc_type_map)
def get_tmp_storage(self) -> str:
raise NotImplementedError
@dataclass
class DBAzureSavingsEstimator(SavingsEstimator):
"""
A class that calculates the savings based on a Databricks-Azure price provider
"""
def _get_cost_per_cluster(self, cluster: ClusterGetAccessor):
db_azure_cost = 0.0
for node_type in [SparkNodeType.MASTER, SparkNodeType.WORKER]:
instance_type = cluster.get_node_instance_type(node_type)
nodes_cnt = cluster.get_nodes_cnt(node_type)
cost = self.price_provider.get_instance_price(instance=instance_type)
db_azure_cost += cost * nodes_cnt
return db_azure_cost
def _setup_costs(self):
# calculate target_cost
self.target_cost = self._get_cost_per_cluster(self.reshaped_cluster)
self.source_cost = self._get_cost_per_cluster(self.source_cluster)
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/cloud_api/databricks_azure.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Job submissions on Databricks AWS"""
from dataclasses import dataclass
from spark_rapids_pytools.rapids.rapids_job import RapidsLocalJob
@dataclass
class DBAWSLocalRapidsJob(RapidsLocalJob):
"""
Implementation of a RAPIDS job that runs on the local machine.
"""
job_label = 'DBAWSLocal'
def _build_submission_cmd(self) -> list:
# env vars are added later as a separate dictionary
cmd_arg = super()._build_submission_cmd()
# any s3 link has to be converted to S3a:
for index, arr_entry in enumerate(cmd_arg):
cmd_arg[index] = arr_entry.replace('s3://', 's3a://')
return cmd_arg
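# Illustrative sketch (not part of the original module): the s3:// -> s3a:// rewrite applied
# to the submission command. The arguments below are made up.
def _example_rewrite_s3_uris() -> list:
    cmd_arg = ['--eventlogs', 's3://my-bucket/logs/app-1', '--output', 's3://my-bucket/out']
    return [entry.replace('s3://', 's3a://') for entry in cmd_arg]
    # -> ['--eventlogs', 's3a://my-bucket/logs/app-1', '--output', 's3a://my-bucket/out']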
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/cloud_api/databricks_aws_job.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of google storage related functionalities."""
from dataclasses import dataclass
from spark_rapids_pytools.cloud_api.sp_types import CMDDriverBase
from spark_rapids_pytools.common.sys_storage import StorageDriver, FSUtil
@dataclass
class GStorageDriver(StorageDriver):
"""
Wrapper around gsutil commands such as copying/moving/listing files.
"""
cli: CMDDriverBase
@classmethod
def get_cmd_prefix(cls):
pref_arr = ['gsutil']
return pref_arr[:]
@classmethod
def get_cmd_cp_prefix(cls, is_dir: bool):
"""
Note that using the -m flag for multithreaded copying can cause the process to hang
forever. We work around this by limiting the parallel-process count in each command.
"""
if not is_dir:
return ['gsutil', 'cp']
# the bug is more pronounced on macOS (Utils.get_os_name() == 'Darwin')
return ['gsutil', '-o', '\"GSUtil:parallel_process_count=3\"', '-m', 'cp', '-r']
def resource_is_dir(self, src: str) -> bool:
if not src.startswith('gs://'):
return super().resource_is_dir(src)
# for gsutil, running the ls command on a file will return an output string containing
# the same resource.
# if the resource is a directory, the output will contain an extra slash at the end.
cmd_args = self.get_cmd_prefix()
pruned_src = src.rstrip('/')
dir_path = f'{pruned_src}/'
cmd_args.extend(['ls', dir_path])
try:
std_out = self.cli.run_sys_cmd(cmd_args)
stdout_lines = std_out.splitlines()
if stdout_lines:
for out_line in stdout_lines:
if out_line.startswith(dir_path):
# if any path starts with the directory path return True
return True
except RuntimeError:
self.cli.logger.debug('Error in checking resource [%s] is directory', src)
return False
def resource_exists(self, src) -> bool:
if not src.startswith('gs://'):
return super().resource_exists(src)
# run 'gsutil ls src'; if the return code is 0, then the resource exists.
cmd_args = self.get_cmd_prefix()
cmd_args.extend(['ls', src])
# run command and make sure we return 0.
try:
self.cli.run_sys_cmd(cmd_args)
res = True
except RuntimeError:
res = False
return res
def _download_remote_resource(self, src: str, dest: str) -> str:
if not src.startswith('gs://'):
return super()._download_remote_resource(src, dest)
# this is gstorage
return self.__internal_resource_mv(src, dest)
def _upload_remote_dest(self, src: str, dest: str, exclude_pattern: str = None) -> str:
if not dest.startswith('gs://'):
return super()._upload_remote_dest(src, dest)
# this is gstorage
return self.__internal_resource_mv(src, dest)
def is_file_path(self, value: str):
if value.startswith('gs://'):
return True
return super().is_file_path(value)
def _delete_path(self, src, fail_ok: bool = False):
if not src.startswith('gs://'):
super()._delete_path(src)
else:
res_is_dir = self.resource_is_dir(src)
recurse_arg = '-r' if res_is_dir else ''
cmd_args = self.get_cmd_prefix()
cmd_args.extend(['rm', recurse_arg, src])
self.cli.run_sys_cmd(cmd_args)
def __internal_resource_mv(self, src: str, dest: str) -> str:
is_dir = self.resource_is_dir(src)
# for gsutil, specifying a directory to copy will result in a duplicate; so we double-check
# that if the dest already has the name of the src, then we move one level up.
dest_resource_name = FSUtil.get_resource_name(dest)
src_resource_name = FSUtil.get_resource_name(src)
if src_resource_name == dest_resource_name:
# go to the parent level for destination
dest = dest.split(src_resource_name)[0].rstrip('/')
cmd_args = self.get_cmd_cp_prefix(is_dir)
cmd_args.extend([src, dest])
self.cli.run_sys_cmd(cmd_args)
return FSUtil.build_path(dest, FSUtil.get_resource_name(src))
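# Illustrative sketch (not part of the original module): how the copy prefix differs for
# files vs directories. The paths below are made up.
def _example_gsutil_cp_args(is_dir: bool = True) -> list:
    cmd_args = GStorageDriver.get_cmd_cp_prefix(is_dir)
    cmd_args.extend(['gs://my-bucket/dir', '/tmp/work'])
    return cmd_args
    # is_dir=True  -> ['gsutil', '-o', '"GSUtil:parallel_process_count=3"', '-m', 'cp', '-r',
    #                  'gs://my-bucket/dir', '/tmp/work']
    # is_dir=False -> ['gsutil', 'cp', 'gs://my-bucket/dir', '/tmp/work']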
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/cloud_api/gstorage.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation specific to Dataproc"""
import json
from dataclasses import dataclass, field
from typing import Any, List
from spark_rapids_tools import CspEnv
from spark_rapids_pytools.cloud_api.dataproc_job import DataprocLocalRapidsJob
from spark_rapids_pytools.cloud_api.gstorage import GStorageDriver
from spark_rapids_pytools.cloud_api.sp_types import PlatformBase, CMDDriverBase, \
ClusterBase, ClusterNode, SysInfo, GpuHWInfo, SparkNodeType, ClusterState, GpuDevice, \
NodeHWInfo, ClusterGetAccessor
from spark_rapids_pytools.common.prop_manager import JSONPropertiesContainer
from spark_rapids_pytools.common.sys_storage import FSUtil
from spark_rapids_pytools.common.utilities import SysCmd, Utils
from spark_rapids_pytools.pricing.dataproc_pricing import DataprocPriceProvider
from spark_rapids_pytools.pricing.price_provider import SavingsEstimator
@dataclass
class DataprocPlatform(PlatformBase):
"""
Represents the interface and utilities required by Dataproc.
Prerequisites:
- install gcloud command lines (gcloud, gsutil)
- configure the gcloud CLI.
- dataproc has staging temporary storage. we can retrieve that from the cluster properties.
"""
def __post_init__(self):
self.type_id = CspEnv.DATAPROC
super().__post_init__()
def _set_remaining_configuration_list(self) -> None:
remaining_props = self._get_config_environment('loadedConfigProps')
if not remaining_props:
return
properties_map_arr = self._get_config_environment('cliConfig',
'confProperties',
'propertiesMap')
if properties_map_arr:
config_cmd_prefix = ['gcloud', 'config', 'get']
for prop_entry in properties_map_arr:
prop_entry_key = prop_entry.get('propKey')
if self.ctxt.get(prop_entry_key):
# Skip if the property already set
continue
prop_cmd = config_cmd_prefix[:]
prop_cmd.append(f'{prop_entry.get("section")}/{prop_entry_key}')
cmd_args = {
'cmd': prop_cmd,
}
prop_cmd_obj = SysCmd().build(cmd_args)
prop_cmd_res = prop_cmd_obj.exec()
if prop_cmd_res:
self.ctxt.update({prop_entry_key: prop_cmd_res})
for prop_entry in properties_map_arr:
prop_entry_key = prop_entry.get('propKey')
if self.ctxt.get(prop_entry_key) is None:
# set it using environment variable if possible
self._set_env_prop_from_env_var(prop_entry_key)
def _construct_cli_object(self) -> CMDDriverBase:
return DataprocCMDDriver(timeout=0, cloud_ctxt=self.ctxt)
def _install_storage_driver(self):
self.storage = GStorageDriver(self.cli)
def _construct_cluster_from_props(self, cluster: str, props: str = None):
return DataprocCluster(self).set_connection(cluster_id=cluster, props=props)
def set_offline_cluster(self, cluster_args: dict = None):
pass
def migrate_cluster_to_gpu(self, orig_cluster):
"""
given a cluster, convert it to run NVIDIA Gpu based on mapping instance types
:param orig_cluster: the original cluster to migrate from
:return: a new object cluster that supports GPU.
"""
gpu_cluster_ob = DataprocCluster(self)
gpu_cluster_ob.migrate_from_cluster(orig_cluster)
return gpu_cluster_ob
def create_saving_estimator(self,
source_cluster: ClusterGetAccessor,
reshaped_cluster: ClusterGetAccessor):
raw_pricing_config = self.configs.get_value_silent('pricing')
if raw_pricing_config:
pricing_config = JSONPropertiesContainer(prop_arg=raw_pricing_config,
file_load=False)
else:
pricing_config: JSONPropertiesContainer = None
pricing_provider = DataprocPriceProvider(region=self.cli.get_region(),
pricing_configs={'gcloud': pricing_config})
saving_estimator = DataprocSavingsEstimator(price_provider=pricing_provider,
reshaped_cluster=reshaped_cluster,
source_cluster=source_cluster)
return saving_estimator
def create_local_submission_job(self, job_prop, ctxt) -> Any:
return DataprocLocalRapidsJob(prop_container=job_prop, exec_ctxt=ctxt)
def validate_job_submission_args(self, submission_args: dict) -> dict:
pass
def get_supported_gpus(self) -> dict:
def calc_num_gpus(gpus_criteria_conf: List[dict], num_cores: int) -> int:
if gpus_criteria_conf:
for c_conf in gpus_criteria_conf:
if c_conf.get('lowerBound') <= num_cores < c_conf.get('upperBound'):
return c_conf.get('gpuCount')
# Use default if the configuration is not loaded. This should not happen anyway.
return 2 if num_cores >= 16 else 1
gpus_from_configs = self.configs.get_value('gpuConfigs', 'user-tools', 'supportedGpuInstances')
gpu_count_criteria = self.configs.get_value('gpuConfigs',
'user-tools',
'gpuPerMachine', 'criteria', 'numCores')
gpu_scopes = {}
for mc_prof, mc_info in gpus_from_configs.items():
unit_info = mc_info['seriesInfo']
for num_cpu in unit_info['vCPUs']:
prof_name = f'{mc_prof}-{num_cpu}'
# create the sys info
memory_mb = num_cpu * unit_info['memPerCPU']
sys_info_obj = SysInfo(num_cpus=num_cpu, cpu_mem=memory_mb)
# create gpu_info
gpu_cnt = calc_num_gpus(gpu_count_criteria, num_cpu)
# default memory
gpu_device = GpuDevice.get_default_gpu()
gpu_mem = gpu_device.get_gpu_mem()[0]
gpu_info_obj = GpuHWInfo(num_gpus=gpu_cnt, gpu_mem=gpu_mem, gpu_device=gpu_device)
gpu_scopes[prof_name] = NodeHWInfo(sys_info=sys_info_obj, gpu_info=gpu_info_obj)
return gpu_scopes
@dataclass
class DataprocCMDDriver(CMDDriverBase):
"""Represents the command interface that will be used by Dataproc"""
def _list_inconsistent_configurations(self) -> list:
incorrect_envs = super()._list_inconsistent_configurations()
required_props = self.get_required_props()
if required_props:
for prop_entry in required_props:
prop_value = self.env_vars.get(prop_entry)
if prop_value is None:
incorrect_envs.append(f'Property {prop_entry} is not set.')
return incorrect_envs
def _build_platform_describe_node_instance(self, node: ClusterNode) -> list:
cmd_params = ['gcloud',
'compute',
'machine-types',
'describe',
f'{node.instance_type}',
'--zone',
f'{node.zone}']
return cmd_params
def _build_platform_list_cluster(self,
cluster,
query_args: dict = None) -> list:
cmd_params = ['gcloud', 'dataproc', 'clusters', 'list',
f"--region='{self.get_region()}'"]
filter_args = [f'clusterName = {cluster.name}']
if query_args is not None:
if 'state' in query_args:
state_param = query_args.get('state')
filter_args.append(f'status.state = {state_param}')
filter_arg = Utils.gen_joined_str(' AND ', filter_args)
cmd_params.append(f"--filter='{filter_arg}'")
return cmd_params
def pull_cluster_props_by_args(self, args: dict) -> str:
cluster_name = args.get('cluster')
# TODO: We should piggyback on the cmd so that we do not have to add region in each cmd
# region is already set in the instance
if 'region' in args:
region_name = args.get('region')
else:
region_name = self.get_region()
describe_cluster_cmd = ['gcloud',
'dataproc',
'clusters',
'describe',
cluster_name,
'--region',
region_name]
return self.run_sys_cmd(describe_cluster_cmd)
def exec_platform_describe_accelerator(self,
accelerator_type: str,
**cmd_args) -> str:
cmd_params = ['gcloud', 'compute', 'accelerator-types', 'describe',
accelerator_type, '--zone',
self.get_env_var('zone')]
return self.run_sys_cmd(cmd_params)
def _build_ssh_cmd_prefix_for_node(self, node: ClusterNode) -> str:
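# gcloud compute ssh needs the zone from the environment; the remote command is
# appended directly after the trailing '--command=' flag by _construct_ssh_cmd_with_prefix.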
pref_args = ['gcloud',
'compute', 'ssh',
node.name,
'--zone',
self.get_env_var('zone'),
'--command=']
return Utils.gen_joined_str(' ', pref_args)
def _build_cmd_scp_to_node(self, node: ClusterNode, src: str, dest: str) -> str:
pref_args = ['gcloud',
'compute', 'scp',
'--zone',
self.get_env_var('zone'),
src,
f'{node.name}:{dest}']
return Utils.gen_joined_str(' ', pref_args)
def _build_cmd_scp_from_node(self, node: ClusterNode, src: str, dest: str) -> str:
pref_args = ['gcloud',
'compute', 'scp',
'--zone',
self.get_env_var('zone'),
f'{node.name}:{src}',
dest]
return Utils.gen_joined_str(' ', pref_args)
def _construct_ssh_cmd_with_prefix(self, prefix: str, remote_cmd: str) -> str:
# for dataproc, the remote command should not be preceded by whitespace
return f'{prefix}{remote_cmd}'
def get_submit_spark_job_cmd_for_cluster(self,
cluster_name: str,
submit_args: dict) -> List[str]:
cmd = ['gcloud',
'dataproc',
'jobs',
'submit',
'spark',
'--cluster',
cluster_name,
'--region',
self.get_region()]
# add the platform arguments: jars, class
if 'platformSparkJobArgs' in submit_args:
for arg_k, arg_val in submit_args.get('platformSparkJobArgs').items():
if arg_val:
cmd.append(f'--{arg_k}={arg_val}')
# add the jar arguments
jar_args = submit_args.get('jarArgs')
if jar_args:
cmd.append('--')
# expects a list of strings
cmd.extend(jar_args)
return cmd
@dataclass
class DataprocNode(ClusterNode):
"""Implementation of Dataproc cluster node."""
zone: str = field(default=None, init=False)
@staticmethod
def __extract_info_from_value(conf_val: str):
if '/' in conf_val:
# this is a valid url-path
return FSUtil.get_resource_name(conf_val)
# this is a value
return conf_val
def _pull_gpu_hw_info(self, cli=None) -> GpuHWInfo:
# https://cloud.google.com/compute/docs/gpus
# the gpu info is not included in the instance type
# we need to:
# 1- get the accelerator of the node if any
# "gcloud compute accelerator-types describe nvidia-tesla-a100 --zone=us-central1-a"
# 2- Read the description flag to determine the memory size. (applies for A100)
# If it is not included, then load the gpu-memory from a lookup table
def parse_accelerator_description(raw_description: str) -> dict:
parsing_res = {}
descr_json = json.loads(raw_description)
description_field = descr_json.get('description')
field_components = description_field.split()
# filter out non-used tokens
dumped_tokens = ['NVIDIA', 'Tesla']
final_entries = [entry.lower() for entry in field_components if entry not in dumped_tokens]
gpu_device: GpuDevice = None
for token_entry in final_entries:
if 'GB' in token_entry:
# this is the memory value
memory_in_gb_str = token_entry.removesuffix('GB')
gpu_mem = 1024 * int(memory_in_gb_str)
parsing_res.setdefault('gpu_mem', gpu_mem)
else:
gpu_device = GpuDevice.fromstring(token_entry)
parsing_res.setdefault('gpu_device', gpu_device)
if 'gpu_mem' not in parsing_res:
# get the GPU memory size from lookup
parsing_res.setdefault('gpu_mem', gpu_device.get_gpu_mem()[0])
return parsing_res
accelerator_arr = self.props.get_value_silent('accelerators')
if not accelerator_arr:
return None
for defined_acc in accelerator_arr:
# TODO: if the accelerator_arr has other non-gpu ones, then we need to loop until we
# find the gpu accelerators
gpu_configs = {'num_gpus': defined_acc.get('acceleratorCount')}
accelerator_type = defined_acc.get('acceleratorTypeUri')
gpu_device_type = self.__extract_info_from_value(accelerator_type)
gpu_description = cli.exec_platform_describe_accelerator(accelerator_type=gpu_device_type,
cmd_args=None)
extra_gpu_info = parse_accelerator_description(gpu_description)
gpu_configs.update(extra_gpu_info)
return GpuHWInfo(num_gpus=gpu_configs.get('num_gpus'),
gpu_device=gpu_configs.get('gpu_device'),
gpu_mem=gpu_configs.get('gpu_mem'))
def _pull_sys_info(self, cli=None) -> SysInfo:
cpu_mem = self.mc_props.get_value('memoryMb')
num_cpus = self.mc_props.get_value('guestCpus')
return SysInfo(num_cpus=num_cpus, cpu_mem=cpu_mem)
def _pull_and_set_mc_props(self, cli=None):
instance_description = cli.exec_platform_describe_node_instance(self)
self.mc_props = JSONPropertiesContainer(prop_arg=instance_description, file_load=False)
def _set_fields_from_props(self):
# set the machine type
if not self.props:
return
mc_type_uri = self.props.get_value('machineTypeUri')
if mc_type_uri:
self.instance_type = self.__extract_info_from_value(mc_type_uri)
else:
# check if the machine type is under a different name
mc_type = self.props.get_value('machineType')
if mc_type:
self.instance_type = self.__extract_info_from_value(mc_type)
@dataclass
class DataprocCluster(ClusterBase):
"""
Represents an instance of a running cluster on Dataproc.
"""
def _get_temp_gs_storage(self) -> str:
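# Use the cluster temp bucket (config.tempBucket), scoped by the cluster UUID, as
# the temporary GCS location.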
temp_bucket = self.props.get_value_silent('config', 'tempBucket')
if temp_bucket:
return f'gs://{temp_bucket}/{self.uuid}'
return None
def get_eventlogs_from_config(self) -> List[str]:
res_arr = super().get_eventlogs_from_config()
if not res_arr:
# The SHS was not set for the cluster. Use the tmp bucket storage as the default SHS log directory
# append the temporary gstorage followed by the SHS folder
tmp_gs = self._get_temp_gs_storage()
res_arr.append(f'{tmp_gs}/spark-job-history')
return res_arr
def _set_fields_from_props(self):
super()._set_fields_from_props()
self.uuid = self.props.get_value('clusterUuid')
self.state = ClusterState.fromstring(self.props.get_value('status', 'state'))
def _set_name_from_props(self) -> None:
self.name = self.props.get_value('clusterName')
def _init_nodes(self):
# assume that there is only one master node
master_nodes_from_conf = self.props.get_value('config', 'masterConfig', 'instanceNames')
raw_worker_prop = self.props.get_value_silent('config', 'workerConfig')
worker_nodes: list = []
if raw_worker_prop:
worker_nodes_from_conf = self.props.get_value('config', 'workerConfig', 'instanceNames')
# create workers array
for worker_node in worker_nodes_from_conf:
worker_props = {
'name': worker_node,
'props': JSONPropertiesContainer(prop_arg=raw_worker_prop, file_load=False),
# set the node zone based on the wrapper defined zone
'zone': self.zone
}
worker = DataprocNode.create_worker_node().set_fields_from_dict(worker_props)
# TODO for optimization, we should set HW props for 1 worker
worker.fetch_and_set_hw_info(self.cli)
worker_nodes.append(worker)
raw_master_props = self.props.get_value('config', 'masterConfig')
master_props = {
'name': master_nodes_from_conf[0],
'props': JSONPropertiesContainer(prop_arg=raw_master_props, file_load=False),
# set the node zone based on the wrapper defined zone
'zone': self.zone
}
master_node = DataprocNode.create_master_node().set_fields_from_dict(master_props)
master_node.fetch_and_set_hw_info(self.cli)
self.nodes = {
SparkNodeType.WORKER: worker_nodes,
SparkNodeType.MASTER: master_node
}
def _init_connection(self, cluster_id: str = None,
props: str = None) -> dict:
cluster_args = super()._init_connection(cluster_id=cluster_id, props=props)
# propagate zone to the cluster
cluster_args.setdefault('zone', self.cli.get_env_var('zone'))
return cluster_args
def _build_migrated_cluster(self, orig_cluster):
"""
specific to the platform on how to build a cluster based on migration
:param orig_cluster: the cpu_cluster that does not support the GPU devices.
"""
# get the map of the instance types
mc_type_map, supported_mc_map = orig_cluster.find_matches_for_node()
new_worker_nodes: list = []
for anode in orig_cluster.nodes.get(SparkNodeType.WORKER):
# loop on all worker nodes.
# even if the node is the same type, we still need to set the hardware
if anode.instance_type not in mc_type_map:
# the node stays the same
# skip converting the node
new_instance_type = anode.instance_type
self.logger.info('Node with %s supports GPU devices.',
anode.instance_type)
else:
new_instance_type = mc_type_map.get(anode.instance_type)
self.logger.info('Converting node %s into GPU supported instance-type %s',
anode.instance_type,
new_instance_type)
worker_props = {
'instance_type': new_instance_type,
'name': anode.name,
'zone': anode.zone,
}
new_node = DataprocNode.create_worker_node().set_fields_from_dict(worker_props)
# we cannot rely on setting gpu info from the SDK because
# dataproc does not bind machine types to GPUs
# new_node.fetch_and_set_hw_info(self.cli)
gpu_mc_hw: ClusterNode = supported_mc_map.get(new_instance_type)
new_node.construct_hw_info(cli=None,
gpu_info=gpu_mc_hw.gpu_info,
sys_info=gpu_mc_hw.sys_info)
new_worker_nodes.append(new_node)
self.nodes = {
SparkNodeType.WORKER: new_worker_nodes,
SparkNodeType.MASTER: orig_cluster.nodes.get(SparkNodeType.MASTER)
}
if bool(mc_type_map):
# update the platform notes
self.platform.update_ctxt_notes('nodeConversions', mc_type_map)
def get_all_spark_properties(self) -> dict:
"""Returns a dictionary containing the spark configurations defined in the cluster properties"""
sw_props = self.props.get_value_silent('config', 'softwareConfig', 'properties')
if sw_props:
k_prefix = 'spark:'
return {key[len(k_prefix):]: value for (key, value) in sw_props.items() if key.startswith(k_prefix)}
return {}
def get_tmp_storage(self) -> str:
return self._get_temp_gs_storage()
def get_image_version(self) -> str:
return self.props.get_value_silent('config', 'softwareConfig', 'imageVersion')
def _set_render_args_create_template(self) -> dict:
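# Build the substitution values consumed by the cluster creation template:
# name, region/zone, image, machine types, worker count, and GPU settings.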
worker_node = self.get_worker_node()
gpu_per_machine, gpu_device = self.get_gpu_per_worker()
# map the gpu device to the equivalent accepted argument
gpu_device_hash = {
'T4': 'nvidia-tesla-t4',
'L4': 'nvidia-l4'
}
return {
'CLUSTER_NAME': self.get_name(),
'REGION': self.region,
'ZONE': self.zone,
'IMAGE': self.get_image_version(),
'MASTER_MACHINE': self.get_master_node().instance_type,
'WORKERS_COUNT': self.get_workers_count(),
'WORKERS_MACHINE': worker_node.instance_type,
'LOCAL_SSD': 2,
'GPU_DEVICE': gpu_device_hash.get(gpu_device),
'GPU_PER_WORKER': gpu_per_machine
}
@dataclass
class DataprocSavingsEstimator(SavingsEstimator):
"""
A class that calculates the savings based on Dataproc price provider
"""
def __calculate_group_cost(self, cluster_inst: ClusterGetAccessor, node_type: SparkNodeType):
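# Per-node cost = cores * cpu_price + memory_gb * ram_price + gpus * gpu_price;
# the group cost multiplies that by the number of nodes of the given node type.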
nodes_cnt = cluster_inst.get_nodes_cnt(node_type)
cores_count = cluster_inst.get_node_core_count(node_type)
mem_mb = cluster_inst.get_node_mem_mb(node_type)
node_mc_type = cluster_inst.get_node_instance_type(node_type)
# memory here is in mb, we need to convert it to gb
mem_gb = float(mem_mb) / 1024
cores_cost = self.price_provider.get_cpu_price(node_mc_type) * int(cores_count)
memory_cost = self.price_provider.get_ram_price(node_mc_type) * mem_gb
# calculate the GPU cost
gpu_per_machine, gpu_type = cluster_inst.get_gpu_per_node(node_type)
gpu_cost = 0.0
if gpu_per_machine > 0:
gpu_unit_price = self.price_provider.get_gpu_price(gpu_type)
gpu_cost = gpu_unit_price * gpu_per_machine
return nodes_cnt * (cores_cost + memory_cost + gpu_cost)
def _get_cost_per_cluster(self, cluster: ClusterGetAccessor):
master_cost = self.__calculate_group_cost(cluster, SparkNodeType.MASTER)
workers_cost = self.__calculate_group_cost(cluster, SparkNodeType.WORKER)
dataproc_cost = self.price_provider.get_container_cost()
return master_cost + workers_cost + dataproc_cost
def _setup_costs(self):
# calculate target_cost
self.target_cost = self._get_cost_per_cluster(self.reshaped_cluster)
self.source_cost = self._get_cost_per_cluster(self.source_cluster)
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/cloud_api/dataproc.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Job submissions on GCloud Dataproc"""
from dataclasses import dataclass
from spark_rapids_pytools.rapids.rapids_job import RapidsLocalJob
@dataclass
class DataprocLocalRapidsJob(RapidsLocalJob):
"""
Implementation of a RAPIDS job that runs on a local machine.
"""
job_label = 'dataprocLocal'
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/cloud_api/dataproc_job.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation specific to OnPrem"""
from dataclasses import dataclass
from typing import Any, List
from spark_rapids_tools import CspEnv
from spark_rapids_pytools.rapids.rapids_job import RapidsLocalJob
from spark_rapids_pytools.cloud_api.sp_types import PlatformBase, ClusterBase, ClusterNode, \
CMDDriverBase, ClusterGetAccessor, GpuDevice, \
GpuHWInfo, NodeHWInfo, SparkNodeType, SysInfo
from spark_rapids_pytools.common.prop_manager import JSONPropertiesContainer
from spark_rapids_pytools.common.sys_storage import StorageDriver
from spark_rapids_pytools.pricing.dataproc_pricing import DataprocPriceProvider
from spark_rapids_pytools.pricing.price_provider import SavingsEstimator
@dataclass
class OnPremPlatform(PlatformBase):
"""
Represents the interface and utilities required by OnPrem platform.
"""
def __post_init__(self):
self.type_id = CspEnv.ONPREM
self.platform = self.ctxt_args.get('targetPlatform')
super().__post_init__()
def _construct_cli_object(self):
return CMDDriverBase(timeout=0, cloud_ctxt=self.ctxt)
def _install_storage_driver(self):
self.storage = OnPremStorageDriver(self.cli)
def create_local_submission_job(self, job_prop, ctxt) -> Any:
return OnPremLocalRapidsJob(prop_container=job_prop, exec_ctxt=ctxt)
def _construct_cluster_from_props(self, cluster: str, props: str = None):
if self.platform is not None:
onprem_cluster = OnPremCluster(self).set_connection(cluster_id=cluster, props=props)
return onprem_cluster
return None
def migrate_cluster_to_gpu(self, orig_cluster):
"""
Given a cluster, convert it to run NVIDIA GPUs by mapping its instance types.
:param orig_cluster: the original cluster to migrate from
:return: a new cluster object that supports GPUs.
"""
gpu_cluster_ob = OnPremCluster(self)
gpu_cluster_ob.migrate_from_cluster(orig_cluster)
return gpu_cluster_ob
def get_platform_name(self) -> str:
"""
Get the name of the runtime target platform in lower case.
:return: the lower-case name of the runtime platform.
"""
if self.platform is not None:
if self.platform == 'dataproc':
self_id = CspEnv.DATAPROC
else:
self_id = self.type_id
return CspEnv.pretty_print(self_id)
def get_footer_message(self) -> str:
return 'To support acceleration with T4 GPUs, please use these worker node instance types.'
def create_saving_estimator(self,
source_cluster: ClusterGetAccessor,
reshaped_cluster: ClusterGetAccessor):
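# When the target platform is dataproc, estimate costs against Dataproc pricing
# using a fixed region ('us-central1').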
if self.platform == 'dataproc':
region = 'us-central1'
raw_pricing_config = self.configs.get_value_silent('csp_pricing')
if raw_pricing_config:
pricing_config = JSONPropertiesContainer(prop_arg=raw_pricing_config,
file_load=False)
else:
pricing_config: JSONPropertiesContainer = None
pricing_provider = DataprocPriceProvider(region=region,
pricing_configs={'gcloud': pricing_config})
saving_estimator = OnpremSavingsEstimator(price_provider=pricing_provider,
reshaped_cluster=reshaped_cluster,
source_cluster=source_cluster)
return saving_estimator
def set_offline_cluster(self, cluster_args: dict = None):
pass
def validate_job_submission_args(self, submission_args: dict) -> dict:
pass
def get_supported_gpus(self) -> dict:
def calc_num_gpus(gpus_criteria_conf: List[dict], num_cores: int) -> int:
if gpus_criteria_conf:
for c_conf in gpus_criteria_conf:
if c_conf.get('lowerBound') <= num_cores < c_conf.get('upperBound'):
return c_conf.get('gpuCount')
# Use default if the configuration is not loaded. This should not happen anyway.
return 2 if num_cores >= 16 else 1
gpus_from_configs = self.configs.get_value('gpuConfigs', 'dataproc', 'user-tools', 'supportedGpuInstances')
gpu_count_criteria = self.configs.get_value('gpuConfigs', 'dataproc', 'user-tools',
'gpuPerMachine', 'criteria', 'numCores')
gpu_scopes = {}
for mc_prof, mc_info in gpus_from_configs.items():
unit_info = mc_info['seriesInfo']
for num_cpu in unit_info['vCPUs']:
prof_name = f'{mc_prof}-{num_cpu}'
# create the sys info
memory_mb = num_cpu * unit_info['memPerCPU']
sys_info_obj = SysInfo(num_cpus=num_cpu, cpu_mem=memory_mb)
# create gpu_info
gpu_cnt = calc_num_gpus(gpu_count_criteria, num_cpu)
# default memory
gpu_device = GpuDevice.get_default_gpu()
gpu_mem = gpu_device.get_gpu_mem()[0]
gpu_info_obj = GpuHWInfo(num_gpus=gpu_cnt, gpu_mem=gpu_mem, gpu_device=gpu_device)
gpu_scopes[prof_name] = NodeHWInfo(sys_info=sys_info_obj, gpu_info=gpu_info_obj)
return gpu_scopes
@dataclass
class OnPremStorageDriver(StorageDriver):
cli: CMDDriverBase
@dataclass
class OnPremLocalRapidsJob(RapidsLocalJob):
"""
Implementation of a RAPIDS job that runs on a local machine.
"""
job_label = 'onpremLocal'
@dataclass
class OnPremNode(ClusterNode):
"""Implementation of Onprem cluster node."""
def fetch_and_set_hw_info(self, cli=None):
sys_info = self._pull_sys_info(cli)
self.construct_hw_info(cli=cli, sys_info=sys_info)
def _pull_sys_info(self, cli=None) -> SysInfo:
cpu_mem = self.props.get_value('memory')
cpu_mem = cpu_mem.replace('MiB', '')
num_cpus = self.props.get_value('numCores')
return SysInfo(num_cpus=num_cpus, cpu_mem=cpu_mem)
def _get_dataproc_nearest_cpu_cores(self, num_cores):
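# Round the on-prem core count up to the nearest Dataproc n1-standard size
# (1, 2, 4, 8, 16, 32, 64), capping at 96 cores.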
if num_cores == 1:
cpu_cores = 1
elif num_cores == 2:
cpu_cores = 2
elif 3 <= num_cores <= 4:
cpu_cores = 4
elif 5 <= num_cores <= 8:
cpu_cores = 8
elif 9 <= num_cores <= 16:
cpu_cores = 16
elif 17 <= num_cores <= 32:
cpu_cores = 32
elif 33 <= num_cores <= 64:
cpu_cores = 64
else:
cpu_cores = 96
return cpu_cores
def _get_instance_type(self, platform_name=None):
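# Map the on-prem node to an equivalent CSP instance type; currently only the
# dataproc target is supported ('n1-standard-<cores>').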
instance_type = None
if platform_name == 'dataproc':
cpu_cores = self.props.get_value('numCores')
cpu_cores = self._get_dataproc_nearest_cpu_cores(cpu_cores)
instance_type = 'n1-standard-' + str(cpu_cores)
return instance_type
def _set_fields_from_props(self):
# set the machine type
if not self.props:
return
if self.platform_name is not None:
self.instance_type = self._get_instance_type(self.platform_name)
def _pull_gpu_hw_info(self, cli=None) -> None:
pass
@dataclass
class OnPremCluster(ClusterBase):
"""
Represents an instance of a running cluster on the OnPrem platform.
"""
def _init_nodes(self):
raw_worker_prop = self.props.get_value_silent('config', 'workerConfig')
worker_nodes: list = []
if raw_worker_prop:
worker_nodes_total = self.props.get_value('config', 'workerConfig', 'numWorkers')
for i in range(worker_nodes_total):
worker_props = {
'name': 'worker' + str(i),
'props': JSONPropertiesContainer(prop_arg=raw_worker_prop, file_load=False),
# set the node zone based on the wrapper defined zone
'zone': self.zone,
'platform_name': self.platform.get_platform_name()
}
worker = OnPremNode.create_worker_node().set_fields_from_dict(worker_props)
# TODO for optimization, we should set HW props for 1 worker
worker.fetch_and_set_hw_info(self.cli)
worker_nodes.append(worker)
raw_master_props = self.props.get_value('config', 'masterConfig')
master_props = {
'name': 'master',
'props': JSONPropertiesContainer(prop_arg=raw_master_props, file_load=False),
# set the node zone based on the wrapper defined zone
'zone': self.zone,
'platform_name': self.platform.get_platform_name()
}
master_node = OnPremNode.create_master_node().set_fields_from_dict(master_props)
master_node.fetch_and_set_hw_info(self.cli)
self.nodes = {
SparkNodeType.WORKER: worker_nodes,
SparkNodeType.MASTER: master_node
}
def _build_migrated_cluster(self, orig_cluster):
"""
specific to the platform on how to build a cluster based on migration
:param orig_cluster: the cpu_cluster that does not support the GPU devices.
"""
# get the map of the instance types
_, supported_mc_map = orig_cluster.find_matches_for_node()
new_worker_nodes: list = []
for anode in orig_cluster.nodes.get(SparkNodeType.WORKER):
new_instance_type = anode.instance_type
worker_props = {
'instance_type': new_instance_type,
'name': anode.name,
'zone': anode.zone,
}
new_node = OnPremNode.create_worker_node().set_fields_from_dict(worker_props)
gpu_mc_hw: ClusterNode = supported_mc_map.get(new_instance_type)
new_node.construct_hw_info(cli=None,
gpu_info=gpu_mc_hw.gpu_info,
sys_info=gpu_mc_hw.sys_info)
new_worker_nodes.append(new_node)
master_node = orig_cluster.nodes.get(SparkNodeType.MASTER)
self.nodes = {
SparkNodeType.WORKER: new_worker_nodes,
SparkNodeType.MASTER: master_node
}
# force filling mc_type_map for on_prem platform.
mc_type_map = {
'Driver node': master_node.instance_type,
'Worker node': new_worker_nodes[0].instance_type
}
self.platform.update_ctxt_notes('nodeConversions', mc_type_map)
def _set_render_args_create_template(self) -> dict:
pass
def get_all_spark_properties(self) -> dict:
pass
def get_tmp_storage(self) -> str:
pass
@dataclass
class OnpremSavingsEstimator(SavingsEstimator):
"""
A class that calculates the savings based on Onprem price provider
"""
def __calculate_dataproc_group_cost(self, cluster_inst: ClusterGetAccessor, node_type: SparkNodeType):
nodes_cnt = cluster_inst.get_nodes_cnt(node_type)
cores_count = cluster_inst.get_node_core_count(node_type)
mem_mb = cluster_inst.get_node_mem_mb(node_type)
node_mc_type = cluster_inst.get_node_instance_type(node_type)
# memory here is in mb, we need to convert it to gb
mem_gb = float(mem_mb) / 1024
cores_cost = self.price_provider.get_cpu_price(node_mc_type) * int(cores_count)
memory_cost = self.price_provider.get_ram_price(node_mc_type) * mem_gb
# calculate the GPU cost
gpu_per_machine, gpu_type = cluster_inst.get_gpu_per_node(node_type)
gpu_cost = 0.0
if gpu_per_machine > 0:
gpu_unit_price = self.price_provider.get_gpu_price(gpu_type)
gpu_cost = gpu_unit_price * gpu_per_machine
return nodes_cnt * (cores_cost + memory_cost + gpu_cost)
def _get_cost_per_cluster(self, cluster: ClusterGetAccessor):
if self.price_provider.name.casefold() == 'dataproc':
master_cost = self.__calculate_dataproc_group_cost(cluster, SparkNodeType.MASTER)
workers_cost = self.__calculate_dataproc_group_cost(cluster, SparkNodeType.WORKER)
dataproc_cost = self.price_provider.get_container_cost()
total_cost = master_cost + workers_cost + dataproc_cost
return total_cost
def _setup_costs(self):
# calculate target_cost
self.target_cost = self._get_cost_per_cluster(self.reshaped_cluster)
self.source_cost = self._get_cost_per_cluster(self.source_cluster)
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/cloud_api/onprem.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Job submissions on EMR"""
from dataclasses import dataclass
from spark_rapids_pytools.rapids.rapids_job import RapidsLocalJob
@dataclass
class EmrLocalRapidsJob(RapidsLocalJob):
"""
Implementation of a RAPIDS job that runs on a local machine.
"""
job_label = 'emrLocal'
def _build_submission_cmd(self) -> list:
# env vars are added later as a separate dictionary
cmd_arg = super()._build_submission_cmd()
# any s3 link has to be converted to S3a:
for index, arr_entry in enumerate(cmd_arg):
cmd_arg[index] = arr_entry.replace('s3://', 's3a://')
return cmd_arg
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/cloud_api/emr_job.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation specific to DATABRICKS_AWS"""
import json
from dataclasses import dataclass, field
from typing import Any, List
from spark_rapids_tools import CspEnv
from spark_rapids_pytools.cloud_api.databricks_aws_job import DBAWSLocalRapidsJob
from spark_rapids_pytools.cloud_api.emr import EMRNode, EMRPlatform
from spark_rapids_pytools.cloud_api.s3storage import S3StorageDriver
from spark_rapids_pytools.cloud_api.sp_types import CMDDriverBase, ClusterBase, ClusterNode, \
ClusterGetAccessor
from spark_rapids_pytools.cloud_api.sp_types import ClusterState, SparkNodeType
from spark_rapids_pytools.common.prop_manager import JSONPropertiesContainer
from spark_rapids_pytools.pricing.databricks_pricing import DatabricksPriceProvider
from spark_rapids_pytools.pricing.price_provider import SavingsEstimator
@dataclass
class DBAWSPlatform(EMRPlatform):
"""
Represents the interface and utilities required by DATABRICKS_AWS.
Prerequisites:
- install databricks, aws command lines (databricks cli, aws cli)
- configure the databricks cli (token, workspace, profile)
- configure the aws cli
"""
def __post_init__(self):
self.type_id = CspEnv.DATABRICKS_AWS
super(EMRPlatform, self).__post_init__()
def _construct_cli_object(self) -> CMDDriverBase:
return DBAWSCMDDriver(timeout=0, cloud_ctxt=self.ctxt)
def _install_storage_driver(self):
self.storage = S3StorageDriver(self.cli)
def _construct_cluster_from_props(self, cluster: str, props: str = None):
return DatabricksCluster(self).set_connection(cluster_id=cluster, props=props)
def set_offline_cluster(self, cluster_args: dict = None):
pass
def migrate_cluster_to_gpu(self, orig_cluster):
"""
Given a cluster, convert it to run NVIDIA GPUs by mapping its instance types.
:param orig_cluster: the original cluster to migrate from
:return: a new cluster object that supports GPUs.
"""
gpu_cluster_ob = DatabricksCluster(self)
gpu_cluster_ob.migrate_from_cluster(orig_cluster)
return gpu_cluster_ob
def create_saving_estimator(self,
source_cluster: ClusterGetAccessor,
reshaped_cluster: ClusterGetAccessor):
raw_pricing_config = self.configs.get_value_silent('pricing')
if raw_pricing_config:
pricing_config = JSONPropertiesContainer(prop_arg=raw_pricing_config, file_load=False)
else:
pricing_config: JSONPropertiesContainer = None
databricks_price_provider = DatabricksPriceProvider(region=self.cli.get_region(),
pricing_configs={'databricks': pricing_config})
saving_estimator = DBAWSSavingsEstimator(price_provider=databricks_price_provider,
reshaped_cluster=reshaped_cluster,
source_cluster=source_cluster)
return saving_estimator
def create_local_submission_job(self, job_prop, ctxt) -> Any:
return DBAWSLocalRapidsJob(prop_container=job_prop, exec_ctxt=ctxt)
def validate_job_submission_args(self, submission_args: dict) -> dict:
pass
@dataclass
class DBAWSCMDDriver(CMDDriverBase):
"""Represents the command interface that will be used by DATABRICKS_AWS"""
def _list_inconsistent_configurations(self) -> list:
incorrect_envs = super()._list_inconsistent_configurations()
required_props = self.get_required_props()
if required_props is not None:
for prop_entry in required_props:
prop_value = self.env_vars.get(prop_entry)
if prop_value is None and prop_entry.startswith('aws_'):
incorrect_envs.append('AWS credentials are not set correctly ' +
'(this is required to access resources on S3)')
return incorrect_envs
return incorrect_envs
def _build_platform_list_cluster(self, cluster, query_args: dict = None) -> list:
pass
def pull_cluster_props_by_args(self, args: dict) -> str:
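# Describe the cluster through the databricks CLI, either by cluster id ('Id') or
# by cluster name ('cluster'), and normalize the output into a JSON string.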
get_cluster_cmd = ['databricks', 'clusters', 'get']
if 'Id' in args:
get_cluster_cmd.extend(['--cluster-id', args.get('Id')])
elif 'cluster' in args:
get_cluster_cmd.extend(['--cluster-name', args.get('cluster')])
else:
self.logger.error('Invalid arguments to pull the cluster properties')
cluster_described = self.run_sys_cmd(get_cluster_cmd)
if cluster_described is not None:
raw_prop_container = JSONPropertiesContainer(prop_arg=cluster_described, file_load=False)
return json.dumps(raw_prop_container.props)
return cluster_described
def _build_platform_describe_node_instance(self, node: ClusterNode) -> list:
cmd_params = ['aws ec2 describe-instance-types',
'--region', f'{self.get_region()}',
'--instance-types', f'{node.instance_type}']
return cmd_params
def get_submit_spark_job_cmd_for_cluster(self, cluster_name: str, submit_args: dict) -> List[str]:
raise NotImplementedError
@dataclass
class DatabricksNode(EMRNode):
"""Implementation of Databricks cluster node."""
region: str = field(default=None, init=False)
def _set_fields_from_props(self):
self.name = self.props.get_value_silent('public_dns')
@dataclass
class DatabricksCluster(ClusterBase):
"""
Represents an instance of a running cluster on Databricks.
"""
def _set_fields_from_props(self):
super()._set_fields_from_props()
self.uuid = self.props.get_value('cluster_id')
self.state = ClusterState.fromstring(self.props.get_value('state'))
def _set_name_from_props(self) -> None:
self.name = self.props.get_value('cluster_name')
def _init_nodes(self):
# assume that there is only one master node
master_nodes_from_conf = self.props.get_value_silent('driver')
worker_nodes_from_conf = self.props.get_value_silent('executors')
num_workers = self.props.get_value_silent('num_workers')
if num_workers is None:
num_workers = 0
# construct master node info when cluster is inactive
if master_nodes_from_conf is None:
master_node_type_id = self.props.get_value('driver_node_type_id')
if master_node_type_id is None:
raise RuntimeError('Failed to find master node information from cluster properties')
master_nodes_from_conf = {'node_id': None}
# construct worker nodes info when cluster is inactive
if worker_nodes_from_conf is None:
worker_node_type_id = self.props.get_value('node_type_id')
if worker_node_type_id is None:
raise RuntimeError('Failed to find worker node information from cluster properties')
worker_nodes_from_conf = [{'node_id': None} for i in range(num_workers)]
# create workers array
worker_nodes: list = []
for worker_node in worker_nodes_from_conf:
worker_props = {
'Id': worker_node['node_id'],
'props': JSONPropertiesContainer(prop_arg=worker_node, file_load=False),
# set the node region based on the wrapper defined region
'region': self.region,
'instance_type': self.props.get_value('node_type_id')
}
worker = DatabricksNode.create_worker_node().set_fields_from_dict(worker_props)
worker.fetch_and_set_hw_info(self.cli)
worker_nodes.append(worker)
master_props = {
'Id': master_nodes_from_conf['node_id'],
'props': JSONPropertiesContainer(prop_arg=master_nodes_from_conf, file_load=False),
# set the node region based on the wrapper defined region
'region': self.region,
'instance_type': self.props.get_value('driver_node_type_id')
}
master_node = DatabricksNode.create_master_node().set_fields_from_dict(master_props)
master_node.fetch_and_set_hw_info(self.cli)
self.nodes = {
SparkNodeType.WORKER: worker_nodes,
SparkNodeType.MASTER: master_node
}
def _init_connection(self, cluster_id: str = None,
props: str = None) -> dict:
cluster_args = super()._init_connection(cluster_id=cluster_id, props=props)
# propagate region to the cluster
cluster_args.setdefault('region', self.cli.get_env_var('region'))
return cluster_args
def get_all_spark_properties(self) -> dict:
return self.props.get_value('spark_conf')
def _build_migrated_cluster(self, orig_cluster):
"""
specific to the platform on how to build a cluster based on migration
:param orig_cluster: the cpu_cluster that does not support the GPU devices.
"""
# get the map of the instance types
mc_type_map, _ = orig_cluster.find_matches_for_node()
new_worker_nodes: list = []
for anode in orig_cluster.nodes.get(SparkNodeType.WORKER):
# loop on all worker nodes.
# even if the node is the same type, we still need to set the hardware
if anode.instance_type not in mc_type_map:
# the node stays the same
# skip converting the node
new_instance_type = anode.instance_type
self.logger.info('Node with %s supports GPU devices.',
anode.instance_type)
else:
new_instance_type = mc_type_map.get(anode.instance_type)
self.logger.info('Converting node %s into GPU supported instance-type %s',
anode.instance_type,
new_instance_type)
worker_props = {
'instance_type': new_instance_type,
'name': anode.name,
'Id': anode.Id,
'region': anode.region,
'props': anode.props,
}
new_node = DatabricksNode.create_worker_node().set_fields_from_dict(worker_props)
new_worker_nodes.append(new_node)
self.nodes = {
SparkNodeType.WORKER: new_worker_nodes,
SparkNodeType.MASTER: orig_cluster.nodes.get(SparkNodeType.MASTER)
}
if bool(mc_type_map):
# update the platform notes
self.platform.update_ctxt_notes('nodeConversions', mc_type_map)
def get_tmp_storage(self) -> str:
raise NotImplementedError
@dataclass
class DBAWSSavingsEstimator(SavingsEstimator):
"""
A class that calculates the savings based on a Databricks-AWS price provider
"""
def __calculate_ec2_cost(self, cluster: ClusterGetAccessor) -> float:
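# EC2 cost: per-instance price from the 'aws' catalog multiplied by the node count,
# summed over the master and worker node types.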
res = 0.0
for node_type in [SparkNodeType.MASTER, SparkNodeType.WORKER]:
instance_type = cluster.get_node_instance_type(node_type)
nodes_cnt = cluster.get_nodes_cnt(node_type)
ec2_cost = self.price_provider.catalogs['aws'].get_value('ec2', instance_type)
res += ec2_cost * nodes_cnt
return res
def _get_cost_per_cluster(self, cluster: ClusterGetAccessor):
dbu_cost = 0.0
for node_type in [SparkNodeType.MASTER, SparkNodeType.WORKER]:
instance_type = cluster.get_node_instance_type(node_type)
nodes_cnt = cluster.get_nodes_cnt(node_type)
cost = self.price_provider.get_instance_price(instance=instance_type)
dbu_cost += cost * nodes_cnt
return self.__calculate_ec2_cost(cluster) + dbu_cost
def _setup_costs(self):
# calculate target_cost
self.target_cost = self._get_cost_per_cluster(self.reshaped_cluster)
self.source_cost = self._get_cost_per_cluster(self.source_cluster)
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/cloud_api/databricks_aws.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service providers defined types"""
import configparser
import json
from collections import defaultdict
from dataclasses import dataclass, field
from enum import Enum
from logging import Logger
from typing import Type, Any, List, Callable
from spark_rapids_tools import EnumeratedType, CspEnv
from spark_rapids_pytools.common.prop_manager import AbstractPropertiesContainer, JSONPropertiesContainer, \
get_elem_non_safe
from spark_rapids_pytools.common.sys_storage import StorageDriver, FSUtil
from spark_rapids_pytools.common.utilities import ToolLogging, SysCmd, Utils, TemplateGenerator
class DeployMode(EnumeratedType):
"""List of tools deployment methods"""
# The rapids job is running on local node
LOCAL = 'local'
# The rapids job is submitted on a remote cluster
REMOTE_CLUSTER = 'remote'
def requires_remote_storage(self) -> bool:
return self.value in [self.REMOTE_CLUSTER]
class GpuDevice(EnumeratedType):
"""List of supported GPU devices"""
T4 = 't4'
V100 = 'v100'
K80 = 'k80'
A100 = 'a100'
P100 = 'P100'
P4 = 'P4'
L4 = 'l4'
A10 = 'a10'
@classmethod
def get_default_gpu(cls):
return cls.T4
def get_gpu_mem(self) -> list:
memory_hash = {
self.T4: [16384],
self.L4: [24576],
self.A100: [40960, 81920],
self.P4: [8192],
self.K80: [12288],
self.V100: [16384],
self.P100: [16384],
self.A10: [24576]
}
return memory_hash.get(self)
class ClusterState(EnumeratedType):
"""
Standard states for a cluster.
"""
STARTING = 'starting'
BOOTSTRAPPING = 'bootstrapping'
WAITING = 'waiting'
RUNNING = 'running'
TERMINATING = 'terminating'
TERMINATED = 'terminated'
TERMINATED_WITH_ERRORS = 'terminated_with_errors'
STOPPED = 'stopped'
OFFLINE = 'offline'
UNKNOWN = 'unknown'
class TargetPlatform(EnumeratedType):
"""Determine CostSavings for target platform based on OnPrem cluster configuration"""
DATAPROC = 'dataproc'
NONE = None
class SparkNodeType(EnumeratedType):
"""
Node type from Spark perspective. We either have a master node or a worker node.
Note that the provider could have different grouping.
For example EMR has: master, task, and core.
Other categorizations, such as on-demand, also exist.
"""
MASTER = 'master'
WORKER = 'worker'
@dataclass
class SysInfo:
num_cpus: int = None
cpu_mem: int = None
@dataclass
class GpuHWInfo:
num_gpus: int = None
gpu_mem: int = None
gpu_device: GpuDevice = GpuDevice.get_default_gpu()
def get_gpu_device_name(self) -> str:
return GpuDevice.tostring(self.gpu_device)
@dataclass
class NodeHWInfo:
sys_info: SysInfo = None
gpu_info: GpuHWInfo = None
def is_gpu_node(self) -> bool:
return self.gpu_info is not None
@dataclass
class ClusterNode:
"""
Represents a single cluster node
:param node_type: type from Spark perspective (Worker vs Master)
:param name: name of the node used to remote access in SSH
:param instance_type: the instance type running on the node
:param mc_props: holds the properties of the instance type running on the node.
This is used for further processing
:param hw_info: contains hardware settings of the node: System and GPU.
"""
node_type: SparkNodeType
name: str = field(default=None, init=False)
instance_type: str = field(default=None, init=False)
platform_name: str = field(default=None, init=False)
props: AbstractPropertiesContainer = field(default=None, init=False)
mc_props: AbstractPropertiesContainer = field(default=None, init=False)
hw_info: NodeHWInfo = field(default=None, init=False)
def set_fields_from_dict(self, field_values: dict = None):
if field_values is not None:
for field_name in field_values:
setattr(self, field_name, field_values.get(field_name))
self._set_fields_from_props()
return self
def _set_fields_from_props(self):
pass
def _pull_and_set_mc_props(self, cli=None):
pass
def _pull_gpu_hw_info(self, cli=None) -> GpuHWInfo:
raise NotImplementedError
def _pull_sys_info(self, cli=None) -> SysInfo:
raise NotImplementedError
def get_name(self) -> str:
return self.name
def construct_hw_info(self,
cli=None,
gpu_info: GpuHWInfo = None,
sys_info: SysInfo = None):
del cli # Unused cli, defined for future use.
self.hw_info = NodeHWInfo(sys_info=sys_info,
gpu_info=gpu_info)
def fetch_and_set_hw_info(self, cli=None):
self._pull_and_set_mc_props(cli)
sys_info = self._pull_sys_info(cli)
try:
# if a node has no gpu, then it is expected that setting the gpu info fails
gpu_info = self._pull_gpu_hw_info(cli)
except Exception as ex: # pylint: disable=broad-except
cli.logger.info(f'Could not pull GPU info for '
f'{SparkNodeType.tostring(self.node_type)} node {self.name}: {ex}')
gpu_info = None
self.construct_hw_info(cli=cli, gpu_info=gpu_info, sys_info=sys_info)
def find_best_cpu_conversion(self, target_list: dict):
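# Pick the largest profile whose CPU count does not exceed this node's CPU count
# (the target list is assumed to be sorted ascending); otherwise fall back to the
# last visited profile.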
target_cpus = self.hw_info.sys_info.num_cpus
best_match = None
last_record = None
for prof_id, hw_info in target_list.items():
# assume that the list is sorted by number of CPUs
last_record = prof_id
if hw_info.sys_info.num_cpus <= target_cpus:
best_match = prof_id
if best_match is None:
best_match = last_record
return best_match
@classmethod
def create_worker_node(cls) -> Any:
return cls(SparkNodeType.WORKER)
@classmethod
def create_master_node(cls) -> Any:
return cls(SparkNodeType.MASTER)
@classmethod
def create_node(cls, value):
if isinstance(value, SparkNodeType):
if value == SparkNodeType.MASTER:
return cls.create_master_node()
if value == SparkNodeType.WORKER:
return cls.create_worker_node()
raise RuntimeError(f'Invalid node type while creating cluster node {value}')
@dataclass
class ClusterGetAccessor:
"""
Represents the interface used to access the cluster information that is
used by other entities such as the SavingsEstimator
"""
def get_node(self, node_type: SparkNodeType) -> ClusterNode:
raise NotImplementedError
def get_all_nodes(self) -> list:
raise NotImplementedError
def get_nodes_cnt(self, node_type: SparkNodeType) -> int:
raise NotImplementedError
def get_name(self) -> str:
raise NotImplementedError
def get_node_core_count(self, node_type: SparkNodeType) -> int:
node = self.get_node(node_type)
return node.hw_info.sys_info.num_cpus
def get_node_mem_mb(self, node_type: SparkNodeType) -> int:
node = self.get_node(node_type)
return node.hw_info.sys_info.cpu_mem
def get_gpu_per_node(self, node_type: SparkNodeType) -> (int, str):
node = self.get_node(node_type)
gpu_info = node.hw_info.gpu_info
if gpu_info:
num_gpus, gpu_device = gpu_info.num_gpus, gpu_info.gpu_device
else:
num_gpus, gpu_device = 0, GpuDevice.get_default_gpu()
return num_gpus, GpuDevice.tostring(gpu_device)
def get_node_instance_type(self, node_type: SparkNodeType) -> str:
node = self.get_node(node_type)
return node.instance_type
def get_workers_instant_types(self) -> str:
return self.get_node_instance_type(SparkNodeType.WORKER)
def get_workers_count(self) -> int:
return self.get_nodes_cnt(SparkNodeType.WORKER)
def get_workers_cores_count(self) -> int:
return self.get_node_core_count(SparkNodeType.WORKER)
def get_workers_mem_mb(self) -> int:
return self.get_node_mem_mb(SparkNodeType.WORKER)
def get_gpu_per_worker(self) -> (int, str):
return self.get_gpu_per_node(SparkNodeType.WORKER)
@dataclass
class CMDDriverBase:
"""
Represents the command interface that will be used by the platform
It has:
1- the command used by the platform, for example gcloud, gsutil, or AWS
2- the ssh command to nodes (it could include authentications)
3- normal commands
:param cloud_ctxt: dictionary containing all the necessary configurations related to the CSP
:param timeout: How long to wait (in seconds) for the command to finish (optional).
:param env_vars: dictionary containing all the variables required by the driver.
"""
cloud_ctxt: dict
timeout: int = 0
env_vars: dict = field(default_factory=dict, init=False)
logger: Logger = field(default=ToolLogging.get_and_setup_logger('rapids.tools.cmd'), init=False)
def get_env_var(self, key: str):
return self.env_vars.get(key)
def get_region(self) -> str:
return self.env_vars.get('region')
def get_cmd_run_configs(self) -> dict:
return self.env_vars.get('cmdRunnerProperties')
def get_required_props(self) -> list:
cmd_runner_props = self.get_cmd_run_configs()
if cmd_runner_props:
return cmd_runner_props.get('inheritedProps')
return None
def get_system_prerequisites(self) -> list:
res = []
cmd_runner_props = self.get_cmd_run_configs()
if cmd_runner_props:
res.extend(cmd_runner_props.get('systemPrerequisites'))
return res
def get_piggyback_props(self) -> list:
res = []
cmd_runner_props = self.get_cmd_run_configs()
if cmd_runner_props:
res = cmd_runner_props.get('cliPiggyBackEnvVars')['definedVars']
return res
def get_piggyback_arguments(self) -> list:
res = []
cmd_runner_props = self.get_cmd_run_configs()
if cmd_runner_props:
res = cmd_runner_props.get('cliPiggyBackArgs')['definedArgs']
return res
def get_rapids_job_configs(self, deploy_mode: DeployMode) -> dict:
cmd_runner_props = self.get_cmd_run_configs()
if cmd_runner_props and deploy_mode is not None:
deploy_mode_configs = get_elem_non_safe(cmd_runner_props,
['rapidsJobs', DeployMode.tostring(deploy_mode)])
return deploy_mode_configs
return None
def get_and_set_env_vars(self):
"""For that driver, try to get all the available system environment for the system."""
for item_key in self.cloud_ctxt:
# save all not-None entries to the env_vars
item_value = self.cloud_ctxt.get(item_key)
if item_value is not None:
self.env_vars[item_key] = item_value
def _list_inconsistent_configurations(self) -> list:
"""
List all the inconsistent configuration in the platform
:return: a list of inconsistencies
"""
incorrect_envs = []
for sys_tool in self.get_system_prerequisites():
if not Utils.is_system_tool(sys_tool):
incorrect_envs.append(f'Tool {sys_tool} is not installed or not in the PATH environment')
if self.get_region() is None:
incorrect_envs.append('Platform region is not set.')
return incorrect_envs
def _handle_inconsistent_configurations(self, incorrect_envs: list) -> None:
if len(incorrect_envs) > 0:
# we do not want to raise a runtime error because some of the flags are not required by
# all the tools.
# TODO: improve this by checking the requirements for each tool.
exc_msg = Utils.gen_joined_str('; ', incorrect_envs)
self.logger.warning('Environment report: %s', exc_msg)
def validate_env(self):
incorrect_envs = self._list_inconsistent_configurations()
self._handle_inconsistent_configurations(incorrect_envs)
def run_sys_cmd(self,
cmd,
cmd_input: str = None,
fail_ok: bool = False,
env_vars: dict = None) -> str:
def process_credentials_option(cmd: list):
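# Mask credential values (e.g. account keys) so they are not leaked into debug logs.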
res = []
for i, arg in enumerate(cmd):
if i > 0 and 'account-key' in cmd[i - 1]:
arg = 'MY_ACCESS_KEY'
elif 'fs.azure.account.key' in arg:
arg = arg.split('=')[0] + '=MY_ACCESS_KEY'
res.append(arg)
return res
def process_streams(std_out, std_err):
if ToolLogging.is_debug_mode_enabled():
# reformat lines to make the log more readable
stdout_splits = std_out.splitlines()
stderr_splits = std_err.splitlines()
stdout_str = ''
stderr_str = ''
if len(stdout_splits) > 0:
std_out_lines = Utils.gen_multiline_str([f'\t| {line}' for line in stdout_splits])
stdout_str = f'\n\t<STDOUT>\n{std_out_lines}'
cmd_log_str = Utils.gen_joined_str(' ', process_credentials_option(cmd))
if len(stderr_splits) > 0:
std_err_lines = Utils.gen_multiline_str([f'\t| {line}' for line in stderr_splits])
stderr_str = f'\n\t<STDERR>\n{std_err_lines}'
self.logger.debug('executing CMD:\n\t<CMD: %s>[%s]; [%s]',
cmd_log_str,
stdout_str,
stderr_str)
def is_sdk_cmd(original_cmd, cmd_prefix: str) -> bool:
if isinstance(original_cmd, list):
# the command is an array, then we should only pick the first index
cmd_token = original_cmd[0]
else:
cmd_token = original_cmd
return cmd_token.startswith(cmd_prefix)
def append_to_cmd(original_cmd, extra_args: list) -> Any:
if isinstance(original_cmd, list):
# We do not append at the end of the cmd because this can break some commands like
# spark-submit
res = []
ind = 0
# loop until we find the first argument (starts with '--')
while ind < len(original_cmd) and not original_cmd[ind].startswith('--'):
res.append(original_cmd[ind])
ind += 1
res.extend(extra_args)
if ind < len(original_cmd):
res.extend(original_cmd[ind:])
return res
extra_args_flatten = Utils.gen_joined_str(' ', extra_args)
return f'{original_cmd} {extra_args_flatten}'
# process the env_variables of the command
piggyback_vars = self.get_piggyback_props()
for var_entry in piggyback_vars:
prop_label = var_entry['confProperty']
var_value = self.get_env_var(prop_label)
if var_value:
if not env_vars:
env_vars = {var_entry['varKey']: var_value}
else:
# use setdefault in case the env_var was already defined
env_vars.setdefault(var_entry['varKey'], var_value)
# process the piggybacked sdk arguments
piggyback_args = []
piggyback_args_raw = self.get_piggyback_arguments()
for arg_entry in piggyback_args_raw:
if is_sdk_cmd(cmd, arg_entry['sdkCommand']):
# we should apply the piggybacked argument to this command
piggyback_args.append(f'--{arg_entry["argKey"]}')
if 'argValue' in arg_entry:
piggyback_args.append(f'{arg_entry["argValue"]}')
else:
arg_value = self.get_env_var(arg_entry['confProperty'])
piggyback_args.append(arg_value)
if piggyback_args:
cmd = append_to_cmd(cmd, piggyback_args)
cmd_args = {
'cmd': cmd,
'fail_ok': fail_ok,
'cmd_input': cmd_input,
'env_vars': env_vars,
'process_streams_cb': process_streams
}
sys_cmd = SysCmd().build(cmd_args)
return sys_cmd.exec()
def _build_ssh_cmd_prefix_for_node(self, node: ClusterNode) -> str:
del node # Unused by super method.
return ''
def _build_cmd_scp_to_node(self, node: ClusterNode, src: str, dest: str) -> str: # pylint: disable=unused-argument
del node # Unused by super method.
return ''
def _build_cmd_scp_from_node(self, node: ClusterNode, src: str, dest: str) -> str: # pylint: disable=unused-argument
del node # Unused by super method.
return ''
def _construct_ssh_cmd_with_prefix(self, prefix: str, remote_cmd: str) -> str:
return f'{prefix} {remote_cmd}'
def ssh_cmd_node(self, node: ClusterNode, ssh_cmd: str, cmd_input: str = None) -> str:
prefix_cmd = self._build_ssh_cmd_prefix_for_node(node=node)
full_ssh_cmd = self._construct_ssh_cmd_with_prefix(prefix=prefix_cmd, remote_cmd=ssh_cmd)
return self.run_sys_cmd(full_ssh_cmd, cmd_input=cmd_input)
def scp_to_node(self, node: ClusterNode, src: str, dest: str) -> str:
cmd = self._build_cmd_scp_to_node(node=node, src=src, dest=dest)
return self.run_sys_cmd(cmd)
def scp_from_node(self, node: ClusterNode, src: str, dest: str) -> str:
cmd = self._build_cmd_scp_from_node(node=node, src=src, dest=dest)
return self.run_sys_cmd(cmd)
def pull_cluster_props_by_args(self, args: dict) -> str or None:
del args # Unused by super method.
return ''
def _build_platform_describe_node_instance(self, node: ClusterNode) -> list:
del node # Unused by super method.
return []
def exec_platform_describe_node_instance(self, node: ClusterNode) -> str:
"""
Given a node, execute the platform CLI to pull the properties of the instance type running on
that node
:param node: object representing cluster component
:return: string containing the properties of the machine. The string could be in json or yaml format.
"""
cmd_params = self._build_platform_describe_node_instance(node=node)
return self.run_sys_cmd(cmd_params)
def _build_platform_list_cluster(self,
cluster,
query_args: dict = None) -> list:
raise NotImplementedError
def exec_platform_list_cluster_instances(self,
cluster,
query_args: dict = None) -> str:
cmd_args = self._build_platform_list_cluster(cluster=cluster, query_args=query_args)
return self.run_sys_cmd(cmd_args)
def exec_platform_describe_accelerator(self,
accelerator_type: str,
**cmd_args) -> str:
"""
Some platforms like Dataproc represent GPUs as accelerators.
To get the information of each accelerator, we need to run describe cmd
:param accelerator_type: the name of the GPU accelerator which can be platform specific
:param cmd_args: the arguments to be sent to the sdk
:return: a string in json format representing the information about the accelerator
"""
del accelerator_type # Unused accelerator_type
del cmd_args # Unused cmd_args
return ''
def build_local_job_arguments(self, submit_args: dict) -> dict:
"""
an implementation specific to the platform that builds a dictionary of the arguments and
system env-vars needed for the submission of a local-mode job on that platform
:param submit_args: the arguments specified by the user that reflects on the platform.
:return: a dictionary in the format of {"jvmArgs": {}, "envArgs": {}}
"""
jvm_heap_size = submit_args.get('jvmMaxHeapSize')
xmx_key = f'Xmx{jvm_heap_size}g'
res = {
'jvmArgs': {
# TODO: setting the AWS access keys from jvm arguments did not work
# 'Dspark.hadoop.fs.s3a.secret.key': aws_access_key,
# 'Dspark.hadoop.fs.s3a.access.key': aws_access_id
xmx_key: ''
},
'envArgs': {}
}
rapids_configs = self.get_rapids_job_configs(self.cloud_ctxt.get('deployMode'))
if not rapids_configs:
return res
global_sys_vars = rapids_configs.get('definedVars')
if not global_sys_vars:
return res
env_args_table = {}
for sys_var in global_sys_vars:
prop_value = self.get_env_var(sys_var['confProperty'])
if prop_value:
env_args_table.setdefault(sys_var['varKey'], prop_value)
res.update({'envArgs': env_args_table})
return res
def get_submit_spark_job_cmd_for_cluster(self,
cluster_name: str,
submit_args: dict) -> List[str]:
raise NotImplementedError
@dataclass
class PlatformBase:
"""
Represents the common methods used by all other platforms.
We need to load constants about the platform:
1- supported machine types
2- supported Images or non-supported images
3- GPU specs
4- pricing catalog
"""
ctxt_args: dict
type_id: CspEnv = field(default_factory=lambda: CspEnv.NONE, init=False)
platform: str = field(default=None, init=False)
cli: CMDDriverBase = field(default=None, init=False)
storage: StorageDriver = field(default=None, init=False)
ctxt: dict = field(default_factory=dict, init=False)
configs: JSONPropertiesContainer = field(default=None, init=False)
logger: Logger = field(default=ToolLogging.get_and_setup_logger('rapids.tools.csp'), init=False)
@classmethod
def list_supported_gpus(cls):
return [GpuDevice.T4, GpuDevice.A100, GpuDevice.L4, GpuDevice.A10]
def load_from_config_parser(self, conf_file, **prop_args) -> dict:
res = None
try:
parser_obj = configparser.ConfigParser()
parser_obj.read(conf_file)
res = {}
if prop_args.get('sectionKey'):
section_name = prop_args.get('sectionKey')
if not parser_obj.has_section(section_name):
# try to use "profile XYZ" format
if parser_obj.has_section(f'profile {section_name}'):
section_name = f'profile {section_name}'
key_list = prop_args.get('keyList')
for k in key_list:
if parser_obj.has_option(section_name, k):
res.update({k: parser_obj.get(section_name, k)})
except (configparser.NoSectionError, configparser.NoOptionError, configparser.ParsingError) as conf_ex:
self.logger.debug('Could not load properties from configuration file %s. Exception: %s',
conf_file, conf_ex)
return res
def _construct_cli_object(self) -> CMDDriverBase:
raise NotImplementedError
def _create_cli_instance(self) -> CMDDriverBase:
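# Stash the command-runner properties into the platform context before the
# platform-specific CLI driver object is constructed.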
cmd_driver_props = self._get_config_environment('cmdRunnerProperties')
self.ctxt['cmdRunnerProperties'] = cmd_driver_props
return self._construct_cli_object()
def _install_storage_driver(self):
raise NotImplementedError
def _get_config_environment(self, *key_strs) -> Any:
return self.configs.get_value('environment', *key_strs)
def _load_config_environment_var_prop(self, prop_key: str):
env_variables = self._get_config_environment('cliConfig', 'envVariables')
# find the env_variable that maps to the property
res = []
for env_var in env_variables:
if env_var['confProperty'] == prop_key:
res.append(env_var)
return res
def _set_env_prop_from_env_var(self, prop_key: str) -> None:
if self.ctxt.get(prop_key):
# it is already set. do nothing
return
# find the env_variable that maps to the property
for env_var in self._load_config_environment_var_prop(prop_key):
env_var_key = env_var['envVariableKey']
env_var_def_val = env_var.get('defaultValue')
env_var_val = Utils.get_sys_env_var(env_var_key, env_var_def_val)
if env_var_val is not None:
if '/' in env_var_val:
# this is a file
env_var_val = FSUtil.expand_path(env_var_val)
self.ctxt.update({prop_key: env_var_val})
break
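# The env-variable mapping entries consumed above are expected to look roughly like the
# hypothetical snippet below (loaded from the platform's configs file; actual keys and
# values vary per CSP):
# {
#   "confProperty": "profile",
#   "envVariableKey": "AWS_PROFILE",
#   "defaultValue": "default"
# }
# Values containing '/' are treated as file paths and expanded via FSUtil.expand_path.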
def _set_initial_configuration_list(self) -> None:
# load the initial configurations list
initial_conf_list = self._get_config_environment('initialConfigList')
if initial_conf_list:
for conf_prop_k in initial_conf_list:
self._set_env_prop_from_env_var(conf_prop_k)
def _set_remaining_configuration_list(self) -> None:
remaining_props = self._get_config_environment('loadedConfigProps')
if not remaining_props:
return
properties_map_arr = self._get_config_environment('cliConfig',
'confProperties',
'propertiesMap')
if properties_map_arr:
# We support multiple CLI configurations, the following two dictionaries
# map config files to the corresponding property keys to be set, and section names respectively
config_file_keys = defaultdict(list)
config_file_section = {}
for prop_elem in properties_map_arr:
if prop_elem.get('confProperty') in remaining_props:
# The config file property falls back to the default value '_cliConfigFile_' when not specified
config_file = prop_elem.get('configFileProp', '_cliConfigFile_')
config_file_keys[config_file].append(prop_elem.get('propKey'))
if config_file not in config_file_section:
config_file_section[config_file] = prop_elem.get('section').strip('_')
# The section names are loaded from dictionary 'config_file_section'
# Example section names are awsProfile/profile
loaded_conf_dict = {}
for config_file in config_file_keys:
loaded_conf_dict = \
self._load_props_from_sdk_conf_file(keyList=config_file_keys[config_file],
configFile=config_file.strip('_'),
sectionKey=self.ctxt.get(config_file_section[config_file]))
if loaded_conf_dict:
self.ctxt.update(loaded_conf_dict)
for prop_elem in properties_map_arr:
if loaded_conf_dict and prop_elem.get('propKey') not in loaded_conf_dict:
# set it using environment variable if possible
self._set_env_prop_from_env_var(prop_elem.get('propKey'))
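# A hypothetical 'propertiesMap' entry handled by the loop above (field names taken from
# the code; the concrete values are illustrative only):
# {
#   "confProperty": "region",
#   "propKey": "region",
#   "section": "_awsProfile_",
#   "configFileProp": "_cliConfigFile_"
# }
# The surrounding underscores are stripped before the section and config-file names are
# resolved against the context dictionary.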
def _set_credential_properties(self) -> None:
properties_map_arr = self._get_config_environment('cliConfig',
'confProperties',
'credentialsMap')
if not properties_map_arr:
return
# We support multiple CLI configurations, the following two dictionaries
# map config files to the corresponding property keys to be set, and section names respectively
credential_file_keys = defaultdict(list)
credential_file_section = {}
for prop_elem in properties_map_arr:
credential_file = prop_elem.get('configFileProp', '_credentialFile_')
credential_file_keys[credential_file].append(prop_elem.get('propKey'))
if credential_file not in credential_file_section:
credential_file_section[credential_file] = prop_elem.get('section').strip('_')
# The section names are loaded from dictionary 'credential_file_section'
# Example section names are awsProfile/profile
for credential_file in credential_file_keys:
credential_file_value = self.ctxt.get(credential_file.strip('_'))
if not credential_file_value:
continue
loaded_conf_dict = \
self.load_from_config_parser(credential_file_value,
keyList=credential_file_keys[credential_file],
sectionKey=self.ctxt.get(credential_file_section[credential_file]))
if loaded_conf_dict:
self.ctxt.update(loaded_conf_dict)
def _parse_arguments(self, ctxt_args: dict):
# Get the possible parameters for that platform.
# Arguments passed to the tool take precedence over global env variables
list_of_params = self._get_config_environment('envParams')
if list_of_params:
for param_elem in list_of_params:
param_val = ctxt_args.get(param_elem)
if param_val:
# add the argument to the context
self.ctxt.update({param_elem: param_val})
self._set_initial_configuration_list()
# load the remaining properties
self._set_remaining_configuration_list()
# load the credential properties
self._set_credential_properties()
def _load_props_from_sdk_conf_file(self, **prop_args) -> dict:
if prop_args.get('configFile'):
cli_conf_file = self.ctxt.get(prop_args.get('configFile'))
else:
cli_conf_file = self.ctxt.get('cliConfigFile')
if cli_conf_file is None:
return None
return self.load_from_config_parser(cli_conf_file, **prop_args)
def __post_init__(self):
self.load_platform_configs()
self.ctxt = {
'platformType': self.type_id,
'notes': {}
}
self._parse_arguments(self.ctxt_args)
self.cli = self._create_cli_instance()
self._install_storage_driver()
def update_ctxt_notes(self, note_key, note_value):
self.ctxt['notes'].update({note_key: note_value})
def setup_and_validate_env(self):
if self.cli is not None:
self.cli.get_and_set_env_vars()
self.cli.validate_env()
# TODO we should fail if the CLI is None
def _construct_cluster_from_props(self,
cluster: str,
props: str = None):
raise NotImplementedError
def set_offline_cluster(self, cluster_args: dict = None):
raise NotImplementedError
def load_cluster_by_prop_file(self, cluster_prop_path: str):
prop_container = JSONPropertiesContainer(prop_arg=cluster_prop_path)
cluster = prop_container.get_value_silent('cluster_id')
return self._construct_cluster_from_props(cluster=cluster,
props=json.dumps(prop_container.props))
def connect_cluster_by_name(self, cluster: str):
"""
To be used to if the cluster can be found in the platform.
:param cluster:
:return:
"""
cluster_props = self.cli.pull_cluster_props_by_args(args={'cluster': cluster})
return self._construct_cluster_from_props(cluster=cluster,
props=cluster_props)
def migrate_cluster_to_gpu(self, orig_cluster):
"""
Given a cluster, convert it to run NVIDIA GPUs based on mapping the instance types.
:param orig_cluster: the cluster on which the application was executed and is running
without acceleration.
:return: a new object cluster that supports GPU
"""
raise NotImplementedError
def create_saving_estimator(self,
source_cluster: ClusterGetAccessor,
reshaped_cluster: ClusterGetAccessor):
raise NotImplementedError
def create_local_submission_job(self, job_prop, ctxt) -> Any:
raise NotImplementedError
def load_platform_configs(self):
config_file_name = f'{CspEnv.tostring(self.type_id).lower()}-configs.json'
config_path = Utils.resource_path(config_file_name)
self.configs = JSONPropertiesContainer(prop_arg=config_path)
def get_supported_gpus(self) -> dict:
gpus_from_configs = self.configs.get_value('gpuConfigs', 'user-tools', 'supportedGpuInstances')
gpu_scopes = {}
for mc_prof, mc_info in gpus_from_configs.items():
hw_info_json = mc_info['SysInfo']
hw_info_ob = SysInfo(num_cpus=hw_info_json['num_cpus'], cpu_mem=hw_info_json['cpu_mem'])
gpu_info_json = mc_info['GpuHWInfo']
gpu_info_obj = GpuHWInfo(num_gpus=gpu_info_json['num_gpus'], gpu_mem=gpu_info_json['gpu_mem'])
gpu_scopes[mc_prof] = NodeHWInfo(sys_info=hw_info_ob, gpu_info=gpu_info_obj)
return gpu_scopes
def validate_job_submission_args(self, submission_args: dict) -> dict:
raise NotImplementedError
def get_platform_name(self) -> str:
"""
Gets the name of the runtime platform in lower case.
:return: the name of the runtime platform in lower_case.
"""
return CspEnv.pretty_print(self.type_id)
def get_footer_message(self) -> str:
return 'To support acceleration with T4 GPUs, switch the worker node instance types'
@dataclass
class ClusterBase(ClusterGetAccessor):
"""
Represents an instance of a cluster on the platform.
Cluster can be running/offline
"""
platform: PlatformBase
cli: CMDDriverBase = field(default=None, init=False)
name: str = field(default=None, init=False)
uuid: str = field(default=None, init=False)
region: str = field(default=None, init=False)
zone: str = field(default=None, init=False)
state: ClusterState = field(default=ClusterState.RUNNING, init=False)
nodes: dict = field(default_factory=dict, init=False)
props: AbstractPropertiesContainer = field(default=None, init=False)
logger: Logger = field(default=ToolLogging.get_and_setup_logger('rapids.tools.cluster'), init=False)
@staticmethod
def _verify_workers_exist(has_no_workers_cb: Callable[[], bool]):
"""
Specifies how to handle cluster definitions that have no workers
:param has_no_workers_cb: A callback that returns True if the cluster does not have any
workers
"""
if has_no_workers_cb():
raise RuntimeError('Invalid cluster: The cluster has no worker nodes.\n\t'
'It is recommended to define a cluster with (1 master, N workers).')
def __post_init__(self):
self.cli = self.platform.cli
self.region = self.cli.get_region()
def _init_connection(self, cluster_id: str = None,
props: str = None) -> dict:
name = cluster_id
if props is None:
# we need to pull the properties from the platform
props = self.cli.pull_cluster_props_by_args(args={'cluster': name, 'region': self.region})
cluster_props = JSONPropertiesContainer(props, file_load=False)
cluster_args = {
'name': name,
'props': cluster_props
}
return cluster_args
def set_fields_from_dict(self, field_values: dict = None):
"""
Given a dictionary, this function sets the fields of the cluster.
:param field_values: the dictionary containing the key/value pairs used to initialize the cluster.
:return:
"""
if field_values is not None:
for field_name in field_values:
setattr(self, field_name, field_values.get(field_name))
self._set_fields_from_props()
def _process_loaded_props(self) -> None:
"""
After loading the raw properties, perform any necessary processing to clean up the
properties.
"""
return None
def _set_name_from_props(self) -> None:
pass
def _set_fields_from_props(self):
self._process_loaded_props()
if not self.name:
self._set_name_from_props()
def _init_nodes(self):
pass
def set_connection(self,
cluster_id: str = None,
props: str = None):
"""
Sets a connection to an existing cluster; if the properties are not provided, they are pulled from the platform.
:param cluster_id: the argument to be used to fetch the cluster
:param props: optional argument that includes dictionary of the platform cluster's description.
:return: a cluster
"""
pre_init_args = self._init_connection(cluster_id, props)
self.set_fields_from_dict(pre_init_args)
self._init_nodes()
# Verify that the cluster has defined workers
self._verify_workers_exist(lambda: not self.nodes.get(SparkNodeType.WORKER))
return self
def is_cluster_running(self) -> bool:
return self.state == ClusterState.RUNNING
def get_eventlogs_from_config(self) -> List[str]:
res_arr = []
spark_props = self.get_all_spark_properties()
if spark_props and 'spark.eventLog.dir' in spark_props:
res_arr.append(spark_props.get('spark.eventLog.dir'))
return res_arr
def run_cmd_driver(self, ssh_cmd: str, cmd_input: str = None) -> str or None:
"""
Execute command on the driver node
:param ssh_cmd: the command to be executed on the remote node. Note that the quotes
surrounding the shell command should be included
:param cmd_input: optional argument string used as an input to the command line.
e.g., writing to a file.
:return:
"""
# get the master node
master_node: ClusterNode = self.get_master_node()
return self.cli.ssh_cmd_node(master_node, ssh_cmd, cmd_input=cmd_input)
def run_cmd_worker(self, ssh_cmd: str, cmd_input: str = None, ind: int = 0) -> str or None:
"""
Execute command on the worker node
:param ssh_cmd: the command to be executed on the remote node. Note that the quotes
surrounding the shell command should be included
:param cmd_input: optional argument string used as an input to the command line.
e.g., writing to a file
:param ind: the node index. By default, the command is executed on first worker node.
"""
# get the worker node
worker_node: ClusterNode = self.get_worker_node(ind)
return self.cli.ssh_cmd_node(worker_node, ssh_cmd, cmd_input=cmd_input)
def run_cmd_node(self, node: ClusterNode, ssh_cmd: str, cmd_input: str = None) -> str or None:
"""
Execute command on the node
:param node: the cluster node where the command to be executed on
:param ssh_cmd: the command to be executed on the remote node. Note that the quotes
surrounding the shell command should be included
:param cmd_input: optional argument string used as an input to the command line.
e.g., writing to a file
"""
return self.cli.ssh_cmd_node(node, ssh_cmd, cmd_input=cmd_input)
def scp_to_node(self, node: ClusterNode, src: str, dest: str) -> str or None:
"""
Scp file to the node
:param node: the cluster node to upload file to.
:param src: the file path to be uploaded to the cluster node.
:param dest: the file path where to store uploaded file on the cluster node.
"""
return self.cli.scp_to_node(node, src, dest)
def scp_from_node(self, node: ClusterNode, src: str, dest: str) -> str or None:
"""
Scp file from the node
:param node: the cluster node to download file from.
:param src: the file path on the cluster node to be downloaded.
:param dest: the file path where to store downloaded file.
"""
return self.cli.scp_from_node(node, src, dest)
def get_region(self) -> str:
return self.cli.get_region()
def get_worker_hw_info(self) -> NodeHWInfo:
worker_node = self.get_worker_node()
return worker_node.hw_info
def _build_migrated_cluster(self, orig_cluster):
"""
Platform-specific logic for building a new cluster based on migrating the original one.
:param orig_cluster: the original CPU cluster to migrate from.
"""
raise NotImplementedError
def migrate_from_cluster(self, orig_cluster):
self.name = orig_cluster.name
self.uuid = orig_cluster.uuid
self.zone = orig_cluster.zone
self.state = orig_cluster.state
# we need to copy the props in case we need to read a property
self.props = orig_cluster.props
self._build_migrated_cluster(orig_cluster)
def find_matches_for_node(self) -> (dict, dict):
"""
Maps the CPU instance types to GPU types
:return: a map converting CPU machines to GPU ones and a map
containing the supported GPUs.
"""
mc_map = {}
supported_gpus = self.platform.get_supported_gpus()
for spark_node_type, node_list in self.nodes.items():
if spark_node_type == SparkNodeType.MASTER:
# skip
self.cli.logger.debug('Skip converting Master nodes')
else:
for anode in node_list:
if anode.instance_type in supported_gpus:
continue
if anode.instance_type not in mc_map:
best_mc_match = anode.find_best_cpu_conversion(supported_gpus)
mc_map.update({anode.instance_type: best_mc_match})
return mc_map, supported_gpus
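# Illustrative return value (the instance names are hypothetical): a CPU-to-GPU instance map
# such as {'m5.xlarge': 'g4dn.xlarge'} together with the dictionary of supported GPU
# instances loaded from the platform configuration.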
def get_all_spark_properties(self) -> dict:
"""Returns a dictionary containing the spark configurations defined in the cluster properties"""
raise NotImplementedError
def get_nodes_cnt(self, node_type: SparkNodeType) -> int:
node_values = self.nodes.get(node_type)
if isinstance(node_values, list):
res = len(node_values)
else:
res = 1
return res
def get_node(self, node_type: SparkNodeType) -> ClusterNode:
node_values = self.nodes.get(node_type)
if isinstance(node_values, list):
res = node_values[0]
else:
res = node_values
return res
def get_all_nodes(self) -> list:
nodes = []
for value in self.nodes.values():
if isinstance(value, list):
nodes += value
else:
nodes += [value]
return nodes
def get_master_node(self) -> ClusterNode:
return self.nodes.get(SparkNodeType.MASTER)
def get_worker_node(self, ind: int = 0) -> ClusterNode:
return self.nodes.get(SparkNodeType.WORKER)[ind]
def get_name(self) -> str:
return self.name
def get_tmp_storage(self) -> str:
raise NotImplementedError
def _set_render_args_create_template(self) -> dict:
raise NotImplementedError
def generate_create_script(self) -> str:
platform_name = CspEnv.pretty_print(self.platform.type_id)
template_path = Utils.resource_path(f'templates/{platform_name}-create_gpu_cluster_script.ms')
render_args = self._set_render_args_create_template()
return TemplateGenerator.render_template_file(template_path, render_args)
def _set_render_args_bootstrap_template(self, overridden_args: dict = None) -> dict:
res = {}
if overridden_args:
res.update(overridden_args)
res.setdefault('CLUSTER_NAME', self.get_name())
return res
def generate_bootstrap_script(self, overridden_args: dict = None) -> str:
platform_name = CspEnv.pretty_print(self.platform.type_id)
template_path = Utils.resource_path(f'templates/{platform_name}-run_bootstrap.ms')
render_args = self._set_render_args_bootstrap_template(overridden_args)
return TemplateGenerator.render_template_file(template_path, render_args)
@dataclass
class ClusterReshape(ClusterGetAccessor):
"""
A class that handles reshaping of a given cluster.
It takes as arguments a cluster object and callable methods that define
the way each cluster property is reshaped.
By default, the methods have no effect on the properties.
The caller can override the behavior by passing a callback method.
The caller can also control which node types are affected by the reshape methods
by setting "node_types". By default, the reshaping is limited to the
worker nodes of a cluster.
"""
cluster_inst: ClusterBase
node_types: List[SparkNodeType] = field(default_factory=lambda: [SparkNodeType.WORKER])
reshape_workers_mc_type: Callable[[str], str] = field(default_factory=lambda: lambda x: x)
reshape_workers_cnt: Callable[[int], int] = field(default_factory=lambda: lambda x: x)
reshape_workers_cpus: Callable[[int], int] = field(default_factory=lambda: lambda x: x)
reshape_workers_mem: Callable[[int], int] = field(default_factory=lambda: lambda x: x)
reshape_workers_gpu_cnt: Callable[[int], int] = field(default_factory=lambda: lambda x: x)
reshape_workers_gpu_device: Callable[[str], str] = field(default_factory=lambda: lambda x: x)
def get_node(self, node_type: SparkNodeType) -> ClusterNode:
if node_type == SparkNodeType.WORKER:
return self.cluster_inst.get_worker_node()
return self.cluster_inst.get_master_node()
def get_all_nodes(self) -> list:
raise NotImplementedError
def get_node_instance_type(self, node_type: SparkNodeType) -> str:
res = super().get_node_instance_type(node_type)
if node_type in self.node_types:
return self.reshape_workers_mc_type(res)
return res
def get_nodes_cnt(self, node_type: SparkNodeType) -> int:
res = self.cluster_inst.get_nodes_cnt(node_type)
if node_type in self.node_types:
return self.reshape_workers_cnt(res)
return res
def get_node_core_count(self, node_type: SparkNodeType) -> int:
res = super().get_node_core_count(node_type)
if node_type in self.node_types:
return self.reshape_workers_cpus(res)
return res
def get_node_mem_mb(self, node_type: SparkNodeType) -> int:
res = super().get_node_mem_mb(node_type)
if node_type in self.node_types:
return self.reshape_workers_mem(res)
return res
def get_gpu_per_node(self, node_type: SparkNodeType) -> (int, str):
num_gpus, gpu_device = super().get_gpu_per_node(node_type)
if node_type in self.node_types:
return self.reshape_workers_gpu_cnt(num_gpus), self.reshape_workers_gpu_device(gpu_device)
return num_gpus, gpu_device
def get_name(self) -> str:
return self.cluster_inst.get_name()
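# Illustrative usage sketch of ClusterReshape (the `cpu_cluster` object is hypothetical):
# doubling the worker count while leaving all other properties untouched.
# >>> reshaped = ClusterReshape(cluster_inst=cpu_cluster,
# ...                           reshape_workers_cnt=lambda cnt: cnt * 2)
# >>> reshaped.get_nodes_cnt(SparkNodeType.WORKER)  # returns twice the original worker count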
def get_platform(platform_id: Enum) -> Type[PlatformBase]:
platform_hash = {
CspEnv.DATABRICKS_AWS: ('spark_rapids_pytools.cloud_api.databricks_aws', 'DBAWSPlatform'),
CspEnv.DATABRICKS_AZURE: ('spark_rapids_pytools.cloud_api.databricks_azure', 'DBAzurePlatform'),
CspEnv.DATAPROC: ('spark_rapids_pytools.cloud_api.dataproc', 'DataprocPlatform'),
CspEnv.EMR: ('spark_rapids_pytools.cloud_api.emr', 'EMRPlatform'),
CspEnv.ONPREM: ('spark_rapids_pytools.cloud_api.onprem', 'OnPremPlatform'),
}
if platform_id in platform_hash:
mod_name, clz_name = platform_hash[platform_id]
imported_mod = __import__(mod_name, globals(), locals(), [clz_name])
return getattr(imported_mod, clz_name)
raise AttributeError(f'Provider {platform_id} does not exist')
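# Illustrative usage sketch (the ctxt_args keys are hypothetical, and instantiating the
# platform requires the matching CSP CLI/SDK to be installed and configured):
# >>> platform_clz = get_platform(CspEnv.EMR)
# >>> emr_platform = platform_clz(ctxt_args={'profile': 'default'})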
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/cloud_api/sp_types.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class of providing absolute costs of resources in CSP"""
import datetime
import os
from dataclasses import dataclass, field
from logging import Logger
from spark_rapids_pytools.cloud_api.sp_types import ClusterGetAccessor
from spark_rapids_pytools.common.sys_storage import FSUtil
from spark_rapids_pytools.common.utilities import ToolLogging, Utils
@dataclass
class PriceProvider:
"""
An abstract class that represents an interface to retrieve costs of hardware configurations.
"""
region: str
pricing_configs: dict # [str, JSONPropertiesContainer]
cache_files: dict = field(default_factory=dict, init=False) # [str, str]
resource_urls: dict = field(default_factory=dict, init=False) # [str, str]
name: str = field(default=None, init=False)
cache_expiration_secs: int = field(default=604800, init=False) # download the file once a week
meta: dict = field(default_factory=dict)
catalogs: dict = field(default_factory=dict, init=False) # [str, AbstractPropertiesContainer]
comments: list = field(default_factory=lambda: [], init=False)
cache_directory: str = field(default=None, init=False)
logger: Logger = field(default=None, init=False)
def _init_cache_files(self):
if self._caches_expired(self.get_cached_files()):
self._generate_cache_files()
else:
self.logger.info('The catalog files are loaded from the cache: %s',
Utils.gen_joined_str('; ', self.get_cached_files()))
def _generate_cache_files(self):
# resource_urls and cache_files should have the same keys
cache_checks = {'cacheExpirationSecs': self.cache_expiration_secs}
for file_key, resource_url in self.resource_urls.items():
files_updated = FSUtil.cache_from_url(resource_url,
self.cache_files[file_key],
file_checks=cache_checks)
self.logger.info('The catalog file %s is %s',
self.cache_files[file_key],
'updated' if files_updated else 'not modified, using the cached content')
def __post_init__(self):
self.logger = ToolLogging.get_and_setup_logger(f'rapids.tools.price.{self.name}')
self.cache_directory = Utils.get_rapids_tools_env('CACHE_FOLDER')
self._process_configs()
self._init_catalogs()
def get_cached_files(self) -> list:
return list(self.cache_files.values())
def _caches_expired(self, cache_files: list) -> bool:
for c_file in cache_files:
if not os.path.exists(c_file):
return True
modified_time = os.path.getmtime(c_file)
diff_time = int(datetime.datetime.now().timestamp() - modified_time)
if diff_time > self.cache_expiration_secs:
return True
return False
def _process_resource_configs(self):
pass
def _process_configs(self):
self._process_resource_configs()
def _create_catalogs(self):
pass
def _init_catalogs(self):
self._init_cache_files()
self._create_catalogs()
def get_cpu_price(self, machine_type: str) -> float:
del machine_type # Unused machine_type
return 0.0
def get_container_cost(self) -> float:
return 0.0
def get_ssd_price(self, machine_type: str) -> float:
del machine_type # Unused machine_type
return 0.0
def get_ram_price(self, machine_type: str) -> float:
del machine_type # Unused machine_type
return 0.0
def get_gpu_price(self, gpu_device: str) -> float:
del gpu_device # Unused gpu_device
return 0.0
def get_instance_price(self, instance: str) -> float:
del instance # Unused instance
return 0.0
def setup(self, **kwargs) -> None:
for key, value in kwargs.items():
self.meta[key] = value
@dataclass
class SavingsEstimator:
"""
Implementation of model to get an estimate of cost savings.
"""
price_provider: PriceProvider
source_cluster: ClusterGetAccessor
reshaped_cluster: ClusterGetAccessor
target_cost: float = field(default=None, init=False)
source_cost: float = field(default=None, init=False)
comments: list = field(default_factory=lambda: [], init=False)
logger: Logger = field(default=None, init=False)
def _setup_costs(self):
# calculate target_cost
pass
def __post_init__(self):
# when debug is set to true set it in the environment.
self.logger = ToolLogging.get_and_setup_logger('rapids.tools.savings')
self._setup_costs()
def get_costs_and_savings(self,
app_duration_ms: float,
estimated_gpu_duration_ms: float) -> (float, float, float):
"""
Calculates the cost of running an application for both clusters and returns the savings as a
percentage.
:param app_duration_ms: total execution time in milliseconds
:param estimated_gpu_duration_ms: estimated execution time of the app if executed on GPU
:return: a tuple of 3 floats representing cpu_cost, gpu_cost, and percent of savings
"""
cpu_cost = self.source_cost * app_duration_ms / (60.0 * 60 * 1000)
if cpu_cost <= 0.0:
self.logger.info('Force costs to 0 because the original cost is %.6f', cpu_cost)
# avoid division by zero
return 0.0, 0.0, 0.0
gpu_cost = self.target_cost * estimated_gpu_duration_ms / (60.0 * 60 * 1000)
estimated_savings = 100.0 - ((100.0 * gpu_cost) / cpu_cost)
return cpu_cost, gpu_cost, estimated_savings
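# A worked example of the formula above, assuming hourly rates of 2.00 USD (source) and
# 3.00 USD (target) that _setup_costs would have computed:
# cpu_cost = 2.00 * 1,800,000 ms / 3,600,000 = 1.00 USD (a 30-minute CPU run)
# gpu_cost = 3.00 *   600,000 ms / 3,600,000 = 0.50 USD (a 10-minute GPU run)
# estimated_savings = 100 - (100 * 0.50) / 1.00 = 50.0 (percent)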
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/pricing/price_provider.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides abstractions and implementations of savings estimator."""
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/pricing/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""providing absolute costs of resources in AWS"""
from dataclasses import dataclass, field
from spark_rapids_tools import get_elem_from_dict, get_elem_non_safe
from spark_rapids_pytools.common.prop_manager import JSONPropertiesContainer
from spark_rapids_pytools.common.sys_storage import FSUtil
from spark_rapids_pytools.pricing.price_provider import PriceProvider
@dataclass
class AWSCatalogContainer:
"""
An AWS pricing catalog. It is initialized by a list of catalog_files.
The final pricing will be loaded inside a dictionary for lookup
"""
catalog_files: dict # [str, str]
props: dict = field(default_factory=dict, init=False)
def __load_instance_type_price_by_sku(self,
comp_key: str,
comp_props: JSONPropertiesContainer,
sku_to_instance_type: dict):
price_map = {}
for sku, instance_type in sku_to_instance_type.items():
sku_info = comp_props.get_value('terms', 'OnDemand', sku)
_, sku_info_value = sku_info.popitem()
price_dimensions = sku_info_value['priceDimensions']
_, price_dimensions_value = price_dimensions.popitem()
price = float(price_dimensions_value['pricePerUnit']['USD'])
price_map[instance_type] = price
self.props.update({comp_key: price_map})
def _load_instance_types_emr(self, prop_key: str, catalog_file: str):
emr_props = JSONPropertiesContainer(catalog_file)
sku_to_instance_type = {}
for sku in emr_props.get_value('products'):
if sw_type := emr_props.get_value_silent('products', sku, 'attributes', 'softwareType'):
if sw_type == 'EMR':
sku_to_instance_type[sku] = emr_props.get_value('products', sku, 'attributes', 'instanceType')
self.__load_instance_type_price_by_sku(prop_key, emr_props, sku_to_instance_type)
def _load_instance_types_ec2(self, prop_key: str, catalog_file: str):
ec2_props = JSONPropertiesContainer(catalog_file)
ec2_sku_to_instance_type = {}
cond_dict = {
'tenancy': 'Shared',
'operatingSystem': 'Linux',
'operation': 'RunInstances',
'capacitystatus': 'Used'
}
for sku in ec2_props.get_value('products'):
if attr := ec2_props.get_value_silent('products', sku, 'attributes'):
precheck = True
for cond_k, cond_v in cond_dict.items():
precheck = precheck and attr.get(cond_k) == cond_v
if precheck:
ec2_sku_to_instance_type[sku] = attr['instanceType']
self.__load_instance_type_price_by_sku(prop_key, ec2_props, ec2_sku_to_instance_type)
def get_value(self, *key_strs):
return get_elem_from_dict(self.props, key_strs)
def get_value_silent(self, *key_strs):
return get_elem_non_safe(self.props, key_strs)
def __post_init__(self):
for catalog_k in self.catalog_files:
func_name = f'_load_instance_types_{catalog_k}'
if hasattr(self, func_name):
if callable(func_obj := getattr(self, func_name)):
func_obj(catalog_k, self.catalog_files.get(catalog_k))
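# Illustrative usage sketch (the local file paths are hypothetical; the files must be AWS
# pricing JSON documents of the kind cached by EMREc2PriceProvider below):
# >>> catalog = AWSCatalogContainer({'ec2': '/tmp/ec2_catalog.json',
# ...                                'emr': '/tmp/emr_catalog.json'})
# >>> catalog.get_value('ec2', 'm5.xlarge')  # on-demand USD price for that instance type
# Each key in catalog_files is dispatched to the matching _load_instance_types_<key> loader.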
@dataclass
class EMREc2PriceProvider(PriceProvider):
"""
Provides costs of EMR running on EC2 instances.
"""
name = 'Emr-Ec2'
def _process_resource_configs(self):
# TODO: current urls in configs file are static, but should be distinct based on region
online_entries = self.pricing_configs['emr'].get_value('catalog', 'onlineResources')
for online_entry in online_entries:
file_name = online_entry.get('localFile')
file_key = online_entry.get('resourceKey').split('-catalog')[0]
self.cache_files[file_key] = FSUtil.build_path(self.cache_directory, file_name)
self.resource_urls[file_key] = online_entry.get('onlineURL')
def _create_catalogs(self):
self.catalogs = {'aws': AWSCatalogContainer(self.cache_files)}
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/pricing/emr_pricing.py |