python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytorch_lightning as pl
from omegaconf import DictConfig
from nemo.collections.nlp.models import TokenClassificationModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
"""
This script shows how to perform evaluation and run inference on a few examples.
More details on the Token Classification model can be found in tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb
*** Setting the configs ***
This script uses the `/examples/nlp/token_classification/conf/token_classification_config.yaml` config file
by default. You may update the config file directly, or
set another config file via the command-line argument `--config-name=CONFIG_FILE_PATH`.
For more details about the config files and different ways of model restoration, see tutorials/00_NeMo_Primer.ipynb
*** Model Evaluation ***
The script runs two types of evaluation:
* model.test() - this evaluation uses the config settings for evaluation, such as model.dataset.max_seq_length
* model.evaluate_from_file():
* disregards model.dataset.max_seq_length and evaluates all the tokens (up to BERT's maximum sequence length of 512 tokens after tokenization)
* creates a confusion matrix
* saves predictions and labels (if provided)
To run the script:
python token_classification_evaluate.py \
model.dataset.data_dir=<PATH_TO_DATA_DIR> \
pretrained_model=ner_en_bert
<PATH_TO_DATA_DIR> - a directory that contains test_ds.text_file and test_ds.labels_file (see the config)
pretrained_model - pretrained TokenClassification model from list_available_models() or
path to a .nemo file, for example: ner_en_bert or your_model.nemo
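As an illustrative sketch, a locally saved .nemo checkpoint can be combined with command-line overrides
(the override keys below are assumptions based on the default token_classification_config.yaml and may
differ in your config):
python token_classification_evaluate.py \
model.dataset.data_dir=<PATH_TO_DATA_DIR> \
pretrained_model=<PATH_TO_YOUR_MODEL>.nemo \
model.test_ds.batch_size=32 \
model.dataset.max_seq_length=128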
"""
@hydra_runner(config_path="conf", config_name="token_classification_config")
def main(cfg: DictConfig) -> None:
logging.info(
'During evaluation/testing, it is currently advisable to construct a new Trainer with single GPU and \
no DDP to obtain accurate results'
)
if not hasattr(cfg.model, 'test_ds'):
raise ValueError('model.test_ds was not found in the config')
trainer = pl.Trainer(
devices=1,
precision=cfg.trainer.precision,
logger=False,
enable_checkpointing=False,
accelerator=cfg.trainer.accelerator,
)
exp_dir = exp_manager(trainer, cfg.exp_manager)
if not cfg.pretrained_model:
raise ValueError(
'To run evaluation and inference script a pre-trained model or .nemo file must be provided.'
f'Choose from {TokenClassificationModel.list_available_models()} or "pretrained_model"="your_model.nemo"'
)
if os.path.exists(cfg.pretrained_model):
model = TokenClassificationModel.restore_from(cfg.pretrained_model)
elif cfg.pretrained_model in TokenClassificationModel.get_available_model_names():
model = TokenClassificationModel.from_pretrained(cfg.pretrained_model)
else:
raise ValueError(
f'Provide path to the pre-trained .nemo checkpoint or choose from {TokenClassificationModel.list_available_models()}'
)
data_dir = cfg.model.dataset.get('data_dir', None)
if data_dir is None:
logging.error(
'No dataset directory provided. Skipping evaluation. '
'To run evaluation on a file, specify path to the directory that contains test_ds.text_file and test_ds.labels_file with "model.dataset.data_dir" argument.'
)
elif not os.path.exists(data_dir):
logging.error(f'{data_dir} is not found, skipping evaluation on the test set.')
else:
model.update_data_dir(data_dir=data_dir)
model._cfg.dataset = cfg.model.dataset
if not hasattr(cfg.model, 'test_ds'):
logging.error(f'model.test_ds was not found in the config, skipping evaluation')
elif model.prepare_test(trainer):
model.setup_test_data(cfg.model.test_ds)
trainer.test(model)
model.evaluate_from_file(
text_file=os.path.join(data_dir, cfg.model.test_ds.text_file),
labels_file=os.path.join(data_dir, cfg.model.test_ds.labels_file),
output_dir=exp_dir,
add_confusion_matrix=True,
normalize_confusion_matrix=True,
)
else:
logging.error('Skipping the evaluation. The trainer is not setup properly.')
# run an inference on a few examples
queries = ['we bought four shirts from the nvidia gear store in santa clara.', 'Nvidia is a company.']
results = model.add_predictions(queries, output_file='predictions.txt')
for query, result in zip(queries, results):
logging.info(f'Query : {query}')
logging.info(f'Result: {result.strip()}\n')
logging.info(f'Results are saved at {exp_dir}')
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/token_classification/token_classification_evaluate.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models import TokenClassificationModel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
"""
This script shows how to train a Token Classification model.
The Token Classification model supports Named Entity Recognition task and other token level classification tasks,
as long as the data follows the format specified below.
More details on how to use this script can be found in
tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb
*** Data Format ***
Token Classification Model requires the data to be split into 2 files: text.txt and labels.txt.
Each line of the text.txt file contains text sequences, where words are separated with spaces, i.e.:
[WORD] [SPACE] [WORD] [SPACE] [WORD].
The labels.txt file contains corresponding labels for each word in text.txt, the labels are separated with spaces, i.e.:
[LABEL] [SPACE] [LABEL] [SPACE] [LABEL].
Example of a text.txt file:
Jennifer is from New York City .
She likes ...
...
Corresponding labels.txt file:
B-PER O O B-LOC I-LOC I-LOC O
O O ...
...
*** Preparing the dataset ***
To convert an IOB format data to the format required for training, run
examples/nlp/token_classification/data/import_from_iob_format.py on your train and dev files, as follows:
python examples/nlp/token_classification/data/import_from_iob_format.py --data_file PATH_TO_IOB_FORMAT_DATAFILE
*** Setting the configs ***
The model and the PT trainer are defined in a config file which declares multiple important sections.
The most important ones are:
model: All arguments that are related to the Model - language model, tokenizer, token classifier, optimizer,
schedulers, and datasets/data loaders.
trainer: Any argument to be passed to PyTorch Lightning including number of epochs, number of GPUs,
precision level, etc.
This script uses the `/examples/nlp/token_classification/conf/token_classification_config.yaml` config file
by default. You may update the config file directly, or
set another config file via the command-line argument `--config-name=CONFIG_FILE_PATH`.
For more details about the config files and different ways of model restoration, see tutorials/00_NeMo_Primer.ipynb
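As a hedged illustration, any value declared in the config can also be overridden from the command line
using Hydra dot notation (the key names below are assumptions based on the default config and may differ
in your config file), e.g.:
python token_classification_train.py \
model.dataset.data_dir=<PATH_TO_DATA_DIR> \
model.language_model.pretrained_model_name=bert-base-uncased \
trainer.precision=16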
*** Model Training ***
To train TokenClassification model from scratch with the default config file, run:
python token_classification_train.py \
model.dataset.data_dir=<PATH_TO_DATA_DIR> \
trainer.max_epochs=<NUM_EPOCHS> \
trainer.devices=[<CHANGE_TO_GPU(s)_YOU_WANT_TO_USE>]
To use one of the pretrained versions of the model, specify the `pretrained_model` argument with either a
TokenClassification model name from list_available_models() or a path to a .nemo file
(for example, ner_en_bert or model.nemo), and run:
python token_classification_train.py pretrained_model=ner_en_bert
To use one of the pretrained versions of the model and fine-tune it, run:
python token_classification_train.py \
model.dataset.data_dir=<PATH_TO_DATA_DIR> \
pretrained_model=ner_en_bert
<PATH_TO_DATA_DIR> - a directory that contains test_ds.text_file and test_ds.labels_file (see the config)
pretrained_model - pretrained TokenClassification model from list_available_models() or
path to a .nemo file, for example: ner_en_bert or model.nemo
For more ways of restoring a pre-trained model, see tutorials/00_NeMo_Primer.ipynb
"""
@hydra_runner(config_path="conf", config_name="token_classification_config")
def main(cfg: DictConfig) -> None:
try:
strategy = NLPDDPStrategy()
except (ImportError, ModuleNotFoundError):
strategy = None
trainer = pl.Trainer(strategy=strategy, **cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
if not cfg.pretrained_model:
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
model = TokenClassificationModel(cfg.model, trainer=trainer)
else:
if os.path.exists(cfg.pretrained_model):
# TODO: can we drop strict=False?
model = TokenClassificationModel.restore_from(cfg.pretrained_model, trainer=trainer, strict=False)
elif cfg.pretrained_model in TokenClassificationModel.get_available_model_names():
model = TokenClassificationModel.from_pretrained(cfg.pretrained_model)
else:
raise ValueError(
f'Provide path to the pre-trained .nemo file or choose from {TokenClassificationModel.list_available_models()}'
)
data_dir = cfg.model.dataset.get('data_dir', None)
if data_dir:
if not os.path.exists(data_dir):
raise ValueError(f'{data_dir} was not found')
# we can also fine-tune the pretrained model, but this requires
# setting up the data dir to compute class-weight statistics
model.update_data_dir(data_dir=data_dir)
# finally, setup train and validation Pytorch DataLoaders
model.setup_training_data()
model.setup_validation_data()
# then set up the loss; use model.dataset.class_balancing
# if you want to add class weights to the CrossEntropyLoss
model.setup_loss(class_balancing=cfg.model.dataset.class_balancing)
logging.info(f'Using config file of the pretrained model')
else:
raise ValueError(
'Specify a valid dataset directory that contains test_ds.text_file and test_ds.labels_file \
with "model.dataset.data_dir" argument'
)
trainer.fit(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/token_classification/token_classification_train.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script downloads and unpacks LibriTTS data and prepares it for the punctuation and capitalization lexical audio model.
Data is downloaded from www.openslr.org and then extracted via tar.
The script gathers text from every *.normalized.txt file inside the archive into a single text file, and writes a second file with the corresponding audio filepaths.
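Example usage (illustrative; the flags are taken from the argument parser below, the paths are placeholders):
python get_libritts_data.py \
--data_dir <PATH_TO_OUTPUT_DIR> \
--data_sets dev_clean,train_clean_100 \
--clean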
"""
import argparse
import glob
import os
import re
import shutil
import subprocess
import tarfile
from tqdm import tqdm
from nemo.collections.nlp.data.token_classification.token_classification_utils import create_text_and_labels
from nemo.utils import logging
URL = {
'train_clean_100': "https://www.openslr.org/resources/60/train-clean-100.tar.gz",
'train_clean_360': "https://www.openslr.org/resources/60/train-clean-360.tar.gz",
'train_other_500': "https://www.openslr.org/resources/60/train-other-500.tar.gz",
'dev_clean': "https://www.openslr.org/resources/60/dev-clean.tar.gz",
'dev_other': "https://www.openslr.org/resources/60/dev-other.tar.gz",
'test_clean': "https://www.openslr.org/resources/60/test-clean.tar.gz",
'test_other': "https://www.openslr.org/resources/60/test-other.tar.gz",
}
def __extract_file(filepath, data_dir):
try:
tar = tarfile.open(filepath)
tar.extractall(data_dir)
tar.close()
except Exception:
print(f"Error while extracting {filepath}. Already extracted?")
def __maybe_download_file(destination: str, source: str):
"""
Downloads source to destination if it does not exist.
If it exists, skips the download.
Args:
destination: local filepath
source: url of resource
"""
source = URL[source]
if not os.path.exists(destination):
logging.info(f'Downloading {source} to {destination}')
subprocess.run(['wget', '-O', destination, source])
return 1
else:
logging.info(f'{destination} found. Skipping download')
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Prepare LibriTTS dataset for punctuation capitalization lexical audio model training/evaluating.'
)
parser.add_argument("--data_sets", default="dev_clean", type=str, help="List of subsets separated by comma")
parser.add_argument("--data_dir", required=True, type=str, help="Path to dir where data will be stored")
parser.add_argument(
"--clean", "-c", action="store_true", help="If set to True will delete all files except produced .txt and .wav"
)
args = parser.parse_args()
data_dir = args.data_dir
if not os.path.exists(data_dir):
os.makedirs(data_dir)
for subset in args.data_sets.split(','):
logging.info(f'Downloading {subset} subset')
if __maybe_download_file(data_dir + f'/{subset}.tar.gz', subset):
logging.info(f'Extracting {subset} subset')
__extract_file(data_dir + f'/{subset}.tar.gz', data_dir)
logging.info(f'Processing data')
splits = set([split.split('_')[0] for split in args.data_sets.split(',')])
for split in splits:
os.makedirs(f'{data_dir}/audio/{split}', exist_ok=True)
with open(f'{data_dir}/{split}.txt', 'w') as text_data, open(
f'{data_dir}/audio_{split}.txt', 'w'
) as audio_data:
for file in tqdm(glob.glob(f'{data_dir}/LibriTTS/{split}*/*/*/*.wav'), desc=f'Processing {split}'):
with open(file[:-4] + '.normalized.txt', 'r') as source_file:
lines = source_file.readlines()
text = lines[0]
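# keep only letters, digits, and the punctuation marks ,?!.' ; everything else is replaced
# with a space, and repeated spaces are collapsed by the second substitution below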
text = re.sub(r"[^a-zA-Z\d,?!.']", ' ', text)
text = re.sub(' +', ' ', text)
shutil.copy(file.strip(), (f'{data_dir}/audio/{split}/' + file.split('/')[-1]).strip())
text_data.write(text.strip() + "\n")
audio_data.write((f'{data_dir}/audio/{split}/' + file.split('/')[-1]).strip() + "\n")
create_text_and_labels(f'{data_dir}/', f'{data_dir}/{split}.txt')
logging.info(f'Processed {split} subset')
if args.clean:
shutil.rmtree(f'{data_dir}/LibriTTS')
for tar in glob.glob(f'{data_dir}/**.tar.gz'):
os.remove(tar)
| NeMo-main | examples/nlp/token_classification/data/get_libritts_data.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import random
import re
import subprocess
from nemo.collections.nlp.data.token_classification.token_classification_utils import create_text_and_labels
from nemo.utils import logging
URL = {'tatoeba': 'https://downloads.tatoeba.org/exports/sentences.csv'}
def __maybe_download_file(destination: str, source: str):
"""
Downloads source to destination if it does not exist.
If it exists, skips the download.
Args:
destination: local filepath
source: url of resource
"""
source = URL[source]
if not os.path.exists(destination):
logging.info(f'Downloading {source} to {destination}')
subprocess.run(['wget', '-O', destination, source])
else:
logging.info(f'{destination} found. Skipping download')
def __process_english_sentences(
in_file: str, out_file: str, percent_to_cut: float = 0, num_to_combine: int = 1, num_samples: int = -1
):
"""
Extract English sentences from the Tatoeba dataset.
Only English sentences that start with a capital letter and contain only letters and the
punctuation marks (,.?) are kept.
Chop and combine sentences.
Args:
in_file: local filepath to the tatoeba dataset.
Format: id [TAB] region_name [TAB] sentence,
for example: "1276\teng\tLet's try something.\n"
out_file: local filepath to the clean dataset
percent_to_cut: Percent of sentences to cut in the middle
to get examples of incomplete sentences.
This could be useful since ASR output does not always
represent a complete sentence
num_to_combine: Number of sentences to combine into
a single example
num_samples: Number of samples in the final dataset
"""
if not os.path.exists(in_file):
raise FileNotFoundError(f'{in_file} not found.')
in_file = open(in_file, 'r')
out_file = open(out_file, 'w')
lines_to_combine = []
samples_count = 0
for line in in_file:
line = line.split('\t')
# use only English sentences
if line[1] == 'eng':
line = line[2].strip()
if re.match("^[A-Z][A-Za-z.,'?\s]+$", line): # nopep8
# chop some sentences in the middle
if percent_to_cut > 0:
line = line.split()
if random.random() < percent_to_cut:
line = line[: len(line) // 2]
line = ' '.join(line)
# combine multiple sentences into a single example
# to make it harder for the model to learn eos punctuation
if len(lines_to_combine) >= num_to_combine:
if samples_count == num_samples:
return
out_file.write(' '.join(lines_to_combine) + '\n')
lines_to_combine = []
samples_count += 1
lines_to_combine.append(line)
if len(lines_to_combine) > 0 and (samples_count < num_samples or num_samples < 0):
out_file.write(' '.join(lines_to_combine) + '\n')
def __split_into_train_dev(in_file: str, train_file: str, dev_file: str, percent_dev: float):
"""
Create train and dev split of the dataset.
Args:
in_file: local filepath to the dataset
train_file: local filepath to the train dataset
dev_file: local filepath to the dev dataset
percent_dev: Percent of the sentences in the dev set
"""
if not os.path.exists(in_file):
raise FileNotFoundError(f'{in_file} not found.')
lines = open(in_file, 'r').readlines()
train_file = open(train_file, 'w')
dev_file = open(dev_file, 'w')
dev_size = int(len(lines) * percent_dev)
train_file.write(' '.join(lines[:-dev_size]))
dev_file.write(' '.join(lines[-dev_size:]))
def __delete_file(file_to_del: str):
"""
Deletes the file
Args:
file_to_del: local filepath to the file to delete
"""
if os.path.exists(file_to_del):
os.remove(file_to_del)
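# Example usage (illustrative; the flags come from the argument parser below, paths are placeholders):
#   python get_tatoeba_data.py \
#       --data_dir <PATH_TO_DATA_DIR> \
#       --num_samples 100000 \
#       --percent_to_cut 0.1 \
#       --num_lines_to_combine 2 \
#       --percent_dev 0.2 \
#       --clean_dir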
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Prepare tatoeba dataset')
parser.add_argument("--data_dir", required=True, type=str)
parser.add_argument("--dataset", default='tatoeba', type=str)
parser.add_argument("--num_samples", default=-1, type=int, help='-1 to use the whole dataset')
parser.add_argument("--percent_to_cut", default=0, type=float, help='Percent of sentences to cut in the middle')
parser.add_argument(
"--num_lines_to_combine", default=1, type=int, help='Number of lines to combine into single example'
)
parser.add_argument("--percent_dev", default=0.2, type=float, help='Size of the dev set, float')
parser.add_argument("--clean_dir", action='store_true')
args = parser.parse_args()
if not os.path.exists(args.data_dir):
os.makedirs(args.data_dir)
if args.dataset != 'tatoeba':
raise ValueError("Unsupported dataset.")
logging.info(f'Downloading tatoeba dataset')
tatoeba_dataset = os.path.join(args.data_dir, 'sentences.csv')
__maybe_download_file(tatoeba_dataset, args.dataset)
logging.info(f'Processing English sentences...')
clean_eng_sentences = os.path.join(args.data_dir, 'clean_eng_sentences.txt')
__process_english_sentences(
tatoeba_dataset, clean_eng_sentences, args.percent_to_cut, args.num_lines_to_combine, args.num_samples
)
train_file = os.path.join(args.data_dir, 'train.txt')
dev_file = os.path.join(args.data_dir, 'dev.txt')
logging.info(
f'Splitting the {args.dataset} dataset into train and dev sets' + ' and creating labels and text files'
)
__split_into_train_dev(clean_eng_sentences, train_file, dev_file, args.percent_dev)
logging.info(f'Creating text and label files for training')
create_text_and_labels(args.data_dir, os.path.join(args.data_dir, 'train.txt'))
create_text_and_labels(args.data_dir, os.path.join(args.data_dir, 'dev.txt'))
if args.clean_dir:
logging.info(f'Cleaning up {args.data_dir}')
__delete_file(clean_eng_sentences)
__delete_file(tatoeba_dataset)
__delete_file(train_file)
__delete_file(dev_file)
logging.info(f'Processing of the {args.dataset} is complete')
| NeMo-main | examples/nlp/token_classification/data/get_tatoeba_data.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from nemo.utils import logging
def __convert_data(in_file: str, out_text_f: str, out_labels_f: str, max_length: int):
"""
Convert data from the IOB format to NeMo accepted format described below.
in_file should be in the IOB format, see example here:
https://www.clips.uantwerpen.be/conll2003/ner/.
Args:
in_file: input file name
out_text_f: output file with text
out_labels_f: output file with labels
max_length: use -1 to leave the examples' length as is, otherwise long examples will be split into multiple
examples
After the conversion, the dataset is split into 2 files: text.txt
and labels.txt.
Each line of the text.txt file contains text sequences, where words
are separated with spaces. The labels.txt file contains corresponding
labels for each word in text.txt, the labels are separated with spaces.
Each line of the files should follow the format:
[WORD] [SPACE] [WORD] [SPACE] [WORD] (for text.txt) and
[LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).
"""
in_file = open(in_file, 'r')
if max_length == -1:
with open(out_text_f, 'w') as out_text, open(out_labels_f, 'w') as out_labels:
for line in in_file:
if line == '\n':
out_text.write(line)
out_labels.write(line)
else:
line = line.split()
out_text.write(line[0] + ' ')
out_labels.write(line[-1] + ' ')
else:
words = []
labels = []
with open(out_text_f, 'w') as out_text, open(out_labels_f, 'w') as out_labels:
lines = in_file.readlines()
for line_id, line in enumerate(lines):
logging.info(f"{line_id} {len(lines)}")
contends = line.strip()
if len(contends) == 0:
assert len(words) == len(labels)
if len(words) > max_length:
# split if the sentence is longer than max_length
while len(words) > max_length:
tmplabel = labels[:max_length]
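# walk back from position max_length until a token labeled 'O' is popped, so that the split
# point falls right after a token that carries no entity label and an entity span is not cut
# in half (tmplabel shrinks to the chosen split length)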
for iidx in range(len(tmplabel)):
if tmplabel.pop() == 'O':
break
l = ' '.join([label for label in labels[: len(tmplabel) + 1] if len(label) > 0])
w = ' '.join([word for word in words[: len(tmplabel) + 1] if len(word) > 0])
out_text.write(w + "\n")
out_labels.write(l + "\n")
words = words[len(tmplabel) + 1 :]
labels = labels[len(tmplabel) + 1 :]
if len(words) == 0:
continue
l = ' '.join([label for label in labels if len(label) > 0])
w = ' '.join([word for word in words if len(word) > 0])
out_text.write(w + "\n")
out_labels.write(l + "\n")
words = []
labels = []
continue
word = line.strip().split()[0]
label = line.strip().split()[-1]
words.append(word)
labels.append(label)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Convert data from IOB format to the format compatible with \
nlp/examples/token_classification/scripts/token_classification_train.py and \
token_classification_evaluate.py'
)
parser.add_argument("--data_file", required=True, type=str, help='path to a file in IOB format')
parser.add_argument(
"--max_length",
default=-1,
type=int,
help='use -1 to leave the examples\' length as is, '
'otherwise long examples will be split into multiple examples',
)
args = parser.parse_args()
data_dir, basename = os.path.split(args.data_file)
prefix = os.path.splitext(basename)[0]
if not os.path.exists(args.data_file):
raise FileNotFoundError(f"{args.data_file} not found")
logging.info(f'Processing {args.data_file}')
out_text = os.path.join(data_dir, 'text_' + prefix + '.txt')
out_labels = os.path.join(data_dir, 'labels_' + prefix + '.txt')
__convert_data(args.data_file, out_text, out_labels, args.max_length)
logging.info(f'Processing of the {args.data_file} is complete')
| NeMo-main | examples/nlp/token_classification/data/import_from_iob_format.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The script converts raw text to the NeMo format for punctuation and capitalization task.
Raw Data Format
---------------
The Punctuation and Capitalization model can work with any text dataset, although it is recommended to balance the data, especially for the punctuation task.
Before pre-processing the data to the format expected by the model, the data should be split into train.txt and dev.txt (and optionally test.txt).
Each line in the **train.txt/dev.txt/test.txt** should represent one or more full and/or truncated sentences.
Example of the train.txt/dev.txt file:
When is the next flight to New York?
The next flight is ...
....
The `source_data_dir` structure should look like this:
.
|--source_data_dir
|-- dev.txt
|-- train.txt
NeMo Data Format for training the model
---------------------------------------
The punctuation and capitalization model expects the data in the following format:
The training and evaluation data is divided into 2 files: text.txt and labels.txt. \
Each line of the **text.txt** file contains text sequences, where words are separated with spaces, i.e.
[WORD] [SPACE] [WORD] [SPACE] [WORD], for example:
when is the next flight to new york
the next flight is ...
...
The **labels.txt** file contains corresponding labels for each word in text.txt, the labels are separated with spaces. \
Each label in labels.txt file consists of 2 symbols:
* the first symbol of the label indicates what punctuation mark should follow the word (where O means no punctuation needed);
* the second symbol determines if a word needs to be capitalized or not (where U indicates that the word should be upper-cased, and O that no capitalization is needed).
By default, the following punctuation marks are considered: commas, periods, and question marks; the remaining punctuation marks are removed from the data.
This can be changed by introducing new labels in the labels.txt file.
Each line of the labels.txt should follow the format: [LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt). \
For example, labels for the above text.txt file should be:
OU OO OO OO OO OO OU ?U
OU OO OO OO ...
...
The complete list of all possible labels for this task used in this tutorial is: OO, ,O, .O, ?O, OU, ,U, .U, ?U.
Converting Raw data to NeMo format
----------------------------------
To pre-process the raw text data, stored under :code:`sourced_data_dir` (see the :ref:`raw_data_format_punct`
section), run the following command:
python examples/nlp/token_classification/data/prepare_data_for_punctuation_capitalization.py \
-s <PATH/TO/THE/SOURCE/FILE> \
-o <PATH/TO/THE/OUTPUT/DIRECTORY>
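To restrict or extend the set of punctuation marks used for labels, the optional -p/--marks argument can be
passed as well (illustrative; the default set is ', . ?'):
python examples/nlp/token_classification/data/prepare_data_for_punctuation_capitalization.py \
-s <PATH/TO/THE/SOURCE/FILE> \
-o <PATH/TO/THE/OUTPUT/DIRECTORY> \
-p "," "." "?"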
"""
import argparse
import os
from get_tatoeba_data import create_text_and_labels
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Prepare data for punctuation and capitalization tasks')
parser.add_argument("-s", "--source_file", required=True, type=str, help="Path to the source file")
parser.add_argument("-o", "--output_dir", required=True, type=str, help="Path to the output directory")
parser.add_argument(
"-p",
"--marks",
required=False,
type=str,
help="Punctuation marks to consider for dataset",
default=[",", ".", "?"],
nargs="+",
)
args = parser.parse_args()
if not os.path.exists(args.source_file):
raise ValueError(f'{args.source_file} was not found')
os.makedirs(args.output_dir, exist_ok=True)
create_text_and_labels(args.output_dir, args.source_file, "".join(args.marks))
print(f'Processing of the {args.source_file} is complete')
| NeMo-main | examples/nlp/token_classification/data/prepare_data_for_punctuation_capitalization.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import multiprocessing as mp
from pathlib import Path
from nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset import (
DEFAULT_CAPIT_LABEL_VOCAB_FILE_NAME,
DEFAULT_PUNCT_LABEL_VOCAB_FILE_NAME,
METADATA_CAPIT_LABEL_VOCAB_KEY,
METADATA_PUNCT_LABEL_VOCAB_KEY,
build_label_ids_from_list_of_labels,
check_labels_for_being_unique_before_building_label_ids,
check_tar_file_prefix,
create_tarred_dataset,
)
"""
A tarred dataset allows training on large amounts of data without storing it all in memory simultaneously. In the
case of the punctuation and capitalization model, a tarred dataset is a directory which contains a metadata file,
tar files with batches, and punct_label_vocab.csv and capit_label_vocab.csv files.
A metadata file is a JSON file with 4 fields: 'num_batches', 'tar_files', 'punct_label_vocab_file',
'capit_label_vocab_file'. 'num_batches' (int) is the total number of batches in the tarred dataset. 'tar_files' is a
list of paths to tar files relative to the directory containing the metadata file. 'punct_label_vocab_file' and
'capit_label_vocab_file' are paths to .csv files containing all unique punctuation and capitalization labels. Each
label in these files is written on a separate line. The first labels in both files are equal and serve for padding and
as neutral labels.
Every tar file contains objects written using `webdataset.TarWriter`. Each object is a dictionary with two items:
'__key__' and 'batch.pyd'. '__key__' is the name of a batch and 'batch.pyd' is a pickled dictionary which contains
'input_ids', 'subtokens_mask', 'punct_labels', 'capit_labels'. 'input_ids' is an array containing ids of source tokens,
'subtokens_mask' is a boolean array marking the first token of each word, 'punct_labels' and 'capit_labels' are arrays
with ids of labels. The metadata file should be passed to the constructor of
`nemo.collections.nlp.data.token_classification.PunctuationCapitalizationTarredDataset`, and the instance of
the class will handle iteration and constructing masks and token types for the BERT model.
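An illustrative metadata file (the field names follow the description above; the values, including the tar file
names, are made up) might look like:
{
  "num_batches": 1000,
  "tar_files": ["punctuation_capitalization.batches.00000.tar", "punctuation_capitalization.batches.00001.tar"],
  "punct_label_vocab_file": "punct_label_vocab.csv",
  "capit_label_vocab_file": "capit_label_vocab.csv"
}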
Example of usage:
python create_punctuation_capitalization_tarred_dataset.py \
--text <PATH/TO/TEXT/FILE> \
--labels <PATH/TO/LABELS/FILE> \
--output_dir <PATH/TO/OUTPUT/DIR> \
--lines_per_dataset_fragment 10000 \
--tokens_in_batch 8000 \
--num_batches_per_tarfile 5 \
--tokenizer_name char \
--vocab_file <PATH_TO_CHAR_TOKENIZER_VOCABULARY>
"""
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=f"A tarred dataset allows to train on large amounts without storing it all into memory "
f"simultaneously. In case of punctuation and capitalization model, tarred dataset is a directory which "
f"contains metadata file, tar files with batches, {DEFAULT_PUNCT_LABEL_VOCAB_FILE_NAME} and "
f"{DEFAULT_CAPIT_LABEL_VOCAB_FILE_NAME} files. A metadata file is a JSON file with 4 fields: 'num_batches', "
f"'tar_files', '{METADATA_PUNCT_LABEL_VOCAB_KEY}', '{METADATA_CAPIT_LABEL_VOCAB_KEY}'. 'num_batches' (int) is "
f"a total number of batches in tarred dataset. 'tar_files' is a list of paths to tar files relative "
f"to directory containing the metadata file. '{METADATA_PUNCT_LABEL_VOCAB_KEY}' and "
f"'{METADATA_CAPIT_LABEL_VOCAB_KEY}' are paths to .csv files containing all unique punctuation and "
f"capitalization labels. Each label in these files is written in a separate line. The first labels in both "
f"files are equal and serve for padding and as neutral labels. Every tar file contains objects written "
f"using `webdataset.TarWriter`. Each object is a dictionary with two items: '__key__' and 'batch.pyd'. "
f"'__key__' is a name of a batch and 'batch.pyd' is a pickled dictionary which contains 'input_ids', "
f"'subtokens_mask', 'punct_labels', 'capit_labels'. 'input_ids' is an array containing ids of source tokens, "
f"'subtokens_mask' is a boolean array showing first tokens in words, 'punct_labels' and 'capit_labels' are "
f"arrays with ids of labels. Metadata file should be passed to constructor of "
"`nemo.collections.nlp.data.token_classification.PunctuationCapitalizationTarredDataset` and the instance of "
"the class will handle iteration and constructing masks and token types for BERT model.",
)
parser.add_argument(
"--text",
"-t",
help="Path to source lowercased text without punctuation. Number of lines in `--text` file has to be equal "
"to number of lines in `--labels` file.",
type=Path,
required=True,
)
parser.add_argument(
"--audio_file",
type=Path,
required=False,
help="Path to source file which contains paths to audio one path per line. "
"Number of lines in `--audio_file` has to be equal to number of lines in `--labels` file",
)
parser.add_argument(
"--use_audio",
required=False,
action="store_true",
help="If set to `True` script creates lexical audio dataset which can be used with `PunctuationCapitalizationLexicalAudioModel`.",
)
parser.add_argument(
"--sample_rate",
type=int,
required=False,
help="Target sample rate of audios. Can be used for downsampling or upsampling.",
)
parser.add_argument(
"--labels",
"-L",
type=Path,
required=True,
help="Path to file with labels in the format described here "
"https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#"
"nemo-data-format . Number of lines in `--labels` file has to be equal to the number of lines in `--text` "
"file.",
)
parser.add_argument(
"--output_dir",
"-o",
type=Path,
required=True,
help="Path to directory where .tar files, metadata file, label id files are stored.",
)
parser.add_argument(
"--max_seq_length",
"-s",
type=int,
default=512,
help="Maximum number of subtokens in an input sequence. A source sequence which contain too many subtokens are "
"clipped to `--max_seq_length - 2` subtokens and then [CLS] token is prepended to the clipped sequence and "
"[SEP] token is appended to the clipped sequence. The clipping is performed via removal of subtokens in the "
"end of a source sequence.",
)
parser.add_argument(
"--tokens_in_batch",
"-b",
type=int,
default=15000,
help="Maximum number of tokens in a batch including [CLS], [SEP], [UNK], and [PAD] tokens. Before packing into "
"batches source sequences are sorted by number of tokens in order to reduce number of pad tokens. So the "
"number of sequences in a batch may be different.",
)
parser.add_argument(
"--lines_per_dataset_fragment",
type=int,
default=10 ** 6,
help="A number of lines processed by one worker during creation of tarred dataset. A worker tokenizes "
"`--lines_per_dataset_fragment` lines and keeps in RAM tokenized text labels before packing them into "
"batches. Reducing `--lines_per_dataset_fragment` leads to reducing of the amount of memory required by this "
"script.",
)
parser.add_argument(
"--num_batches_per_tarfile",
type=int,
default=1000,
help="A number of batches saved in a tar file. If you increase `--num_batches_per_tarfile`, then there will "
"be less tar files in the dataset. There cannot be less then `--num_batches_per_tarfile` batches in a tar "
"file, and all excess batches are removed. Maximum number of discarded batches is "
"`--num_batches_per_tarfile - 1`.",
)
parser.add_argument(
"--tokenizer_name",
"-T",
default="bert-base-uncased",
help="Name of the tokenizer used for tokenization of source sequences. Possible options are 'sentencepiece', "
"'word', 'char', HuggingFace tokenizers. For more options see function "
"`nemo.collections.nlp.modules.common.get_tokenizer`. The tokenizer has to have properties `cls_id`, "
"`pad_id`, `sep_id`, `unk_id`.",
)
parser.add_argument(
"--tokenizer_model", "-m", type=Path, help="Path to tokenizer model required for 'sentencepiece' tokenizer."
)
parser.add_argument(
"--vocab_file",
"-v",
type=Path,
help="Path to vocabulary file which can be used in 'word', 'char', and HuggingFace tokenizers.",
)
parser.add_argument(
"--merges_file", "-M", type=Path, help="Path to merges file which can be used in HuggingFace tokenizers."
)
parser.add_argument(
"--special_token_names",
"-n",
nargs="+",
help="Names of special tokens which may be passed to constructors of 'char', 'word', 'sentencepiece', and "
"HuggingFace tokenizers.",
)
parser.add_argument(
"--special_token_values",
"-V",
nargs="+",
help="Values of special tokens which may be passed to constructors of 'char', 'word', 'sentencepiece', and "
"HuggingFace tokenizers.",
)
parser.add_argument(
"--use_fast_tokenizer", "-f", action="store_true", help="Whether to use fast HuggingFace tokenizer."
)
parser.add_argument(
"--pad_label",
"-P",
default='O',
help="Pad label both for punctuation and capitalization. This label is also is used for marking words which "
"do not need punctuation and capitalization. It is also a neutral label used for marking words which do "
"not require punctuation and capitalization.",
)
punct = parser.add_mutually_exclusive_group(required=False)
punct.add_argument(
"--punct_labels",
"-p",
nargs="+",
help="All punctuation labels EXCEPT PAD LABEL. Punctuation labels are strings separated by spaces. "
"Alternatively you can use parameter `--punct_label_vocab_file`. If none of parameters `--punct_labels` "
"and `--punct_label_vocab_file` are provided, then punctuation label ids will be inferred from `--labels` "
"file.",
)
punct.add_argument(
"--punct_label_vocab_file",
type=Path,
help="A path to file with punctuation labels. These labels include pad label. Pad label has to be the first "
"label in the file. Each label is written on separate line. Alternatively you can use `--punct_labels` "
"parameter. If none of parameters `--punct_labels` and `--punct_label_vocab_file` are provided, then "
"punctuation label ids will be inferred from `--labels` file.",
)
capit = parser.add_mutually_exclusive_group(required=False)
capit.add_argument(
"--capit_labels",
"-c",
nargs="+",
help="All capitalization labels EXCEPT PAD LABEL. Capitalization labels are strings separated by spaces. "
"Alternatively you can use parameter `--capit_label_vocab_file`. If none of parameters `--capit_labels` "
"and `--capit_label_vocab_file` are provided, then capitalization label ids will be inferred from `--labels` "
"file.",
)
capit.add_argument(
"--capit_label_vocab_file",
type=Path,
help="A path to file with capitalization labels. These labels include pad label. Pad label has to be the "
"first label in the file. Each label is written on separate line. Alternatively you can use `--capit_labels` "
"parameter. If none of parameters `--capit_labels` and `--capit_label_vocab_file` are provided, then "
"capitalization label ids will be inferred from `--labels` file.",
)
parser.add_argument(
"--tar_file_prefix",
"-x",
default="punctuation_capitalization",
help="A string from which tar file names start. It can contain only characters 'A-Z', 'a-z', '0-9', '_', '-', "
"'.'.",
)
parser.add_argument(
"--n_jobs",
"-j",
type=int,
default=mp.cpu_count(),
help="Number of workers for creating tarred dataset. By default it is equal to the number of CPU cores.",
)
args = parser.parse_args()
for name in [
"text",
"labels",
"output_dir",
"tokenizer_model",
"vocab_file",
"merges_file",
"punct_label_vocab_file",
"capit_label_vocab_file",
]:
if getattr(args, name) is not None:
setattr(args, name, getattr(args, name).expanduser())
if args.special_token_names is not None or args.special_token_values is not None:
if args.special_token_names is None:
parser.error(
"If you provide parameter `--special_token_values` you have to provide parameter "
"`--special_token_names`."
)
if args.special_token_values is None:
parser.error(
"If you provide parameter `--special_token_names` you have to provide parameter "
"`--special_token_values`."
)
if len(args.special_token_names) != len(args.special_token_values):
parser.error(
f"Parameters `--special_token_names` and `--special_token_values` have to have equal number of values "
f"whereas parameter `--special_token_names` has {len(args.special_token_names)} values and parameter "
f"`--special_token_values` has {len(args.special_token_values)} values."
)
if len(set(args.special_token_names)) != len(args.special_token_names):
for i in range(len(args.special_token_names) - 1):
if args.special_token_names[i] in args.special_token_names[i + 1 :]:
parser.error(
f"Values of parameter `--special_token_names` has to be unique. Found duplicate value "
f"'{args.special_token_names[i]}'."
)
if args.punct_labels is not None:
check_labels_for_being_unique_before_building_label_ids(
args.pad_label, args.punct_labels, '--pad_label', '--punct_labels', parser.error
)
check_labels_for_being_unique_before_building_label_ids(
args.pad_label, args.capit_labels, '--pad_label', '--capit_labels', parser.error
)
check_tar_file_prefix(args.tar_file_prefix, parser.error, '--tar_file_prefix')
return args
def main() -> None:
args = get_args()
if args.special_token_names is None:
special_tokens = None
else:
special_tokens = dict(zip(args.special_token_names, args.special_token_values))
if args.punct_labels is not None:
punct_label_ids = build_label_ids_from_list_of_labels(args.pad_label, args.punct_labels)
else:
punct_label_ids = None
if args.capit_labels is not None:
capit_label_ids = build_label_ids_from_list_of_labels(args.pad_label, args.capit_labels)
else:
capit_label_ids = None
create_tarred_dataset(
args.text,
args.labels,
args.output_dir,
args.max_seq_length,
args.tokens_in_batch,
args.lines_per_dataset_fragment,
args.num_batches_per_tarfile,
args.tokenizer_name,
tokenizer_model=args.tokenizer_model,
vocab_file=args.vocab_file,
merges_file=args.merges_file,
special_tokens=special_tokens,
use_fast_tokenizer=args.use_fast_tokenizer,
pad_label=args.pad_label,
punct_label_ids=punct_label_ids,
capit_label_ids=capit_label_ids,
punct_label_vocab_file=args.punct_label_vocab_file,
capit_label_vocab_file=args.capit_label_vocab_file,
tar_file_prefix=args.tar_file_prefix,
n_jobs=args.n_jobs,
audio_file=args.audio_file,
sample_rate=args.sample_rate,
use_audio=args.use_audio,
)
if __name__ == "__main__":
main()
| NeMo-main | examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.multiprocessing as mp
from megatron.core import parallel_state
from omegaconf import OmegaConf
from omegaconf.omegaconf import open_dict
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import (
MegatronGPTPromptLearningModel,
)
from nemo.collections.nlp.modules.common.transformer.text_generation import LengthParam, SamplingParam
from nemo.collections.nlp.parts.nlp_overrides import CustomProgressBar, NLPDDPStrategy, NLPSaveRestoreConnector
from nemo.core.config import hydra_runner
from nemo.utils import logging
mp.set_start_method("spawn", force=True)
"""
This is the script to run GPT text generation.
a. run greedy inference from a p-tuned/prompt-tuned model's nemo file:
python megatron_gpt_prompt_learning_eval.py \
virtual_prompt_model_file=PATH_TO_NEMO_PROMPT_LEARNING_MODEL_FILE \
gpt_model_file=PATH_TO_FROZEN_GPT_MODEL_FILE \
inference.greedy=True \
inference.add_BOS=False \
trainer.devices=1 \
trainer.num_nodes=1 \
tensor_model_parallel_size=1 \
pipeline_model_parallel_size=1 \
pred_file_path=PATH_WHERE_PRED_TEXT_FILE_WILL_BE_SAVED \
data_paths=[path/to/dataset1.jsonl, path/to/dataset2.jsonl]
virtual_prompt_model_file should be a path to a .nemo file saved after p-tuning/prompt tuning, and gpt_model_file
is still the path to the frozen GPT model's .nemo file.
data_paths should be a list of .json or .jsonl files containing json objects similar to the ones
used during prompt learning. They should have keys that match the fields specified in the prompt template.
Fields can be dropped from the prompt dict and their corresponding section of the prompt template will
be automatically removed.
For example, say the prompt template during p-tuning/prompt-tuning looked like:
'<|VIRTUAL_PROMPT_0|> Context: {context} Question: {question} Answer: {answer}'
but you don't want to include the answer field during inference. Just don't
include the answer field in the prompt dict like below:
{"taskname": "squad", "context": "some paragraph", "question": "question related to paragraph"}
{"taskname": "squad", "context": "another paragraph", "question": "a different question related to paragraph"}
And the dataset class will automatically format your input to have the form:
[
'<|VIRTUAL_PROMPT_0|> Context: some paragraph Question: question related to paragraph Answer:',
'<|VIRTUAL_PROMPT_0|> Context: another paragraph Question: a different question related to paragraph Answer:'
]
Similarly for other scenarios, just add virtual_prompt_model_file=PATH_TO_NEMO_PROMPT_LEARNING_MODEL_FILE if you're using a
p-tuned/prompt-tuned model.
"""
@hydra_runner(config_path="conf", config_name="megatron_gpt_prompt_learning_inference")
def main(cfg) -> None:
if not torch.cuda.is_available():
raise EnvironmentError("GPU is needed for the inference")
# trainer required for restoring model parallel models
trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer, callbacks=[CustomProgressBar()])
if (
cfg.tensor_model_parallel_size < 0
or cfg.pipeline_model_parallel_size < 0
or cfg.get('pipeline_model_parallel_split_rank', -1) < 0
):
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.gpt_model_file):
save_restore_connector.model_extracted_dir = cfg.gpt_model_file
model_config = MegatronGPTModel.restore_from(
restore_path=cfg.gpt_model_file,
trainer=trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
with open_dict(cfg):
cfg.tensor_model_parallel_size = model_config.get('tensor_model_parallel_size', 1)
cfg.pipeline_model_parallel_size = model_config.get('pipeline_model_parallel_size', 1)
cfg.pipeline_model_parallel_split_rank = model_config.get('pipeline_model_parallel_split_rank', 0)
assert (
cfg.trainer.devices * cfg.trainer.num_nodes
== cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
), "devices * num_nodes should equal tensor_model_parallel_size * pipeline_model_parallel_size"
# Update frozen GPT model path if it is given in case it has changed
prompt_learning_cfg = MegatronGPTPromptLearningModel.restore_from(
cfg.virtual_prompt_model_file, trainer=trainer, return_config=True,
)
if cfg.get("gpt_model_file"):
with open_dict(prompt_learning_cfg):
prompt_learning_cfg.language_model_path = cfg.gpt_model_file
prompt_learning_cfg.sequence_parallel = False
prompt_learning_cfg.activations_checkpoint_method = None
prompt_learning_cfg.activations_checkpoint_granularity = None
prompt_learning_cfg.activations_checkpoint_num_layers = None
# Load prompt tuned model, virtual_prompt_model_file must be provided in config
# Now load prompt learning model with frozen gpt model base
model = MegatronGPTPromptLearningModel.restore_from(
restore_path=cfg.virtual_prompt_model_file, trainer=trainer, override_config_path=prompt_learning_cfg,
)
model.freeze()
# Have to turn off activations_checkpoint_method for inference
try:
model.frozen_model.model.language_model.encoder.activations_checkpoint_method = None
except AttributeError:
pass
# Check whether the DDP is initialized
if parallel_state.is_unitialized():
def placeholder():
return
if model.trainer.strategy.launcher is not None:
model.trainer.strategy.launcher.launch(placeholder, trainer=model.trainer)
model.trainer.strategy.setup_environment()
length_params: LengthParam = {
"max_length": cfg.inference.tokens_to_generate,
"min_length": cfg.inference.min_tokens_to_generate,
}
sampling_params: SamplingParam = {
"use_greedy": cfg.inference.greedy,
"temperature": cfg.inference.temperature,
"top_k": cfg.inference.top_k,
"top_p": cfg.inference.top_p,
"repetition_penalty": cfg.inference.repetition_penalty,
"add_BOS": cfg.inference.add_BOS,
"all_probs": cfg.inference.all_probs,
"compute_logprob": cfg.inference.compute_logprob,
}
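# leave room for the tokens that will be generated: prompts longer than
# encoder_seq_length - tokens_to_generate could not be extended by max_length new tokens,
# and the value is further capped by cfg.max_seq_length below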
max_seq_length = model.frozen_model.cfg.encoder_seq_length - length_params["max_length"]
max_seq_length = min(max_seq_length, cfg.get("max_seq_length", 8192))
_, dataloader = model.build_virtual_prompt_dataset(
data=cfg.data_paths,
batch_size=cfg.inference.get('batch_size', 1),
max_seq_length=max_seq_length,
min_seq_length=model.cfg.data.get('min_seq_length', 1),
add_bos=sampling_params["add_BOS"],
add_eos=False,
for_train=False,
tokens_to_generate=length_params["max_length"],
drop_last=False,
shuffle=False,
num_workers=cfg.get("num_workers", 1),
)
config = OmegaConf.to_container(cfg.inference)
model.set_inference_config(config)
response = trainer.predict(model, dataloader)
print("***************************")
with open(cfg.pred_file_path, "w", encoding="utf-8") as pred_file:
for i in range(len(response)):
for sent in response[i]["sentences"]:
sent = sent.strip()
sent = sent.replace("\n", " ")
pred_file.write(sent + "\n")
print(f"Inference Complete, prediction file saved at {cfg.pred_file_path}")
print("***************************")
if __name__ == '__main__':
dep_msg = "* Please switch to using examples/nlp/language_modeling/tuning/megatron_gpt_peft_eval.py *"
dep = "Deprecation Notice!!".center(len(dep_msg) - 2, " ")
banner = "*" * len(dep_msg)
spacer = " " * (len(dep_msg) - 2)
logging.warning(f"\n\n{banner}\n*{spacer}*\n*{dep}*\n{dep_msg}\n*{spacer}*\n{banner}\n\n")
main()
logging.warning(f"\n\n{banner}\n*{spacer}*\n*{dep}*\n{dep_msg}\n*{spacer}*\n{banner}\n\n")
| NeMo-main | examples/nlp/language_modeling/megatron_gpt_prompt_learning_eval.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from pytorch_lightning.plugins.precision import MixedPrecisionPlugin
from pytorch_lightning.trainer.connectors.checkpoint_connector import _CheckpointConnector
from nemo.collections.nlp.models.language_modeling.megatron_retrieval_model import MegatronRetrievalModel
from nemo.collections.nlp.modules.common.megatron.mup.optim import MuAdam, MuAdamW
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
)
from nemo.core.config import hydra_runner
from nemo.core.config.optimizers import AdamParams, AdamWParams
from nemo.core.optim.optimizers import register_optimizer
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
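# Illustrative invocation (the config keys are assumptions based on the fields this script and standard
# NeMo configs read, and may differ in megatron_retro_mutransfer.yaml):
#   python megatron_retro_mutransfer_pretrain.py \
#       trainer.devices=2 \
#       trainer.precision=bf16-mixed \
#       model.optim.name=muadamw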
@hydra_runner(config_path="conf", config_name="megatron_retro_mutransfer")
def main(cfg) -> None:
register_optimizer("muadamw", MuAdamW, AdamWParams())
register_optimizer("muadam", MuAdam, AdamParams())
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True if megatron_amp_o2 else False,
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(MixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[CustomProgressBar()])
exp_manager(trainer, cfg.exp_manager)
# resume_from_checkpoint = uninject_model_parallel_rank(resume_from_checkpoint)
logging.info(f'Resuming training from checkpoint: {trainer.ckpt_path}')
model = MegatronRetrievalModel(cfg.model, trainer)
trainer.fit(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/megatron_retro_mutransfer_pretrain.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelSummary
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from pytorch_lightning.trainer.connectors.checkpoint_connector import _CheckpointConnector
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
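# Illustrative invocation (paths and overrides are hypothetical):
#   python megatron_t5_lm_adaptation_finetune.py model.pretrained_model_path=/models/t5.nemo trainer.devices=8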
@hydra_runner(config_path="conf", config_name="megatron_t5_lm_adaptation_finetune")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True, # we don't use DDP for async grad allreduce
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', '16-mixed', 'bf16', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(
plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[ModelSummary(max_depth=3), CustomProgressBar()]
)
exp_manager(trainer, cfg.exp_manager)
# update resume from checkpoint found by exp_manager
if cfg.model.resume_from_checkpoint is not None:
trainer.ckpt_path = cfg.model.resume_from_checkpoint
logging.info(f'Resuming training from checkpoint: {trainer.ckpt_path}')
if hasattr(cfg.model, 'pretrained_model_path') and cfg.model.pretrained_model_path is not None:
pretrained_cfg = MegatronT5Model.restore_from(
cfg.model.pretrained_model_path, trainer=trainer, return_config=True
)
OmegaConf.set_struct(pretrained_cfg, True)
with open_dict(pretrained_cfg):
# Override data from T5 to Prefix-LM
encoder_seq_length = pretrained_cfg.data.seq_length
decoder_seq_length = (
pretrained_cfg.data.seq_length
            ) # Set decoder seq length to be encoder seq length for prefix-LM
pretrained_cfg.data = cfg.model.data
pretrained_cfg.data.seq_length = encoder_seq_length
pretrained_cfg.data.seq_length_dec = (
decoder_seq_length - 1
) # -1 is to account for the addition of <bos> and <eos> and right shifting to create targets.
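            # Illustrative example: with a pre-trained seq_length of 512, the encoder keeps
            # seq_length=512 and the decoder is given seq_length_dec=511.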
# Override fusion params.
pretrained_cfg.masked_softmax_fusion = cfg.model.masked_softmax_fusion
pretrained_cfg.bias_dropout_add_fusion = cfg.model.bias_dropout_add_fusion
pretrained_cfg.bias_gelu_fusion = cfg.model.bias_gelu_fusion
# Override dropout
if cfg.model.hidden_dropout is not None:
pretrained_cfg.hidden_dropout = cfg.model.hidden_dropout
if cfg.model.attention_dropout is not None:
pretrained_cfg.attention_dropout = cfg.model.attention_dropout
# Override precision
pretrained_cfg.precision = trainer.precision # Set above from trainer.precision
# Override micro/global batch
pretrained_cfg.micro_batch_size = cfg.model.micro_batch_size
pretrained_cfg.global_batch_size = cfg.model.global_batch_size
# O2 AMP
pretrained_cfg.megatron_amp_O2 = cfg.model.get('megatron_amp_O2', False)
# Optimizer overrides.
pretrained_cfg.optim = cfg.model.optim
model = MegatronT5Model.restore_from(
cfg.model.pretrained_model_path,
trainer=trainer,
override_config_path=pretrained_cfg,
save_restore_connector=NLPSaveRestoreConnector(),
)
else:
        raise ValueError(f'Pretrained model path was not specified or does not exist: {cfg.model.pretrained_model_path}')
trainer.fit(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/megatron_t5_lm_adaptation_finetune.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import torch.multiprocessing as mp
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from pytorch_lightning.trainer.connectors.checkpoint_connector import _CheckpointConnector
from nemo.collections.nlp.models.language_modeling.megatron_finetune_model import MegatronT5FinetuneModel
from nemo.collections.nlp.models.language_modeling.megatron_glue_model import MegatronT5GLUEModel
from nemo.collections.nlp.models.language_modeling.megatron_t0_model import MegatronT0Model
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import AppState, logging
from nemo.utils.exp_manager import exp_manager
from nemo.utils.model_utils import inject_model_parallel_rank
mp.set_start_method("spawn", force=True)
def _modify_config(t5_cfg, cfg, add_cfg_to_tree=False):
"""
This function modifies the original t5 pre-training config (t5_cfg) with attributes from the finetuning config (cfg).
The `add_cfg_to_tree` arg adds `cfg` to the top of the yaml tree which is needed for all `hparams.yaml` files when passed as an arg to `load_from_checkpoint()`.
"""
OmegaConf.set_struct(t5_cfg, True)
with open_dict(t5_cfg):
t5_cfg.megatron_amp_O2 = cfg.model.get('megatron_amp_O2', False)
if hasattr(t5_cfg, 'encoder') and hasattr(t5_cfg, 'decoder'):
t5_cfg.encoder.masked_softmax_fusion = False
t5_cfg.decoder.masked_softmax_fusion = False
t5_cfg.encoder.hidden_dropout = cfg.model.get('hidden_dropout', 0.1)
t5_cfg.decoder.hidden_dropout = cfg.model.get('hidden_dropout', 0.1)
if hasattr(t5_cfg.encoder, 'ffn_dropout'):
t5_cfg.encoder.ffn_dropout = cfg.model.get('ffn_dropout', 0.1)
if hasattr(t5_cfg.decoder, 'ffn_dropout'):
t5_cfg.decoder.ffn_dropout = cfg.model.get('ffn_dropout', 0.1)
if hasattr(cfg.model, 'encoder'):
if hasattr(cfg.model.encoder, 'position_embedding_type'):
t5_cfg.encoder.position_embedding_type = cfg.model.encoder.position_embedding_type
if hasattr(cfg.model.encoder, 'use_flash_attention'):
t5_cfg.encoder.use_flash_attention = cfg.model.encoder.use_flash_attention
if hasattr(cfg.model.encoder, 'attention_dropout'):
t5_cfg.encoder.attention_dropout = cfg.model.encoder.attention_dropout
if hasattr(cfg.model, 'decoder'):
if hasattr(cfg.model.decoder, 'position_embedding_type'):
t5_cfg.decoder.position_embedding_type = cfg.model.decoder.position_embedding_type
if hasattr(cfg.model.decoder, 'use_flash_attention'):
t5_cfg.decoder.use_flash_attention = cfg.model.decoder.use_flash_attention
if hasattr(cfg.model.decoder, 'attention_dropout'):
t5_cfg.decoder.attention_dropout = cfg.model.decoder.attention_dropout
else:
t5_cfg.hidden_dropout = cfg.model.get('hidden_dropout', 0.1)
t5_cfg.attention_dropout = cfg.model.get('attention_dropout', 0.1)
t5_cfg.masked_softmax_fusion = False
t5_cfg.data = cfg.model.data
t5_cfg.precision = cfg.trainer.precision
t5_cfg.optim = cfg.model.optim
t5_cfg.micro_batch_size = cfg.model.data.train_ds.micro_batch_size
t5_cfg.global_batch_size = cfg.model.data.train_ds.global_batch_size
# XNLI has eval languages in the yaml config.
if hasattr(cfg.model, 'eval_languages'):
t5_cfg.eval_languages = cfg.model.eval_languages
# This is needed when modifying a hparam file directly to load `.ckpt` files.
# This is not needed to modify the cfg in `.nemo` files.
if add_cfg_to_tree:
OmegaConf.resolve(t5_cfg)
t5_cfg.cfg = t5_cfg
return t5_cfg
def load_from_nemo(cls, cfg, trainer, t5_cfg, modify_confg_fn):
t5_cfg = modify_confg_fn(t5_cfg, cfg, add_cfg_to_tree=False)
model = cls.restore_from(
restore_path=cfg.model.restore_from_path,
trainer=trainer,
override_config_path=t5_cfg,
save_restore_connector=NLPSaveRestoreConnector(),
)
return model
def load_from_checkpoint_dir(cls, cfg, trainer, modify_confg_fn):
app_state = AppState()
if cfg.model.tensor_model_parallel_size > 1 or cfg.model.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = cfg.model.tensor_model_parallel_size * cfg.model.pipeline_model_parallel_size
app_state.tensor_model_parallel_size = cfg.model.tensor_model_parallel_size
app_state.pipeline_model_parallel_size = cfg.model.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=cfg.model.tensor_model_parallel_size,
pipeline_model_parallel_size_=cfg.model.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=cfg.model.pipeline_model_parallel_split_rank,
)
checkpoint_path = inject_model_parallel_rank(
os.path.join(cfg.model.pretrained_checkpoint.checkpoint_dir, cfg.model.pretrained_checkpoint.checkpoint_name)
)
hparams_file = OmegaConf.load(cfg.model.pretrained_checkpoint.hparams_file)
t5_cfg = modify_confg_fn(hparams_file.cfg, cfg, add_cfg_to_tree=True)
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
OmegaConf.save(config=t5_cfg, f=f.name)
model = cls.load_from_checkpoint(checkpoint_path=checkpoint_path, trainer=trainer, hparams_file=f.name,)
return model
def validate_checkpoint_loading_args(cfg):
if cfg.checkpoint_dir is None or not os.path.isdir(cfg.checkpoint_dir):
raise ValueError(f'Checkpoint directory {cfg.checkpoint_dir} does not exist or is not a directory.')
if cfg.checkpoint_name is None:
raise ValueError(f'Checkpoint name {cfg.checkpoint_name} is not valid.')
if cfg.hparams_file is None or not os.path.isfile(cfg.hparams_file):
raise ValueError(f'Hparams file {cfg.hparams_file} does not exist or is not a file.')
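# Illustrative layout of the model.pretrained_checkpoint section validated above (paths are hypothetical):
#   pretrained_checkpoint:
#     checkpoint_dir: /results/megatron_t5/checkpoints
#     checkpoint_name: megatron_t5--val_loss=1.23-step=100000.ckpt
#     hparams_file: /results/megatron_t5/hparams.yaml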
@hydra_runner(config_path="conf", config_name="megatron_t5_config_finetune_glue_mnli")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True,
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[CustomProgressBar()])
exp_manager(trainer, cfg.exp_manager)
# update resume from checkpoint found by exp_manager
if cfg.model.resume_from_checkpoint is not None:
trainer.ckpt_path = cfg.model.resume_from_checkpoint
logging.info(f'Resuming training from checkpoint: {trainer.ckpt_path}')
if hasattr(cfg.model.data.train_ds, 'task_name'):
if cfg.model.restore_from_path:
t5_cfg = MegatronT5GLUEModel.restore_from(
restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True
)
model = load_from_nemo(MegatronT5GLUEModel, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config)
else:
validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint)
model = load_from_checkpoint_dir(MegatronT5GLUEModel, cfg, trainer, modify_confg_fn=_modify_config)
elif hasattr(cfg.model.data.train_ds, 'file_names'):
if cfg.model.restore_from_path:
t5_cfg = MegatronT0Model.restore_from(
restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True
)
model = load_from_nemo(MegatronT0Model, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config)
else:
validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint)
model = load_from_checkpoint_dir(MegatronT0Model, cfg, trainer, modify_confg_fn=_modify_config)
else:
if cfg.model.restore_from_path:
t5_cfg = MegatronT5FinetuneModel.restore_from(
restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True
)
model = load_from_nemo(MegatronT5FinetuneModel, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config)
else:
validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint)
model = load_from_checkpoint_dir(MegatronT5FinetuneModel, cfg, trainer, modify_confg_fn=_modify_config)
trainer.fit(model)
trainer.validate(model)
if hasattr(cfg.model.data, 'test_ds'):
trainer.test(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/megatron_t5_seq2seq_finetune.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from pytorch_lightning.plugins.precision import MixedPrecisionPlugin
from pytorch_lightning.trainer.connectors.checkpoint_connector import _CheckpointConnector
from nemo.collections.nlp.models.language_modeling.megatron_retrieval_model import MegatronRetrievalModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import initialize_model_parallel_for_nemo
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
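# Illustrative invocation (paths and overrides are hypothetical); restore_from_path is only
# needed when continuing from an existing .nemo RETRO model:
#   python megatron_retro_pretraining.py restore_from_path=/models/retro.nemo trainer.devices=8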
@hydra_runner(config_path="conf", config_name="megatron_retro_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True if megatron_amp_o2 else False,
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2:
plugins.append(MegatronHalfPrecisionPlugin(plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(MixedPrecisionPlugin(plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[CustomProgressBar()])
exp_manager(trainer, cfg.exp_manager)
# resume_from_checkpoint = uninject_model_parallel_rank(resume_from_checkpoint)
logging.info(f'Resuming training from checkpoint: {trainer.ckpt_path}')
# load existing nemo retro model
if cfg.get("restore_from_path", None) is not None:
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.restore_from_path):
save_restore_connector.model_extracted_dir = cfg.restore_from_path
model = MegatronRetrievalModel.restore_from(
restore_path=cfg.restore_from_path,
trainer=trainer,
override_config_path=cfg.model,
save_restore_connector=save_restore_connector,
strict=False,
)
else:
model = MegatronRetrievalModel(cfg.model, trainer)
trainer.fit(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/megatron_retro_pretraining.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentParser
import torch
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning.trainer.trainer import Trainer
from torch.utils.data import DataLoader
from nemo.collections.nlp.data.language_modeling.megatron.request_dataset import T5RequestDataset
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy, NLPSaveRestoreConnector
from nemo.utils.app_state import AppState
assert torch.cuda.is_available()
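# Illustrative invocation (paths and values are hypothetical):
#   python megatron_t5_eval.py --model_file /models/t5.nemo --prompt "The capital of France is" --tokens_to_generate 16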
def main():
parser = ArgumentParser()
parser.add_argument("--model_file", type=str, default="", required=True, help="Pass path to model's .nemo file")
parser.add_argument(
"--prompt", type=str, default="", required=True, help="Prompt for the model (a text to complete)"
)
parser.add_argument(
"--tokens_to_generate", type=int, default="16", required=False, help="How many tokens to add to prompt"
)
parser.add_argument(
"--tensor_model_parallel_size", type=int, default=-1, required=False,
)
parser.add_argument(
"--pipeline_model_parallel_size", type=int, default=-1, required=False,
)
parser.add_argument(
"--pipeline_model_parallel_split_rank", type=int, default=-1, required=False,
)
parser.add_argument("--precision", default="16", type=str, help="PyTorch Lightning Trainer precision flag")
parser.add_argument("--decoder_starts_with_pad", action="store_true", help="Decoder starts with pad token")
parser.add_argument("--add_eos_to_encoder_input", action="store_true", help="Encoder input ends with EOS token")
args = parser.parse_args()
# cast precision to int if 32 or 16
if args.precision in ["32", "16"]:
args.precision = int(float(args.precision))
if (
args.tensor_model_parallel_size < 0
or args.pipeline_model_parallel_size < 0
or args.pipeline_model_parallel_split_rank < 0
):
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(args.model_file):
save_restore_connector.model_extracted_dir = args.model_file
model_config = MegatronT5Model.restore_from(
restore_path=args.model_file,
trainer=Trainer(strategy=NLPDDPStrategy()),
return_config=True,
save_restore_connector=save_restore_connector,
)
args.tensor_model_parallel_size = model_config.get('tensor_model_parallel_size', 1)
args.pipeline_model_parallel_size = model_config.get('pipeline_model_parallel_size', 1)
args.pipeline_model_parallel_split_rank = model_config.get('pipeline_model_parallel_split_rank', 0)
# trainer required for restoring model parallel models
trainer = Trainer(
strategy=NLPDDPStrategy(),
devices=args.tensor_model_parallel_size * args.pipeline_model_parallel_size,
accelerator='gpu',
precision=args.precision,
)
app_state = AppState()
if args.tensor_model_parallel_size > 1 or args.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = args.tensor_model_parallel_size * args.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=args.tensor_model_parallel_size,
pipeline_model_parallel_size_=args.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=args.pipeline_model_parallel_split_rank,
)
model_cfg = MegatronT5Model.restore_from(
restore_path=args.model_file,
trainer=trainer,
save_restore_connector=NLPSaveRestoreConnector(),
return_config=True,
)
OmegaConf.set_struct(model_cfg, True)
with open_dict(model_cfg):
model_cfg.precision = trainer.precision
model = MegatronT5Model.restore_from(
restore_path=args.model_file,
trainer=trainer,
save_restore_connector=NLPSaveRestoreConnector(),
override_config_path=model_cfg,
)
model.freeze()
model.training = False
request = {
"prompt": args.prompt,
"tokens_to_generate": args.tokens_to_generate,
"bos_id": model.tokenizer.pad_id if args.decoder_starts_with_pad else model.tokenizer.bos_id,
"add_eos_to_encoder_input": args.add_eos_to_encoder_input,
}
dataset = T5RequestDataset(request, model.tokenizer)
request_dl = DataLoader(dataset)
response = trainer.predict(model, request_dl)
print("***************************")
print(response)
print(response[0]['completion']['text'])
print("***************************")
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/nlp/language_modeling/megatron_t5_eval.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.multiprocessing as mp
from omegaconf.omegaconf import OmegaConf, open_dict
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.parts.megatron_trainer_builder import MegatronTrainerBuilder
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
mp.set_start_method("spawn", force=True)
@hydra_runner(config_path="conf", config_name="megatron_gpt_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
trainer = MegatronTrainerBuilder(cfg).create_trainer()
exp_manager(trainer, cfg.exp_manager)
model = MegatronGPTModel(cfg.model, trainer)
trainer.fit(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/megatron_gpt_pretraining.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Conversion script to convert Megatron_LM checkpoints into nemo checkpoint.
Example to run this conversion script:
python -m torch.distributed.launch --nproc_per_node=<tensor_model_parallel_size> megatron_lm_ckpt_to_nemo.py \
--checkpoint_folder <path_to_PTL_checkpoints_folder> \
--checkpoint_name megatron_gpt--val_loss=99.99-step={steps}-consumed_samples={consumed}.0 \
--nemo_file_path <path_to_output_nemo_file> \
--model_type <megatron model type> \
--hparams_file <hparams yaml file>
--tensor_model_parallel_size <tensor_model_parallel_size>
--pipeline_model_parallel_size <pipeline_model_parallel_size>
--gpus_per_node <gpus per node>
Note: hparams_file is usually generated by PyTorch Lightning when running the training job.
It is the model section of the model pre-training config with an extra cfg key.
See https://github.com/NVIDIA/NeMo/issues/4993 for an example.
To resume training from a converted Megatron-LM checkpoint, make sure to set
`trainer.max_steps=round(lr-warmup-fraction * lr-decay-iters + lr-decay-iters)`,
where `lr-warmup-fraction` and `lr-decay-iters` are arguments from the Megatron-LM training run,
so the learning rate scheduler follows the same curve.
"""
import importlib
import os
import pathlib
import sys
from argparse import ArgumentParser
from collections import OrderedDict
from typing import Any, Optional
import torch
from lightning_fabric.utilities.cloud_io import _load as pl_load
from megatron.core import parallel_state
from pytorch_lightning.core.saving import _load_state as ptl_load_state
from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml
from pytorch_lightning.trainer.trainer import Trainer
from pytorch_lightning.utilities.migration import pl_legacy_patch
from nemo.collections.nlp.models.language_modeling.megatron_bert_model import MegatronBertModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import initialize_model_parallel_for_nemo
from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
from nemo.utils import AppState, logging
from nemo.utils.distributed import initialize_distributed
from nemo.utils.model_utils import inject_model_parallel_rank, uninject_model_parallel_rank
# this enums code is copied from Megatron_LM
enum_code = '''
import enum
class ModelType(enum.Enum):
encoder_or_decoder = 1
encoder_and_decoder = 2
class LayerType(enum.Enum):
encoder = 1
decoder = 2
class AttnType(enum.Enum):
self_attn = 1
cross_attn = 2
class AttnMaskType(enum.Enum):
padding = 1
causal = 2
'''
def install_megatron_dependence():
# this is a hack to install required modules for MegatronLM checkpoints
# run the following so we don't have to install Megatron_LM code
megatron_name = 'megatron'
megatron_spec = importlib.util.spec_from_loader(megatron_name, loader=None, is_package=True)
megatron_module = importlib.util.module_from_spec(megatron_spec)
sys.modules[megatron_name] = megatron_module
model_name = 'model'
model_spec = importlib.util.spec_from_loader(model_name, loader=None, is_package=True)
model_module = importlib.util.module_from_spec(model_spec)
megatron_module.__dict__['model'] = model_module
sys.modules[megatron_name + '.' + model_name] = model_module
enums_name = 'enums'
enums_spec = importlib.util.spec_from_loader(enums_name, loader=None, is_package=True)
enums_module = importlib.util.module_from_spec(enums_spec)
model_module.__dict__['enums'] = enums_module
sys.modules[megatron_name + '.' + model_name + '.' + enums_name] = enums_module
exec(enum_code, enums_module.__dict__)
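# After install_megatron_dependence() runs, references such as megatron.model.enums.AttnMaskType
# found inside pickled Megatron-LM checkpoints resolve against the stub modules injected above,
# so the full Megatron-LM package does not need to be installed.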
def get_args():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint_folder",
type=str,
default=None,
required=True,
help="Path to Megatron-LM checkpoints saved during training. Ex: /raid/Megatron_LM/checkpoints",
)
parser.add_argument(
"--checkpoint_name",
type=str,
default='model_optim_rng.pt',
required=True,
help="Name of checkpoint to be used. Ex: model_optim_rng.pt",
)
parser.add_argument(
"--hparams_file",
type=str,
default=None,
required=False,
help="Path config for restoring. It's created during training and may need to be modified during restore if restore environment is different than training. Ex: /raid/nemo_experiments/megatron_gpt/hparams.yaml",
)
parser.add_argument("--nemo_file_path", type=str, default=None, required=False, help="Path to output .nemo file.")
parser.add_argument(
"--output_ckpt_file_path", type=str, default=None, required=False, help="Path to output .ckpt file."
)
parser.add_argument("--gpus_per_node", type=int, required=False, default=1)
parser.add_argument("--tensor_model_parallel_size", type=int, required=True, default=None)
parser.add_argument("--pipeline_model_parallel_size", type=int, required=False, default=1)
parser.add_argument("--local_rank", type=int, required=False, default=os.getenv('LOCAL_RANK', -1))
parser.add_argument("--model_type", type=str, required=True, default="gpt", choices=["gpt", "t5", "bert"])
args = parser.parse_args()
return args
def parse_weights(weight_dict: OrderedDict, parent_key: str, total: list, converted: OrderedDict, translator: dict):
for key in weight_dict:
new_key = key
name_translate = translator
for replace_key in name_translate:
if key.find(replace_key) >= 0:
new_key = key.replace(replace_key, name_translate[replace_key])
if isinstance(weight_dict[key], OrderedDict) or isinstance(weight_dict[key], dict):
parse_weights(weight_dict[key], parent_key + '.' + new_key, total, converted, translator)
else:
num_parameters = torch.prod(torch.tensor(weight_dict[key].cpu().size())).item()
total[0] += num_parameters
final_key = 'model' + parent_key + '.' + new_key
converted[final_key] = weight_dict[key]
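# Illustrative example of parse_weights (key names are hypothetical): with
# translator={'transformer': 'encoder', '.attention.': '.self_attention.'}, a nested entry reached as
# old_checkpoint['model']['language_model']['transformer']['layers.0.attention.dense.weight']
# is flattened to 'model.language_model.encoder.layers.0.self_attention.dense.weight'.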
def add_optimizer_state(lm_checkpoint, new_checkpoint, megatron_amp_o2=True):
    # this method converts the lm_checkpoint optimizer states for the NeMo checkpoint
OPTIMIZER_KEY = 'optimizer'
FP32_FP16_KEY = 'fp32_from_fp16_params'
NEW_OPTIMIZER_KEY = 'optimizer_states'
STEP_KEY = 'iteration'
NEW_STEP_KEY = 'global_step'
LR_SCHEDULER = 'lr_scheduler'
NEW_LR_SCHEDULER = 'lr_schedulers'
if OPTIMIZER_KEY in lm_checkpoint and OPTIMIZER_KEY in lm_checkpoint[OPTIMIZER_KEY]:
opt_state = lm_checkpoint[OPTIMIZER_KEY][OPTIMIZER_KEY]
if megatron_amp_o2:
opt_dict = dict()
if LR_SCHEDULER in lm_checkpoint:
sched = lm_checkpoint[LR_SCHEDULER]
for param_group in opt_state['param_groups']:
param_group['initial_lr'] = sched['max_lr']
if FP32_FP16_KEY in lm_checkpoint[OPTIMIZER_KEY]:
fp32_state = lm_checkpoint[OPTIMIZER_KEY][FP32_FP16_KEY]
opt_dict[FP32_FP16_KEY] = fp32_state
opt_dict[OPTIMIZER_KEY] = opt_state
new_checkpoint[NEW_OPTIMIZER_KEY] = [opt_dict]
else:
new_checkpoint[NEW_OPTIMIZER_KEY] = [opt_state]
if STEP_KEY in lm_checkpoint:
new_checkpoint[NEW_STEP_KEY] = lm_checkpoint[STEP_KEY]
new_checkpoint['epoch'] = 1 # always one epoch
if LR_SCHEDULER in lm_checkpoint:
gbs = lm_checkpoint['args'].global_batch_size
sched = lm_checkpoint[LR_SCHEDULER]
content = OrderedDict()
content['max_steps'] = int(sched['decay_steps']) // gbs + sched['warmup_steps'] // gbs
content['warmup_steps'] = int(sched['warmup_steps']) // gbs
content['constant_steps'] = 0 # no such conf in lm checkpoint
content['decay_steps'] = int(sched['decay_steps']) // gbs
content['min_lr'] = sched['min_lr']
if OPTIMIZER_KEY in lm_checkpoint:
content['base_lrs'] = [
i['initial_lr'] for i in new_checkpoint['optimizer_states'][0]['optimizer']['param_groups']
]
content['last_epoch'] = int(sched['num_steps']) // gbs
content['_last_lr'] = [i['lr'] for i in new_checkpoint['optimizer_states'][0]['optimizer']['param_groups']]
else:
content['base_lrs'] = [sched['max_lr']]
content['last_epoch'] = int(sched['num_steps']) // gbs
content['_step_count'] = int(sched['num_steps']) // gbs
content['verbose'] = False
content['_get_lr_called_within_step'] = False
new_checkpoint[NEW_LR_SCHEDULER] = [content]
def load_model(cls, checkpoint, strict, **kwargs):
try:
if 'cfg' in kwargs:
model = ptl_load_state(cls, checkpoint, strict=strict, **kwargs)
else:
model = ptl_load_state(
cls, checkpoint, strict=strict, cfg=checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY].cfg, **kwargs
)
# register the artifacts
cfg = checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY].cfg
if cfg.tokenizer.model is not None:
model.register_artifact("tokenizer.tokenizer_model", cfg.tokenizer.model)
if cfg.tokenizer.vocab_file is not None:
model.register_artifact("tokenizer.vocab_file", cfg.tokenizer.vocab_file)
if cfg.tokenizer.merge_file is not None:
model.register_artifact("tokenizer.merge_file", cfg.tokenizer.merge_file)
finally:
cls._set_model_restore_state(is_being_restored=False)
return model
def load_from_checkpoint(
cls,
checkpoint_path: str,
map_location: Any = None,
hparams_file: Optional[str] = None,
strict: bool = True,
**kwargs,
):
"""
    Loads a Megatron-LM checkpoint, converts it to the NeMo format, and performs some restoration housekeeping.
    For documentation, please refer to LightningModule.load_from_checkpoint().
"""
checkpoint = None
try:
cls._set_model_restore_state(is_being_restored=True)
# TODO: replace with proper PTL API
with pl_legacy_patch():
if map_location is not None:
old_checkpoint = pl_load(checkpoint_path, map_location=map_location)
else:
old_checkpoint = pl_load(checkpoint_path, map_location=lambda storage, loc: storage)
total_params = [0]
checkpoint = OrderedDict()
checkpoint['state_dict'] = OrderedDict()
parse_weights(
old_checkpoint['model'], "", total_params, checkpoint['state_dict'], translator=kwargs['translator']
)
print('converted {:.2f}M parameters'.format(total_params[0] / 1e6))
if hparams_file is not None:
extension = hparams_file.split(".")[-1]
if extension.lower() == "csv":
hparams = load_hparams_from_tags_csv(hparams_file)
elif extension.lower() in ("yml", "yaml"):
hparams = load_hparams_from_yaml(hparams_file)
else:
raise ValueError(".csv, .yml or .yaml is required for `hparams_file`")
hparams["on_gpu"] = False
# overwrite hparams by the given file
checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY] = hparams
check_point_version = old_checkpoint.get('checkpoint_version', 0)
if check_point_version < 3:
# need to do the transpose of query_key_value variables
if hparams_file is not None:
np = hparams['cfg']['num_attention_heads']
elif 'config' in old_checkpoint and 'num-attention-heads' in old_checkpoint['config']:
np = old_checkpoint['config']['num-attention-heads']
else:
logging.warning("cannot determine the number attention heads")
raise ValueError('need to know number of attention heads')
if check_point_version == 0:
# 3, np, hn -> np, 3, hn
for key in checkpoint['state_dict']:
if key.find('query_key_value') >= 0:
weight = checkpoint['state_dict'][key]
if len(weight.size()) == 2:
# weight
weight = weight.view(3, np, -1, weight.size()[-1])
weight = weight.transpose(0, 1).contiguous()
checkpoint['state_dict'][key] = weight.view(-1, weight.size()[-1])
else:
                            # bias
weight = weight.view(3, np, -1)
weight = weight.transpose(0, 1).contiguous()
checkpoint['state_dict'][key] = weight.view(-1)
elif check_point_version == 1:
# np, hn, 3 -> np, 3, hn
for key in checkpoint['state_dict']:
if key.find('query_key_value') >= 0:
weight = checkpoint['state_dict'][key]
if len(weight.size()) == 2:
# weight
weight = weight.view(np, -1, 3, weight.size()[-1])
weight = weight.transpose(1, 2).contiguous()
checkpoint['state_dict'][key] = weight
else:
                            # bias
weight = weight.view(np, -1, 3)
weight = weight.transpose(1, 2).contiguous()
checkpoint['state_dict'][key] = weight
        # for older checkpoints we need to add the new key
if cls.CHECKPOINT_HYPER_PARAMS_KEY not in checkpoint:
checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY] = {}
# override the hparams with values that were passed in
# TODO: can we do this without overriding?
config_kwargs = kwargs.copy()
if 'trainer' in config_kwargs:
config_kwargs.pop('trainer')
checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY].update(config_kwargs)
add_optimizer_state(old_checkpoint, checkpoint)
consumed = None
if 'args' in old_checkpoint and hasattr(old_checkpoint['args'], 'consumed_train_samples'):
consumed = getattr(old_checkpoint['args'], 'consumed_train_samples')
steps = None
if 'iteration' in old_checkpoint:
steps = old_checkpoint['iteration']
finally:
cls._set_model_restore_state(is_being_restored=False)
logging.warning(f"the checkpoint version is {check_point_version}")
return checkpoint, consumed, steps, check_point_version
def megatron_lm_inject_model_parallel_rank(filepath):
"""
Injects tensor/pipeline model parallel ranks into the filepath.
Does nothing if not using model parallelism.
"""
# first make sure filepath does not have rank
filepath = uninject_model_parallel_rank(filepath)
app_state = AppState()
if app_state.model_parallel_size is not None and app_state.model_parallel_size > 1:
# filepath needs to be updated to include mp_rank
dirname = os.path.dirname(filepath)
basename = os.path.basename(filepath)
if app_state.pipeline_model_parallel_size is None or app_state.pipeline_model_parallel_size == 1:
filepath = f'{dirname}/mp_rank_{app_state.tensor_model_parallel_rank:02d}/{basename}'
else:
filepath = f'{dirname}/mp_rank_{app_state.tensor_model_parallel_rank:02d}_{app_state.pipeline_model_parallel_rank:03d}/{basename}'
return filepath
else:
return filepath
def convert(local_rank, rank, world_size, args):
app_state = AppState()
initialize_model_parallel_for_nemo(
world_size=world_size,
global_rank=rank,
local_rank=local_rank,
tensor_model_parallel_size=args.tensor_model_parallel_size,
pipeline_model_parallel_size=args.pipeline_model_parallel_size,
virtual_pipeline_model_parallel_size=None,
pipeline_model_parallel_split_rank=0,
micro_batch_size=None,
global_batch_size=None,
seed=1234,
apex_transformer_log_level=30,
)
    # hard-set the data parallel rank to 0; otherwise it defaults to None
app_state.data_parallel_rank = 0
# tensor_model_parallel_size = args.tensor_model_parallel_size
num_nodes = world_size // args.gpus_per_node
assert world_size % args.gpus_per_node == 0, "world_size must be divisible by gpus_per_node"
trainer = Trainer(devices=args.gpus_per_node, accelerator='gpu', num_nodes=num_nodes)
checkpoint_path = megatron_lm_inject_model_parallel_rank(
os.path.join(args.checkpoint_folder, args.checkpoint_name)
)
logging.info(f"loading checkpoint {checkpoint_path}")
if args.model_type == 'gpt':
# this dictionary is used to rename the model parameters
name_translate = {}
name_translate['transformer'] = 'encoder'
name_translate['.attention.'] = '.self_attention.'
# nemo megatron doesn't have _for_head key
name_translate['word_embeddings_for_head'] = 'word_embeddings'
checkpoint, consumed, steps, version = load_from_checkpoint(
MegatronGPTModel,
checkpoint_path,
hparams_file=args.hparams_file,
trainer=trainer,
translator=name_translate,
strict=False,
)
elif args.model_type == 'bert':
# this dictionary is used to rename the model parameters
name_translate = {}
name_translate['transformer'] = 'encoder'
name_translate['.attention.'] = '.self_attention.'
# nemo megatron doesn't have _for_head key
name_translate['word_embeddings_for_head'] = 'word_embeddings'
checkpoint, consumed, steps, version = load_from_checkpoint(
MegatronBertModel,
checkpoint_path,
hparams_file=args.hparams_file,
trainer=trainer,
translator=name_translate,
strict=False,
)
else:
        raise NotImplementedError("{} is not supported".format(args.model_type))
if torch.distributed.is_initialized():
torch.distributed.barrier()
if args.output_ckpt_file_path:
filepath = args.output_ckpt_file_path
base_dir = pathlib.Path(filepath).parent
filename_str = pathlib.Path(filepath).name
suffix = '.ckpt'
content = {}
if consumed is not None:
content['consumed'] = consumed
else:
content['consumed'] = 0
if steps is not None:
content['steps'] = steps
else:
content['steps'] = 0
filename = filename_str.format(**content) + suffix
checkpoint_path_output = inject_model_parallel_rank(os.path.join(base_dir, filename))
trainer.training_type_plugin.checkpoint_io.save_checkpoint(checkpoint, checkpoint_path_output)
logging.info(f'NeMo model checkpoint files saved to: {args.output_ckpt_file_path}')
if args.nemo_file_path:
if args.model_type == 'gpt':
model = load_model(MegatronGPTModel, checkpoint, strict=False, trainer=trainer)
elif args.model_type == 'bert':
model = load_model(MegatronBertModel, checkpoint, strict=False, trainer=trainer)
else:
            raise NotImplementedError("{} is not supported".format(args.model_type))
        # sanity check: the world size should be fully consumed by model parallelism, i.e. no data parallelism
assert app_state.data_parallel_size == 1
model._save_restore_connector = NLPSaveRestoreConnector()
model.save_to(args.nemo_file_path)
logging.info(f'NeMo model saved to: {args.nemo_file_path}')
if __name__ == '__main__':
install_megatron_dependence()
args = get_args()
if args.local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
rank = args.local_rank
local_rank = rank
world_size = 1
else:
local_rank, rank, world_size = initialize_distributed(args)
# make sure the world size is divisible by tensor model parallel_size
assert world_size % args.tensor_model_parallel_size == 0
torch.distributed.barrier()
convert(local_rank, rank, world_size, args)
torch.distributed.barrier()
| NeMo-main | examples/nlp/language_modeling/megatron_lm_ckpt_to_nemo.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import threading
from functools import partial
import torch
from omegaconf import OmegaConf, open_dict
from pytorch_lightning.trainer.trainer import Trainer
from torch.utils.data import DataLoader, Dataset
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.modules.common.text_generation_server import MegatronServer
from nemo.collections.nlp.modules.common.text_generation_utils import generate
from nemo.collections.nlp.modules.common.transformer.text_generation import LengthParam, SamplingParam
from nemo.collections.nlp.parts.nlp_overrides import CustomProgressBar, NLPDDPStrategy, NLPSaveRestoreConnector
from nemo.core.config import hydra_runner
from nemo.utils.app_state import AppState
from nemo.utils.model_utils import inject_model_parallel_rank
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
"""
This is the script to run GPT text generation.
Usage:
Assume the model has TP=1, PP=1 in the following use cases.
a. run greedy inference from a nemo file:
python megatron_gpt_eval.py \
gpt_model_file=PATH_TO_MODEL \
inference.greedy=True \
inference.add_BOS=True \
trainer.devices=1 \
trainer.num_nodes=1 \
tensor_model_parallel_size=-1 \
pipeline_model_parallel_size=-1 \
prompts=[prompt1,prompt2]
b. run greedy inference from a PTL checkpoint file:
python megatron_gpt_eval.py \
checkpoint_dir=PATH_TO_CHECKPOINT_FILE \
checkpoint_name=CHECKPOINT_FILE_NAME \
hparams_file=HPARAMS_FILE \
inference.greedy=True \
inference.add_BOS=True \
trainer.devices=1 \
trainer.num_nodes=1 \
tensor_model_parallel_size=-1 \
pipeline_model_parallel_size=-1 \
prompts=[prompt1,prompt2]
c. run top_p inference from a nemo file:
python megatron_gpt_eval.py \
gpt_model_file=PATH_TO_MODEL \
inference.greedy=False \
inference.top_k=0 \
inference.top_p=0.9 \
inference.repetition_penalty=1.2 \
inference.add_BOS=True \
trainer.devices=1 \
trainer.num_nodes=1 \
tensor_model_parallel_size=-1 \
pipeline_model_parallel_size=-1 \
prompts=[prompt1,prompt2]
d. If you don't need to generate tokens and need model to compute logprobs:
python megatron_gpt_eval.py \
gpt_model_file=PATH_TO_MODEL \
inference.compute_logprob=True \
trainer.devices=1 \
trainer.num_nodes=1 \
tensor_model_parallel_size=-1 \
pipeline_model_parallel_size=-1 \
prompts=[text to get logprob]
e. Launch the inference server
python megatron_gpt_eval.py \
gpt_model_file=PATH_TO_MODEL \
trainer.devices=1 \
trainer.num_nodes=1 \
tensor_model_parallel_size=-1 \
pipeline_model_parallel_size=-1 \
server=True
To send a request to the server, here is one example code:
```python
import json
import requests
batch_size = 8
port_num = 5555
headers = {"Content-Type": "application/json"}
def request_data(data):
resp = requests.put('http://localhost:{}/generate'.format(port_num),
data=json.dumps(data),
headers=headers)
sentences = resp.json()['sentences']
return sentences
data = {
"sentences": [""] * batch_size,
"tokens_to_generate": 300,
"temperature": 1.0,
"add_BOS": True,
"top_k": 0,
"top_p": 0.9,
"greedy": False,
"all_probs": False,
"repetition_penalty": 1.2,
"min_tokens_to_generate": 2,
}
sentences = request_data(data)
```
"""
if not torch.cuda.is_available():
raise EnvironmentError("GPU is needed for the inference")
class RequestDataSet(Dataset):
def __init__(self, sentences):
super().__init__()
self.sentences = sentences
def __len__(self,):
return len(self.sentences)
def __getitem__(self, idx):
return self.sentences[idx]
def remove_padded_prompts(response, nb_paddings):
result = {}
for k, v in response.items():
        if v is not None and isinstance(v, (list, torch.Tensor)):
v = v[:-nb_paddings]
result[k] = v
return result
@hydra_runner(config_path="conf", config_name="megatron_gpt_inference")
def main(cfg) -> None:
# trainer required for restoring model parallel models
trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer, callbacks=[CustomProgressBar()])
if cfg.gpt_model_file is not None:
if (
cfg.tensor_model_parallel_size < 0
or cfg.pipeline_model_parallel_size < 0
or cfg.get('pipeline_model_parallel_split_rank', -1) < 0
):
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.gpt_model_file):
save_restore_connector.model_extracted_dir = cfg.gpt_model_file
model_config = MegatronGPTModel.restore_from(
restore_path=cfg.gpt_model_file,
trainer=trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
# with dist checkpointing we don't need to set this
if not model_config.get('mcore_gpt', False):
with open_dict(cfg):
cfg.tensor_model_parallel_size = model_config.get('tensor_model_parallel_size', 1)
cfg.pipeline_model_parallel_size = model_config.get('pipeline_model_parallel_size', 1)
cfg.pipeline_model_parallel_split_rank = model_config.get('pipeline_model_parallel_split_rank', 0)
assert (
cfg.trainer.devices * cfg.trainer.num_nodes
== cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
), "devices * num_nodes should equal tensor_model_parallel_size * pipeline_model_parallel_size"
if cfg.gpt_model_file:
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.gpt_model_file):
save_restore_connector.model_extracted_dir = cfg.gpt_model_file
pretrained_cfg = MegatronGPTModel.restore_from(
restore_path=cfg.gpt_model_file,
trainer=trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
OmegaConf.set_struct(pretrained_cfg, True)
with open_dict(pretrained_cfg):
pretrained_cfg.sequence_parallel = False
pretrained_cfg.activations_checkpoint_granularity = None
pretrained_cfg.activations_checkpoint_method = None
pretrained_cfg.precision = trainer.precision
if pretrained_cfg.get('mcore_gpt', False):
# with dist checkpointing we can use the model parallel config specified by the user
pretrained_cfg.tensor_model_parallel_size = cfg.tensor_model_parallel_size
pretrained_cfg.pipeline_model_parallel_size = cfg.pipeline_model_parallel_size
if trainer.precision == "16":
pretrained_cfg.megatron_amp_O2 = False
elif trainer.precision in ['bf16', 'bf16-mixed'] and cfg.get('megatron_amp_O2', False):
pretrained_cfg.megatron_amp_O2 = True
model = MegatronGPTModel.restore_from(
restore_path=cfg.gpt_model_file,
trainer=trainer,
override_config_path=pretrained_cfg,
save_restore_connector=save_restore_connector,
map_location=f'cuda:{trainer.local_rank}', # map_location is needed for converted models
)
elif cfg.checkpoint_dir:
app_state = AppState()
if cfg.tensor_model_parallel_size > 1 or cfg.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
app_state.tensor_model_parallel_size = cfg.tensor_model_parallel_size
app_state.pipeline_model_parallel_size = cfg.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=cfg.tensor_model_parallel_size,
pipeline_model_parallel_size_=cfg.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=cfg.pipeline_model_parallel_split_rank,
)
checkpoint_path = os.path.join(cfg.checkpoint_dir, cfg.checkpoint_name)
# checkpoint_path is a dir in case of distributed checkpointing
if not os.path.isdir(checkpoint_path):
# legacy checkpoint needs model parallel rank injection
checkpoint_path = inject_model_parallel_rank(os.path.join(cfg.checkpoint_dir, cfg.checkpoint_name))
model = MegatronGPTModel.load_from_checkpoint(checkpoint_path, hparams_file=cfg.hparams_file, trainer=trainer)
else:
raise ValueError("need at least a nemo file or checkpoint dir")
model.freeze()
# Have to turn off activations_checkpoint_method for inference
try:
model.model.language_model.encoder.activations_checkpoint_method = None
except AttributeError:
pass
length_params: LengthParam = {
"max_length": cfg.inference.tokens_to_generate,
"min_length": cfg.inference.min_tokens_to_generate,
}
sampling_params: SamplingParam = {
"use_greedy": cfg.inference.greedy,
"temperature": cfg.inference.temperature,
"top_k": cfg.inference.top_k,
"top_p": cfg.inference.top_p,
"repetition_penalty": cfg.inference.repetition_penalty,
"add_BOS": cfg.inference.add_BOS,
"all_probs": cfg.inference.all_probs,
"compute_logprob": cfg.inference.compute_logprob,
"end_strings": cfg.inference.end_strings,
}
fp8_enabled = hasattr(model.cfg, "fp8") and (model.cfg.fp8 == True)
if fp8_enabled:
nb_paddings = 0
while len(cfg.prompts) % 8 != 0:
cfg.prompts.append("")
nb_paddings += 1
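        # Illustrative example: 5 prompts are padded with 3 empty strings so the FP8 batch size is a
        # multiple of 8; remove_padded_prompts() strips the padded entries from the responses below.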
# First method of running text generation, call model.generate method
response = model.generate(
inputs=OmegaConf.to_container(cfg.prompts), length_params=length_params, sampling_params=sampling_params
)
if fp8_enabled:
response = remove_padded_prompts(response, nb_paddings)
print("***************************")
print(response)
print("***************************")
# Second method of running text generation, call trainer.predict [recommended]
bs = 8 if fp8_enabled else 2
ds = RequestDataSet(OmegaConf.to_container(cfg.prompts))
request_dl = DataLoader(dataset=ds, batch_size=bs)
config = OmegaConf.to_container(cfg.inference)
model.set_inference_config(config)
response = trainer.predict(model, request_dl)
if fp8_enabled:
response[-1] = remove_padded_prompts(response[-1], nb_paddings)
print("***************************")
print(response)
print("***************************")
# Third method of running text generation, use inference server
if cfg.server:
from nemo.collections.nlp.modules.common.megatron_web_server import get_chatbot_demo, get_demo
if parallel_state.is_pipeline_first_stage() and parallel_state.get_tensor_model_parallel_rank() == 0:
if cfg.web_server:
if cfg.chat:
defaults = {
'user': cfg.chatbot_config.user,
'assistant': cfg.chatbot_config.assistant,
'system': cfg.chatbot_config.system,
}
web_ui = partial(
get_chatbot_demo,
defaults=defaults,
value=cfg.chatbot_config.value,
attributes=cfg.chatbot_config.attributes,
)
else:
web_ui = get_demo
loop = asyncio.new_event_loop()
thread = threading.Thread(
target=web_ui,
daemon=True,
args=(cfg.share, cfg.username, cfg.password, cfg.port, cfg.web_port, loop),
)
thread.start()
server = MegatronServer(model.cuda())
server.run("0.0.0.0", port=cfg.port)
while True:
choice = torch.cuda.LongTensor(1)
torch.distributed.broadcast(choice, 0)
if choice[0].item() == 0:
generate(model.cuda())
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/nlp/language_modeling/megatron_gpt_eval.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_bart_model import MegatronBARTModel
from nemo.collections.nlp.models.language_modeling.megatron_bert_model import MegatronBertModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.models.language_modeling.megatron_retrieval_model import MegatronRetrievalModel
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.models.machine_translation.megatron_nmt_model import MegatronNMTModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy, NLPSaveRestoreConnector
from nemo.core import ModelPT
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.app_state import AppState
from nemo.utils.model_utils import inject_model_parallel_rank
def get_model_class(cfg):
if cfg.model_type == 'gpt':
return MegatronGPTModel
elif cfg.model_type == 'bert':
return MegatronBertModel
elif cfg.model_type == 't5':
return MegatronT5Model
elif cfg.model_type == 'bart':
return MegatronBARTModel
elif cfg.model_type == 'nmt':
return MegatronNMTModel
elif cfg.model_type == 'retro':
return MegatronRetrievalModel
else:
raise ValueError("Invalid Model Type")
@hydra_runner(config_path="conf", config_name="megatron_gpt_export")
def nemo_export(cfg):
"""Convert a nemo model into .onnx ONNX format."""
nemo_in = None
if cfg.gpt_model_file:
nemo_in = cfg.gpt_model_file
elif cfg.checkpoint_dir:
nemo_in = os.path.join(cfg.checkpoint_dir, cfg.checkpoint_name)
assert nemo_in is not None, "NeMo model not provided. Please provide the path to the .nemo or .ckpt file"
onnx_out = cfg.onnx_model_file
trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer)
assert (
cfg.trainer.devices * cfg.trainer.num_nodes
== cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
), "devices * num_nodes should equal tensor_model_parallel_size * pipeline_model_parallel_size"
logging.info("Restoring NeMo model from '{}'".format(nemo_in))
try:
if cfg.gpt_model_file:
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.gpt_model_file):
save_restore_connector.model_extracted_dir = cfg.gpt_model_file
pretrained_cfg = ModelPT.restore_from(
restore_path=cfg.gpt_model_file,
trainer=trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
OmegaConf.set_struct(pretrained_cfg, True)
with open_dict(pretrained_cfg):
pretrained_cfg.sequence_parallel = False
pretrained_cfg.activations_checkpoint_granularity = None
pretrained_cfg.activations_checkpoint_method = None
pretrained_cfg.precision = trainer.precision
if trainer.precision == "16":
pretrained_cfg.megatron_amp_O2 = False
model = ModelPT.restore_from(
restore_path=cfg.gpt_model_file,
trainer=trainer,
override_config_path=pretrained_cfg,
save_restore_connector=save_restore_connector,
)
elif cfg.checkpoint_dir:
app_state = AppState()
if cfg.tensor_model_parallel_size > 1 or cfg.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
app_state.tensor_model_parallel_size = cfg.tensor_model_parallel_size
app_state.pipeline_model_parallel_size = cfg.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=cfg.tensor_model_parallel_size,
pipeline_model_parallel_size_=cfg.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=cfg.pipeline_model_parallel_split_rank,
)
checkpoint_path = inject_model_parallel_rank(os.path.join(cfg.checkpoint_dir, cfg.checkpoint_name))
model_cls = get_model_class(cfg)
model = model_cls.load_from_checkpoint(checkpoint_path, hparams_file=cfg.hparams_file, trainer=trainer)
else:
raise ValueError("Need at least a .nemo file or a checkpoint directory.")
except Exception as e:
logging.error(
"Failed to restore model from NeMo file : {}. Please make sure you have the latest NeMo package installed with [all] dependencies.".format(
nemo_in
)
)
raise e
logging.info("Model {} restored from '{}'".format(model.__class__.__name__, nemo_in))
# Export
check_trace = cfg.export_options.runtime_check
try:
model.to(device=cfg.export_options.device).freeze()
model.eval()
model.export(
onnx_out,
onnx_opset_version=cfg.export_options.onnx_opset,
do_constant_folding=cfg.export_options.do_constant_folding,
dynamic_axes={
'input_ids': {0: "sequence", 1: "batch"},
'position_ids': {0: "sequence", 1: "batch"},
'logits': {0: "sequence", 1: "batch"},
},
check_trace=check_trace,
check_tolerance=cfg.export_options.check_tolerance,
verbose=cfg.export_options.verbose,
)
except Exception as e:
logging.error(
"Export failed. Please make sure your NeMo model class ({}) has working export() and that you have the latest NeMo package installed with [all] dependencies.".format(
model.__class__
)
)
raise e
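# Illustrative sketch only (not part of the original script): a minimal onnxruntime sanity
# check for the exported graph, assuming onnxruntime is installed. The input names and the
# [sequence, batch] layout are assumptions taken from the dynamic_axes mapping above; the
# exported graph may declare additional inputs (e.g. an attention mask), so only inputs the
# session actually declares are fed. The function name and defaults are hypothetical.
def _example_check_exported_onnx(onnx_path, seq_len=8, batch_size=1, vocab_size=50257):
    import numpy as np
    import onnxruntime as ort

    sess = ort.InferenceSession(onnx_path)
    candidate_feed = {
        'input_ids': np.random.randint(0, vocab_size, size=(seq_len, batch_size), dtype=np.int64),
        'position_ids': np.tile(np.arange(seq_len, dtype=np.int64)[:, None], (1, batch_size)),
    }
    declared = {i.name for i in sess.get_inputs()}
    feed = {k: v for k, v in candidate_feed.items() if k in declared}
    outputs = sess.run(None, feed)
    logging.info("ONNX sanity check produced {} output tensor(s)".format(len(outputs)))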
if __name__ == '__main__':
nemo_export()
| NeMo-main | examples/nlp/language_modeling/megatron_export.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.multiprocessing as mp
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import (
MegatronGPTPromptLearningModel,
)
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
mp.set_start_method("spawn", force=True)
"""
This is an example of how to ptune/prompt-tune a pretrained GPT model.
Be sure to use a .nemo gpt model with this code. If you've downloaded
a model from NGC or are otherwise using a MegatronLM model, please use
either megatron_ckpt_to_nemo.py or megatron_lm_ckpt_to_nemo.py found
within this examples directory to convert your model to .nemo format.
"""
@hydra_runner(config_path="conf", config_name="megatron_gpt_prompt_learning_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
plugins = []
strategy = NLPDDPStrategy(no_ddp_communication_hook=True, find_unused_parameters=False,)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
enabled=False
if cfg.model.pipeline_model_parallel_size > 1
else True, # turn off the grad scale for pipeline parallel LM model
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[CustomProgressBar()])
exp_manager(trainer, cfg.exp_manager)
# load existing or init new soft prompt GPT model
if cfg.model.get("restore_path", None):
model = MegatronGPTPromptLearningModel.restore_from(
cfg.model.restore_path, cfg.model, trainer=trainer, save_restore_connector=NLPSaveRestoreConnector()
)
else:
model = MegatronGPTPromptLearningModel(cfg.model, trainer=trainer)
trainer.fit(model)
if __name__ == '__main__':
dep_msg = "* Please switch to using examples/nlp/language_modeling/tuning/megatron_gpt_peft_tuning.py *"
dep = "Deprecation Notice!!".center(len(dep_msg) - 2, " ")
banner = "*" * len(dep_msg)
spacer = " " * (len(dep_msg) - 2)
logging.warning(f"\n\n{banner}\n*{spacer}*\n*{dep}*\n{dep_msg}\n*{spacer}*\n{banner}\n\n")
main()
logging.warning(f"\n\n{banner}\n*{spacer}*\n*{dep}*\n{dep_msg}\n*{spacer}*\n{banner}\n\n")
| NeMo-main | examples/nlp/language_modeling/megatron_gpt_prompt_learning.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from pytorch_lightning.plugins.precision import MixedPrecisionPlugin
from nemo.collections.nlp.models.language_modeling.megatron_retrieval_model import MegatronRetrievalModel
from nemo.collections.nlp.modules.common.megatron.mup.shape import make_base_shapes
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
@hydra_runner(config_path="conf", config_name="megatron_retro_mutransfer")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True if megatron_amp_o2 else False,
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(MixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[CustomProgressBar()])
# hydra interpolation does not work here as the interpolation key is lost when PTL saves hparams
with open_dict(cfg):
cfg.base_model.precision = cfg.trainer.precision
cfg.delta_model.precision = cfg.trainer.precision
base_model = MegatronRetrievalModel(cfg.base_model, trainer)
delta_model = MegatronRetrievalModel(cfg.delta_model, trainer)
make_base_shapes(base_model, delta_model, savefile=cfg.model.shape_file)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/megatron_retro_cal_shape.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tarfile
import tempfile
from argparse import ArgumentParser
from typing import Dict, List
import torch
import torch.nn as nn
from omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from nemo.collections.nlp.parts.nlp_overrides import (
NEMO_MEGATRON_MODEL_PARALLEL_APPSTATE_OVERRIDE,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.utils import logging, model_utils
from nemo.utils.app_state import AppState
"""
Usage:
### Tensor Parallelism and Pipeline Parallelism conversion ###
# Megatron GPT
python megatron_change_num_partitions.py \
--model_file=PATH_TO_SRC_FILE \
--target_file=PATH_TO_TGT_FILE \
--tensor_model_parallel_size=-1 \
--target_tensor_model_parallel_size=1 \
--pipeline_model_parallel_size=-1 \
--target_pipeline_model_parallel_size=1 \
--precision=bf16
# Megatron T5
python megatron_change_num_partitions.py \
--model_file=PATH_TO_SRC_FILE \
--target_file=PATH_TO_TGT_FILE \
--model_class="nemo.collections.nlp.models.language_modeling.megatron_t5_model.MegatronT5Model" \
--tensor_model_parallel_size=-1 \
--target_tensor_model_parallel_size=1 \
--pipeline_model_parallel_size=-1 \
--target_pipeline_model_parallel_size=1 \
--target_pipeline_model_parallel_split_rank=0 \
--precision=bf16
# Megatron GPT + Virtual Pipeline parallelism
python megatron_change_num_partitions.py \
--model_extracted_dir="<Directory of Pytorch Lightning ckpt folders>" \
--target_file="<Name of the target NeMo file>" \
--ckpt_name="<Name of a single Pytorch Lightning ckpt file inside the extracted dir>" \
--tensor_model_parallel_size=<TP SIZE> \
--target_tensor_model_parallel_size=<TARGET TP SIZE> \
--pipeline_model_parallel_size=<PP SIZE> \
--target_pipeline_model_parallel_size=<TARGET PP SIZE> \
--virtual_pipeline_model_parallel_size=<VP SIZE> \
--hparams_file="<Path to HPARAMS.yaml file>" \
--precision=bf16
### Only Tensor Parallelism conversion ###
To the above commands, add the following argument: `--tp_conversion_only`
# Note: This requires that the pipeline_model_parallel_size and tgt_pipeline_model_parallel_size are set to 1.
### Large Models conversion ###
When converting large models, ** always ** pre-extract the .nemo model first and only then perform the conversion (a Python equivalent, `_example_pre_extract_nemo`, is sketched below)
$ mkdir "unpacked_nemo_file"
$ tar -xvf "<path to nemo file>" -C "<absolute path to pwd>/unpacked_nemo_file/"
python megatron_change_num_partitions.py \
...
--model_extracted_dir="<Absolute path to pwd>/unpacked_nemo_file/"
### Model Classes ###
# NOTE: Conversion of other model types.
# Default model type is MegatronGPTModel; if you want another model, you need to pass the classpath of the model
# For example - MegatronT5Model -
python megatron_change_num_partitions.py \
...
--model_class="nemo.collections.nlp.models.language_modeling.megatron_t5_model.MegatronT5Model"
# Additional arguments:
--num_gpu_per_node: Number of GPUs per node. Default is 8.
--megatron_legacy: Whether the model is a legacy Megatron model or not. Default is False. May be unsupported for
Pipeline Parallelism change.
--tokenizer_model_path: Path to tokenizer model. Default is None. When not None, overrides the tokenizer model path
in the model config.
--tokenizer_vocab_file: Path to tokenizer vocab file. Default is None. When not None, overrides the tokenizer vocab
file in the model config.
# Comments
Passing --tensor_model_parallel_size=-1 or --pipeline_model_parallel_size=-1 will automatically infer the size from the
model config.
"""
def set_virtual_parallel_rank_safely(rank: int):
AppState().virtual_pipeline_model_parallel_rank = rank
try:
from megatron.core import parallel_state
parallel_state.set_virtual_pipeline_model_parallel_rank(rank)
if rank is None:
parallel_state.set_virtual_pipeline_model_parallel_world_size(None)
except (ImportError, ModuleNotFoundError):
logging.warning("`megatron-core` not installed, cannot set virtual parallel rank!")
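# Illustrative sketch only (not part of the original script): a Python equivalent of the
# "Large Models conversion" shell snippet in the usage docstring above. `nemo_path` and
# `out_dir` are hypothetical arguments; the resulting directory can then be passed to this
# script via --model_extracted_dir.
def _example_pre_extract_nemo(nemo_path: str, out_dir: str = "unpacked_nemo_file") -> str:
    # .nemo files are tar archives; mode "r" opens them with transparent compression handling.
    os.makedirs(out_dir, exist_ok=True)
    with tarfile.open(nemo_path, "r") as tar:
        tar.extractall(path=out_dir)
    return os.path.abspath(out_dir)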
#################
### Utilities ###
#################
def force_cpu_model(cfg):
with open_dict(cfg):
# temporarily set to cpu
original_cpu_init = cfg.get('use_cpu_initialization', False)
if 'megatron_amp_O2' in cfg:
key = 'megatron_amp_O2'
original_amp_o2 = cfg.megatron_amp_O2
elif 'megatron_amp_02' in cfg:
key = 'megatron_amp_02'
original_amp_o2 = cfg.megatron_amp_02
else:
key, original_amp_o2 = None, None
# Set new values
cfg.use_cpu_initialization = True
if key is not None:
cfg[key] = False
# Setup restore dict
restore_dict = {'use_cpu_initialization': original_cpu_init} # 'megatron_amp_O2': original_amp_o2
if key is not None:
restore_dict[key] = original_amp_o2
return cfg, restore_dict
def restore_model_config(cfg, original_dict):
with open_dict(cfg):
for key, val in original_dict.items():
logging.info(f"Restoring model config key ({key}) from {cfg[key]} to original value of {val}")
cfg[key] = val
return cfg
#################
### Utilities ###
#################
def compute_tp_splits(
param_name, param, partitions, global_idx, tp_size, pp_size, pp_rank, pp_split_rank, megatron_legacy, model_cfg
):
"""
Function to compute the splits required for tensor-parallelism.
Args:
param_name: Name of the current parameter of the current model (TP X PP Y)
param: Value of the current parameter of the current model (TP X PP Y)
partitions: Partitions of the flattened parameter of the current model (TP 1 PP 1)
global_idx: The index used to select the parameter in the global partition.
tp_size: Int, tensor-parallelism size.
pp_size: Int, pipeline-parallelism size.
pp_rank: Int, pipeline-parallelism rank.
pp_split_rank: Int, pipeline-parallelism split rank. This should be > 1 if TP is being used with EncDec models (T5)
megatron_legacy: Bool, whether the model is a legacy Megatron model or not.
model_cfg: The model config as a OmegaConf DictConfig.
Returns:
List of torch tensors, each of which is a split of the current parameter.
"""
# alias the global index to idx
idx = global_idx
fast_glu_activation = str(model_cfg.get('activation', '')).lower() in ['fast-geglu', 'fast-swiglu', 'fast-reglu']
if param.shape == partitions[0][idx].shape:
split = [partitions[0][idx].data] * tp_size
logging.debug(">> Perfect match, no splitting needed")
elif param.shape[0] == partitions[0][idx].shape[0]:
split = torch.split(partitions[0][idx].data, param.shape[-1], dim=-1)
else:
# For T5-converted weights, the splitting needs to be strided such that q,k,v weights are bunched together on each tensor-parallel rank.
if 'query_key_value.weight' in param_name and megatron_legacy:
split_dim = partitions[0][idx].data.shape[0]
if split_dim % (tp_size * 3) != 0:
raise ValueError(
f"Can not split Q,K,V parameter {param_name} with shape {param.shape} into tensor parallel size {tp_size}. Not divisible by {tp_size * 3}."
)
tp_qkv_splits = torch.chunk(partitions[0][idx].data, tp_size * 3, dim=0)
split = []
for i in range(tp_size):
tp_qkv = torch.cat([tp_qkv_splits[item] for item in range(i, tp_size * 3, tp_size)])
split.append(tp_qkv)
elif 'key_value.weight' in param_name and megatron_legacy:
split_dim = partitions[0][idx].data.shape[0]
if split_dim % (tp_size * 2) != 0:
raise ValueError(
f"Can not split K,V parameter {param_name} with shape {param.shape} into tensor parallel size {tp_size}. Not divisible by {tp_size * 2}."
)
tp_qkv_splits = torch.chunk(partitions[0][idx].data, tp_size * 2, dim=0)
split = []
for i in range(tp_size):
tp_qkv = torch.cat([tp_qkv_splits[item] for item in range(i, tp_size * 2, tp_size)])
split.append(tp_qkv)
elif ('dense_h_to_4h' in param_name or 'linear_fc1' in param_name) and fast_glu_activation:
# For Megatron GPT model with Fast Glu activation
# Handle gated linear units
# concat all the first halves ('W's) and all the second halves ('V's)
w_split, k_split = torch.chunk(partitions[0][idx].data, 2, dim=0)
w_split = torch.chunk(w_split, tp_size, dim=0)
k_split = torch.chunk(k_split, tp_size, dim=0)
split = [torch.cat(weights, dim=0) for weights in zip(w_split, k_split)] # split per tp rank
# Regular split for Megatron and NeMo-Megatron models.
else:
split = torch.split(partitions[0][idx].data, param.shape[0], dim=0)
return split
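# Illustrative sketch only (not part of the original script): a toy demonstration of the two
# default split paths used by compute_tp_splits above. When the leading dim of the merged
# weight matches the per-rank parameter, the split happens along the last dim; otherwise it
# happens along dim 0. All shapes and the tp_size default are made-up values for illustration.
def _example_default_tp_split(tp_size: int = 2):
    merged = torch.randn(16, 8)
    # Case 1: per-rank param keeps the same dim 0 -> split the last dim across TP ranks.
    row_splits = torch.split(merged, 8 // tp_size, dim=-1)   # two (16, 4) shards
    # Case 2: per-rank param has a smaller dim 0 -> split dim 0 across TP ranks.
    col_splits = torch.split(merged, 16 // tp_size, dim=0)   # two (8, 8) shards
    return [s.shape for s in row_splits], [s.shape for s in col_splits]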
def compute_tp_merge(idx, name, param, partitions_pp, model_cfg):
"""
Function to compute the partition merge required for tensor-parallelism.
Args:
idx: The index used to select the parameter in the current pipeline partition.
name: Name of the current parameter being merged.
param: The parameter to be merged under TP 1 PP 1.
partitions_pp: List of all TP partitions of the flattened parameter of the current model for a given PP rank
(TP X PP Y). Indexed as partitions_pp[tp_rank][idx].
model_cfg: The model config as an OmegaConf DictConfig.
Returns:
The concatenated parameter for TP 1 PP 1.
"""
fast_glu_activation = str(model_cfg.get('activation', '')).lower() in ['fast-geglu', 'fast-swiglu', 'fast-reglu']
# Logic from original TP rank change
if param.shape == partitions_pp[0][idx].shape:
concated = partitions_pp[0][idx].data
elif param.shape[0] == partitions_pp[0][idx].shape[0]:
concated = torch.cat([partitions_pp[i][idx].data for i in range(len(partitions_pp))], dim=-1)
else:
concated = torch.cat([partitions_pp[i][idx].data for i in range(len(partitions_pp))], dim=0)
# Logic for Fast Glu activation
if 'dense_h_to_4h' in name and fast_glu_activation:
# concat all the first halves ('W's) and all the second halves ('V's)
wk_splits = []
for tpr in range(len(partitions_pp)):
wk_splits.append(torch.chunk(partitions_pp[tpr][idx].data, 2, dim=0))
w_split = torch.cat([w[0] for w in wk_splits], dim=0)
k_split = torch.cat([w[1] for w in wk_splits], dim=0)
concated = torch.cat([w_split, k_split], dim=0)
# Trim padding
if concated.shape != param.shape:
logging.info(
f"Warning: Shape mismatch for parameter {name} required shape: {param.shape}, merged shape: {concated.shape}. Narrowing to match required size."
)
if concated.shape[1:] == param.shape[1:]:
concated = torch.narrow(concated, 0, 0, param.shape[0])
elif concated.shape[:-1] == param.shape[:-1]:
concated = torch.narrow(concated, -1, 0, param.shape[-1])
else:
raise RuntimeError(
f"Can not handle parameter {name}, required shape: {param.shape}, merged shape: {concated.shape}."
)
return concated
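# Illustrative sketch only (not part of the original script): the inverse of the toy split
# above, mirroring the torch.cat logic in compute_tp_merge. Shards that were taken along the
# last dim are concatenated back along dim -1, and dim-0 shards along dim 0. Shapes are made up.
def _example_default_tp_merge():
    last_dim_shards = [torch.randn(16, 4) for _ in range(2)]
    dim0_shards = [torch.randn(8, 8) for _ in range(2)]
    merged_last = torch.cat(last_dim_shards, dim=-1)   # back to (16, 8)
    merged_dim0 = torch.cat(dim0_shards, dim=0)        # back to (16, 8)
    return merged_last.shape, merged_dim0.shape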
def write_tp_pp_split(model, splits, app_state, tp_size, pp_rank, write_path):
"""
Function to write the given TP PP split to NeMo File.
Save each of the TP ranks in reverse order
This is done so that the last PP rank will save the last TP rank only after all other PP TP ranks are saved
The final rank will then save a new NeMo file with all other ranks inside.
Args:
model: The model corresponding to the current TP PP split. Contains partial parameters.
splits: Nested List of tensors containing the TP splits of the current model given current PP rank.
Indexed as splits[idx][tp_rank].
app_state: AppState object.
tp_size: The global tensor-parallel size of the final model.
pp_rank: The local pipeline parallel rank of the final model.
write_path: The path to save the NeMo file.
"""
for tp_rank in range(tp_size - 1, -1, -1):
app_state.pipeline_model_parallel_rank = pp_rank
app_state.tensor_model_parallel_rank = tp_rank
idx = 0
for name, param in model.named_parameters():
split_val = splits[idx][tp_rank].clone()
if param.shape != split_val.shape:
logging.info(
f"Warning: Shape mismatch for parameter {name} required shape: {param.shape}, split shape: {split_val.shape}. Padding to match required size."
)
if split_val.shape[1:] == param.shape[1:]:
pad = [0, 0] * len(split_val.shape)
pad[-1] = param.shape[0] - split_val.shape[0]
split_val = torch.nn.functional.pad(split_val, pad, 'constant')
elif split_val.shape[:-1] == param.shape[:-1]:
pad = [0, param.shape[-1] - split_val.shape[-1]]
split_val = torch.nn.functional.pad(split_val, pad, 'constant')
else:
raise RuntimeError(
f"Can not handle parameter {name}, required shape: {param.shape}, split shape: {split_val.shape}."
)
param.data = split_val
idx += 1
if write_path is not None:
logging.info(f"Writing pp rank {pp_rank} tp rank {tp_rank} to file {write_path}")
model.save_to(write_path)
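# Illustrative sketch only (not part of the original script): how the pad list built inside
# write_tp_pp_split maps onto torch.nn.functional.pad. Pad entries are ordered from the last
# dim backwards, so with a full-length pad list, pad[-1] grows the trailing edge of dim 0.
# The shapes below are made up for illustration.
def _example_pad_split_to_param_shape():
    split_val = torch.randn(6, 4)               # e.g. a split that came up short on dim 0
    target_rows = 8                             # rows expected by the destination parameter
    pad = [0, 0] * len(split_val.shape)
    pad[-1] = target_rows - split_val.shape[0]  # append rows at the end of dim 0
    padded = torch.nn.functional.pad(split_val, pad, 'constant')
    return padded.shape                         # torch.Size([8, 4])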
def debug_log_split_param_diff(idx, param, param_name, partitions):
# Log some useful comparison of tensors that are being mapped.
# Note that the global param index for layers and modules may be different but the shapes
# and semantics of the layer should match.
logging.debug(f"Index: {idx} Model Params : {param_name} - {param.shape}")
logging.debug(f"Index: {idx} Global params: {partitions[1][idx]} - {partitions[0][idx].shape}")
################
### Handlers ###
################
class GPTHandler:
def __init__(self, megatron_legacy: bool):
self.duplicate_gpt_word_embedding_offset = 0
self.untied_gpt_embedding = False
self.megatron_legacy = megatron_legacy
def compute_split_index(self, model, idx, tp_rank, pp_rank, pp_split_rank, tp_size, pp_size):
if pp_rank == (pp_size - 1) and hasattr(model, 'model') and hasattr(model.model, 'word_embeddings'):
# duplicate embedding copy (tied weights)
self.duplicate_gpt_word_embedding_offset = 1
if model.cfg.get('share_embeddings_and_output_weights', True) is False:
self.untied_gpt_embedding = True
if self.duplicate_gpt_word_embedding_offset > 0:
logging.info(f"GPT duplicate_gpt_word_embedding_offset: {self.duplicate_gpt_word_embedding_offset}")
return idx + self.duplicate_gpt_word_embedding_offset
def compute_splits(self, model, partitions, idx, tp_rank, pp_rank, pp_split_rank, tp_size, pp_size):
splits = []
# This is the PP X TP Y model with partial parameters present in correct order.
# We need to extract the parameters from the global map in reverse order to fill in the
# parameters of this model in forward order.
for param_name, param in model.named_parameters():
# Since we are moving forward, we may reach the end of the global map
# but GPT has an additional word embedding as its last parameter
# Therefore we check for this, and reset the index to the parameter of the PP 0 TP 0 rank
# which holds the parameters of the embedding.
if idx == (len(partitions[0])) and self.duplicate_gpt_word_embedding_offset > 0:
logging.info("Found duplicate embedding copy for GPT model, resetting index")
idx = 0 # reset idx parameter to 0 if we have duplicate embedding copy
debug_log_split_param_diff(idx, param, param_name, partitions)
# Tensor Parallel Splitting
split = compute_tp_splits(
param_name,
param,
partitions,
idx,
tp_size,
pp_size,
pp_rank,
pp_split_rank,
self.megatron_legacy,
model.cfg,
)
splits.append(split)
idx += 1
return idx, splits
def compute_split_offset(self, offset_diff, tp_rank, pp_rank, pp_split_rank, tp_size, pp_size):
# GPT offset correction
if not self.untied_gpt_embedding and pp_size > 1 and pp_rank == (pp_size - 1) and pp_split_rank == 0:
offset_diff += 1
return offset_diff
class T5Handler:
def __init__(self, megatron_legacy: bool):
self.shared_enc_dec_embeddings = False
self.shared_enc_dec_embeddings_intermediate = False
self.enc_dec_share_token_embeddings_count = 0
self.intermediate_shared_embedding_location = -1
self.megatron_legacy = megatron_legacy
def compute_split_index(self, model, idx, tp_rank, pp_rank, pp_split_rank, tp_size, pp_size):
final_idx = idx
# Special case for T5 models - where the embeddings are shared between encoder and decoder
# and the rank of decoder split is arbitrary.
# Megatron T5 checks pipeline_model_parallel_split_rank in order to inject encoder embeddings
self.shared_enc_dec_embeddings = (
pp_split_rank > 0 and pp_split_rank == pp_rank and model.cfg.get('share_token_embeddings', True)
)
# If embedding sharing is active, both vocab and position embeddings are shared
if self.shared_enc_dec_embeddings:
self.enc_dec_share_token_embeddings_count = 2
else:
self.enc_dec_share_token_embeddings_count = 0
# Start to calculate new idx
final_idx = final_idx + self.enc_dec_share_token_embeddings_count
# Special case for T5 models - where the embeddings are shared between encoder and decoder
# For all decoder ranks which are not the pp_split_rank, we need to inject the vocab embeddings only at
# an intermediate location of the model (usually second last location).
# Megatron T5 checks pipeline_model_parallel_split_rank in order to inject encoder embeddings
# when the pipeline_model_parallel_split_rank is not the last PP rank
self.shared_enc_dec_embeddings_intermediate = (
pp_split_rank > 0
and pp_split_rank < pp_size
and hasattr(model, 'enc_dec_model')
and hasattr(model.enc_dec_model, 'word_embeddings')
)
if self.shared_enc_dec_embeddings_intermediate:
# Loop until we get the location of this tensor
self.intermediate_shared_embedding_location = -1
for param_name, param in model.named_parameters(): # special case for T5
if param_name == 'enc_dec_model.word_embeddings.weight':
self.intermediate_shared_embedding_location += 1
break
self.intermediate_shared_embedding_location += 1
else:
self.intermediate_shared_embedding_location = -1
# Re-evaluate the intermediate shared embedding flag
self.shared_enc_dec_embeddings_intermediate = self.shared_enc_dec_embeddings_intermediate and (
self.intermediate_shared_embedding_location >= 0
)
# If module is present, add a module offset to the index
if self.shared_enc_dec_embeddings_intermediate:
final_idx += 1
if self.enc_dec_share_token_embeddings_count:
logging.info(f"EncDec share_token_embeddings_count: {self.enc_dec_share_token_embeddings_count}")
if self.shared_enc_dec_embeddings_intermediate:
logging.info(
f"EncDec share_enc_dec_embeddings_intermediate: {self.intermediate_shared_embedding_location}"
)
return final_idx
def compute_splits(self, model, partitions, idx, tp_rank, pp_rank, pp_split_rank, tp_size, pp_size):
splits = []
# Backup index when EncDec models reset the index to fill in the first embedding matrices (when pp split rank == pp rank)
computed_index = idx
# This is the PP X TP Y model with partial parameters present in correct order.
# We need to extract the parameters from the global map in reverse order to fill in the
# parameters of this model in forward order.
for param_name, param in model.named_parameters():
# Since we are moving forward, we may reach the end of the global map
# but T5 has additional embeddings as its first two parameters when pp split rank == pp rank
# Therefore we check for this, and update the index to the parameter of the PP 0 TP 0 rank
# which holds the parameters of the embedding.
if self.enc_dec_share_token_embeddings_count:
logging.info("EncDec models decoder shares embedding with encoder, resetting index")
idx = (
2 - self.enc_dec_share_token_embeddings_count
) # 0th index is vocab embedding, 1 is pos embedding, 2 is embedding count
# Since we are moving forward, we may reach the end of the global map
# but T5 has an additional word embedding located at an arbitrary position in the decoder
# when pp rank > pp_split_rank.
# Therefore we check for this, and skip the parameter of the current TP X PP Y module
# and fill this parameter later.
if self.shared_enc_dec_embeddings_intermediate and param_name == 'enc_dec_model.word_embeddings.weight':
logging.info(
"EncDec models decoder shares embedding with encoder in intermediate pos, skipping module for later update"
)
continue
debug_log_split_param_diff(idx, param, param_name, partitions)
# Tensor Parallel Splitting
split = compute_tp_splits(
param_name,
param,
partitions,
idx,
tp_size,
pp_size,
pp_rank,
pp_split_rank,
self.megatron_legacy,
model.cfg,
)
splits.append(split)
idx += 1
# When pp split rank is equal to current pp rank, we need to first inject the encoder embeddings
# and then reset the index to the originally computed index
if self.enc_dec_share_token_embeddings_count > 0:
if self.enc_dec_share_token_embeddings_count - 1 == 0:
idx = computed_index
self.enc_dec_share_token_embeddings_count -= 1
# Inject the EncDec shared embeddings intermediate tensor
# at one random location in the decoder of this TP PP rank.
# Note that it is usually the second-to-last tensor, but to avoid hardcoding an index we search for it
# again.
if self.shared_enc_dec_embeddings_intermediate:
for param_name, param in model.named_parameters():
if param_name == 'enc_dec_model.word_embeddings.weight':
logging.info("Found intermediate shared embedding, injecting")
split = compute_tp_splits(
param_name,
param,
partitions,
global_idx=0,
tp_size=tp_size,
pp_size=pp_size,
pp_rank=pp_rank,
pp_split_rank=pp_split_rank,
megatron_legacy=self.megatron_legacy,
model_cfg=model.cfg,
)
splits.insert(self.intermediate_shared_embedding_location, split)
break
return idx, splits
def compute_split_offset(self, offset_diff, tp_rank, pp_rank, pp_split_rank, tp_size, pp_size):
# T5 offset correction for shared embedding when pp split rank == pp rank
if self.shared_enc_dec_embeddings:
offset_diff += 2
# T5 offset correction for intermediate shared embedding when pp rank > pp split rank
if self.shared_enc_dec_embeddings_intermediate:
offset_diff += 1
return offset_diff
##################
### Converters ###
##################
def merge_partition(model, partitions: Dict[int, List[List[torch.Tensor]]], write_path: str = None):
# Extract the pp_rank and number of modules per tp rank in each pp rank
pp_ranks = list(partitions.keys())
pp_lens = []
for pp_rank in pp_ranks:
partition_pp = partitions[pp_rank]
max_len = max([len(x) for x in partition_pp]) # Perform max as we need to iterate through all modules
pp_lens.append(max_len)
total_params_merged = len([p for p in model.parameters()])
pp_total_len = sum(pp_lens)
logging.info(f"Total layers in Merged Model: {total_params_merged}")
og_pp_split_rank = 0
if pp_total_len > total_params_merged:
og_pp_split_rank = model.cfg.get('pipeline_model_parallel_split_rank', 0)
idx = 0
pp_rank = 0
global_idx = 0
# During merge - model is TP 1 PP 1 model with all parameters present in correct order.
# Merge the parameters of the various PP X TP Y models into the TP 1 PP 1 model.
for name, param in model.named_parameters():
# Since the PP ranks each contain the list of all their TP rank parameters
# We need to detect if we need to move to the next PP rank when we run out of tensors in current PP rank
# Reset the index so that it indexes the new pp rank tensor list correctly
if idx >= pp_lens[pp_rank]:
pp_rank += 1
idx = 0
# For EncDec models, after the encoder-decoder PP split occurs,
# the vocab and positional embeddings are duplicated across the PP ranks at the
# beginning of the decoder rank. We can skip them during the merge step.
if pp_total_len > total_params_merged:
if og_pp_split_rank > 0 and og_pp_split_rank == pp_rank:
logging.info(
f"Skipping duplicate vocab and positional embeddings for EncDec model "
f"at the pp split rank: {og_pp_split_rank}"
)
idx += 2
# For EncDec models, after the pp split occurs, final pp rank of the decoder
# has an intermediate embedding tensor at the penultimate position, skip that.
if og_pp_split_rank > 0 and global_idx == total_params_merged - 1:
logging.info(
f"Skipping intermediate embedding tensor for EncDec model at the final pp split "
f"rank: {og_pp_split_rank}",
)
idx = pp_lens[pp_rank] - 1
# Extract all TP ranks out of current PP rank
partitions_pp = partitions[pp_rank]
logging.debug(
f"Global idx: {global_idx} Index: {idx} Model Param: {name} "
f"Partition Params: {[p[idx].shape for p in partitions_pp]}"
)
# Original TP rank change logic
concated = compute_tp_merge(idx, name, param, partitions_pp, model.cfg)
# Update the model parameter with the merged tensor
param.data = concated
idx += 1
global_idx += 1
# Save the file iff the original file was PP 1 TP 1
if write_path is not None:
model.save_to(write_path)
def split_partition(
model,
partitions,
pp_size: int,
tp_size: int,
pp_rank: int,
offset: int,
pp_split_rank: int = 0,
write_path: str = None,
megatron_legacy: bool = False,
):
if len(partitions) != 2:
raise ValueError(
"Can only split partitions of model with TP=1. For partitions of models with TP>1, merge first."
)
if tp_size < 1:
raise ValueError("TP size must be >= 1.")
if pp_size < 1:
raise ValueError("PP size must be >= 1.")
# Setup app state to mimic current PP and TP ranks with single merged module
app_state = AppState()
app_state.data_parallel_rank = 0
app_state.pipeline_model_parallel_size = pp_size
app_state.tensor_model_parallel_size = tp_size
app_state.model_parallel_size = app_state.pipeline_model_parallel_size * app_state.tensor_model_parallel_size
# Go in reverse for TP order, as PP 0 TP 0 will merge all preceding files
app_state.pipeline_model_parallel_rank = pp_rank
app_state.tensor_model_parallel_rank = tp_size - 1
# Compute reverse offset of parameter index from global map
num_params = sum([1 for _ in model.parameters()]) # Count number of parameters iteratively
idx = offset - num_params + 1 # start index of current PP TP rank in global map
assert (
idx + num_params - 1 == offset
), f"idx = {idx}, num_params = {num_params}, sum = {idx + num_params}, offset = {offset}"
# Special case for GPT models - whose last PP TP rank has a duplicate embedding tensor
if 'gpt' in model.cfg.target.lower():
logging.info("Splitting GPT model")
handler = GPTHandler(megatron_legacy=megatron_legacy)
elif 't5' in model.cfg.target.lower():
logging.info("Splitting T5 model")
handler = T5Handler(megatron_legacy=megatron_legacy)
else:
raise ValueError(f"Unsupported model for Pipeline Parallelism change - {model.cfg.target}")
idx = handler.compute_split_index(model, idx, 0, pp_rank, pp_split_rank, tp_size, pp_size)
# Print some debug info
logging.info(f"Start Layer Idx: {idx} Number of layers in current rank: {num_params} Offset: {offset}")
logging.info("\n")
# Split the model's parameters according to TP PP ranks
idx, splits = handler.compute_splits(model, partitions, idx, 0, pp_rank, pp_split_rank, tp_size, pp_size)
# Compute the new offset for the next PP rank in reverse order
# Add 1 to offset to account for last PP rank's duplicated Embedding
offset_diff = offset - num_params
offset_diff = handler.compute_split_offset(offset_diff, 0, pp_rank, pp_split_rank, tp_size, pp_size)
# Finalize the new offset
new_offset = offset_diff
# Save each of the TP ranks in reverse order
# This is done so that the last PP rank will save the last TP rank only after all other PP TP ranks are saved
# The final rank will then save a new NeMo file with all other ranks inside.
write_tp_pp_split(model, splits, app_state, tp_size, pp_rank, write_path)
return new_offset
def split_tp_partition_only(model, partitions, tp_size, write_path=None, megatron_legacy=False):
if len(partitions) != 2:
raise ValueError(
"Can only split partitions of model with TP=1. For partitions of models with TP>1, merge first."
)
if tp_size < 1:
raise ValueError("TP size must be >= 1.")
app_state = AppState()
app_state.data_parallel_rank = 0
app_state.pipeline_model_parallel_size = 1
app_state.tensor_model_parallel_size = tp_size
app_state.model_parallel_size = app_state.pipeline_model_parallel_size * app_state.tensor_model_parallel_size
app_state.pipeline_model_parallel_rank = 0
app_state.tensor_model_parallel_rank = tp_size - 1
idx = 0
splits = []
for param_name, param in model.named_parameters():
split = compute_tp_splits(
param_name,
param,
partitions,
idx,
tp_size,
pp_size=1,
pp_rank=0,
pp_split_rank=0,
megatron_legacy=megatron_legacy,
model_cfg=model.cfg,
)
splits.append(split)
idx += 1
# Save each of the TP ranks in reverse order
# This is done so that the last PP rank will save the last TP rank only after all other PP TP ranks are saved
# The final rank will then save a new NeMo file with all other ranks inside.
write_tp_pp_split(model, splits, app_state, tp_size, pp_rank=0, write_path=write_path)
def main():
parser = ArgumentParser()
parser.add_argument("--model_file", type=str, default=None, required=False, help="Path to source .nemo file")
parser.add_argument("--target_file", type=str, required=True, help="Path to write target .nemo file")
parser.add_argument(
"--tensor_model_parallel_size", type=int, default=-1, required=False, help="TP size of source model"
)
parser.add_argument("--target_tensor_model_parallel_size", type=int, required=True, help="TP size of target model")
parser.add_argument(
'--pipeline_model_parallel_size', type=int, default=-1, required=False, help='PP size of source model'
)
parser.add_argument(
'--target_pipeline_model_parallel_size', type=int, required=True, help='PP size of target model'
)
parser.add_argument(
'--target_pipeline_model_parallel_split_rank', type=int, default=0, help='PP rank to split for Enc-Dec models'
)
parser.add_argument(
'--virtual_pipeline_model_parallel_size', type=int, default=None, help='Virtual Pipeline parallelism size'
)
parser.add_argument(
'--ckpt_name', type=str, default=None, help='Checkpoint name to load from for Virtual Parallel'
)
parser.add_argument(
"--model_class",
type=str,
default="nemo.collections.nlp.models.language_modeling.megatron_gpt_model.MegatronGPTModel",
help="NeMo model class. This script should support all NeMo megatron models that use Tensor Parallel",
)
parser.add_argument("--precision", default=16, help="PyTorch Lightning Trainer precision flag")
parser.add_argument('--num_gpu_per_node', default=8, type=int, help='Number of GPUs per node')
parser.add_argument(
"--megatron_legacy",
action="store_true",
help="Converter for legacy megatron models that have different q,k,v weight splits",
)
parser.add_argument(
"--tokenizer_model_path",
type=str,
required=False,
default=None,
help="Path to the tokenizer model file if your model stores a tokenizer model as an artifact. This is needed if your model uses a sentencepiece tokenizer.",
)
parser.add_argument(
"--tokenizer_vocab_file",
type=str,
required=False,
default=None,
help="Path to the tokenizer vocab file if your model stores a vocab file as an artifact. When provided, it overrides the tokenizer vocab file in the model config.",
)
parser.add_argument('--hparams_file', type=str, default=None, help='Path to hparams file from PTL training')
parser.add_argument('--tp_conversion_only', action='store_true', help='Only convert TP model to TP model')
parser.add_argument('--model_extracted_dir', type=str, default=None, help='Path to pre-extracted model directory')
args = parser.parse_args()
precision = args.precision
num_gpu_per_node = int(args.num_gpu_per_node)
if args.precision in ["32", "16"]:
precision = int(float(args.precision))
if precision in ["bf16", "bf16-mixed"]:
if torch.cuda.is_available() and torch.cuda.is_bf16_supported():
pass
else:
logging.warning("BF16 is not supported on this device. Using FP16 instead.")
precision = precision[2:]
if precision == 32:
dtype = torch.float32
elif precision in [16, "16", "16-mixed"]:
dtype = torch.float16
elif precision in ["bf16", "bf16-mixed"]:
dtype = torch.bfloat16
else:
dtype = torch.float32 # fallback
# Build the target directory if it does not exist
target_dir = os.path.split(args.target_file)[0]
if not os.path.exists(target_dir):
os.makedirs(target_dir, exist_ok=True)
tp_size = args.tensor_model_parallel_size
tgt_tp_size = args.target_tensor_model_parallel_size
pp_size = args.pipeline_model_parallel_size
tgt_pp_size = args.target_pipeline_model_parallel_size
pipeline_model_parallel_split_rank = args.target_pipeline_model_parallel_split_rank
vp_size = args.virtual_pipeline_model_parallel_size
if vp_size is None:
vp_size = 1
convert_vp = vp_size > 1
if convert_vp:
from megatron.core import parallel_state
parallel_state.set_virtual_pipeline_model_parallel_world_size(vp_size)
hparams_filepath = args.hparams_file
if hparams_filepath is None:
logging.warning(
'\n\n\n!!!!!!!!!\n'
'You are converting a model with virtual pipeline parallelism enabled, \n'
'but have not passed `hparams_file` argument. \n'
'This will cause each ckpt file to be temporarily loaded onto GPU memory!\n\n'
'It is highly recommended to pass `hparams_file` argument to avoid this.\n'
)
else:
hparams_filepath = None
# Import the class of the model
cls = model_utils.import_class_by_path(args.model_class)
if args.model_file is None and args.model_extracted_dir is None:
raise ValueError("Cannot pass model_file and model_extracted_dir as None at the same time.")
tmp_cfg = cls.restore_from(
restore_path=args.model_file,
trainer=Trainer(devices=1, strategy=NLPDDPStrategy(), accelerator="cpu", precision=precision),
map_location=torch.device("cpu"),
return_config=True,
)
plugins = []
if precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=tmp_cfg.get('native_amp_init_scale', 2 ** 32),
growth_interval=tmp_cfg.get('native_amp_growth_interval', 1000),
hysteresis=tmp_cfg.get('hysteresis', 2),
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if tmp_cfg.get('megatron_amp_O2', False):
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
trainer = Trainer(plugins=plugins, devices=1, strategy=NLPDDPStrategy(), accelerator="cpu", precision=precision)
if tp_size < 0 or pp_size < 0:
logging.info(f"Loading model config from {args.model_file} to get TP and PP size")
model_config_internal = cls.restore_from(
restore_path=args.model_file, trainer=trainer, map_location=torch.device("cpu"), return_config=True,
)
tp_size = model_config_internal.get('tensor_model_parallel_size', 1)
pp_size = model_config_internal.get('pipeline_model_parallel_size', 1)
# Check if TP conversion only
tp_conversion_only = args.tp_conversion_only
if tp_conversion_only:
logging.info("Converting TP model to TP model only")
if pp_size > 1:
raise ValueError("Provided `--tp_conversion_only` but `--pipeline_model_parallel_size` > 1")
if tgt_pp_size > 1:
raise ValueError("Provided `--tp_conversion_only` but `--target_pipeline_model_parallel_size` > 1")
if pipeline_model_parallel_split_rank > 0:
raise ValueError("Provided `--tp_conversion_only` but `--target_pipeline_model_parallel_split_rank` > 0")
# Force PP size to 1
pp_size = 1
tgt_pp_size = 1
pipeline_model_parallel_split_rank = 0
if vp_size is None or vp_size < 0:
vp_size = 1
app_state = AppState()
app_state.data_parallel_rank = 0
app_state.pipeline_model_parallel_size = pp_size
app_state.tensor_model_parallel_size = tp_size
if vp_size > 1:
app_state.virtual_pipeline_model_parallel_size = vp_size
app_state.model_parallel_size = app_state.pipeline_model_parallel_size * app_state.tensor_model_parallel_size
world_size = pp_size * tp_size # pseudo world size for simulating load of a specific rank on a single gpu
app_state.tensor_model_parallel_rank = 0
app_state.pipeline_model_parallel_rank = 0
if vp_size > 1:
set_virtual_parallel_rank_safely(0)
# Extract tokenizer artifact from the model to temp directory
logging.info("Extracting tokenizer artifact from NeMo file...")
temp_dir = tempfile.mkdtemp()
tokenizer_model_path = None
with tarfile.open(args.model_file, "r") as tar:
for member in tar.getmembers():
if '.model' in member.name:
extracted_file = tar.extractfile(member)
extracted_file_path = os.path.join(temp_dir, member.name)
if tokenizer_model_path is None:
logging.info(f"Found tokenizer. Extracting {member.name} to {extracted_file_path}")
tokenizer_model_path = extracted_file_path
with open(extracted_file_path, "wb") as f:
f.write(extracted_file.read())
else:
if args.tokenizer_model_path is None:
logging.warning(
f"\n\nFound multiple tokenizer artifacts in the model file.\n"
f"Using only {tokenizer_model_path}.\n"
f"If this is incorrect, manually pass the correct tokenizer using "
f"`--tokenizer_model_path`.\n\n"
)
# If input model has TP > 1 or PP > 1
# Reconstruct the model to have TP = 1 and PP = 1
# Note that this is a forward loop that will process PP [0..N] TP [0..M] in sequential order.
if tp_size > 1 or pp_size > 1:
partitions = {} # 3d list of VP x PP x TP
model = None
# Build partitions structure
for vp_idx in range(vp_size):
partitions[vp_idx] = [] # Build first layer - VP
for pp_idx in range(pp_size):
# For each VP, build PP x TP holder
partitions[vp_idx].append({})
partitions[vp_idx][pp_idx] = []
for vp_rank in range(vp_size):
if vp_size > 1:
set_virtual_parallel_rank_safely(vp_rank)
for pp_rank in range(pp_size):
app_state.pipeline_model_parallel_rank = pp_rank
for tp_rank in range(tp_size):
app_state.tensor_model_parallel_rank = tp_rank
logging.info(f"Loading ------------ PP Rank: {pp_rank} TP Rank: {tp_rank}")
# Override flag that forces Model to use AppState instead of Trainer
# to determine the world size, global and local rank
# Used for simulating load of a specific rank on a single gpu
os.environ[NEMO_MEGATRON_MODEL_PARALLEL_APPSTATE_OVERRIDE] = "true"
# Compute the global rank to load the correct subset of parameters
global_rank = pp_rank * tp_size + tp_rank
# Update AppState
app_state.world_size = world_size
app_state.global_rank = global_rank
app_state.local_rank = global_rank % num_gpu_per_node
app_state.pipeline_model_parallel_size = pp_size
app_state.tensor_model_parallel_size = tp_size
app_state.pipeline_model_parallel_split_rank = pipeline_model_parallel_split_rank
app_state.model_parallel_size = (
app_state.pipeline_model_parallel_size * app_state.tensor_model_parallel_size
)
if vp_size > 1:
set_virtual_parallel_rank_safely(vp_rank)
if vp_rank == 0:
save_restore_connector = NLPSaveRestoreConnector()
if args.model_extracted_dir is not None:
logging.info(f"Using extracted model directory: {args.model_extracted_dir}")
save_restore_connector.model_extracted_dir = args.model_extracted_dir
if args.model_file is not None:
model_filepath = args.model_file
else:
model_filepath = args.model_extracted_dir
if vp_size == 1:
# Get model config
tmp_cfg = cls.restore_from(
restore_path=model_filepath,
trainer=trainer,
map_location=torch.device("cpu"),
save_restore_connector=save_restore_connector,
return_config=True,
)
# Force model onto CPU
tmp_cfg, restore_dict = force_cpu_model(tmp_cfg)
# Restore model
model = cls.restore_from(
restore_path=model_filepath,
trainer=trainer,
map_location=torch.device("cpu"),
save_restore_connector=save_restore_connector,
override_config_path=tmp_cfg,
)
model.freeze()
# Restore model config
restore_model_config(model.cfg, restore_dict)
else:
if args.ckpt_name is None:
raise ValueError(
"For Virtual Parallel, ckpt name is required.\n"
"Please provide `--ckpt_name` argument."
)
# inject model parallel rank
checkpoint_path = model_utils.inject_model_parallel_rank(
os.path.join(model_filepath, args.ckpt_name)
)
vp_state_dict = torch.load(checkpoint_path, map_location="cpu")
if hparams_filepath is not None:
# Force the model onto CPU
tmp_cfg = OmegaConf.load(hparams_filepath)
tmp_cfg, restore_dict = force_cpu_model(tmp_cfg)
with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8', suffix='.yml') as tmp:
OmegaConf.save(tmp_cfg, tmp, resolve=True)
tmp.seek(0)
model = cls.load_from_checkpoint(
checkpoint_path=checkpoint_path,
trainer=trainer,
map_location=torch.device("cpu"),
hparams_file=tmp.name,
)
model.freeze()
restore_model_config(model.cfg, restore_dict)
else:
model = cls.load_from_checkpoint(
checkpoint_path=checkpoint_path, trainer=trainer, map_location=torch.device("cpu"),
)
model.freeze()
model.to(dtype=dtype)
# Reset env flag
os.environ.pop(NEMO_MEGATRON_MODEL_PARALLEL_APPSTATE_OVERRIDE, None)
logging.info(
f"<<<<<<<< LOADED MODEL PP={pp_rank + 1} TP={tp_rank + 1} | "
f"GLOBAL RANK = {global_rank} >>>>>>>>>"
)
# Save the parameters
if vp_size == 1:
params = [p for p in model.parameters()]
partitions[vp_rank][pp_rank].append(params) # vp_rank = 0
else:
vp_params_tmp = []
for vp_idx in range(vp_size):
set_virtual_parallel_rank_safely(vp_idx)
vp_params = vp_state_dict[f'model{vp_idx}']
model.model[vp_idx].module.load_state_dict(vp_params, strict=True)
model.model[vp_idx].module.to('cpu')
params = [p for p in model.model[vp_idx].module.parameters()]
vp_params_tmp.append(params)
# partitions[pp_rank][vp_idx].append(params)
for vp_idx in range(vp_size):
partitions[vp_idx][pp_rank].append(vp_params_tmp[vp_idx])
del vp_params_tmp
set_virtual_parallel_rank_safely(0)
# app_state is being updated incorrectly during restore
app_state.data_parallel_rank = 0
app_state.pipeline_model_parallel_rank = pp_rank
app_state.tensor_model_parallel_rank = tp_rank
app_state.pipeline_model_parallel_size = pp_size
app_state.tensor_model_parallel_size = tp_size
app_state.model_parallel_size = (
app_state.pipeline_model_parallel_size * app_state.tensor_model_parallel_size
)
if vp_size > 1:
app_state.virtual_pipeline_model_parallel_size = vp_size
set_virtual_parallel_rank_safely(vp_rank)
# Build a unified model with PP 1 TP 1
with open_dict(model.cfg):
model.cfg.tensor_model_parallel_size = 1
model.cfg.pipeline_model_parallel_size = 1
model.cfg.virtual_pipeline_model_parallel_size = None
app_state.global_rank = 0
app_state.local_rank = 0
app_state.data_parallel_rank = 0
app_state.pipeline_model_parallel_rank = 0
app_state.tensor_model_parallel_rank = 0
app_state.pipeline_model_parallel_size = 1
app_state.tensor_model_parallel_size = 1
app_state.model_parallel_size = 1
if vp_size > 1:
set_virtual_parallel_rank_safely(None)
trainer = Trainer(
plugins=plugins, devices=1, strategy=NLPDDPStrategy(), accelerator="cpu", precision=precision
)
with open_dict(model.cfg):
if args.tokenizer_model_path is not None:
model.cfg.tokenizer.model = args.tokenizer_model_path
if args.tokenizer_vocab_file is not None:
model.cfg.tokenizer.vocab_file = args.tokenizer_vocab_file
model.cfg, restore_dict = force_cpu_model(model.cfg)
# Remove Virtual Parallelism
model.cfg.virtual_pipeline_model_parallel_size = None
logging.info(f"<<<<<<<< Building TP 1 PP 1 base model >>>>>>>>>")
model = cls(model.cfg, trainer) # type: nn.Module
model.freeze()
model = model.to('cpu')
model._save_restore_connector = NLPSaveRestoreConnector()
restore_model_config(model.cfg, restore_dict)
vp_param_count = 0
for vp in range(vp_size):
for pp in range(pp_size):
for tp in range(tp_size):
vp_param_count += len(partitions[vp][pp][tp])
if vp_size > 1:
logging.debug(f"Total params in TP PP VP = 1 : {len(list(model.parameters()))}")
logging.debug(f"Total params in VP PP TP (og): {vp_param_count}")
# Flatten Virtual Pipeline
if vp_size == 1:
# unpack vp container, pack pp tp container
partitions = partitions[0]
partitions = {idx: val for idx, val in enumerate(partitions)}
else:
flat_partitions = {idx: [] for idx in range(pp_size)}
"""
Under VP convention
Notation :
Stage = PP rank
Number = GPT model / layer index
Ignore TP - every PP has all TP corresponding to that PP
chunk_index = the physical index of any [] in the list. E.g. idx = 2 in the map below corresponds to [2: PP 0 VP 1]
For a PP 2 VP 4 model with 8 GPT layers:
Indices
# Stage 0: [0:PP 0 VP 0] [2:PP 0 VP 1] [4:PP 0 VP 2] [6:PP 0 VP 3]
# Stage 1: [1:PP 1 VP 0] [3:PP 1 VP 1] [5:PP 1 VP 2] [7:PP 1 VP 3]
after conversion will become
# Stage 0: [0,1,2,3:PP 0]
# Stage 1: [4,5,6,7:PP 1]
"""
pp_index = 0
chunk_counter = 0
tp_cache = [[] for _ in range(tp_size)]
for vp in range(vp_size):
for pp in range(pp_size):
# Gather all TP under this VP PP combination.
# We will accumulate TP parameters from multiple layers in this cache.
for tp in range(tp_size):
tp_cache[tp].extend(partitions[vp][pp][tp])
# This counter indexes the global selection of a VP PP combination in the above map
chunk_counter += 1
# Log the mapping from old VP x PP to new PP index
logging.info(f"VP Conversion - vp: {vp} pp: {pp} -> pp_idx: {pp_index}")
# Every vp_size chunks, we can fill a new PP index in the flat_partitions
if chunk_counter % vp_size == 0:
flat_partitions[pp_index].extend(tp_cache)
tp_cache = [[] for _ in range(tp_size)]
pp_index += 1
logging.debug(
f"VP merge step: \n"
f"vp: {vp} pp: {pp} pp_idx: {pp_index - 1} "
f"len(flat_partitions): {len(flat_partitions[pp_index - 1])}"
)
logging.debug(f"PP Size len(flat partitions) : {len(flat_partitions)}")
logging.debug(f"TP Size len(flat partitions[0]): {len(flat_partitions[0])}")
logging.debug(f"Layers len(flat partitions[0][0]) : {len(flat_partitions[0][0])}")
partitions = flat_partitions
del tp_cache
if tgt_tp_size > 1 or tgt_pp_size > 1:
merge_partition(model, partitions)
else:
# Write out the PP 1 TP 1 model to disk
merge_partition(model, partitions, args.target_file)
# Empty cache memory of all parameters from all PP TP partitions
partitions.clear()
else:
# If input model has TP = 1 and PP = 1
app_state.model_parallel_size = 1
save_restore_connector = NLPSaveRestoreConnector()
if args.model_extracted_dir is not None:
logging.info(f"Using extracted model directory: {args.model_extracted_dir}")
save_restore_connector.model_extracted_dir = args.model_extracted_dir
if args.model_file is not None:
model_filepath = args.model_file
else:
model_filepath = args.model_extracted_dir
tmp_cfg = cls.restore_from(
restore_path=model_filepath,
trainer=trainer,
map_location=torch.device("cpu"),
save_restore_connector=save_restore_connector,
return_config=True,
)
tmp_cfg, restore_dict = force_cpu_model(tmp_cfg)
model = cls.restore_from(
restore_path=model_filepath,
trainer=trainer,
map_location=torch.device("cpu"),
save_restore_connector=save_restore_connector,
override_config_path=tmp_cfg,
)
model.to(dtype=dtype)
restore_model_config(model.cfg, restore_dict)
# If target model has TP > 1 or PP > 1
if tgt_pp_size > 1 or tgt_tp_size > 1:
# Preserve the TP 1 PP 1 model parameters and names
global_params = []
global_params.append([p for n, p in model.named_parameters()]) # params
global_params.append([n for n, p in model.named_parameters()]) # names
logging.debug("Global parameters:")
for idx, (name, p) in enumerate(zip(global_params[1], global_params[0])):
logging.debug(f"{name} - {p.shape}")
logging.info(f"TP 1 PP 1 Number of Parameters : {len(global_params[0])}")
world_size = (
tgt_pp_size * tgt_tp_size
) # pseudo world size for simulating load of a specific rank on a single gpu
new_global_batch_size = model.cfg.micro_batch_size * world_size
old_global_batch_size = model.cfg.get('global_batch_size', model.cfg.micro_batch_size)
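# e.g. micro_batch_size = 4 with tgt_tp_size = 2 and tgt_pp_size = 4 gives world_size = 8
# and new_global_batch_size = 4 * 8 = 32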
global_offset = len(global_params[0]) - 1  # -1 because this indexes the array, range [0, L-1]
logging.info(f"Final layer offset for parameters: {global_offset}")
for pp_rank in range(tgt_pp_size - 1, -1, -1): # reverse order
with open_dict(model.cfg):
model.cfg.pipeline_model_parallel_size = tgt_pp_size
model.cfg.tensor_model_parallel_size = tgt_tp_size
if 'pipeline_model_parallel_split_rank' in model.cfg:
if pipeline_model_parallel_split_rank > 0:
model.cfg.pipeline_model_parallel_split_rank = pipeline_model_parallel_split_rank
elif pp_size > 1:
logging.warning(
f"Model config has `pipeline_model_parallel_split_rank` set to "
f"{model.cfg.pipeline_model_parallel_split_rank} and target PP "
f"size is {tgt_pp_size}. "
f"Provided `pipeline_model_parallel_split_rank` is "
f"{pipeline_model_parallel_split_rank}. "
f"Be careful that the model config is correct "
f"if encoder-decoder models are being converted."
)
model.cfg.global_batch_size = old_global_batch_size # Used for restoration
# Override flag that forces Model to use AppState instead of Trainer
# to determine the world size, global and local rank
# Used for simulating load of a specific rank on a single gpu
os.environ[NEMO_MEGATRON_MODEL_PARALLEL_APPSTATE_OVERRIDE] = "true"
# Compute the global rank
global_rank = (
pp_rank * tgt_tp_size + 0
) # tp_rank = 0 needed just for modules, all TP will be merged to this PP rank
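# e.g. with tgt_tp_size = 2: pp_rank 0 -> global_rank 0, pp_rank 3 -> global_rank 6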
# Update AppState
app_state.world_size = world_size
app_state.global_rank = global_rank
app_state.local_rank = global_rank % num_gpu_per_node
app_state.pipeline_model_parallel_size = tgt_pp_size
app_state.tensor_model_parallel_size = tgt_tp_size
app_state.model_parallel_size = (
app_state.pipeline_model_parallel_size * app_state.tensor_model_parallel_size
)
trainer = Trainer(
plugins=plugins, devices=1, strategy=NLPDDPStrategy(), accelerator="cpu", precision=precision
)
if args.tokenizer_model_path is not None:
with open_dict(model.cfg):
model.cfg.tokenizer.model = args.tokenizer_model_path
else:
if tokenizer_model_path is None:
logging.warning("Could not extract tokenizer model file from checkpoint.")
else:
# Extract tokenizer info
with open_dict(model.cfg):
model.cfg.tokenizer.model = tokenizer_model_path
model.cfg, restore_dict = force_cpu_model(model.cfg)
model = cls(model.cfg, trainer)
model = model.to('cpu')
model._save_restore_connector = NLPSaveRestoreConnector()
model.freeze()
model.to(dtype=dtype)
restore_model_config(model.cfg, restore_dict)
# Update global batch size
if old_global_batch_size % new_global_batch_size != 0 or old_global_batch_size < new_global_batch_size:
logging.info(
f"Global batch size {old_global_batch_size} is not divisible by new global batch size {new_global_batch_size}."
f" The model config will be updated with new global batch size {new_global_batch_size}."
)
with open_dict(model.cfg):
model.cfg.global_batch_size = new_global_batch_size
logging.info(f"Global rank: {global_rank} Local rank: {app_state.local_rank} World size: {world_size}")
logging.info(f"PP rank: {pp_rank} TP rank: {0}")
logging.info(f"TP 1 PP 1 Number of Layers : {len(global_params[0])}")
logging.info(f"Remaining layer offset for parameters: {global_offset}")
logging.info("\n")
# Special case for TP conversion only mode
if tp_conversion_only:
logging.info(f"Skipping PP split due to flag `--tp_conversion_only`")
split_tp_partition_only(model, global_params, tgt_tp_size, args.target_file, args.megatron_legacy)
break
global_offset = split_partition(
model,
global_params,
tgt_pp_size,
tgt_tp_size,
pp_rank,
global_offset,
pipeline_model_parallel_split_rank,
args.target_file,
args.megatron_legacy,
)
# Reset env flag
os.environ.pop(NEMO_MEGATRON_MODEL_PARALLEL_APPSTATE_OVERRIDE, None)
# Check if invalid global offset - after all PP splits, global offset should be -1
if global_offset < -1 and not tp_conversion_only:
raise ValueError(
f"Invalid global offset {global_offset} found for global rank {app_state.global_rank} "
f"and local rank {app_state.local_rank}. Should be -1 if all parameters have been assigned. "
f"Currently, seems some parameters were duplicated."
)
elif global_offset > -1 and not tp_conversion_only:
logging.error("\n")
logging.error("!" * 80)
logging.error("Error: Some parameters were not correctly added to model partitions.")
logging.error("Below is list of parameters skipped in reverse order: ")
for param_id in range(global_offset, -1, -1):
logging.error(
f"Param ID: {param_id} : {global_params[1][param_id]} {global_params[0][param_id].shape}"
)
logging.error("!" * 80)
raise ValueError(
f"Invalid global offset {global_offset} found for global rank {app_state.global_rank} "
f"and local rank {app_state.local_rank}. Should be -1 if all parameters have been assigned. "
f"Currently, seems some parameters were not assigned."
)
logging.info("Successfully finished changing partitions!")
if temp_dir is not None:
shutil.rmtree(temp_dir, ignore_errors=True)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/megatron_change_num_partitions.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from examples.nlp.language_modeling.megatron_gpt_eval import RequestDataSet
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from nemo.collections.nlp.models.language_modeling.megatron_retrieval_model import MegatronRetrievalModel
from nemo.collections.nlp.modules.common.transformer.text_generation import LengthParam, SamplingParam
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy, NLPSaveRestoreConnector
from nemo.core.config import hydra_runner
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
"""
This is the script to run RETRO Model text generation.
Usage:
Assuming the model has TP=1 and PP=1, run greedy inference from a .nemo file:
python megatron_retro_eval.py \
trainer.devices=1 \
trainer.num_nodes=1 \
trainer.accelerator=gpu \
trainer.precision=16 \
inference.tokens_to_generate=128 \
inference.greedy=True \
retro_model_file=path_to_retro_nemo_file \
tensor_model_parallel_size=-1 \
pipeline_model_parallel_size=-1 \
retrieval_service.faiss_devices='0' \
retrieval_service.faiss_index=path_to_faiss_index \
retrieval_service.retrieval_index=path_to_retrieval_dataset \
retrieval_service.neighbors=20
"""
@hydra_runner(config_path="conf", config_name="megatron_retro_inference")
def main(cfg) -> None:
trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer)
model_path = cfg.retro_model_file
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(model_path):
save_restore_connector.model_extracted_dir = model_path
model_cfg = MegatronRetrievalModel.restore_from(
model_path, trainer=trainer, return_config=True, save_restore_connector=save_restore_connector,
)
with open_dict(model_cfg):
model_cfg.precision = trainer.precision
model_cfg.sequence_parallel = False
model_cfg.activations_checkpoint_granularity = None
model_cfg.activations_checkpoint_method = None
if (
cfg.tensor_model_parallel_size < 0
or cfg.pipeline_model_parallel_size < 0
or cfg.get('pipeline_model_parallel_split_rank', -1) < 0
):
with open_dict(cfg):
cfg.tensor_model_parallel_size = model_cfg.get('tensor_model_parallel_size', 1)
cfg.pipeline_model_parallel_size = model_cfg.get('pipeline_model_parallel_size', 1)
cfg.pipeline_model_parallel_split_rank = model_cfg.get('pipeline_model_parallel_split_rank', 0)
model = MegatronRetrievalModel.restore_from(
model_path, trainer=trainer, save_restore_connector=save_restore_connector, override_config_path=model_cfg,
)
length_params: LengthParam = {
"max_length": cfg.inference.tokens_to_generate,
"min_length": cfg.inference.min_tokens_to_generate,
}
sampling_params: SamplingParam = {
"use_greedy": cfg.inference.greedy,
"temperature": cfg.inference.temperature,
"top_k": cfg.inference.top_k,
"top_p": cfg.inference.top_p,
"repetition_penalty": cfg.inference.repetition_penalty,
"add_BOS": cfg.inference.add_BOS,
"all_probs": cfg.inference.all_probs,
"compute_logprob": cfg.inference.compute_logprob,
}
# check whether the DDP is initialized
if parallel_state.is_unitialized():
def dummy():
return
if model.trainer.strategy.launcher is not None:
model.trainer.strategy.launcher.launch(dummy, trainer=model.trainer)
model.trainer.strategy.setup_environment()
config = OmegaConf.to_container(cfg.inference)
retrieval_service = OmegaConf.to_container(cfg.retrieval_service)
model.set_inference_config(config, retrieval_service)
if not cfg.use_predict_method:
# First method of running text generation, call model.generate method
response = model.generate(
inputs=OmegaConf.to_container(cfg.prompts),
length_params=length_params,
sampling_params=sampling_params,
strategy=model.inference_strategy,
)
else:
# Second method of running text generation, call trainer.predict
ds = RequestDataSet(OmegaConf.to_container(cfg.prompts))
request_dl = DataLoader(dataset=ds, batch_size=cfg.inference_batch_size)
response = trainer.predict(model, request_dl)
print("***************************")
print(response)
print("***************************")
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/megatron_retro_eval.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf.omegaconf import OmegaConf, open_dict
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.parts.megatron_trainer_builder import MegatronT5TrainerBuilder
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="megatron_t5_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
trainer = MegatronT5TrainerBuilder(cfg).create_trainer()
exp_manager(trainer, cfg.exp_manager)
model = MegatronT5Model(cfg.model, trainer)
trainer.fit(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/megatron_t5_pretraining.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelSummary
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from pytorch_lightning.trainer.connectors.checkpoint_connector import _CheckpointConnector
from nemo.collections.nlp.models.language_modeling.megatron_bart_model import MegatronBARTModel
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="megatron_bart_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True,
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2:
plugins.append(MegatronHalfPrecisionPlugin(plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(
plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[ModelSummary(max_depth=3), CustomProgressBar()]
)
exp_manager(trainer, cfg.exp_manager)
# update resume from checkpoint found by exp_manager
if cfg.model.resume_from_checkpoint is not None:
trainer.ckpt_path = cfg.model.resume_from_checkpoint
logging.info(f'Resuming training from checkpoint: {trainer.ckpt_path}')
model = MegatronBARTModel(cfg.model, trainer)
trainer.fit(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/megatron_bart_pretraining.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from pytorch_lightning.trainer.connectors.checkpoint_connector import _CheckpointConnector
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import AppState, logging
from nemo.utils.exp_manager import exp_manager
from nemo.utils.model_utils import inject_model_parallel_rank
def _modify_config(gpt_cfg, cfg, add_cfg_to_tree=False):
"""
This function modifies the original GPT pre-training config (gpt_cfg) with attributes from the finetuning config (cfg).
The `add_cfg_to_tree` arg adds `cfg` to the top of the yaml tree which is needed for all `hparams.yaml` files when passed as an arg to `load_from_checkpoint()`.
"""
OmegaConf.set_struct(gpt_cfg, True)
OmegaConf.resolve(cfg)
with open_dict(gpt_cfg):
gpt_cfg.megatron_amp_O2 = cfg.model.get('megatron_amp_O2', False)
gpt_cfg.micro_batch_size = cfg.model.micro_batch_size
gpt_cfg.global_batch_size = cfg.model.global_batch_size
gpt_cfg.sequence_parallel = cfg.model.get("sequence_parallel", False)
gpt_cfg.activations_checkpoint_granularity = cfg.model.get("activations_checkpoint_granularity", None)
gpt_cfg.activations_checkpoint_num_layers = cfg.model.get("activations_checkpoint_num_layers", None)
gpt_cfg.activations_checkpoint_method = cfg.model.get("activations_checkpoint_method", None)
gpt_cfg.data = cfg.model.data
gpt_cfg.optim = cfg.model.optim
gpt_cfg.precision = cfg.trainer.precision
gpt_cfg.restore_from_path = cfg.restore_from_path
gpt_cfg.resume_from_checkpoint = cfg.model.resume_from_checkpoint
gpt_cfg.gradient_as_bucket_view = cfg.model.gradient_as_bucket_view
gpt_cfg.encoder_seq_length = cfg.model.encoder_seq_length
gpt_cfg.max_position_embeddings = cfg.model.max_position_embeddings
gpt_cfg.seq_len_interpolation_factor = cfg.model.seq_len_interpolation_factor
gpt_cfg.use_flash_attention = cfg.model.use_flash_attention
# This is needed when modifying a hparam file directly to load `.ckpt` files.
# This is not needed to modify the cfg in `.nemo` files.
if add_cfg_to_tree:
OmegaConf.resolve(gpt_cfg)
gpt_cfg.cfg = gpt_cfg
return gpt_cfg
def load_from_nemo(cls, cfg, trainer, gpt_cfg, modify_confg_fn):
gpt_cfg = modify_confg_fn(gpt_cfg, cfg, add_cfg_to_tree=False)
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.restore_from_path):
save_restore_connector.model_extracted_dir = cfg.restore_from_path
model = cls.restore_from(
restore_path=cfg.restore_from_path,
trainer=trainer,
override_config_path=gpt_cfg,
save_restore_connector=save_restore_connector,
)
return model
def load_from_checkpoint_dir(cls, cfg, trainer, modify_confg_fn):
app_state = AppState()
if cfg.model.tensor_model_parallel_size > 1 or cfg.model.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = cfg.model.tensor_model_parallel_size * cfg.model.pipeline_model_parallel_size
app_state.tensor_model_parallel_size = cfg.model.tensor_model_parallel_size
app_state.pipeline_model_parallel_size = cfg.model.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=cfg.model.tensor_model_parallel_size,
pipeline_model_parallel_size_=cfg.model.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=cfg.model.pipeline_model_parallel_split_rank,
)
checkpoint_path = inject_model_parallel_rank(
os.path.join(cfg.model.pretrained_checkpoint.checkpoint_dir, cfg.model.pretrained_checkpoint.checkpoint_name)
)
hparams_file = OmegaConf.load(cfg.model.pretrained_checkpoint.hparams_file)
gpt_cfg = modify_confg_fn(hparams_file.cfg, cfg, add_cfg_to_tree=True)
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
OmegaConf.save(config=gpt_cfg, f=f.name)
model = cls.load_from_checkpoint(checkpoint_path=checkpoint_path, trainer=trainer, hparams_file=f.name,)
return model
def validate_checkpoint_loading_args(cfg):
if cfg.checkpoint_dir is None or not os.path.isdir(cfg.checkpoint_dir):
raise ValueError(f'Checkpoint directory {cfg.checkpoint_dir} does not exist or is not a directory.')
if cfg.checkpoint_name is None:
raise ValueError(f'Checkpoint name {cfg.checkpoint_name} is not valid.')
if cfg.hparams_file is None or not os.path.isfile(cfg.hparams_file):
raise ValueError(f'Hparams file {cfg.hparams_file} does not exist or is not a file.')
@hydra_runner(config_path="conf", config_name="megatron_gpt_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
with_distributed_adam = cfg.model.optim.get('name', 'fused_adam') == 'distributed_fused_adam'
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True,
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2 and not with_distributed_adam:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[CustomProgressBar()])
exp_manager(trainer, cfg.exp_manager)
# update resume from checkpoint found by exp_manager
if cfg.model.resume_from_checkpoint is not None:
trainer.ckpt_path = cfg.model.resume_from_checkpoint
logging.info(f'Resuming training from checkpoint: {trainer.ckpt_path}')
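# Model initialization: restore from a .nemo file if restore_from_path is set, otherwise load a PTL
# checkpoint directory if model.pretrained_checkpoint is configured, otherwise start from scratch.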
if cfg.restore_from_path:
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.restore_from_path):
save_restore_connector.model_extracted_dir = cfg.restore_from_path
gpt_cfg = MegatronGPTModel.restore_from(
restore_path=cfg.restore_from_path,
trainer=trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
model = load_from_nemo(MegatronGPTModel, cfg, trainer, gpt_cfg, modify_confg_fn=_modify_config)
elif cfg.model.get("pretrained_checkpoint", None) is not None:
validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint)
model = load_from_checkpoint_dir(MegatronGPTModel, cfg, trainer, modify_confg_fn=_modify_config)
else:
logging.warning('No checkpoint provided. Starting from scratch.')
model = MegatronGPTModel(cfg.model, trainer)
trainer.fit(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/megatron_gpt_continue_training.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from megatron_t5_seq2seq_finetune import load_from_checkpoint_dir, load_from_nemo, validate_checkpoint_loading_args
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from pytorch_lightning.plugins.precision import MixedPrecisionPlugin
from nemo.collections.nlp.models.language_modeling.megatron_finetune_model import MegatronT5FinetuneModel
from nemo.collections.nlp.models.language_modeling.megatron_glue_model import MegatronT5GLUEModel
from nemo.collections.nlp.models.language_modeling.megatron_t0_model import MegatronT0Model
from nemo.collections.nlp.parts.nlp_overrides import GradScaler, MegatronHalfPrecisionPlugin, NLPDDPStrategy
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
def _modify_config(t5_cfg, cfg, add_cfg_to_tree=False):
"""
This function modifies the original t5 pre-training config (t5_cfg) with attributes from the finetuning config (cfg).
The `add_cfg_to_tree` arg adds `cfg` to the top of the yaml tree which is needed for all `hparams.yaml` files when passed as an arg to `load_from_checkpoint()`.
"""
OmegaConf.set_struct(t5_cfg, True)
with open_dict(t5_cfg):
t5_cfg.precision = cfg.trainer.precision
# Overwrite data configs
if cfg.model.data.validation_ds.get('src_file_name', None) is not None:
logging.info(
'Found validation_ds.src_file_name in the config file. Overriding the finetuned model config file with the values from the new config file.'
)
t5_cfg.data.validation_ds.src_file_name = cfg.model.data.validation_ds.src_file_name
if cfg.model.data.validation_ds.get('tgt_file_name', None) is not None:
logging.info(
'Found validation_ds.tgt_file_name in the config file. Overriding the finetuned model config file with the values from the new config file.'
)
t5_cfg.data.validation_ds.tgt_file_name = cfg.model.data.validation_ds.tgt_file_name
if "write_predictions_to_file" in cfg.model.data.validation_ds:
t5_cfg.data.validation_ds.write_predictions_to_file = (
cfg.model.data.validation_ds.write_predictions_to_file
)
if "output_file_path_prefix" in cfg.model.data.validation_ds:
t5_cfg.data.validation_ds.output_file_path_prefix = cfg.model.data.validation_ds.output_file_path_prefix
t5_cfg.data.validation_ds.micro_batch_size = cfg.model.data.validation_ds.micro_batch_size
t5_cfg.data.validation_ds.global_batch_size = cfg.model.data.validation_ds.global_batch_size
# This is needed when modifying a hparam file directly to load `.ckpt` files.
# This is not needed to modify the cfg in `.nemo` files.
if add_cfg_to_tree:
OmegaConf.resolve(t5_cfg)
t5_cfg.cfg = t5_cfg
return t5_cfg
@hydra_runner(config_path="conf", config_name="megatron_t5_config_finetune_glue_eval")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True,
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', '16-mixed', 'bf16', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(MixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer)
exp_manager(trainer, cfg.exp_manager)
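# Pick the model class from the validation dataset config: `task_name` selects GLUE-style evaluation
# (MegatronT5GLUEModel), `file_names` selects T0-style evaluation (MegatronT0Model), and anything else
# falls back to the generic seq2seq finetuning model (MegatronT5FinetuneModel).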
if hasattr(cfg.model.data.validation_ds, 'task_name'):
if cfg.model.restore_from_path:
t5_cfg = MegatronT5GLUEModel.restore_from(
restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True
)
model = load_from_nemo(MegatronT5GLUEModel, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config)
else:
validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint)
model = load_from_checkpoint_dir(MegatronT5GLUEModel, cfg, trainer, modify_confg_fn=_modify_config)
elif hasattr(cfg.model.data.validation_ds, 'file_names'):
if cfg.model.restore_from_path:
t5_cfg = MegatronT0Model.restore_from(
restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True
)
model = load_from_nemo(MegatronT0Model, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config)
else:
validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint)
model = load_from_checkpoint_dir(MegatronT0Model, cfg, trainer, modify_confg_fn=_modify_config)
else:
if cfg.model.restore_from_path:
t5_cfg = MegatronT5FinetuneModel.restore_from(
restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True
)
model = load_from_nemo(MegatronT5FinetuneModel, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config)
else:
validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint)
model = load_from_checkpoint_dir(MegatronT5FinetuneModel, cfg, trainer, modify_confg_fn=_modify_config)
model.freeze()
trainer.validate(model)
if hasattr(cfg.model.data, 'test_ds'):
trainer.test(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/megatron_t5_seq2seq_eval.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models.language_modeling import TransformerLMModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="transformer_lm_config")
def main(cfg: DictConfig) -> None:
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
transformer_lm = TransformerLMModel(cfg.model, trainer=trainer)
trainer.fit(transformer_lm)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/transformer_lm.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from omegaconf import OmegaConf, open_dict
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.parts.nlp_overrides import (
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.app_state import AppState
from nemo.utils.model_utils import inject_model_parallel_rank
""" Example script showing how to run validation on a MegatronGPT model.
Sample usage:
From nemo model:
python megatron_gpt_validate.py \
trainer.devices=4 \
trainer.num_nodes=1 \
trainer.limit_val_batches=10 \
trainer.max_steps=100 \
tensor_model_parallel_size=1 \
pipeline_model_parallel_size=4 \
trainer.precision=bf16 \
gpt_model_file=/path/to/megatron_gpt_tp_1_pp4.nemo
From PTL checkpoint:
python megatron_gpt_validate.py \
trainer.devices=4 \
trainer.num_nodes=1 \
trainer.limit_val_batches=10 \
trainer.max_steps=100 \
tensor_model_parallel_size=1 \
pipeline_model_parallel_size=4 \
virtual_pipeline_model_parallel_size=4 \
trainer.precision=bf16 \
checkpoint_dir='/path/to/experiment/checkpoints' \
checkpoint_name='megatron_gpt--val_loss=7.78-step=100-consumed_samples=6336.0-last.ckpt' \
hparams_file='/path/to/experiment/hparams.yaml'
"""
def modify_pretrained_cfg(pretrained_cfg, trainer, cfg):
with open_dict(pretrained_cfg):
OmegaConf.set_struct(pretrained_cfg, True)
pretrained_cfg.sequence_parallel = False
pretrained_cfg.activations_checkpoint_granularity = None
pretrained_cfg.activations_checkpoint_method = None
pretrained_cfg.precision = trainer.precision
if cfg.micro_batch_size is not None:
pretrained_cfg.micro_batch_size = cfg.micro_batch_size
if cfg.global_batch_size is not None:
pretrained_cfg.global_batch_size = cfg.global_batch_size
if trainer.precision == "16":
pretrained_cfg.megatron_amp_O2 = False
return pretrained_cfg
@hydra_runner(config_path="conf", config_name="megatron_gpt_validate_config")
def main(cfg) -> None:
trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer)
assert (
cfg.trainer.devices * cfg.trainer.num_nodes
== cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
), "devices * num_nodes should equal tensor_model_parallel_size * pipeline_model_parallel_size"
if cfg.gpt_model_file:
logging.info(f"Restoring model from {cfg.gpt_model_file}")
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.gpt_model_file):
save_restore_connector.model_extracted_dir = cfg.gpt_model_file
pretrained_cfg = MegatronGPTModel.restore_from(
restore_path=cfg.gpt_model_file,
trainer=trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
pretrained_cfg = modify_pretrained_cfg(pretrained_cfg, trainer, cfg)
model = MegatronGPTModel.restore_from(
restore_path=cfg.gpt_model_file,
trainer=trainer,
override_config_path=pretrained_cfg,
save_restore_connector=save_restore_connector,
map_location=f'cuda:{trainer.local_rank}', # map_location is needed for converted models
)
elif cfg.checkpoint_dir:
logging.info(
f"Restoring model from checkpoint_dir: {cfg.checkpoint_dir} with checkpoint name: {cfg.checkpoint_name}"
)
app_state = AppState()
if cfg.tensor_model_parallel_size > 1 or cfg.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
app_state.tensor_model_parallel_size = cfg.tensor_model_parallel_size
app_state.pipeline_model_parallel_size = cfg.pipeline_model_parallel_size
app_state.virtual_pipeline_model_parallel_size = cfg.virtual_pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=cfg.tensor_model_parallel_size,
pipeline_model_parallel_size_=cfg.pipeline_model_parallel_size,
virtual_pipeline_model_parallel_size_=cfg.virtual_pipeline_model_parallel_size,
)
checkpoint_path = inject_model_parallel_rank(os.path.join(cfg.checkpoint_dir, cfg.checkpoint_name))
pretrained_cfg = OmegaConf.load(cfg.hparams_file)
pretrained_cfg = modify_pretrained_cfg(pretrained_cfg.cfg, trainer, cfg)
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
OmegaConf.save(config=pretrained_cfg, f=f.name)
model = MegatronGPTModel.load_from_checkpoint(
checkpoint_path=checkpoint_path, trainer=trainer, hparams_file=f.name,
)
else:
raise ValueError("need at least a nemo file or checkpoint dir")
logging.info("\n\n************** Model configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(model.cfg)}')
trainer.validate(model=model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/nlp/language_modeling/megatron_gpt_validate.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.multiprocessing as mp
from omegaconf.omegaconf import OmegaConf, open_dict
from nemo.collections.nlp.models.language_modeling.megatron_bert_model import MegatronBertModel
from nemo.collections.nlp.parts.megatron_trainer_builder import MegatronBertTrainerBuilder
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="megatron_bert_config")
def main(cfg) -> None:
if cfg.model.data.dataloader_type != "LDDL":
mp.set_start_method("spawn", force=True)
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
trainer = MegatronBertTrainerBuilder(cfg).create_trainer()
exp_manager(trainer, cfg.exp_manager)
model = MegatronBertModel(cfg.model, trainer)
trainer.fit(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/megatron_bert_pretraining.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from omegaconf.omegaconf import open_dict
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_t5_prompt_learning_model import (
MegatronT5PromptLearningModel,
)
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from nemo.core.config import hydra_runner
from nemo.utils.app_state import AppState
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
if not torch.cuda.is_available():
raise EnvironmentError("GPU is needed for the inference")
@hydra_runner(config_path="conf", config_name="megatron_t5_prompt_learning_inference")
def main(cfg) -> None:
# trainer required for restoring model parallel models
trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer)
if (
cfg.tensor_model_parallel_size < 0
or cfg.pipeline_model_parallel_size < 0
or cfg.get('pipeline_model_parallel_split_rank', -1) < 0
):
model_config = MegatronT5PromptLearningModel.restore_from(
restore_path=cfg.language_model_path, trainer=trainer, return_config=True,
)
with open_dict(cfg):
cfg.tensor_model_parallel_size = model_config.get('tensor_model_parallel_size', 1)
cfg.pipeline_model_parallel_size = model_config.get('pipeline_model_parallel_size', 1)
cfg.pipeline_model_parallel_split_rank = model_config.get('pipeline_model_parallel_split_rank', 0)
assert (
cfg.trainer.devices * cfg.trainer.num_nodes
== cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
), "devices * num_nodes should equal tensor_model_parallel_size * pipeline_model_parallel_size"
app_state = AppState()
if cfg.tensor_model_parallel_size > 1 or cfg.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=cfg.tensor_model_parallel_size,
pipeline_model_parallel_size_=cfg.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=cfg.pipeline_model_parallel_split_rank,
)
# Load prompt tuned model, virtual_prompt_model_file and language_model_path must be provided in config
if cfg.get('virtual_prompt_model_file', None) is not None and cfg.get('language_model_path', None) is not None:
# Update frozen T5 model path in case it has changed
prompt_learning_cfg = MegatronT5PromptLearningModel.restore_from(
cfg.virtual_prompt_model_file, trainer=trainer, return_config=True
)
with open_dict(prompt_learning_cfg):
if cfg.get("language_model_path"):
# This is for backward compatibility with old checkpoints that used `pretrained_language_model_path` instead of `language_model_path`.
if hasattr(prompt_learning_cfg, 'pretrained_language_model_path'):
prompt_learning_cfg.pretrained_language_model_path = cfg.language_model_path
else:
prompt_learning_cfg.language_model_path = cfg.language_model_path
prompt_learning_cfg.micro_batch_size = cfg.data.get('micro_batch_size', 4)
prompt_learning_cfg.global_batch_size = cfg.data.get('global_batch_size', 4)
# Now load prompt learning model with frozen T5 model base
model = MegatronT5PromptLearningModel.restore_from(
restore_path=cfg.virtual_prompt_model_file, trainer=trainer, override_config_path=prompt_learning_cfg
)
else:
raise ValueError("virtual_prompt_model_file and pretrained_language_model_file must be provided in config")
# check whether the DDP is initialized
if parallel_state.is_unitialized():
def dummy():
return
if model.trainer.strategy.launcher is not None:
model.trainer.strategy.launcher.launch(dummy, trainer=model.trainer)
model.trainer.strategy.setup_environment()
model.freeze()
_, test_dl = model.build_virtual_prompt_dataset(
dataset_paths=cfg.data.test_ds,
batch_size=cfg.data.global_batch_size,
for_train=False,
drop_last=False,
shuffle=False,
num_workers=cfg.data.num_workers,
pin_memory=True,
)
outputs = trainer.predict(model, test_dl)
with open(cfg.pred_file_path, "w", encoding="utf-8") as pred_file:
for batch in outputs:
preds = batch["preds_text"]
for pred in preds:
pred = pred.strip().replace("\n", " ")
pred_file.write(pred + "\n")
print(f'Inference finished. Predictions written to {cfg.pred_file_path}')
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/nlp/language_modeling/megatron_t5_prompt_learning_eval.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks.timer import Timer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from pytorch_lightning.plugins.precision import MixedPrecisionPlugin
from pytorch_lightning.trainer.connectors.checkpoint_connector import _CheckpointConnector
from nemo.collections.nlp.models.language_modeling.megatron_retro_fine_tune_model import MegatronRetroFinetuneModel
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import StatelessTimer, exp_manager
def _modify_config(retro_cfg, cfg, add_cfg_to_tree=False):
"""
This function modifies the original retro pre-training config with attributes from the finetuning config (cfg).
The `add_cfg_to_tree` arg adds `cfg` to the top of the yaml tree which is needed for all `hparams.yaml` files when passed as an arg to `load_from_checkpoint()`.
"""
OmegaConf.set_struct(retro_cfg, True)
with open_dict(retro_cfg):
retro_cfg.megatron_amp_O2 = cfg.model.get('megatron_amp_O2', False)
retro_cfg.data = cfg.model.data
retro_cfg.precision = cfg.trainer.precision
retro_cfg.optim = cfg.model.optim
retro_cfg.micro_batch_size = cfg.model.micro_batch_size
# This is needed when modifying a hparam file directly to load `.ckpt` files.
# This is not needed to modify the cfg in `.nemo` files.
if add_cfg_to_tree:
OmegaConf.resolve(retro_cfg)
retro_cfg.cfg = retro_cfg
return retro_cfg
def load_from_nemo(cls, cfg, trainer, retro_cfg, modify_confg_fn, save_restore_connector):
retro_cfg = modify_confg_fn(retro_cfg, cfg, add_cfg_to_tree=False)
model = cls.restore_from(
restore_path=cfg.model.restore_path,
trainer=trainer,
override_config_path=retro_cfg,
save_restore_connector=save_restore_connector,
)
return model
@hydra_runner(config_path="conf", config_name="megatron_retro_finetune_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
###### The following is a workaround for the num_workers=0 issue #####
# import torch.multiprocessing as mp
# mp.set_start_method("spawn", force=True)
#####################################################
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True if megatron_amp_o2 else False,
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
timeout=datetime.timedelta(seconds=18000),
)
if cfg.trainer.precision in [16, '16', '16-mixed', 'bf16', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(MixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[CustomProgressBar()])
exp_manager(trainer, cfg.exp_manager)
logging.info(f'Resuming training from checkpoint: {trainer.ckpt_path}')
# Override timer callback to a stateless one
for idx, callback in enumerate(trainer.callbacks):
if isinstance(callback, Timer):
trainer.callbacks[idx] = StatelessTimer(cfg.trainer.max_time,)
# load an existing RETRO model for fine-tuning or init a new one
if cfg.model.get("restore_path", None):
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.model.restore_path):
save_restore_connector.model_extracted_dir = cfg.model.restore_path
model_cfg = MegatronRetroFinetuneModel.restore_from(
restore_path=cfg.model.restore_path,
trainer=trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
# hydra interpolation does not work here as the interpolation key is lost when PTL saves hparams
model = load_from_nemo(
MegatronRetroFinetuneModel,
cfg,
trainer,
model_cfg,
modify_confg_fn=_modify_config,
save_restore_connector=save_restore_connector,
)
else:
model = MegatronRetroFinetuneModel(cfg.model, trainer=trainer)
trainer.fit(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/megatron_retro_fine_tune.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Converts BERT NeMo0.* checkpoints to NeMo1.0 format.
"""
from argparse import ArgumentParser
import torch
parser = ArgumentParser()
parser.add_argument("--bert_encoder", required=True, help="path to BERT encoder, e.g. /../BERT-STEP-2285714.pt")
parser.add_argument(
"--bert_token_classifier",
required=True,
help="path to BERT token classifier, e.g. /../BertTokenClassifier-STEP-2285714.pt",
)
parser.add_argument(
"--bert_sequence_classifier",
required=False,
default=None,
help="path to BERT sequence classifier, e.g /../SequenceClassifier-STEP-2285714.pt",
)
parser.add_argument(
"--output_path", required=False, default="converted_model.pt", help="output path to newly converted model"
)
args = parser.parse_args()
bert_in = torch.load(args.bert_encoder)
tok_in = torch.load(args.bert_token_classifier)
if args.bert_sequence_classifier:
seq_in = torch.load(args.bert_sequence_classifier)
new_dict = {}
new_model = {"state_dict": new_dict}
for k in bert_in:
new_name = k.replace("bert.", "bert_model.")
new_dict[new_name] = bert_in[k]
for k in tok_in:
new_name = "mlm_classifier." + k
new_dict[new_name] = tok_in[k]
if args.bert_sequence_classifier:
for k in seq_in:
new_name = "nsp_classifier." + k
new_dict[new_name] = seq_in[k]
torch.save(new_model, args.output_path)
| NeMo-main | examples/nlp/language_modeling/convert_weights_to_nemo1.0.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.multiprocessing as mp
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from nemo.collections.nlp.models.language_modeling.megatron_t5_prompt_learning_model import (
MegatronT5PromptLearningModel,
)
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
mp.set_start_method("spawn", force=True)
"""
This is an example of how to ptune/prompt-tune a pretrained T5 model.
Be sure to use a .nemo T5 model with this code. If you've downloaded
a model from NGC or are otherwise using a MegatronLM model, please use
either megatron_ckpt_to_nemo.py or megatron_lm_ckpt_to_nemo.py found
within this examples directory to convert your model to .nemo format.
"""
@hydra_runner(config_path="conf", config_name="megatron_t5_prompt_learning.yaml")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
plugins = []
strategy = NLPDDPStrategy(no_ddp_communication_hook=True, find_unused_parameters=False,)
if cfg.trainer.precision == 16 or cfg.trainer.precision == '16-mixed':
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
enabled=False
if cfg.model.pipeline_model_parallel_size > 1
else True, # turn off the grad scale for pipeline parallel LM model
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugins.append(PipelineMixedPrecisionPlugin(precision='16-mixed', device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[CustomProgressBar()])
exp_manager(trainer, cfg.exp_manager)
# load existing or init new soft prompt T5 model
if cfg.model.get("restore_path", None):
model = MegatronT5PromptLearningModel.restore_from(
cfg.model.restore_path, cfg.model, trainer=trainer, save_restore_connector=NLPSaveRestoreConnector()
)
else:
model = MegatronT5PromptLearningModel(cfg.model, trainer=trainer)
trainer.fit(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/megatron_t5_prompt_learning.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.strategies import DDPStrategy
from nemo.collections.nlp.models.language_modeling import BERTLMModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="bert_pretraining_from_text_config")
def main(cfg: DictConfig) -> None:
logging.info(f'Config:\n {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(strategy=DDPStrategy(find_unused_parameters=True), **cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
bert_model = BERTLMModel(cfg.model, trainer=trainer)
trainer.fit(bert_model)
if cfg.model.nemo_path:
bert_model.save_to(cfg.model.nemo_path)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/bert_pretraining.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Conversion script to convert PTL checkpoints into nemo checkpoint.
Example to run this conversion script:
python -m torch.distributed.launch --nproc_per_node=<tensor_model_parallel_size * pipeline_model_parallel_size> \
megatron_ckpt_to_nemo.py \
--checkpoint_folder <path_to_PTL_checkpoints_folder> \
--checkpoint_name <checkpoint_name> \
--nemo_file_path <path_to_output_nemo_file> \
--tensor_model_parallel_size <tensor_model_parallel_size> \
--pipeline_model_parallel_size <pipeline_model_parallel_size>
"""
import dis
import os
from argparse import ArgumentParser
import torch
from genericpath import isdir
from megatron.core import parallel_state
from omegaconf import open_dict
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_bart_model import MegatronBARTModel
from nemo.collections.nlp.models.language_modeling.megatron_bert_model import MegatronBertModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_sft_model import MegatronGPTSFTModel
from nemo.collections.nlp.models.language_modeling.megatron_retrieval_model import MegatronRetrievalModel
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.models.machine_translation.megatron_nmt_model import MegatronNMTModel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy, NLPSaveRestoreConnector
from nemo.utils import AppState, logging
from nemo.utils.distributed import initialize_distributed
from nemo.utils.model_utils import inject_model_parallel_rank
def get_args():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint_folder",
type=str,
default=None,
required=True,
help="Path to PTL checkpoints saved during training. Ex: /raid/nemo_experiments/megatron_gpt/checkpoints",
)
parser.add_argument(
"--checkpoint_name",
type=str,
default=None,
required=True,
help="Name of checkpoint to be used. Ex: megatron_gpt--val_loss=6.34-step=649-last.ckpt",
)
parser.add_argument(
"--hparams_file",
type=str,
default=None,
required=False,
help="Path config for restoring. It's created during training and may need to be modified during restore if restore environment is different than training. Ex: /raid/nemo_experiments/megatron_gpt/hparams.yaml",
)
parser.add_argument("--nemo_file_path", type=str, default=None, required=True, help="Path to output .nemo file.")
parser.add_argument("--gpus_per_node", type=int, required=True, default=None)
parser.add_argument("--tensor_model_parallel_size", type=int, required=True, default=None)
parser.add_argument("--pipeline_model_parallel_size", type=int, required=True, default=None)
parser.add_argument(
"--pipeline_model_parallel_split_rank",
type=int,
required=False,
default=None,
help="If pipeline parallel size > 1, this is the rank at which the encoder ends and the decoder begins.",
)
parser.add_argument(
"--model_type",
type=str,
required=True,
default="gpt",
choices=["gpt", "sft", "t5", "bert", "nmt", "bart", "retro"],
)
parser.add_argument("--local_rank", type=int, required=False, default=os.getenv('LOCAL_RANK', -1))
parser.add_argument("--bcp", action="store_true", help="Whether on BCP platform")
args = parser.parse_args()
return args
def convert(local_rank, rank, world_size, args):
app_state = AppState()
app_state.data_parallel_rank = 0
num_nodes = world_size // args.gpus_per_node
plugins = []
strategy = "auto"
if args.bcp:
plugins.append(TorchElasticEnvironment())
if args.model_type == 'gpt':
strategy = NLPDDPStrategy()
trainer = Trainer(
devices=args.gpus_per_node, num_nodes=num_nodes, accelerator='gpu', plugins=plugins, strategy=strategy
)
app_state.pipeline_model_parallel_size = args.pipeline_model_parallel_size
app_state.tensor_model_parallel_size = args.tensor_model_parallel_size
# Auto set split rank for T5, BART, NMT if split rank is None.
if args.pipeline_model_parallel_size > 1 and args.model_type in ['t5', 'bart', 'nmt']:
if args.pipeline_model_parallel_split_rank is not None:
app_state.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_split_rank
else:
if args.pipeline_model_parallel_size % 2 != 0:
raise ValueError(
f"Pipeline model parallel size {args.pipeline_model_parallel_size} must be even if split rank is not specified."
)
else:
# If split rank is not set, then we set it to be pipeline_model_parallel_size // 2 - this is because in most cases we have the same number of enc/dec layers.
app_state.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_size // 2
else:
app_state.pipeline_model_parallel_split_rank = None
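    # Illustrative example (hypothetical values, not from the original script): converting a T5
    # checkpoint trained with pipeline_model_parallel_size=4 and no explicit split rank resolves
    # to split rank 4 // 2 = 2, i.e. pipeline ranks 0-1 hold the encoder and ranks 2-3 the decoder.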
app_state.model_parallel_size = app_state.tensor_model_parallel_size * app_state.pipeline_model_parallel_size
parallel_state.initialize_model_parallel(
tensor_model_parallel_size=app_state.tensor_model_parallel_size,
pipeline_model_parallel_size=app_state.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank=app_state.pipeline_model_parallel_split_rank,
)
app_state.pipeline_model_parallel_rank = parallel_state.get_pipeline_model_parallel_rank()
app_state.tensor_model_parallel_rank = parallel_state.get_tensor_model_parallel_rank()
# check for distributed checkpoint
dist_ckpt_dir = os.path.join(args.checkpoint_folder, args.checkpoint_name)
if os.path.isdir(dist_ckpt_dir):
checkpoint_path = dist_ckpt_dir
else:
# legacy checkpoint needs model parallel injection
checkpoint_path = inject_model_parallel_rank(os.path.join(args.checkpoint_folder, args.checkpoint_name))
logging.info(
f'rank: {rank}, local_rank: {local_rank}, is loading checkpoint: {checkpoint_path} for tp_rank: {app_state.tensor_model_parallel_rank} and pp_rank: {app_state.pipeline_model_parallel_rank}'
)
if args.model_type == 'gpt':
model = MegatronGPTModel.load_from_checkpoint(checkpoint_path, hparams_file=args.hparams_file, trainer=trainer)
elif args.model_type == 'sft':
model = MegatronGPTSFTModel.load_from_checkpoint(
checkpoint_path, hparams_file=args.hparams_file, trainer=trainer
)
        # Override the loaded model's `target` with the SFT class,
        # because the hparams.yaml sometimes records MegatronGPTModel as the target.
with open_dict(model.cfg):
model.cfg.target = f"{MegatronGPTSFTModel.__module__}.{MegatronGPTSFTModel.__name__}"
elif args.model_type == 'bert':
model = MegatronBertModel.load_from_checkpoint(
checkpoint_path, hparams_file=args.hparams_file, trainer=trainer
)
elif args.model_type == 't5':
model = MegatronT5Model.load_from_checkpoint(checkpoint_path, hparams_file=args.hparams_file, trainer=trainer)
elif args.model_type == 'bart':
model = MegatronBARTModel.load_from_checkpoint(
checkpoint_path, hparams_file=args.hparams_file, trainer=trainer
)
elif args.model_type == 'nmt':
model = MegatronNMTModel.load_from_checkpoint(checkpoint_path, hparams_file=args.hparams_file, trainer=trainer)
elif args.model_type == 'retro':
model = MegatronRetrievalModel.load_from_checkpoint(
checkpoint_path, hparams_file=args.hparams_file, trainer=trainer
)
model._save_restore_connector = NLPSaveRestoreConnector()
if torch.distributed.is_initialized():
torch.distributed.barrier()
model.save_to(args.nemo_file_path)
logging.info(f'NeMo model saved to: {args.nemo_file_path}')
if __name__ == '__main__':
args = get_args()
local_rank, rank, world_size = initialize_distributed(args)
convert(local_rank, rank, world_size, args)
| NeMo-main | examples/nlp/language_modeling/megatron_ckpt_to_nemo.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf.omegaconf import OmegaConf
from pytorch_lightning import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.megatron_utils import compute_model_parallel_rank
from nemo.collections.nlp.parts.nlp_overrides import (
    NLPDDPStrategy,
    NLPMixedPrecisionPlugin,
    NLPNativeBfloat16PrecisionPlugin,
    NLPPrecisionPlugin,
    NLPSaveRestoreConnector,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.app_state import AppState
@hydra_runner(config_path="conf", config_name="megatron_gpt_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
trainer = None
if cfg.trainer.precision == 16:
trainer = Trainer(
plugins=[
NLPMixedPrecisionPlugin(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
),
],
strategy=NLPDDPStrategy(),
**cfg.trainer,
)
elif cfg.trainer.precision == 'bf16':
trainer = Trainer(plugins=[NLPNativeBfloat16PrecisionPlugin(),], strategy=NLPDDPStrategy(), **cfg.trainer,)
else:
trainer = Trainer(plugins=[NLPPrecisionPlugin()], strategy=NLPDDPStrategy(), **cfg.trainer)
app_state = AppState()
app_state.model_parallel_size = cfg.model.tensor_model_parallel_size
app_state.model_parallel_rank = compute_model_parallel_rank(trainer.local_rank, app_state.model_parallel_size)
model = MegatronGPTModel.restore_from(
cfg.restore_from_path, trainer=trainer, save_restore_connector=NLPSaveRestoreConnector(),
)
    # Note: most NeMo models must have their data paths configured before instantiating the model.
    # MegatronGPTModel sets up the data in the PTL .setup() method, which runs after DDP spawns.
model.cfg.data.splits_string = cfg.model.data.splits_string
trainer.test(model)
if __name__ == '__main__':
main()
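# --- Hedged usage sketch (not part of the original script; paths and values are hypothetical) ---
#   python megatron_gpt_test.py \
#       restore_from_path=/results/megatron_gpt.nemo \
#       model.tensor_model_parallel_size=2 \
#       trainer.devices=2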
| NeMo-main | examples/nlp/language_modeling/megatron_gpt_test.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.multiprocessing as mp
from megatron.core import parallel_state
from omegaconf import OmegaConf
from omegaconf.omegaconf import open_dict
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_adapter_model import MegatronGPTInfusedAdapterModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy, NLPSaveRestoreConnector
from nemo.core.config import hydra_runner
from nemo.utils import logging
mp.set_start_method("spawn", force=True)
"""
This is the script to run an IA3-tuned GPT Model for text generation.
Usage:
Assume the model has TP=1, PP=1 in the following use cases.
a. run greedy inference using a base gpt nemo file, and an adapter nemo file:
python megatron_gpt_ia3_eval.py \
gpt_model_file=PATH TO GPT MODEL NEMO FILE \
adapter_model_file=PATH TO ADAPTER MODEL NEMO FILE (generated by training script: ./megatron_gpt_ia3_tuning.py) \
data_paths=[PATH TO A JSONL FILE CONTAINING PROMPTS], \
pred_file_path=PATH TO OUTPUT FILE TO DUMP PREDICTIONS
"""
if not torch.cuda.is_available():
raise EnvironmentError("GPU is needed for the inference")
@hydra_runner(config_path="conf", config_name="megatron_gpt_adapter_inference")
def main(cfg) -> None:
# trainer required for restoring model parallel models
trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer)
if (
cfg.tensor_model_parallel_size < 0
or cfg.pipeline_model_parallel_size < 0
or cfg.get('pipeline_model_parallel_split_rank', -1) < 0
):
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.gpt_model_file):
save_restore_connector.model_extracted_dir = cfg.gpt_model_file
model_config = MegatronGPTModel.restore_from(
restore_path=cfg.gpt_model_file,
trainer=trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
with open_dict(cfg):
cfg.tensor_model_parallel_size = model_config.get('tensor_model_parallel_size', 1)
cfg.pipeline_model_parallel_size = model_config.get('pipeline_model_parallel_size', 1)
cfg.pipeline_model_parallel_split_rank = model_config.get('pipeline_model_parallel_split_rank', 0)
# Load an adapter model, must be provided in config
if cfg.get("adapter_model_file", None) is not None:
# Update frozen GPT model path in case it has changed
ia3_tuning_cfg = MegatronGPTInfusedAdapterModel.restore_from(
cfg.adapter_model_file, trainer=trainer, return_config=True
)
with open_dict(ia3_tuning_cfg):
ia3_tuning_cfg.language_model_path = cfg.gpt_model_file
# Now load prompt learning model with frozen gpt model base
model = MegatronGPTInfusedAdapterModel.restore_from(
restore_path=cfg.adapter_model_file, trainer=trainer, override_config_path=ia3_tuning_cfg
)
# Or load regular GPT model
else:
raise NotImplementedError(
"This script is meant for inference from an Adapter Tuned GPT Model, for inference from a Megatron GPT model, refer to ../megatron_gpt_eval.py"
)
model.freeze()
# Have to turn off activations_checkpoint_method for inference
try:
model.model.language_model.encoder.activations_checkpoint_method = None
except AttributeError:
pass
try:
model.frozen_model.model.language_model.encoder.activations_checkpoint_method = None
except AttributeError:
pass
max_input_length = model.frozen_model.cfg.encoder_seq_length - cfg.inference.tokens_to_generate
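    # Descriptive note (not in the original): inputs are capped so that prompt length plus
    # tokens_to_generate never exceeds the frozen model's encoder_seq_length.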
# check whether the DDP is initialized
if parallel_state.is_unitialized():
def dummy():
return
if trainer.strategy.launcher is not None:
trainer.strategy.launcher.launch(dummy, trainer=trainer)
trainer.strategy.setup_environment()
_, dataloader = model.build_virtual_prompt_dataset(
data=cfg.data_paths,
batch_size=cfg.get("batch_size", 1),
max_seq_length=max_input_length,
min_seq_length=model.cfg.data.get('min_seq_length', 1),
add_bos=cfg.inference.add_BOS,
add_eos=False,
for_train=False,
tokens_to_generate=cfg.inference.tokens_to_generate,
drop_last=False,
shuffle=False,
num_workers=cfg.get("num_workers", 1),
)
config = OmegaConf.to_container(cfg.inference)
model.set_inference_config(config)
response = trainer.predict(model, dataloader)
print("***************************")
if cfg.pred_file_path is not None:
with open(cfg.pred_file_path, "w", encoding="utf-8") as f:
for batch in response:
for sentence in batch['sentences']:
s = ' '.join(sentence.split('\n'))
f.write(s + "\n")
print("predictions saved to {}".format(cfg.pred_file_path))
else:
print(response)
print("***************************")
if __name__ == '__main__':
dep_msg = "* Please switch to using examples/nlp/language_modeling/tuning/megatron_gpt_peft_eval.py *"
dep = "Deprecation Notice!!".center(len(dep_msg) - 2, " ")
banner = "*" * len(dep_msg)
spacer = " " * (len(dep_msg) - 2)
logging.warning(f"\n\n{banner}\n*{spacer}*\n*{dep}*\n{dep_msg}\n*{spacer}*\n{banner}\n\n")
main() # noqa pylint: disable=no-value-for-parameter
logging.warning(f"\n\n{banner}\n*{spacer}*\n*{dep}*\n{dep_msg}\n*{spacer}*\n{banner}\n\n")
| NeMo-main | examples/nlp/language_modeling/tuning/megatron_gpt_ia3_eval.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.multiprocessing as mp
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from nemo.collections.nlp.models.language_modeling.megatron_gpt_adapter_model import MegatronGPTAdapterLearningModel
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
mp.set_start_method("spawn", force=True)
"""
This is the script to train an Adapter infused GPT Model for text generation.
A base GPT Model is required as a starting point. This script will then insert
Adapters into each Transformer layer and will train/update only these adapters
during training. The base GPT Model weights will remain frozen.
During training this script will only save the newly trained Adapter weights
in checkpoints. At the end of training a .nemo file of Adapter weights will
be saved.
Usage:
Assuming the base model is a 125m GPT Model, with TP=1, PP=1:
a. run a training run for a base gpt nemo file:
python megatron_gpt_adapter_tuning.py \
"model.data.train_ds=[PATH TO TRAINING JSONL FILE]",
"model.data.validation_ds=[PATH TO VALIDATION JSONL FILE]",
model.language_model_path="PATH TO BASE GPT MODEL .nemo FILE"
name="NAME OF TRAINING RUN"
exp_manager.exp_dir="DIR TO SAVE CHECKPOINTS and .nemo FILE",
trainer.max_epochs=2
"""
@hydra_runner(config_path="conf", config_name="megatron_gpt_adapter_tuning_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
with_distributed_adam = cfg.model.optim.get('name') == 'distributed_fused_adam'
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True, # we don't use DDP for async grad allreduce
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2 and not with_distributed_adam:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
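    # In short (descriptive note, not in the original): precision 16/'16'/'16-mixed' selects a GradScaler
    # with '16-mixed'; 'bf16'/'bf16-mixed' selects no scaler with 'bf16-mixed'. With megatron_amp_O2 and a
    # non-distributed optimizer the MegatronHalfPrecisionPlugin is used, otherwise PipelineMixedPrecisionPlugin.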
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[CustomProgressBar()])
exp_manager(trainer, cfg.exp_manager)
    # load an existing adapter model or initialize a new one on top of the frozen base GPT model
if cfg.model.get("restore_path", None):
model = MegatronGPTAdapterLearningModel.restore_from(
cfg.model.restore_path, cfg.model, trainer=trainer, save_restore_connector=NLPSaveRestoreConnector()
)
else:
model = MegatronGPTAdapterLearningModel(cfg.model, trainer=trainer)
trainer.fit(model)
if __name__ == '__main__':
dep_msg = "* Please switch to using examples/nlp/language_modeling/tuning/megatron_gpt_peft_tuning.py *"
dep = "Deprecation Notice!!".center(len(dep_msg) - 2, " ")
banner = "*" * len(dep_msg)
spacer = " " * (len(dep_msg) - 2)
logging.warning(f"\n\n{banner}\n*{spacer}*\n*{dep}*\n{dep_msg}\n*{spacer}*\n{banner}\n\n")
main()
logging.warning(f"\n\n{banner}\n*{spacer}*\n*{dep}*\n{dep_msg}\n*{spacer}*\n{banner}\n\n")
| NeMo-main | examples/nlp/language_modeling/tuning/megatron_gpt_adapter_tuning.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.multiprocessing as mp
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from nemo.collections.nlp.models.language_modeling.megatron_t5_adapter_model import MegatronT5AdapterLearningModel
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
mp.set_start_method("spawn", force=True)
"""
This is the script to train an Adapter infused T5 Model for text generation.
A base T5 Model is required as a starting point. This script will then insert
Adapters into each Transformer layer and will train/update only these adapters
during training. The base T5 Model weights will remain frozen.
During training this script will only save the newly trained Adapter weights
in checkpoints. At the end of training a .nemo file of Adapter weights will
be saved.
Usage:
Assuming the base model is a T5 Model with TP=1, PP=1:
a. run a training run for a base T5 nemo file:
python megatron_t5_adapter_tuning.py \
    "model.data.train_ds=[PATH TO TRAINING JSONL FILE]",
    "model.data.validation_ds=[PATH TO VALIDATION JSONL FILE]",
    model.language_model_path="PATH TO BASE T5 MODEL .nemo FILE"
name="NAME OF TRAINING RUN"
exp_manager.exp_dir="DIR TO SAVE CHECKPOINTS and .nemo FILE",
trainer.max_epochs=2
"""
@hydra_runner(config_path="conf", config_name="megatron_t5_adapter_tuning_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
with_distributed_adam = cfg.model.optim.get('name') == 'distributed_fused_adam'
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True, # we don't use DDP for async grad allreduce
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2 and not with_distributed_adam:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[CustomProgressBar()])
exp_manager(trainer, cfg.exp_manager)
    # load an existing adapter model or initialize a new one on top of the frozen base T5 model
if cfg.model.get("restore_path", None):
model = MegatronT5AdapterLearningModel.restore_from(
cfg.model.restore_path, cfg.model, trainer=trainer, save_restore_connector=NLPSaveRestoreConnector()
)
else:
model = MegatronT5AdapterLearningModel(cfg.model, trainer=trainer)
trainer.fit(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/tuning/megatron_t5_adapter_tuning.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.multiprocessing as mp
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from nemo.collections.nlp.models.language_modeling.megatron_t5_adapter_model import MegatronT5LoraModel
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
mp.set_start_method("spawn", force=True)
"""
This is the script to train a LoRA infused T5 Model for text generation.
A base T5 Model is required as a starting point. This script will then insert
LoRA adapters into the Transformer layers and will train/update only these
weights during training. The base T5 Model weights will remain frozen.
During training this script will only save the newly trained LoRA weights
in checkpoints. At the end of training a .nemo file of LoRA weights will
be saved.
Usage:
Assuming the base model is a T5 Model with TP=1, PP=1:
a. run a training run for a base T5 nemo file:
python megatron_t5_lora_tuning.py \
    "model.data.train_ds=[PATH TO TRAINING JSONL FILE]",
    "model.data.validation_ds=[PATH TO VALIDATION JSONL FILE]",
    model.language_model_path="PATH TO BASE T5 MODEL .nemo FILE"
name="NAME OF TRAINING RUN"
exp_manager.exp_dir="DIR TO SAVE CHECKPOINTS and .nemo FILE",
trainer.max_epochs=2
"""
@hydra_runner(config_path="conf", config_name="megatron_t5_lora_tuning_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
with_distributed_adam = cfg.model.optim.get('name') == 'distributed_fused_adam'
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True, # we don't use DDP for async grad allreduce
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2 and not with_distributed_adam:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[CustomProgressBar()])
exp_manager(trainer, cfg.exp_manager)
    # load an existing LoRA model or initialize a new one on top of the frozen base T5 model
if cfg.model.get("restore_path", None):
model = MegatronT5LoraModel.restore_from(
cfg.model.restore_path, cfg.model, trainer=trainer, save_restore_connector=NLPSaveRestoreConnector()
)
else:
model = MegatronT5LoraModel(cfg.model, trainer=trainer)
trainer.fit(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/tuning/megatron_t5_lora_tuning.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.multiprocessing as mp
from megatron.core import parallel_state
from omegaconf import OmegaConf
from omegaconf.omegaconf import open_dict
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_adapter_model import MegatronGPTAdapterLearningModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy, NLPSaveRestoreConnector
from nemo.core.config import hydra_runner
from nemo.utils import logging
mp.set_start_method("spawn", force=True)
"""
This is the script to run an Adapter Tuned GPT Model for text generation.
Usage:
Assume the model has TP=1, PP=1 in the following use cases.
a. run greedy inference using a base gpt nemo file, and an adapter nemo file:
python megatron_gpt_adapter_eval.py \
gpt_model_file=PATH TO GPT MODEL NEMO FILE \
adapter_model_file=PATH TO ADAPTER MODEL NEMO FILE (generated by training script: ./megatron_gpt_adapter_tuning.py) \
data_paths=[PATH TO A JSONL FILE CONTAINING PROMPTS], \
pred_file_path=PATH TO OUTPUT FILE TO DUMP PREDICTIONS
"""
if not torch.cuda.is_available():
raise EnvironmentError("GPU is needed for the inference")
@hydra_runner(config_path="conf", config_name="megatron_gpt_adapter_inference")
def main(cfg) -> None:
# trainer required for restoring model parallel models
trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer)
if (
cfg.tensor_model_parallel_size < 0
or cfg.pipeline_model_parallel_size < 0
or cfg.get('pipeline_model_parallel_split_rank', -1) < 0
):
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.gpt_model_file):
save_restore_connector.model_extracted_dir = cfg.gpt_model_file
model_config = MegatronGPTModel.restore_from(
restore_path=cfg.gpt_model_file,
trainer=trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
with open_dict(cfg):
cfg.tensor_model_parallel_size = model_config.get('tensor_model_parallel_size', 1)
cfg.pipeline_model_parallel_size = model_config.get('pipeline_model_parallel_size', 1)
cfg.pipeline_model_parallel_split_rank = model_config.get('pipeline_model_parallel_split_rank', 0)
# Load an adapter model, must be provided in config
if cfg.get("adapter_model_file", None) is not None:
# Update frozen GPT model path in case it has changed
adapter_tuning_cfg = MegatronGPTAdapterLearningModel.restore_from(
cfg.adapter_model_file, trainer=trainer, return_config=True
)
with open_dict(adapter_tuning_cfg):
adapter_tuning_cfg.language_model_path = cfg.gpt_model_file
# Now load prompt learning model with frozen gpt model base
model = MegatronGPTAdapterLearningModel.restore_from(
restore_path=cfg.adapter_model_file, trainer=trainer, override_config_path=adapter_tuning_cfg
)
# Or load regular GPT model
else:
raise NotImplementedError(
"This script is meant for inference from an Adapter Tuned GPT Model, for inference from a Megatron GPT model, refer to ../megatron_gpt_eval.py"
)
model.freeze()
# Have to turn off activations_checkpoint_method for inference
try:
model.model.language_model.encoder.activations_checkpoint_method = None
except AttributeError:
pass
try:
model.frozen_model.model.language_model.encoder.activations_checkpoint_method = None
except AttributeError:
pass
max_input_length = model.frozen_model.cfg.encoder_seq_length - cfg.inference.tokens_to_generate
# check whether the DDP is initialized
if parallel_state.is_unitialized():
def dummy():
return
if trainer.strategy.launcher is not None:
trainer.strategy.launcher.launch(dummy, trainer=trainer)
trainer.strategy.setup_environment()
_, dataloader = model.build_virtual_prompt_dataset(
data=cfg.data_paths,
batch_size=cfg.get("batch_size", 1),
max_seq_length=max_input_length,
min_seq_length=model.cfg.data.get('min_seq_length', 1),
add_bos=cfg.inference.add_BOS,
add_eos=False,
for_train=False,
tokens_to_generate=cfg.inference.tokens_to_generate,
drop_last=False,
shuffle=False,
num_workers=cfg.get("num_workers", 1),
)
config = OmegaConf.to_container(cfg.inference)
model.set_inference_config(config)
response = trainer.predict(model, dataloader)
print("***************************")
if cfg.pred_file_path is not None:
with open(cfg.pred_file_path, "w", encoding="utf-8") as f:
for batch in response:
for sentence in batch['sentences']:
s = ' '.join(sentence.split('\n'))
f.write(s + "\n")
print("predictions saved to {}".format(cfg.pred_file_path))
else:
print(response)
print("***************************")
if __name__ == '__main__':
dep_msg = "* Please switch to using examples/nlp/language_modeling/tuning/megatron_gpt_peft_eval.py *"
dep = "Deprecation Notice!!".center(len(dep_msg) - 2, " ")
banner = "*" * len(dep_msg)
spacer = " " * (len(dep_msg) - 2)
logging.warning(f"\n\n{banner}\n*{spacer}*\n*{dep}*\n{dep_msg}\n*{spacer}*\n{banner}\n\n")
main() # noqa pylint: disable=no-value-for-parameter
logging.warning(f"\n\n{banner}\n*{spacer}*\n*{dep}*\n{dep_msg}\n*{spacer}*\n{banner}\n\n")
| NeMo-main | examples/nlp/language_modeling/tuning/megatron_gpt_adapter_eval.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import torch.multiprocessing as mp
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from pytorch_lightning.trainer.connectors.checkpoint_connector import _CheckpointConnector
from torch.utils.data import DataLoader, Dataset
from nemo.collections.nlp.models.language_modeling.megatron_gpt_peft_models import (
MegatronGPTAdapterModel,
MegatronGPTAdapterModelWeightTying,
MegatronGPTAdapterPTuningModel,
MegatronGPTIA3Model,
MegatronGPTLoRAModel,
MegatronGPTLoRAModelWeightTying,
MegatronGPTPTuningModel,
)
from nemo.collections.nlp.models.language_modeling.megatron_gpt_sft_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PEFTSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import AppState, logging
from nemo.utils.exp_manager import exp_manager
from nemo.utils.model_utils import inject_model_parallel_rank
mp.set_start_method("spawn", force=True)
"""
This is the script to train a PEFT (adapter, IA3, p-tuning, or LoRA) variant of a GPT Model
for text generation. A base GPT Model is required as a starting point. This script will then
insert the chosen PEFT modules into the Transformer layers and will train/update only these
parameters during training. The base GPT Model weights will remain frozen.
During training this script will only save the newly trained PEFT weights
in checkpoints. At the end of training a .nemo file of PEFT weights will
be saved.
Usage:
Assuming the base model is a 125m GPT Model, with TP=1, PP=1:
a. run a training run for a base gpt nemo file:
python megatron_gpt_peft_tuning.py \
    "model.data.train_ds=[PATH TO TRAINING JSONL FILE]",
    "model.data.validation_ds=[PATH TO VALIDATION JSONL FILE]",
    model.restore_from_path="PATH TO BASE GPT MODEL .nemo FILE"
name="NAME OF TRAINING RUN"
exp_manager.exp_dir="DIR TO SAVE CHECKPOINTS and .nemo FILE",
trainer.max_epochs=2
"""
def _modify_config(gpt_cfg, cfg, add_cfg_to_tree=False):
"""
This function modifies the original gpt pre-training config (gpt_cfg) with attributes from the finetuning config (cfg).
The `add_cfg_to_tree` arg adds `cfg` to the top of the yaml tree which is needed for all `hparams.yaml` files when passed as an arg to `load_from_checkpoint()`.
"""
OmegaConf.set_struct(gpt_cfg, True)
OmegaConf.resolve(cfg)
with open_dict(gpt_cfg):
gpt_cfg.megatron_amp_O2 = cfg.model.get('megatron_amp_O2', False)
gpt_cfg.micro_batch_size = cfg.model.data.train_ds.micro_batch_size
gpt_cfg.global_batch_size = cfg.model.data.train_ds.global_batch_size
gpt_cfg.sequence_parallel = cfg.model.get("sequence_parallel", False)
gpt_cfg.activations_checkpoint_granularity = cfg.model.get("activations_checkpoint_granularity", None)
gpt_cfg.activations_checkpoint_num_layers = cfg.model.get("activations_checkpoint_num_layers", None)
gpt_cfg.activations_checkpoint_method = cfg.model.get("activations_checkpoint_method", None)
gpt_cfg.activations_checkpoint_layers_per_pipeline = cfg.model.get(
"activations_checkpoint_layers_per_pipeline", None
)
gpt_cfg.data = cfg.model.data
gpt_cfg.optim = cfg.model.optim
gpt_cfg.precision = cfg.trainer.precision
gpt_cfg.answer_only_loss = cfg.model.answer_only_loss
gpt_cfg.restore_from_path = cfg.model.restore_from_path
gpt_cfg.resume_from_checkpoint = cfg.model.resume_from_checkpoint
gpt_cfg.save_nemo_on_validation_end = cfg.model.save_nemo_on_validation_end
gpt_cfg.gradient_as_bucket_view = cfg.model.gradient_as_bucket_view
gpt_cfg.hidden_dropout = cfg.model.get('hidden_dropout', 0.0)
gpt_cfg.attention_dropout = cfg.model.get('attention_dropout', 0.0)
gpt_cfg.ffn_dropout = cfg.model.ffn_dropout
gpt_cfg.peft = cfg.model.peft
peft_cls = _get_peft_scheme(cfg.model)
gpt_cfg.target = f"{peft_cls.__module__}.{peft_cls.__name__}"
# This is needed when modifying a hparam file directly to load `.ckpt` files.
# This is not needed to modify the cfg in `.nemo` files.
if add_cfg_to_tree:
OmegaConf.resolve(gpt_cfg)
gpt_cfg.cfg = gpt_cfg
return gpt_cfg
def _get_peft_scheme(cfg):
if cfg.peft.peft_scheme == "adapter":
if cfg.peft.adapter_tuning.weight_tying:
peft_cls = MegatronGPTAdapterModelWeightTying
else:
peft_cls = MegatronGPTAdapterModel
elif cfg.peft.peft_scheme == "ia3":
peft_cls = MegatronGPTIA3Model
elif cfg.peft.peft_scheme == "ptuning":
peft_cls = MegatronGPTPTuningModel
elif cfg.peft.peft_scheme == "adapter_and_ptuning":
peft_cls = MegatronGPTAdapterPTuningModel
elif cfg.peft.peft_scheme == "lora":
if cfg.peft.lora_tuning.weight_tying:
peft_cls = MegatronGPTLoRAModelWeightTying
else:
peft_cls = MegatronGPTLoRAModel
else:
raise RuntimeError("Invalid Peft scheme")
return peft_cls
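def _peft_scheme_demo():
    # Hedged, illustrative helper (not part of the original script): shows how a minimal,
    # hypothetical config fragment resolves to a concrete PEFT class via _get_peft_scheme.
    demo_cfg = OmegaConf.create({"peft": {"peft_scheme": "lora", "lora_tuning": {"weight_tying": False}}})
    return _get_peft_scheme(demo_cfg)  # -> MegatronGPTLoRAModel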
def load_from_checkpoint_dir(cls, cfg, trainer, modify_confg_fn):
app_state = AppState()
if cfg.model.tensor_model_parallel_size > 1 or cfg.model.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = cfg.model.tensor_model_parallel_size * cfg.model.pipeline_model_parallel_size
app_state.tensor_model_parallel_size = cfg.model.tensor_model_parallel_size
app_state.pipeline_model_parallel_size = cfg.model.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=cfg.model.tensor_model_parallel_size,
pipeline_model_parallel_size_=cfg.model.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=cfg.model.pipeline_model_parallel_split_rank,
)
checkpoint_path = inject_model_parallel_rank(
os.path.join(cfg.model.pretrained_checkpoint.checkpoint_dir, cfg.model.pretrained_checkpoint.checkpoint_name)
)
hparams_file = OmegaConf.load(cfg.model.pretrained_checkpoint.hparams_file)
gpt_cfg = modify_confg_fn(hparams_file.cfg, cfg, add_cfg_to_tree=True)
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
OmegaConf.save(config=gpt_cfg, f=f.name)
model = cls.load_from_checkpoint(checkpoint_path=checkpoint_path, trainer=trainer, hparams_file=f.name,)
return model
def validate_checkpoint_loading_args(cfg):
if cfg.checkpoint_dir is None or not os.path.isdir(cfg.checkpoint_dir):
raise ValueError(f'Checkpoint directory {cfg.checkpoint_dir} does not exist or is not a directory.')
if cfg.checkpoint_name is None:
raise ValueError(f'Checkpoint name {cfg.checkpoint_name} is not valid.')
if cfg.hparams_file is None or not os.path.isfile(cfg.hparams_file):
raise ValueError(f'Hparams file {cfg.hparams_file} does not exist or is not a file.')
@hydra_runner(config_path="conf", config_name="megatron_gpt_peft_tuning_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
with_distributed_adam = cfg.model.optim.get('name') == 'distributed_fused_adam'
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True, # we don't use DDP for async grad allreduce
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
enabled=False
if cfg.model.pipeline_model_parallel_size > 1
else True, # turn off the grad scale for pipeline parallel LM model
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2 and not with_distributed_adam:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[CustomProgressBar()])
exp_manager(trainer, cfg.exp_manager)
# update resume from checkpoint found by exp_manager
if cfg.model.resume_from_checkpoint is not None:
trainer.ckpt_path = cfg.model.resume_from_checkpoint
logging.info(f'Resuming training from checkpoint: {trainer.ckpt_path}')
if cfg.model.restore_from_path:
base_model_save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.model.restore_from_path):
base_model_save_restore_connector.model_extracted_dir = cfg.model.restore_from_path
base_model_cfg = MegatronGPTModel.restore_from(
restore_path=cfg.model.restore_from_path,
trainer=trainer,
return_config=True,
save_restore_connector=base_model_save_restore_connector,
)
base_model_cfg = _modify_config(base_model_cfg, cfg, add_cfg_to_tree=False)
save_restore_connector = PEFTSaveRestoreConnector(
peft_model_nemo_path=cfg.model.peft.restore_from_path, peft_model_ckpt_path=trainer.ckpt_path
)
if os.path.isdir(cfg.model.restore_from_path):
save_restore_connector.model_extracted_dir = cfg.model.restore_from_path
peft_cls = _get_peft_scheme(cfg.model)
model = peft_cls.restore_from(
restore_path=cfg.model.restore_from_path,
trainer=trainer,
override_config_path=base_model_cfg,
save_restore_connector=save_restore_connector,
)
else:
raise RuntimeError("PEFT training needs a trained base model present.")
trainer.fit(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/tuning/megatron_gpt_peft_tuning.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from pytorch_lightning.trainer.connectors.checkpoint_connector import _CheckpointConnector
from nemo.collections.nlp.models.language_modeling.megatron_gpt_sft_model import MegatronGPTSFTModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import AppState, logging
from nemo.utils.exp_manager import exp_manager
from nemo.utils.model_utils import inject_model_parallel_rank
def _modify_config(gpt_cfg, cfg, add_cfg_to_tree=False):
"""
This function modifies the original gpt pre-training config (gpt_cfg) with attributes from the finetuning config (cfg).
The `add_cfg_to_tree` arg adds `cfg` to the top of the yaml tree which is needed for all `hparams.yaml` files when passed as an arg to `load_from_checkpoint()`.
"""
OmegaConf.set_struct(gpt_cfg, True)
OmegaConf.resolve(cfg)
with open_dict(gpt_cfg):
gpt_cfg.megatron_amp_O2 = cfg.model.get('megatron_amp_O2', False)
gpt_cfg.micro_batch_size = cfg.model.data.train_ds.micro_batch_size
gpt_cfg.global_batch_size = cfg.model.data.train_ds.global_batch_size
gpt_cfg.sequence_parallel = cfg.model.get("sequence_parallel", False)
gpt_cfg.activations_checkpoint_granularity = cfg.model.get("activations_checkpoint_granularity", None)
gpt_cfg.activations_checkpoint_num_layers = cfg.model.get("activations_checkpoint_num_layers", None)
gpt_cfg.activations_checkpoint_method = cfg.model.get("activations_checkpoint_method", None)
gpt_cfg.activations_checkpoint_layers_per_pipeline = cfg.model.get(
"activations_checkpoint_layers_per_pipeline", None
)
gpt_cfg.data = cfg.model.data
gpt_cfg.optim = cfg.model.optim
gpt_cfg.precision = cfg.trainer.precision
gpt_cfg.answer_only_loss = cfg.model.answer_only_loss
gpt_cfg.restore_from_path = cfg.model.restore_from_path
gpt_cfg.resume_from_checkpoint = cfg.model.resume_from_checkpoint
gpt_cfg.save_nemo_on_validation_end = cfg.model.save_nemo_on_validation_end
gpt_cfg.gradient_as_bucket_view = cfg.model.gradient_as_bucket_view
gpt_cfg.hidden_dropout = cfg.model.get('hidden_dropout', 0.0)
gpt_cfg.attention_dropout = cfg.model.get('attention_dropout', 0.0)
gpt_cfg.ffn_dropout = cfg.model.ffn_dropout
gpt_cfg.use_flash_attention = cfg.model.get('use_flash_attention', False)
sft_cls = MegatronGPTSFTModel
gpt_cfg.target = f"{sft_cls.__module__}.{sft_cls.__name__}"
if cfg.model.get('use_flash_attention', None) is not None:
gpt_cfg.use_flash_attention = cfg.model.use_flash_attention
if cfg.model.get('seq_len_interpolation_factor', None) is not None:
gpt_cfg.seq_len_interpolation_factor = cfg.model.seq_len_interpolation_factor
# This is needed when modifying a hparam file directly to load `.ckpt` files.
# This is not needed to modify the cfg in `.nemo` files.
if add_cfg_to_tree:
OmegaConf.resolve(gpt_cfg)
gpt_cfg.cfg = gpt_cfg
return gpt_cfg
def load_from_nemo(cls, cfg, trainer, gpt_cfg, modify_confg_fn):
gpt_cfg = modify_confg_fn(gpt_cfg, cfg, add_cfg_to_tree=False)
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.model.restore_from_path):
save_restore_connector.model_extracted_dir = cfg.model.restore_from_path
model = cls.restore_from(
restore_path=cfg.model.restore_from_path,
trainer=trainer,
override_config_path=gpt_cfg,
save_restore_connector=save_restore_connector,
)
return model
def load_from_checkpoint_dir(cls, cfg, trainer, modify_confg_fn):
app_state = AppState()
if cfg.model.tensor_model_parallel_size > 1 or cfg.model.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = cfg.model.tensor_model_parallel_size * cfg.model.pipeline_model_parallel_size
app_state.tensor_model_parallel_size = cfg.model.tensor_model_parallel_size
app_state.pipeline_model_parallel_size = cfg.model.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=cfg.model.tensor_model_parallel_size,
pipeline_model_parallel_size_=cfg.model.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=cfg.model.pipeline_model_parallel_split_rank,
)
checkpoint_path = inject_model_parallel_rank(
os.path.join(cfg.model.pretrained_checkpoint.checkpoint_dir, cfg.model.pretrained_checkpoint.checkpoint_name)
)
hparams_file = OmegaConf.load(cfg.model.pretrained_checkpoint.hparams_file)
gpt_cfg = modify_confg_fn(hparams_file.cfg, cfg, add_cfg_to_tree=True)
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
OmegaConf.save(config=gpt_cfg, f=f.name)
model = cls.load_from_checkpoint(checkpoint_path=checkpoint_path, trainer=trainer, hparams_file=f.name,)
return model
def validate_checkpoint_loading_args(cfg):
if cfg.checkpoint_dir is None or not os.path.isdir(cfg.checkpoint_dir):
raise ValueError(f'Checkpoint directory {cfg.checkpoint_dir} does not exist or is not a directory.')
if cfg.checkpoint_name is None:
raise ValueError(f'Checkpoint name {cfg.checkpoint_name} is not valid.')
if cfg.hparams_file is None or not os.path.isfile(cfg.hparams_file):
raise ValueError(f'Hparams file {cfg.hparams_file} does not exist or is not a file.')
@hydra_runner(config_path="conf", config_name="megatron_gpt_sft")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
with_distributed_adam = cfg.model.optim.get('name', 'fused_adam') == 'distributed_fused_adam'
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True,
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2 and not with_distributed_adam:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[CustomProgressBar()])
exp_manager(trainer, cfg.exp_manager)
# update resume from checkpoint found by exp_manager
if cfg.model.resume_from_checkpoint is not None:
trainer.ckpt_path = cfg.model.resume_from_checkpoint
logging.info(f'Resuming training from checkpoint: {trainer.ckpt_path}')
if cfg.model.restore_from_path:
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.model.restore_from_path):
save_restore_connector.model_extracted_dir = cfg.model.restore_from_path
gpt_cfg = MegatronGPTSFTModel.restore_from(
restore_path=cfg.model.restore_from_path,
trainer=trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
gpt_cfg = _modify_config(gpt_cfg, cfg, add_cfg_to_tree=False)
model = load_from_nemo(MegatronGPTSFTModel, cfg, trainer, gpt_cfg, modify_confg_fn=_modify_config)
else:
validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint)
model = load_from_checkpoint_dir(MegatronGPTSFTModel, cfg, trainer, modify_confg_fn=_modify_config)
if 'inference' in cfg:
if not cfg.model.use_flash_attention:
cfg.inference.compute_attention_mask = True
config = OmegaConf.to_container(cfg.inference, resolve=True)
model.set_inference_config(config)
trainer.fit(model)
if __name__ == '__main__':
main()
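# --- Hedged usage sketch (not part of the original script; paths are hypothetical) ---
#   python megatron_gpt_sft.py \
#       model.restore_from_path=/models/megatron_gpt.nemo \
#       trainer.devices=2 \
#       trainer.max_epochs=1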
| NeMo-main | examples/nlp/language_modeling/tuning/megatron_gpt_sft.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.multiprocessing as mp
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from nemo.collections.nlp.models.language_modeling.megatron_gpt_adapter_model import MegatronGPTInfusedAdapterModel
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
mp.set_start_method("spawn", force=True)
"""
This is the script to train an IA3 infused GPT Model for text generation.
A base GPT Model is required as a starting point. This script will then insert
IA3 adapters into each Transformer layer and will train/update only these adapters
during training. The base GPT Model weights will remain frozen.
During training this script will only save the newly trained IA3 weights
in checkpoints. At the end of training a .nemo file of IA3 weights will
be saved.
Usage:
Assuming the base model is a 125m GPT Model, with TP=1, PP=1:
a. run a training run for a base gpt nemo file:
python megatron_gpt_ia3_tuning.py \
"model.data.train_ds=[PATH TO TRAINING JSONL FILE]",
"model.data.validation_ds=[PATH TO VALIDATION JSONL FILE]",
model.language_model_path="PATH TO BASE GPT MODEL .nemo FILE"
name="NAME OF TRAINING RUN"
exp_manager.exp_dir="DIR TO SAVE CHECKPOINTS and .nemo FILE",
trainer.max_epochs=2
"""
@hydra_runner(config_path="conf", config_name="megatron_gpt_ia3_tuning_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
with_distributed_adam = cfg.model.optim.get('name') == 'distributed_fused_adam'
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True, # we don't use DDP for async grad allreduce
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2 and not with_distributed_adam:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[CustomProgressBar()])
exp_manager(trainer, cfg.exp_manager)
    # load an existing IA3 model or initialize a new one on top of the frozen base GPT model
if cfg.model.get("restore_path", None):
model = MegatronGPTInfusedAdapterModel.restore_from(
cfg.model.restore_path, cfg.model, trainer=trainer, save_restore_connector=NLPSaveRestoreConnector()
)
else:
model = MegatronGPTInfusedAdapterModel(cfg.model, trainer=trainer)
trainer.fit(model)
if __name__ == '__main__':
dep_msg = "* Please switch to using examples/nlp/language_modeling/tuning/megatron_gpt_peft_tuning.py *"
dep = "Deprecation Notice!!".center(len(dep_msg) - 2, " ")
banner = "*" * len(dep_msg)
spacer = " " * (len(dep_msg) - 2)
logging.warning(f"\n\n{banner}\n*{spacer}*\n*{dep}*\n{dep_msg}\n*{spacer}*\n{banner}\n\n")
main()
logging.warning(f"\n\n{banner}\n*{spacer}*\n*{dep}*\n{dep_msg}\n*{spacer}*\n{banner}\n\n")
| NeMo-main | examples/nlp/language_modeling/tuning/megatron_gpt_ia3_tuning.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.multiprocessing as mp
from megatron.core import parallel_state
from omegaconf import OmegaConf
from omegaconf.omegaconf import open_dict
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_t5_adapter_model import MegatronT5AdapterLearningModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from nemo.core.config import hydra_runner
from nemo.utils.app_state import AppState
mp.set_start_method("spawn", force=True)
"""
This is the script to run inference with an Adapter Tuned T5 Model.
Usage:
Assume the model has TP=1, PP=1 in the following use cases.
a. run greedy inference using a base T5 .nemo file and an adapter .nemo file:
python megatron_t5_adapter_eval.py \
    language_model_path=PATH TO BASE T5 MODEL NEMO FILE \
    adapter_model_file=PATH TO ADAPTER MODEL NEMO FILE (generated by the corresponding adapter tuning script) \
    data.test_ds=[PATH TO A JSONL FILE CONTAINING PROMPTS] \
    pred_file_path=PATH TO OUTPUT FILE TO DUMP PREDICTIONS
"""
if not torch.cuda.is_available():
raise EnvironmentError("GPU is needed for the inference")
@hydra_runner(config_path="conf", config_name="megatron_t5_adapter_inference")
def main(cfg) -> None:
# trainer required for restoring model parallel models
trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer)
if (
cfg.tensor_model_parallel_size < 0
or cfg.pipeline_model_parallel_size < 0
or cfg.get('pipeline_model_parallel_split_rank', -1) < 0
):
model_config = MegatronT5AdapterLearningModel.restore_from(
restore_path=cfg.language_model_path, trainer=trainer, return_config=True,
)
with open_dict(cfg):
cfg.tensor_model_parallel_size = model_config.get('tensor_model_parallel_size', 1)
cfg.pipeline_model_parallel_size = model_config.get('pipeline_model_parallel_size', 1)
cfg.pipeline_model_parallel_split_rank = model_config.get('pipeline_model_parallel_split_rank', 0)
app_state = AppState()
if cfg.tensor_model_parallel_size > 1 or cfg.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=cfg.tensor_model_parallel_size,
pipeline_model_parallel_size_=cfg.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=cfg.pipeline_model_parallel_split_rank,
)
# Load an adapter model, must be provided in config
if cfg.get("adapter_model_file", None) is not None and cfg.get("language_model_path", None) is not None:
        # Update the frozen T5 model path in case it has changed
adapter_tuning_cfg = MegatronT5AdapterLearningModel.restore_from(
cfg.adapter_model_file, trainer=trainer, return_config=True
)
with open_dict(adapter_tuning_cfg):
adapter_tuning_cfg.language_model_path = cfg.language_model_path
adapter_tuning_cfg.pretrained_language_model_path = cfg.language_model_path
adapter_tuning_cfg.micro_batch_size = cfg.data.micro_batch_size
adapter_tuning_cfg.global_batch_size = cfg.data.global_batch_size
        # Now load the adapter model with the frozen T5 base
model = MegatronT5AdapterLearningModel.restore_from(
restore_path=cfg.adapter_model_file, trainer=trainer, override_config_path=adapter_tuning_cfg
)
    # Otherwise, both adapter_model_file and language_model_path must be provided
else:
raise NotImplementedError(
"This script is meant for inference from an Infused Adapter Tuned T5 Model, config should contain an adapter_model_file and a language_model_path"
)
# check whether the DDP is initialized
if parallel_state.is_unitialized():
def dummy():
return
if trainer.strategy.launcher is not None:
trainer.strategy.launcher.launch(dummy, trainer=trainer)
trainer.strategy.setup_environment()
model.freeze()
# Have to turn off activations_checkpoint_method for inference
try:
model.model.language_model.encoder.activations_checkpoint_method = None
except AttributeError:
pass
try:
model.frozen_model.model.language_model.encoder.activations_checkpoint_method = None
except AttributeError:
pass
test_ds, test_dl = model.build_virtual_prompt_dataset(
dataset_paths=cfg.data.test_ds,
batch_size=cfg.data.global_batch_size,
for_train=False,
drop_last=False,
shuffle=False,
num_workers=cfg.data.num_workers,
pin_memory=True,
)
config = OmegaConf.to_container(cfg.inference)
model.set_inference_config(config)
response = trainer.predict(model, test_dl)
print("***************************")
if cfg.pred_file_path is not None:
with open(cfg.pred_file_path, "w", encoding="utf-8") as f:
for batch in response:
for inp, pred in zip(batch['input_text'], batch['preds_text']):
inp = ' '.join(inp.split('\n'))
pred = ' '.join(pred.split('\n'))
f.write(f'{inp} {pred}\n')
print("predictions saved to {}".format(cfg.pred_file_path))
else:
print(response)
print("***************************")
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/nlp/language_modeling/tuning/megatron_t5_adapter_eval.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.multiprocessing as mp
from megatron.core import parallel_state
from omegaconf import OmegaConf
from omegaconf.omegaconf import open_dict
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_t5_adapter_model import MegatronT5InfusedAdapterModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from nemo.core.config import hydra_runner
from nemo.utils.app_state import AppState
mp.set_start_method("spawn", force=True)
"""
This is the script to run inference with an IA3 (Infused Adapter) Tuned T5 Model.
Usage:
Assume the model has TP=1, PP=1 in the following use cases.
a. run greedy inference using a base T5 .nemo file and an IA3 adapter .nemo file:
python megatron_t5_ia3_eval.py \
    language_model_path=PATH TO BASE T5 MODEL NEMO FILE \
    adapter_model_file=PATH TO IA3 ADAPTER MODEL NEMO FILE (generated by training script: ./megatron_t5_ia3_tuning.py) \
    data.test_ds=[PATH TO A JSONL FILE CONTAINING PROMPTS] \
    pred_file_path=PATH TO OUTPUT FILE TO DUMP PREDICTIONS
"""
if not torch.cuda.is_available():
raise EnvironmentError("GPU is needed for the inference")
@hydra_runner(config_path="conf", config_name="megatron_t5_ia3_inference")
def main(cfg) -> None:
# trainer required for restoring model parallel models
trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer)
if (
cfg.tensor_model_parallel_size < 0
or cfg.pipeline_model_parallel_size < 0
or cfg.get('pipeline_model_parallel_split_rank', -1) < 0
):
model_config = MegatronT5InfusedAdapterModel.restore_from(
restore_path=cfg.language_model_path, trainer=trainer, return_config=True,
)
with open_dict(cfg):
cfg.tensor_model_parallel_size = model_config.get('tensor_model_parallel_size', 1)
cfg.pipeline_model_parallel_size = model_config.get('pipeline_model_parallel_size', 1)
cfg.pipeline_model_parallel_split_rank = model_config.get('pipeline_model_parallel_split_rank', 0)
app_state = AppState()
if cfg.tensor_model_parallel_size > 1 or cfg.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=cfg.tensor_model_parallel_size,
pipeline_model_parallel_size_=cfg.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=cfg.pipeline_model_parallel_split_rank,
)
# Load an adapter model, must be provided in config
if cfg.get("adapter_model_file", None) is not None and cfg.get("language_model_path", None) is not None:
        # Update the frozen T5 model path in case it has changed
ia3_tuning_cfg = MegatronT5InfusedAdapterModel.restore_from(
cfg.adapter_model_file, trainer=trainer, return_config=True
)
with open_dict(ia3_tuning_cfg):
ia3_tuning_cfg.language_model_path = cfg.language_model_path
ia3_tuning_cfg.pretrained_language_model_path = cfg.language_model_path
ia3_tuning_cfg.micro_batch_size = cfg.data.micro_batch_size
ia3_tuning_cfg.global_batch_size = cfg.data.global_batch_size
        # Now load the IA3 adapter model with the frozen T5 base
model = MegatronT5InfusedAdapterModel.restore_from(
restore_path=cfg.adapter_model_file, trainer=trainer, override_config_path=ia3_tuning_cfg
)
    # Otherwise, both adapter_model_file and language_model_path must be provided
else:
raise NotImplementedError(
"This script is meant for inference from an Infused Adapter Tuned T5 Model, config should contain an adapter_model_file and a language_model_path"
)
# check whether the DDP is initialized
if parallel_state.is_unitialized():
def dummy():
return
if trainer.strategy.launcher is not None:
trainer.strategy.launcher.launch(dummy, trainer=trainer)
trainer.strategy.setup_environment()
model.freeze()
# Have to turn off activations_checkpoint_method for inference
try:
model.model.language_model.encoder.activations_checkpoint_method = None
except AttributeError:
pass
try:
model.frozen_model.model.language_model.encoder.activations_checkpoint_method = None
except AttributeError:
pass
test_ds, test_dl = model.build_virtual_prompt_dataset(
dataset_paths=cfg.data.test_ds,
batch_size=cfg.data.global_batch_size,
for_train=False,
drop_last=False,
shuffle=False,
num_workers=cfg.data.num_workers,
pin_memory=True,
)
config = OmegaConf.to_container(cfg.inference)
model.set_inference_config(config)
response = trainer.predict(model, test_dl)
print("***************************")
if cfg.pred_file_path is not None:
with open(cfg.pred_file_path, "w", encoding="utf-8") as f:
for batch in response:
for inp, pred in zip(batch['input_text'], batch['preds_text']):
inp = ' '.join(inp.split('\n'))
pred = ' '.join(pred.split('\n'))
f.write(f'{inp} {pred}\n')
print("predictions saved to {}".format(cfg.pred_file_path))
else:
print(response)
print("***************************")
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/nlp/language_modeling/tuning/megatron_t5_ia3_eval.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.multiprocessing as mp
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from nemo.collections.nlp.models.language_modeling.megatron_t5_adapter_model import MegatronT5InfusedAdapterModel
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
mp.set_start_method("spawn", force=True)
"""
This is the script to train an IA3 (Infused Adapter) tuned T5 Model for text generation.
A base T5 Model is required as a starting point. This script will then insert
IA3 adapters into each Transformer layer and will train/update only these adapters
during training. The base T5 Model weights will remain frozen.
During training this script will only save the newly trained adapter weights
in checkpoints. At the end of training a .nemo file of adapter weights will
be saved.
Usage:
Assuming the base model is a T5 Model, with TP=1, PP=1:
a. run IA3 tuning on top of the base T5 .nemo file:
python megatron_t5_ia3_tuning.py \
    "model.data.train_ds=[PATH TO TRAINING JSONL FILE]" \
    "model.data.validation_ds=[PATH TO VALIDATION JSONL FILE]" \
    model.language_model_path="PATH TO BASE T5 MODEL .nemo FILE" \
    name="NAME OF TRAINING RUN" \
    exp_manager.exp_dir="DIR TO SAVE CHECKPOINTS and .nemo FILE" \
    trainer.max_epochs=2
"""
@hydra_runner(config_path="conf", config_name="megatron_t5_ia3_tuning_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
with_distributed_adam = cfg.model.optim.get('name') == 'distributed_fused_adam'
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True, # we don't use DDP for async grad allreduce
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2 and not with_distributed_adam:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[CustomProgressBar()])
exp_manager(trainer, cfg.exp_manager)
# hydra interpolation does not work here as the interpolation key is lost when PTL saves hparams
with open_dict(cfg):
cfg.model.pretrained_language_model_path = cfg.model.language_model_path
    # load an existing IA3 adapter checkpoint or initialize a new IA3-infused T5 model
if cfg.model.get("restore_path", None):
model = MegatronT5InfusedAdapterModel.restore_from(
cfg.model.restore_path, cfg.model, trainer=trainer, save_restore_connector=NLPSaveRestoreConnector()
)
else:
model = MegatronT5InfusedAdapterModel(cfg.model, trainer=trainer)
trainer.fit(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/language_modeling/tuning/megatron_t5_ia3_tuning.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.multiprocessing as mp
from megatron.core import parallel_state
from omegaconf import OmegaConf
from omegaconf.omegaconf import open_dict
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_t5_adapter_model import MegatronT5LoraModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from nemo.core.config import hydra_runner
from nemo.utils.app_state import AppState
mp.set_start_method("spawn", force=True)
"""
This is the script to run inference with a LoRA Tuned T5 Model.
Usage:
Assume the model has TP=1, PP=1 in the following use cases.
a. run greedy inference using a base T5 .nemo file and a LoRA .nemo file:
python megatron_t5_lora_eval.py \
    language_model_path=PATH TO BASE T5 MODEL NEMO FILE \
    adapter_model_file=PATH TO LORA MODEL NEMO FILE (generated by the corresponding LoRA tuning script) \
    data.test_ds=[PATH TO A JSONL FILE CONTAINING PROMPTS] \
    pred_file_path=PATH TO OUTPUT FILE TO DUMP PREDICTIONS
"""
if not torch.cuda.is_available():
raise EnvironmentError("GPU is needed for the inference")
@hydra_runner(config_path="conf", config_name="megatron_t5_adapter_inference")
def main(cfg) -> None:
# trainer required for restoring model parallel models
trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer)
if (
cfg.tensor_model_parallel_size < 0
or cfg.pipeline_model_parallel_size < 0
or cfg.get('pipeline_model_parallel_split_rank', -1) < 0
):
model_config = MegatronT5LoraModel.restore_from(
restore_path=cfg.language_model_path, trainer=trainer, return_config=True,
)
with open_dict(cfg):
cfg.tensor_model_parallel_size = model_config.get('tensor_model_parallel_size', 1)
cfg.pipeline_model_parallel_size = model_config.get('pipeline_model_parallel_size', 1)
cfg.pipeline_model_parallel_split_rank = model_config.get('pipeline_model_parallel_split_rank', 0)
app_state = AppState()
if cfg.tensor_model_parallel_size > 1 or cfg.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=cfg.tensor_model_parallel_size,
pipeline_model_parallel_size_=cfg.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=cfg.pipeline_model_parallel_split_rank,
)
# Load an adapter model, must be provided in config
if cfg.get("adapter_model_file", None) is not None and cfg.get("language_model_path", None) is not None:
        # Update the frozen T5 model path in case it has changed
adapter_tuning_cfg = MegatronT5LoraModel.restore_from(
cfg.adapter_model_file, trainer=trainer, return_config=True
)
with open_dict(adapter_tuning_cfg):
adapter_tuning_cfg.language_model_path = cfg.language_model_path
adapter_tuning_cfg.pretrained_language_model_path = cfg.language_model_path
adapter_tuning_cfg.micro_batch_size = cfg.data.micro_batch_size
adapter_tuning_cfg.global_batch_size = cfg.data.global_batch_size
        # Now load the LoRA model with the frozen T5 base
model = MegatronT5LoraModel.restore_from(
restore_path=cfg.adapter_model_file, trainer=trainer, override_config_path=adapter_tuning_cfg
)
    # Otherwise, both adapter_model_file and language_model_path must be provided
else:
raise NotImplementedError(
"This script is meant for inference from an Infused Adapter Tuned T5 Model, config should contain an adapter_model_file and a language_model_path"
)
# check whether the DDP is initialized
if parallel_state.is_unitialized():
def dummy():
return
if trainer.strategy.launcher is not None:
trainer.strategy.launcher.launch(dummy, trainer=trainer)
trainer.strategy.setup_environment()
model.freeze()
# Have to turn off activations_checkpoint_method for inference
try:
model.model.language_model.encoder.activations_checkpoint_method = None
except AttributeError:
pass
try:
model.frozen_model.model.language_model.encoder.activations_checkpoint_method = None
except AttributeError:
pass
test_ds, test_dl = model.build_virtual_prompt_dataset(
dataset_paths=cfg.data.test_ds,
batch_size=cfg.data.global_batch_size,
for_train=False,
drop_last=False,
shuffle=False,
num_workers=cfg.data.num_workers,
pin_memory=True,
)
config = OmegaConf.to_container(cfg.inference)
model.set_inference_config(config)
response = trainer.predict(model, test_dl)
print("***************************")
if cfg.pred_file_path is not None:
with open(cfg.pred_file_path, "w", encoding="utf-8") as f:
for batch in response:
for inp, pred in zip(batch['input_text'], batch['preds_text']):
inp = ' '.join(inp.split('\n'))
pred = ' '.join(pred.split('\n'))
f.write(f'{inp} {pred}\n')
print("predictions saved to {}".format(cfg.pred_file_path))
else:
print(response)
print("***************************")
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/nlp/language_modeling/tuning/megatron_t5_lora_eval.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import json
import os
import threading
from functools import partial
import torch
import torch.multiprocessing as mp
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from torch.utils.data import DataLoader
from nemo.collections.nlp.models.language_modeling.megatron_gpt_peft_models import MegatronGPTPEFTModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_sft_model import MegatronGPTSFTModel
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common.text_generation_server import MegatronServer
from nemo.collections.nlp.modules.common.text_generation_utils import generate
from nemo.collections.nlp.parts.nlp_overrides import (
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PEFTSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
try:
    from megatron.core import parallel_state
    HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
    HAVE_MEGATRON_CORE = False
mp.set_start_method("spawn", force=True)
"""
This is the script to run inference with a PEFT model or an SFT Model.
If you want to evaluate an SFT .nemo file:
python examples/nlp/language_modeling/tuning/megatron_gpt_peft_eval.py \
model.restore_from_path=<path_to_sft_nemo_file> \
model.peft.restore_from_path=null \
trainer.devices=1 model.data.test_ds.file_names=\[<path_to_test_jsonl_file1>, <path_to_test_jsonl_file2>] \
model.data.test_ds.names=\['name_for_test_file1', 'name_for_test_file2'] \ # identifiers for the test files, not the filenames
model.data.test_ds.global_batch_size=4 \ # or some other value
model.data.test_ds.micro_batch_size=4 \
model.data.test_ds.tokens_to_generate=30 \
inference.greedy=True \
inference.outfile_path=\'<path_to_jsonl_output_file>'
If you want to evaluate a PEFT Model, you should provide a base GPT model and a PEFT model .nemo file
python examples/nlp/language_modeling/tuning/megatron_gpt_peft_eval.py \
model.restore_from_path=<path_to_sft_nemo_file> \
model.peft.restore_from_path=<path_to_peft_nemo_file> \ # this will be created if you use `megatron_gpt_peft_tuning.py`
trainer.devices=1 model.data.test_ds.file_names=\[<path_to_test_jsonl_file1>, <path_to_test_jsonl_file2>] \
model.data.test_ds.names=\['name_for_test_file1', 'name_for_test_file2'] \ # identifiers for the test files, not the filenames
model.data.test_ds.global_batch_size=4 \ # or some other value
model.data.test_ds.micro_batch_size=4 \
model.data.test_ds.tokens_to_generate=30 \
inference.greedy=True \
inference.outfile_path=\'<path_to_jsonl_output_file>'
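Note on data format: each file in model.data.test_ds.file_names is expected to be a JSONL file
with one example per line. The field names are dictated by the test_ds configuration (prompt
template and context/label keys), so the record below is illustrative only and assumes
input/output style keys:
    {"input": "What is the capital of France?", "output": "Paris"}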
"""
@hydra_runner(config_path="conf", config_name="megatron_gpt_peft_eval_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f"\n{OmegaConf.to_yaml(cfg)}")
assert cfg.model.restore_from_path is not None
megatron_amp_o2 = cfg.model.get("megatron_amp_O2", False)
with_distributed_adam = False
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True, # we don't use DDP for async grad allreduce
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get("native_amp_init_scale", 2 ** 32),
growth_interval=cfg.model.get("native_amp_growth_interval", 1000),
hysteresis=cfg.model.get("hysteresis", 2),
enabled=False
if cfg.model.pipeline_model_parallel_size > 1
else True, # turn off the grad scale for pipeline parallel LM model
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2 and not with_distributed_adam:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device="cuda", scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device="cuda", scaler=scaler))
if cfg.get("cluster_type", None) == "BCP":
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer)
if cfg.model.peft.restore_from_path:
if cfg.model.peft.restore_from_path.endswith(".nemo"):
peft_model_cfg = MegatronGPTPEFTModel.restore_from(
restore_path=cfg.model.peft.restore_from_path, trainer=trainer, return_config=True,
)
elif cfg.model.peft.restore_from_hparams_path: # not a .nemo model we expect a hparams.yaml file
peft_model_cfg = OmegaConf.to_container(OmegaConf.load(cfg.model.peft.restore_from_hparams_path).cfg)
peft_model_cfg = OmegaConf.create(peft_model_cfg)
# extract dict inside cfg key and convert it to DictConfig
# this allows interpolation to work the same way as config from the .restore_from method
else:
raise RuntimeError("This script requires a .nemo peft model or path to hparams.yaml (and a ckpt path).")
else:
peft_model_cfg = MegatronGPTSFTModel.restore_from(
restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True,
)
# hydra interpolation does not work here as the interpolation key is lost when PTL saves hparams
with open_dict(peft_model_cfg):
# update the model config of the trained model with params we want to set at inference time.
peft_model_cfg.precision = cfg.trainer.precision
peft_model_cfg.data.test_ds = cfg.model.data.test_ds
peft_model_cfg.activations_checkpoint_granularity = None
peft_model_cfg.activations_checkpoint_method = None
peft_model_cfg.activations_checkpoint_layers_per_pipeline = None
if peft_model_cfg.get("use_flash_attention", False):
peft_model_cfg.use_flash_attention = cfg.model.use_flash_attention
if cfg.model.get("seq_len_interpolation_factor", None) is not None:
peft_model_cfg["seq_len_interpolation_factor"] = cfg.model.seq_len_interpolation_factor
with open_dict(cfg):
# update the config with the trained model config
# required for hydra interpolation to work inside cfg.inference
cfg.inference.add_BOS = peft_model_cfg.data.test_ds.add_bos
cfg.inference.tokens_to_generate = peft_model_cfg.data.test_ds.tokens_to_generate
if cfg.model.peft.restore_from_path:
if cfg.model.peft.restore_from_path.endswith(".nemo"):
save_restore_connector = PEFTSaveRestoreConnector(
peft_model_nemo_path=cfg.model.peft.restore_from_path, peft_model_ckpt_path=None,
)
else:
# attempting to load a ckpt peft model.
if cfg.model.peft.restore_from_ckpt_name:
ckpt_name = cfg.model.peft.restore_from_ckpt_name
else:
ckpt_name = "model_weights.ckpt"
save_restore_connector = PEFTSaveRestoreConnector(
peft_model_nemo_path=None,
peft_model_ckpt_path=cfg.model.peft.restore_from_path,
peft_model_ckpt_name=ckpt_name,
)
else:
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.model.restore_from_path):
save_restore_connector.model_extracted_dir = cfg.model.restore_from_path
model = MegatronGPTSFTModel.restore_from(
restore_path=cfg.model.restore_from_path,
trainer=trainer,
override_config_path=peft_model_cfg,
save_restore_connector=save_restore_connector,
)
model.freeze()
if not cfg.model.get('use_flash_attention', False):
cfg.inference.compute_attention_mask = True
config = OmegaConf.to_container(cfg.inference, resolve=True)
model.set_inference_config(config)
if not cfg.server:
trainer.test(model)
else:
if not HAVE_MEGATRON_CORE:
raise ValueError('Megatron-core needs to be installed to use this feature!')
from nemo.collections.nlp.modules.common.megatron_web_server import get_chatbot_demo, get_demo
trainer.test(model, dataloaders=None)
if parallel_state.is_pipeline_first_stage() and parallel_state.get_tensor_model_parallel_rank() == 0:
if cfg.web_server:
if cfg.chat:
defaults = {
'user': cfg.chatbot_config.user,
'assistant': cfg.chatbot_config.assistant,
'system': cfg.chatbot_config.system,
}
web_ui = partial(
get_chatbot_demo,
defaults=defaults,
value=cfg.chatbot_config.value,
attributes=cfg.chatbot_config.attributes,
)
else:
web_ui = get_demo
loop = asyncio.new_event_loop()
thread = threading.Thread(
target=web_ui,
daemon=True,
args=(cfg.share, cfg.username, cfg.password, cfg.port, cfg.web_port, loop),
)
thread.start()
server = MegatronServer(model.cuda())
server.run("0.0.0.0", port=cfg.port)
while True:
choice = torch.cuda.LongTensor(1)
torch.distributed.broadcast(choice, 0)
if choice[0].item() == 0:
generate(model.cuda())
if __name__ == "__main__":
main()
| NeMo-main | examples/nlp/language_modeling/tuning/megatron_gpt_peft_eval.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to compute metrics for a given audio-to-audio model on a dataset specified by a manifest file.
The manifest file must include the path to the input audio and the path to the target (ground truth) audio.
Note: This script depends on the `process_audio.py` script, and therefore both scripts should be
located in the same directory during execution.
# Arguments
<< All arguments of `process_audio.py` are inherited by this script, so please refer to `process_audio.py`
for full list of arguments >>
dataset_manifest: Required - path to dataset JSON manifest file (in NeMo format)
output_dir: Optional - output directory where the processed audio will be saved
metrics: Optional - list of metrics to evaluate. Defaults to [sdr,estoi]
sample_rate: Optional - sample rate for loaded audio. Defaults to 16kHz.
only_score_manifest: Optional - If set, processing will be skipped and it is assumed the processed audio is available in dataset_manifest
# Usage
## To score a dataset with a manifest file that contains the input audio (to be processed) and the target audio
python audio_to_audio_eval.py \
model_path=null \
pretrained_model=null \
dataset_manifest=<Mandatory: path to a dataset manifest file> \
output_dir=<Optional: Directory where processed audio will be saved> \
processed_channel_selector=<Optional: list of channels to select from the processed audio file> \
target_key=<Optional: key for the target audio in the dataset manifest. Default: target_audio_filepath> \
target_channel_selector=<Optional: list of channels to select from the target audio file> \
metrics=<Optional: list of metrics to evaluate. Defaults to [sdr,estoi]> \
batch_size=32 \
amp=True
## To score a manifest file which has been previously processed and contains both processed audio and target audio
python audio_to_audio_eval.py \
dataset_manifest=<Mandatory: path to a dataset manifest file> \
processed_key=<Optional: key for the processed audio in the dataset manifest. Default: processed_audio_filepath> \
processed_channel_selector=<Optional: list of channels to select from the processed audio file> \
target_key=<Optional: key for the target audio in the dataset manifest. Default: target_audio_filepath> \
target_channel_selector=<Optional: list of channels to select from the target audio file> \
metrics=<Optional: list of metrics to evaluate. Defaults to [sdr,estoi]> \
batch_size=32 \
amp=True
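Illustrative manifest lines (the keys follow the defaults described above and in `process_audio.py`;
override them via input_key, processed_key and target_key if your manifest uses different names):
    # first mode: input audio still needs to be processed
    {"audio_filepath": "utt1_noisy.wav", "target_audio_filepath": "utt1_clean.wav", "duration": 3.2}
    # second mode: audio has already been processed
    {"processed_audio_filepath": "utt1_enhanced.wav", "target_audio_filepath": "utt1_clean.wav", "duration": 3.2}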
"""
import json
import os
import tempfile
from dataclasses import dataclass, field, is_dataclass
from typing import List, Optional
import process_audio
import torch
from omegaconf import OmegaConf, open_dict
from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality
from torchmetrics.audio.sdr import ScaleInvariantSignalDistortionRatio, SignalDistortionRatio
from torchmetrics.audio.stoi import ShortTimeObjectiveIntelligibility
from tqdm import tqdm
from nemo.collections.asr.data import audio_to_audio_dataset
from nemo.collections.asr.metrics.audio import AudioMetricWrapper
from nemo.collections.common.parts.preprocessing import manifest
from nemo.core.config import hydra_runner
from nemo.utils import logging
@dataclass
class AudioEvaluationConfig(process_audio.ProcessConfig):
# Processed audio config
processed_channel_selector: Optional[List] = None
processed_key: str = 'processed_audio_filepath'
# Target audio configs
target_dataset_dir: Optional[str] = None # If not provided, defaults to dirname(cfg.dataset_manifest)
target_channel_selector: Optional[List] = None
target_key: str = 'target_audio_filepath'
# Sample rate for audio evaluation
sample_rate: int = 16000
# Score an existing manifest without running processing
only_score_manifest: bool = False
# Metrics to calculate
metrics: List[str] = field(default_factory=lambda: ['sdr', 'estoi'])
def get_evaluation_dataloader(config):
"""Prepare a dataloader for evaluation.
"""
dataset = audio_to_audio_dataset.get_audio_to_target_dataset(config=config)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=config['batch_size'],
collate_fn=dataset.collate_fn,
drop_last=config.get('drop_last', False),
shuffle=False,
num_workers=config.get('num_workers', min(config['batch_size'], os.cpu_count() - 1)),
pin_memory=True,
)
def get_metrics(cfg: AudioEvaluationConfig):
"""Prepare a dictionary with metrics.
"""
available_metrics = ['sdr', 'sisdr', 'stoi', 'estoi', 'pesq']
metrics = dict()
for name in sorted(set(cfg.metrics)):
name = name.lower()
if name == 'sdr':
metric = AudioMetricWrapper(metric=SignalDistortionRatio())
elif name == 'sisdr':
metric = AudioMetricWrapper(metric=ScaleInvariantSignalDistortionRatio())
elif name == 'stoi':
metric = AudioMetricWrapper(metric=ShortTimeObjectiveIntelligibility(fs=cfg.sample_rate, extended=False))
elif name == 'estoi':
metric = AudioMetricWrapper(metric=ShortTimeObjectiveIntelligibility(fs=cfg.sample_rate, extended=True))
elif name == 'pesq':
metric = AudioMetricWrapper(metric=PerceptualEvaluationSpeechQuality(fs=cfg.sample_rate, mode='wb'))
else:
raise ValueError(f'Unexpected metric: {name}. Currently available metrics: {available_metrics}')
metrics[name] = metric
return metrics
@hydra_runner(config_name="AudioEvaluationConfig", schema=AudioEvaluationConfig)
def main(cfg: AudioEvaluationConfig):
torch.set_grad_enabled(False)
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if cfg.audio_dir is not None:
raise RuntimeError(
"Evaluation script requires ground truth audio to be passed via a manifest file. "
"If manifest file is available, submit it via `dataset_manifest` argument."
)
if not os.path.exists(cfg.dataset_manifest):
raise FileNotFoundError(f'The dataset manifest file could not be found at path : {cfg.dataset_manifest}')
if cfg.target_dataset_dir is None:
# Assume the target data is available in the same directory as the input data
cfg.target_dataset_dir = os.path.dirname(cfg.dataset_manifest)
elif not os.path.isdir(cfg.target_dataset_dir):
raise FileNotFoundError(f'Target dataset dir could not be found at path : {cfg.target_dataset_dir}')
# Setup metrics
metrics = get_metrics(cfg)
# Processing
if not cfg.only_score_manifest:
# Process audio using the configured model and save in the output directory
process_cfg = process_audio.main(cfg) # type: ProcessConfig
# Release GPU memory if it was used during transcription
if torch.cuda.is_available():
torch.cuda.empty_cache()
logging.info('Finished processing audio.')
else:
# Score the input manifest, no need to run a model
cfg.output_filename = cfg.dataset_manifest
process_cfg = cfg
# Evaluation
with tempfile.TemporaryDirectory() as tmp_dir:
# Prepare a temporary manifest with processed audio and target
temporary_manifest_filepath = os.path.join(tmp_dir, 'manifest.json')
num_files = 0
with open(process_cfg.output_filename, 'r') as f_processed, open(
temporary_manifest_filepath, 'w', encoding='utf-8'
) as f_tmp:
for line_processed in f_processed:
data_processed = json.loads(line_processed)
if cfg.processed_key not in data_processed:
raise ValueError(
f'Processed key {cfg.processed_key} not found in manifest: {process_cfg.output_filename}.'
)
if cfg.target_key not in data_processed:
raise ValueError(
f'Target key {cfg.target_key} not found in manifest: {process_cfg.output_filename}.'
)
item = {
'processed': manifest.get_full_path(
audio_file=data_processed[cfg.processed_key], manifest_file=process_cfg.output_filename
),
'target': manifest.get_full_path(
audio_file=data_processed[cfg.target_key], data_dir=cfg.target_dataset_dir
),
'duration': data_processed.get('duration'),
}
# Double-check files exist
for key in ['processed', 'target']:
if not os.path.isfile(item[key]):
raise ValueError(f'File for key "{key}" not found at: {item[key]}.\nCurrent item: {item}')
# Warn if we're comparing the same files
if item['target'] == item['processed']:
logging.warning('Using the same file as processed and target: %s', item['target'])
# Write the entry in the temporary manifest file
f_tmp.write(json.dumps(item) + '\n')
num_files += 1
# Prepare dataloader
config = {
'manifest_filepath': temporary_manifest_filepath,
'sample_rate': cfg.sample_rate,
'input_key': 'processed',
'input_channel_selector': cfg.processed_channel_selector,
'target_key': 'target',
'target_channel_selector': cfg.target_channel_selector,
'batch_size': min(cfg.batch_size, num_files),
'num_workers': cfg.num_workers,
}
temporary_dataloader = get_evaluation_dataloader(config)
# Calculate metrics
for eval_batch in tqdm(temporary_dataloader, desc='Evaluating'):
processed_signal, processed_length, target_signal, target_length = eval_batch
if not torch.equal(processed_length, target_length):
                raise RuntimeError(f'Length mismatch: processed length {processed_length} != target length {target_length}.')
for name, metric in metrics.items():
metric.update(preds=processed_signal, target=target_signal, input_length=target_length)
# Convert to a dictionary with name: value
metrics_value = {name: metric.compute().item() for name, metric in metrics.items()}
logging.info('Finished running evaluation.')
# Show results
logging.info('Summary\n')
logging.info('Data')
logging.info('\tmanifest: %s', cfg.output_filename)
logging.info('\ttarget_dataset_dir: %s', cfg.target_dataset_dir)
logging.info('\tnum_files: %s', num_files)
logging.info('Metrics')
for name, value in metrics_value.items():
logging.info('\t%10s: \t%6.2f', name, value)
# Inject the metric name and score into the config, and return the entire config
with open_dict(cfg):
cfg.metrics_value = metrics_value
return cfg
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/audio_tasks/audio_to_audio_eval.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import glob
import json
import os
from dataclasses import dataclass, is_dataclass
from pathlib import Path
from typing import List, Optional
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from nemo.collections.asr.models import AudioToAudioModel
from nemo.core.config import hydra_runner
from nemo.utils import logging, model_utils
"""
Process audio files on a single CPU/GPU. Useful for processing moderate amounts of audio data.
# Arguments
model_path: path to .nemo checkpoint for an AudioToAudioModel
pretrained_name: name of a pretrained AudioToAudioModel model (from NGC registry)
audio_dir: path to directory with audio files
dataset_manifest: path to dataset JSON manifest file (in NeMo format)
input_channel_selector: list of channels to take from audio files, defaults to `None` and takes all available channels
input_key: key for audio filepath in the manifest file, defaults to `audio_filepath`
output_dir: Directory where processed files will be saved
output_filename: Output filename where manifest pointing to processed files will be written
batch_size: batch size during inference
cuda: Optional int to enable or disable execution of model on certain CUDA device.
amp: Bool to decide if Automatic Mixed Precision should be used during inference
audio_type: Str filetype of the audio. Supported = wav, flac, mp3
overwrite_output: Bool which when set allows repeated processing runs to overwrite previous results.
# Usage
AudioToAudioModel can be specified by either `model_path` or `pretrained_name`.
Data for processing can be defined with either `audio_dir` or `dataset_manifest`.
Processed audio is saved in `output_dir`, and a manifest for processed files is saved
in `output_filename`.
```
python process_audio.py \
model_path=null \
pretrained_name=null \
audio_dir="" \
dataset_manifest="" \
input_channel_selector=[] \
output_dir="" \
output_filename="" \
batch_size=1 \
cuda=0 \
amp=True
```
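The manifest written to `output_filename` mirrors the input manifest (when `dataset_manifest`
is provided) with an additional `processed_audio_filepath` key pointing at the processed audio
in `output_dir`, e.g. (illustrative values):
    {"audio_filepath": "utt1.wav", "duration": 3.2, "processed_audio_filepath": "<output_dir>/utt1.wav"}
When only `audio_dir` is given, each output line contains just the `processed_audio_filepath` key.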
"""
@dataclass
class ProcessConfig:
# Required configs
model_path: Optional[str] = None # Path to a .nemo file
pretrained_name: Optional[str] = None # Name of a pretrained model
audio_dir: Optional[str] = None # Path to a directory which contains audio files
dataset_manifest: Optional[str] = None # Path to dataset's JSON manifest
# Audio configs
input_channel_selector: Optional[List] = None # Union types not supported Optional[Union[List, int]]
input_key: Optional[str] = None # Can be used with a manifest
# General configs
output_dir: Optional[str] = None
output_filename: Optional[str] = None
batch_size: int = 1
num_workers: int = 0
# Override model config
override_config_path: Optional[str] = None # path to a yaml config that will override the internal config file
# Set `cuda` to int to define CUDA device. If 'None', will look for CUDA
# device anyway, and do inference on CPU only if CUDA device is not found.
# If `cuda` is a negative number, inference will be on CPU only.
cuda: Optional[int] = None
amp: bool = False
audio_type: str = "wav"
# Recompute model predictions, even if the output folder exists.
overwrite_output: bool = False
@hydra_runner(config_name="ProcessConfig", schema=ProcessConfig)
def main(cfg: ProcessConfig) -> ProcessConfig:
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if cfg.model_path is None and cfg.pretrained_name is None:
raise ValueError("Both cfg.model_path and cfg.pretrained_name cannot be None!")
if cfg.audio_dir is None and cfg.dataset_manifest is None:
raise ValueError("Both cfg.audio_dir and cfg.dataset_manifest cannot be None!")
# setup GPU
if cfg.cuda is None:
if torch.cuda.is_available():
device = [0] # use 0th CUDA device
accelerator = 'gpu'
else:
device = 1
accelerator = 'cpu'
else:
device = [cfg.cuda]
accelerator = 'gpu'
map_location = torch.device('cuda:{}'.format(device[0]) if accelerator == 'gpu' else 'cpu')
# setup model
if cfg.model_path is not None:
# restore model from .nemo file path
model_cfg = AudioToAudioModel.restore_from(restore_path=cfg.model_path, return_config=True)
classpath = model_cfg.target # original class path
imported_class = model_utils.import_class_by_path(classpath) # type: AudioToAudioModel
logging.info(f"Restoring model : {imported_class.__name__}")
audio_to_audio_model = imported_class.restore_from(
restore_path=cfg.model_path, override_config_path=cfg.override_config_path, map_location=map_location
) # type: AudioToAudioModel
model_name = os.path.splitext(os.path.basename(cfg.model_path))[0]
else:
# restore model by name
audio_to_audio_model = AudioToAudioModel.from_pretrained(
model_name=cfg.pretrained_name, map_location=map_location
) # type: AudioToAudioModel
model_name = cfg.pretrained_name
trainer = pl.Trainer(devices=device, accelerator=accelerator)
audio_to_audio_model.set_trainer(trainer)
audio_to_audio_model = audio_to_audio_model.eval()
if cfg.audio_dir is not None:
filepaths = list(glob.glob(os.path.join(cfg.audio_dir, f"**/*.{cfg.audio_type}"), recursive=True))
else:
# get filenames from manifest
filepaths = []
if os.stat(cfg.dataset_manifest).st_size == 0:
raise RuntimeError(f"The input dataset_manifest {cfg.dataset_manifest} is empty.")
input_key = 'audio_filepath' if cfg.input_key is None else cfg.input_key
manifest_dir = Path(cfg.dataset_manifest).parent
with open(cfg.dataset_manifest, 'r') as f:
for line in f:
item = json.loads(line)
audio_file = Path(item[input_key])
if not audio_file.is_file() and not audio_file.is_absolute():
audio_file = manifest_dir / audio_file
filepaths.append(str(audio_file.absolute()))
logging.info(f"\nProcessing {len(filepaths)} files...\n")
# setup AMP (optional)
if cfg.amp and torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
logging.info("AMP enabled!\n")
autocast = torch.cuda.amp.autocast
else:
@contextlib.contextmanager
def autocast():
yield
    # Compute output directory
    if cfg.output_dir is None:
        # create default output directory
if cfg.audio_dir is not None:
cfg.output_dir = os.path.dirname(os.path.join(cfg.audio_dir, '.')) + f'_processed_{model_name}'
else:
cfg.output_dir = os.path.dirname(cfg.dataset_manifest) + f'_processed_{model_name}'
# Compute output filename
if cfg.output_filename is None:
# create default output filename
cfg.output_filename = cfg.output_dir.rstrip('/') + '_manifest.json'
    # if the output should not be overwritten and it already exists, raise an error and skip processing
if not cfg.overwrite_output and os.path.exists(cfg.output_dir):
raise RuntimeError(
f"Previous output found at {cfg.output_dir}, and flag `overwrite_output`"
f"is {cfg.overwrite_output}. Returning without processing."
)
# Process audio
with autocast():
with torch.no_grad():
paths2processed_files = audio_to_audio_model.process(
paths2audio_files=filepaths,
output_dir=cfg.output_dir,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
input_channel_selector=cfg.input_channel_selector,
)
logging.info(f"Finished processing {len(filepaths)} files!")
logging.info(f"Processed audio is available in the output directory: {cfg.output_dir}")
# Prepare new/updated manifest with a new key for processed audio
with open(cfg.output_filename, 'w', encoding='utf-8') as f:
if cfg.dataset_manifest is not None:
with open(cfg.dataset_manifest, 'r') as fr:
for idx, line in enumerate(fr):
item = json.loads(line)
item['processed_audio_filepath'] = paths2processed_files[idx]
f.write(json.dumps(item) + "\n")
else:
for idx, processed_file in enumerate(paths2processed_files):
item = {'processed_audio_filepath': processed_file}
f.write(json.dumps(item) + "\n")
return cfg
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/audio_tasks/process_audio.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Training the model
Basic run (on CPU for 50 epochs):
python examples/audio_tasks/speech_enhancement.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath="<path to manifest file>" \
model.validation_ds.manifest_filepath="<path to manifest file>" \
trainer.devices=1 \
trainer.accelerator='cpu' \
trainer.max_epochs=50
PyTorch Lightning Trainer arguments, as well as model and optimizer arguments, can be added or overridden from the CLI
"""
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from nemo.collections.asr.models import EncMaskDecAudioToAudioModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="./conf", config_name="masking")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg, resolve=True)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
model = EncMaskDecAudioToAudioModel(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
model.maybe_init_from_pretrained_checkpoint(cfg)
# Train the model
trainer.fit(model)
# Run on test data, if available
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if trainer.is_global_zero:
# Destroy the current process group and let the trainer initialize it again with a single device.
if torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
# Run test on a single device
trainer = pl.Trainer(devices=1, accelerator=cfg.trainer.accelerator)
if model.prepare_test(trainer):
trainer.test(model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/audio_tasks/speech_enhancement.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Preparing the Tokenizer for the dataset
Use the `process_asr_text_tokenizer.py` script under <NEMO_ROOT>/scripts/tokenizers/ in order to prepare the tokenizer.
```sh
python <NEMO_ROOT>/scripts/tokenizers/process_asr_text_tokenizer.py \
--manifest=<path to train manifest files, separated by commas>
OR
--data_file=<path to text data, separated by commas> \
--data_root="<output directory>" \
--vocab_size=<number of tokens in vocabulary> \
--tokenizer=<"spe" or "wpe"> \
--no_lower_case \
--spe_type=<"unigram", "bpe", "char" or "word"> \
--spe_character_coverage=1.0 \
--log
```
# Training the model
```sh
python run_speech_intent_slot_train.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath=<path to train manifest> \
model.validation_ds.manifest_filepath=<path to val/test manifest> \
model.tokenizer.dir=<path to directory of tokenizer (not full path to the vocab file!)> \
model.tokenizer.type=<either bpe or wpe> \
trainer.devices=-1 \
trainer.accelerator="gpu" \
trainer.strategy="ddp" \
trainer.max_epochs=100 \
model.optim.name="adamw" \
model.optim.lr=0.001 \
model.optim.betas=[0.9,0.999] \
model.optim.weight_decay=0.0001 \
model.optim.sched.warmup_steps=2000
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="<Name of experiment>" \
exp_manager.wandb_logger_kwargs.project="<Name of project>"
```
# Fine-tune a model
For documentation on fine-tuning this model, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations
# Pretrained Models
For documentation on existing pretrained models, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speech_intent_slot/results.html
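# Data Format
Each manifest entry is a JSON line containing the audio path, duration, and the target semantics
serialized into the `text` field. The exact semantics string depends on how the dataset was
prepared (e.g. the SLURP processing scripts); the line below is illustrative only:
    {"audio_filepath": "audio/utt1.wav", "duration": 2.9, "text": "{'scenario': 'alarm', 'action': 'set', 'entities': [{'type': 'time', 'filler': 'seven am'}]}"}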
"""
from pathlib import Path
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from nemo.collections.asr.models import ASRModel, SLUIntentSlotBPEModel, SpeechEncDecSelfSupervisedModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="./configs/", config_name="conformer_transformer_large_bpe")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
model = SLUIntentSlotBPEModel(cfg=cfg.model, trainer=trainer)
# Init encoder from pretrained model
pretrained_encoder_name = cfg.pretrained_encoder.name
if pretrained_encoder_name is not None:
if Path(pretrained_encoder_name).is_file():
logging.info(f"Loading pretrained encoder from local: {pretrained_encoder_name}")
            pretrained_model = ASRModel.restore_from(
                restore_path=pretrained_encoder_name, map_location=torch.device("cpu")
            )
            model.encoder.load_state_dict(pretrained_model.encoder.state_dict(), strict=False)
            del pretrained_model
else:
logging.info(f"Loading pretrained encoder from NGC: {pretrained_encoder_name}")
if pretrained_encoder_name.startswith("ssl_"):
model_cls = SpeechEncDecSelfSupervisedModel
elif pretrained_encoder_name.startswith("stt_"):
model_cls = ASRModel
else:
raise ValueError(f"Unknown pretrained encoder: {pretrained_encoder_name}")
            pretrained_model = model_cls.from_pretrained(
                model_name=pretrained_encoder_name, map_location=torch.device("cpu")
            )
            model.encoder.load_state_dict(pretrained_model.encoder.state_dict(), strict=False)
            del pretrained_model
else:
logging.info("Not using pretrained encoder.")
if cfg.pretrained_encoder.freeze:
logging.info("Freezing encoder...")
model.encoder.freeze()
else:
model.encoder.unfreeze()
trainer.fit(model)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if model.prepare_test(trainer):
trainer.test(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/slu/speech_intent_slot/run_speech_intent_slot_train.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from dataclasses import dataclass, is_dataclass
from pathlib import Path
from typing import Optional
import torch
from eval_utils.evaluation.util import format_results
from eval_utils.evaluator import SLURPEvaluator
from eval_utils.inference import InferenceConfig, run_inference
from omegaconf import MISSING, OmegaConf, open_dict
from nemo.core.config import hydra_runner
from nemo.utils import logging
@dataclass
class EvaluationConfig(InferenceConfig):
dataset_manifest: str = MISSING
output_filename: Optional[str] = "evaluation_transcripts.json"
average: str = "micro"
full: bool = False
errors: bool = False
table_layout: str = "fancy_grid"
only_score_manifest: bool = False
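# Example invocation (a sketch only; the paths below are placeholders):
#   python run_speech_intent_slot_eval.py \
#       model_path=/path/to/slu_model.nemo \
#       dataset_manifest=/path/to/test_manifest.json \
#       average="micro" \
#       only_score_manifest=false
# Note that `main` below writes the predictions to `predictions.json` next to the .nemo file.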
@hydra_runner(config_name="EvaluationConfig", schema=EvaluationConfig)
def main(cfg: EvaluationConfig):
torch.set_grad_enabled(False)
cfg.output_filename = str(Path(Path(cfg.model_path).parent) / Path("predictions.json"))
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if cfg.audio_dir is not None:
raise RuntimeError(
"Evaluation script requires ground truth labels to be passed via a manifest file. "
"If manifest file is available, submit it via `dataset_manifest` argument."
)
if not os.path.exists(cfg.dataset_manifest):
raise FileNotFoundError(f"The dataset manifest file could not be found at path : {cfg.dataset_manifest}")
if not cfg.only_score_manifest:
# Transcribe speech into an output directory
transcription_cfg = run_inference(cfg) # type: EvaluationConfig
# Release GPU memory if it was used during transcription
if torch.cuda.is_available():
torch.cuda.empty_cache()
logging.info("Finished transcribing speech dataset. Computing metrics..")
else:
cfg.output_filename = cfg.dataset_manifest
transcription_cfg = cfg
ground_truth_text = []
predicted_text = []
invalid_manifest = False
with open(transcription_cfg.output_filename, 'r') as f:
for line in f:
data = json.loads(line)
if 'pred_text' not in data:
invalid_manifest = True
break
ground_truth_text.append(data['text'])
predicted_text.append(data['pred_text'])
# Test for invalid manifest supplied
if invalid_manifest:
raise ValueError(
f"Invalid manifest provided: {transcription_cfg.output_filename} does not "
f"contain value for `pred_text`."
)
# Compute the metrics
evaluator = SLURPEvaluator(cfg.average)
evaluator.update(predictions=predicted_text, groundtruth=ground_truth_text)
results = evaluator.compute(aggregate=False)
total = results["total"]
invalid = results["invalid"]
slurp_f1 = results["slurp"]["overall"][2]
print("-------------- Results --------------")
print(
format_results(
results=results["scenario"],
label="scenario",
full=cfg.full,
errors=cfg.errors,
table_layout=cfg.table_layout,
),
"\n",
)
print(
format_results(
results=results["action"], label="action", full=cfg.full, errors=cfg.errors, table_layout=cfg.table_layout
),
"\n",
)
print(
format_results(
results=results["intent"],
label="intent (scen_act)",
full=cfg.full,
errors=cfg.errors,
table_layout=cfg.table_layout,
),
"\n",
)
print(
format_results(
results=results["entity"],
label="entities",
full=cfg.full,
errors=cfg.errors,
table_layout=cfg.table_layout,
),
"\n",
)
print(
format_results(
results=results["word_dist"],
label="entities (word distance)",
full=cfg.full,
errors=cfg.errors,
table_layout=cfg.table_layout,
),
"\n",
)
print(
format_results(
results=results["char_dist"],
label="entities (char distance)",
full=cfg.full,
errors=cfg.errors,
table_layout=cfg.table_layout,
),
"\n",
)
print(
format_results(
results=results["slurp"], label="SLU F1", full=cfg.full, errors=cfg.errors, table_layout=cfg.table_layout
),
"\n",
)
print(f"Found {invalid} out of {total} predictions that have syntax error.")
# Inject the metric name and score into the config, and return the entire config
with open_dict(cfg):
cfg.metric_name = "slurp_f1"
cfg.metric_value = slurp_f1
return cfg
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/slu/speech_intent_slot/run_speech_intent_slot_eval.py |
# ! /usr/bin/python
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import glob
import json
import os
from dataclasses import dataclass, is_dataclass
from pathlib import Path
from typing import List, Optional
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from tqdm.auto import tqdm
from nemo.collections.asr.models import SLUIntentSlotBPEModel
from nemo.collections.asr.parts.utils.slu_utils import SequenceGeneratorConfig
from nemo.core.config import hydra_runner
from nemo.utils import logging
@dataclass
class InferenceConfig:
# Required configs
model_path: Optional[str] = None # Path to a .nemo file
pretrained_name: Optional[str] = None # Name of a pretrained model
audio_dir: Optional[str] = None # Path to a directory which contains audio files
dataset_manifest: Optional[str] = None # Path to dataset's JSON manifest
# General configs
output_filename: Optional[str] = None
batch_size: int = 32
num_workers: int = 8
# Set `cuda` to int to define CUDA device. If 'None', will look for CUDA
# device anyway, and do inference on CPU only if CUDA device is not found.
# If `cuda` is a negative number, inference will be on CPU only.
cuda: Optional[int] = None
amp: bool = False
audio_type: str = "wav"
# Recompute model transcription, even if the output folder exists with scores.
overwrite_transcripts: bool = True
# Decoding strategy for semantic outputs
sequence_generator: SequenceGeneratorConfig = SequenceGeneratorConfig(type="greedy")
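# Example invocation (a sketch only; the paths are placeholders):
#   python inference.py \
#       model_path=/path/to/slu_model.nemo \
#       dataset_manifest=/path/to/test_manifest.json \
#       batch_size=32 \
#       cuda=0
# Either `model_path` or `pretrained_name` must be provided, and either `dataset_manifest` or `audio_dir`.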
def slurp_inference(model, path2manifest: str, batch_size: int = 4, num_workers: int = 0,) -> List[str]:
if num_workers is None:
num_workers = min(batch_size, os.cpu_count() - 1)
# We will store transcriptions here
hypotheses = []
# Model's mode and device
mode = model.training
device = next(model.parameters()).device
dither_value = model.preprocessor.featurizer.dither
pad_to_value = model.preprocessor.featurizer.pad_to
try:
model.preprocessor.featurizer.dither = 0.0
model.preprocessor.featurizer.pad_to = 0
# Switch model to evaluation mode
model.eval()
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
config = {
'manifest_filepath': path2manifest,
'batch_size': batch_size,
'num_workers': num_workers,
}
temporary_datalayer = model._setup_transcribe_dataloader(config)
for test_batch in tqdm(temporary_datalayer, desc="Transcribing", ncols=80):
predictions = model.predict(
input_signal=test_batch[0].to(device), input_signal_length=test_batch[1].to(device)
)
hypotheses += predictions
del predictions
del test_batch
finally:
# set mode back to its original value
model.train(mode=mode)
model.preprocessor.featurizer.dither = dither_value
model.preprocessor.featurizer.pad_to = pad_to_value
logging.set_verbosity(logging_level)
return hypotheses
@hydra_runner(config_name="InferenceConfig", schema=InferenceConfig)
def run_inference(cfg: InferenceConfig) -> InferenceConfig:
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if cfg.model_path is None and cfg.pretrained_name is None:
raise ValueError("Both cfg.model_path and cfg.pretrained_name cannot be None!")
if cfg.audio_dir is None and cfg.dataset_manifest is None:
raise ValueError("Both cfg.audio_dir and cfg.dataset_manifest cannot be None!")
# setup GPU
if cfg.cuda is None:
if torch.cuda.is_available():
device = [0] # use 0th CUDA device
accelerator = 'gpu'
else:
device = 1
accelerator = 'cpu'
else:
device = [cfg.cuda]
accelerator = 'gpu'
map_location = torch.device('cuda:{}'.format(device[0]) if accelerator == 'gpu' else 'cpu')
# setup model
if cfg.model_path is not None:
# restore model from .nemo file path
logging.info(f"Restoring model : {cfg.model_path}")
model = SLUIntentSlotBPEModel.restore_from(restore_path=cfg.model_path, map_location=map_location)
model_name = os.path.splitext(os.path.basename(cfg.model_path))[0]
else:
# restore model by name
model = SLUIntentSlotBPEModel.from_pretrained(model_name=cfg.pretrained_name, map_location=map_location)
model_name = cfg.pretrained_name
trainer = pl.Trainer(devices=device, accelerator=accelerator)
model.set_trainer(trainer)
model = model.eval()
# Setup decoding strategy
model.set_decoding_strategy(cfg.sequence_generator)
# get audio filenames
if cfg.audio_dir is not None:
filepaths = list(glob.glob(os.path.join(cfg.audio_dir, f"**/*.{cfg.audio_type}"), recursive=True))
else:
# get filenames from manifest
filepaths = []
if os.stat(cfg.dataset_manifest).st_size == 0:
logging.error(f"The input dataset_manifest {cfg.dataset_manifest} is empty. Exiting!")
return None
manifest_dir = Path(cfg.dataset_manifest).parent
with open(cfg.dataset_manifest, 'r') as f:
has_two_fields = []
for line in f:
item = json.loads(line)
if "offset" in item and "duration" in item:
has_two_fields.append(True)
else:
has_two_fields.append(False)
audio_file = Path(item['audio_filepath'])
if not audio_file.is_file() and not audio_file.is_absolute():
audio_file = manifest_dir / audio_file
filepaths.append(str(audio_file.absolute()))
logging.info(f"\nStart inference with {len(filepaths)} files...\n")
# setup AMP (optional)
if cfg.amp and torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
logging.info("AMP enabled!\n")
autocast = torch.cuda.amp.autocast
else:
@contextlib.contextmanager
def autocast():
yield
# Compute output filename
if cfg.output_filename is None:
# create default output filename
if cfg.audio_dir is not None:
cfg.output_filename = os.path.dirname(os.path.join(cfg.audio_dir, '.')) + '.json'
else:
cfg.output_filename = cfg.dataset_manifest.replace('.json', f'_{model_name}.json')
# if transcripts should not be overwritten, and already exists, skip re-transcription step and return
if not cfg.overwrite_transcripts and os.path.exists(cfg.output_filename):
logging.info(
f"Previous transcripts found at {cfg.output_filename}, and flag `overwrite_transcripts`"
f"is {cfg.overwrite_transcripts}. Returning without re-transcribing text."
)
return cfg
# transcribe audio
with autocast():
with torch.no_grad():
predictions = slurp_inference(
model=model,
path2manifest=cfg.dataset_manifest,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
)
logging.info(f"Finished transcribing {len(filepaths)} files !")
logging.info(f"Writing transcriptions into file: {cfg.output_filename}")
# write audio transcriptions
with open(cfg.output_filename, 'w', encoding='utf-8') as f:
if cfg.audio_dir is not None:
for idx, text in enumerate(predictions):
item = {'audio_filepath': filepaths[idx], 'pred_text': text}
f.write(json.dumps(item) + "\n")
else:
with open(cfg.dataset_manifest, 'r') as fr:
for idx, line in enumerate(fr):
item = json.loads(line)
item['pred_text'] = predictions[idx]
f.write(json.dumps(item) + "\n")
logging.info("Finished writing predictions !")
return cfg
if __name__ == '__main__':
run_inference() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/slu/speech_intent_slot/eval_utils/inference.py |
# ! /usr/bin/python
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
from typing import Dict, List, Tuple, Union
from .evaluation.metrics.metrics import ErrorMetric
def parse_semantics_str2dict(semantics_str: Union[List[str], str, Dict]) -> Tuple[Dict, bool]:
"""
    This function parses the input string into a valid Python dictionary for later evaluation.
Part of this function is adapted from
https://github.com/speechbrain/speechbrain/blob/develop/recipes/SLURP/direct/train_with_wav2vec2.py#L110-L127
"""
invalid = False
if isinstance(semantics_str, dict):
return semantics_str, invalid
if isinstance(semantics_str, list):
semantics_str = " ".join(semantics_str)
try:
if "|" in semantics_str:
semantics_str = semantics_str.replace("|", ",")
_dict = ast.literal_eval(semantics_str)
if not isinstance(_dict, dict):
_dict = {
"scenario": "none",
"action": "none",
"entities": [],
}
invalid = True
except Exception: # need this if the output is not a valid dict
_dict = {
"scenario": "none",
"action": "none",
"entities": [],
}
invalid = True
if "scenario" not in _dict or not isinstance(_dict["scenario"], str):
_dict["scenario"] = "none"
invalid = True
if "action" not in _dict or not isinstance(_dict["action"], str):
_dict["action"] = "none"
invalid = True
if "entities" not in _dict:
_dict["entities"] = []
invalid = True
else:
def _parse_entity(item: Dict):
error = False
for key in ["type", "filler"]:
if key not in item or not isinstance(item[key], str):
item[key] = "none"
error = True
return item, error
for i, x in enumerate(_dict["entities"]):
item, entity_error = _parse_entity(x)
invalid = invalid or entity_error
_dict["entities"][i] = item
return _dict, invalid
class SLURPEvaluator:
"""
Evaluator class for calculating SLURP metrics
"""
def __init__(self, average_mode: str = 'micro') -> None:
if average_mode not in ['micro', 'macro']:
raise ValueError(f"Only supports 'micro' or 'macro' average, but got {average_mode} instead.")
self.average_mode = average_mode
self.scenario_f1 = ErrorMetric.get_instance(metric="f1", average=average_mode)
self.action_f1 = ErrorMetric.get_instance(metric="f1", average=average_mode)
self.intent_f1 = ErrorMetric.get_instance(metric="f1", average=average_mode)
self.span_f1 = ErrorMetric.get_instance(metric="span_f1", average=average_mode)
self.distance_metrics = {}
for distance in ['word', 'char']:
self.distance_metrics[distance] = ErrorMetric.get_instance(
metric="span_distance_f1", average=average_mode, distance=distance
)
self.slu_f1 = ErrorMetric.get_instance(metric="slu_f1", average=average_mode)
self.invalid = 0
self.total = 0
def reset(self):
self.scenario_f1 = ErrorMetric.get_instance(metric="f1", average=self.average_mode)
self.action_f1 = ErrorMetric.get_instance(metric="f1", average=self.average_mode)
self.intent_f1 = ErrorMetric.get_instance(metric="f1", average=self.average_mode)
self.span_f1 = ErrorMetric.get_instance(metric="span_f1", average=self.average_mode)
self.distance_metrics = {}
for distance in ['word', 'char']:
self.distance_metrics[distance] = ErrorMetric.get_instance(
metric="span_distance_f1", average=self.average_mode, distance=distance
)
self.slu_f1 = ErrorMetric.get_instance(metric="slu_f1", average=self.average_mode)
self.invalid = 0
self.total = 0
def update(self, predictions: Union[List[str], str], groundtruth: Union[List[str], str]) -> None:
if isinstance(predictions, str):
predictions = [predictions]
if isinstance(groundtruth, str):
groundtruth = [groundtruth]
for pred, truth in zip(predictions, groundtruth):
pred, syntax_error = parse_semantics_str2dict(pred)
truth, _ = parse_semantics_str2dict(truth)
self.scenario_f1(truth["scenario"], pred["scenario"])
self.action_f1(truth["action"], pred["action"])
self.intent_f1(f"{truth['scenario']}_{truth['action']}", f"{pred['scenario']}_{pred['action']}")
self.span_f1(truth["entities"], pred["entities"])
for distance, metric in self.distance_metrics.items():
metric(truth["entities"], pred["entities"])
self.total += 1
self.invalid += int(syntax_error)
def compute(self, aggregate=True) -> Dict:
scenario_results = self.scenario_f1.get_metric()
action_results = self.action_f1.get_metric()
intent_results = self.intent_f1.get_metric()
entity_results = self.span_f1.get_metric()
word_dist_results = self.distance_metrics['word'].get_metric()
char_dist_results = self.distance_metrics['char'].get_metric()
self.slu_f1(word_dist_results)
self.slu_f1(char_dist_results)
slurp_results = self.slu_f1.get_metric()
if not aggregate:
return {
"scenario": scenario_results,
"action": action_results,
"intent": intent_results,
"entity": entity_results,
"word_dist": word_dist_results,
"char_dist": char_dist_results,
"slurp": slurp_results,
"invalid": self.invalid,
"total": self.total,
}
scores = dict()
scores["invalid"] = self.invalid
scores["total"] = self.total
self.update_scores_dict(scenario_results, scores, "scenario")
self.update_scores_dict(action_results, scores, "action")
self.update_scores_dict(intent_results, scores, "intent")
self.update_scores_dict(entity_results, scores, "entity")
self.update_scores_dict(word_dist_results, scores, "word_dist")
self.update_scores_dict(char_dist_results, scores, "char_dist")
self.update_scores_dict(slurp_results, scores, "slurp")
return scores
def update_scores_dict(self, source: Dict, target: Dict, tag: str = '') -> Dict:
scores = source['overall']
p, r, f1 = scores[:3]
target[f"{tag}_p"] = p
target[f"{tag}_r"] = r
target[f"{tag}_f1"] = f1
return target
| NeMo-main | examples/slu/speech_intent_slot/eval_utils/evaluator.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import torch
from pytorch_lightning import Trainer
import nemo
from nemo.core import ModelPT
from nemo.core.classes import Exportable
from nemo.core.config.pytorch_lightning import TrainerConfig
from nemo.utils import logging
try:
from contextlib import nullcontext
except ImportError:
# handle python < 3.7
from contextlib import suppress as nullcontext
def get_args(argv):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=f"Export NeMo models to ONNX/Torchscript",
)
parser.add_argument("source", help="Source .nemo file")
parser.add_argument("out", help="Location to write result to")
parser.add_argument("--autocast", action="store_true", help="Use autocast when exporting")
parser.add_argument("--runtime-check", action="store_true", help="Runtime check of exported net result")
parser.add_argument("--verbose", default=None, help="Verbose level for logging, numeric")
parser.add_argument("--max-batch", type=int, default=None, help="Max batch size for model export")
parser.add_argument("--max-dim", type=int, default=None, help="Max dimension(s) for model export")
parser.add_argument("--onnx-opset", type=int, default=None, help="ONNX opset for model export")
parser.add_argument(
"--cache_support", action="store_true", help="enables caching inputs for the models support it."
)
parser.add_argument("--device", default="cuda", help="Device to export for")
parser.add_argument("--check-tolerance", type=float, default=0.01, help="tolerance for verification")
parser.add_argument(
"--export-config",
metavar="KEY=VALUE",
nargs='+',
help="Set a number of key-value pairs to model.export_config dictionary "
"(do not put spaces before or after the = sign). "
"Note that values are always treated as strings.",
)
args = parser.parse_args(argv)
return args
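# Example invocation (a sketch only; the paths are placeholders):
#   python export.py /path/to/model.nemo /path/to/model.onnx --runtime-check --max-batch 4
# The target format (ONNX vs Torchscript) is typically determined by the model's export()
# implementation from the extension of the output path.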
def nemo_export(argv):
args = get_args(argv)
loglevel = logging.INFO
# assuming loglevel is bound to the string value obtained from the
# command line argument. Convert to upper case to allow the user to
# specify --log=DEBUG or --log=debug
if args.verbose is not None:
numeric_level = getattr(logging, args.verbose.upper(), None)
if not isinstance(numeric_level, int):
            raise ValueError('Invalid log level: %s' % args.verbose)
loglevel = numeric_level
logging.setLevel(loglevel)
logging.info("Logging level set to {}".format(loglevel))
"""Convert a .nemo saved model into .riva Riva input format."""
nemo_in = args.source
out = args.out
# Create a PL trainer object which is required for restoring Megatron models
cfg_trainer = TrainerConfig(
accelerator='gpu',
strategy="ddp",
num_nodes=1,
devices=1,
# Need to set the following two to False as ExpManager will take care of them differently.
logger=False,
enable_checkpointing=False,
)
    # Unpack the dataclass so its fields become Trainer keyword arguments
    # (passing the config object positionally would be interpreted as the `logger` argument).
    trainer = Trainer(**vars(cfg_trainer))
logging.info("Restoring NeMo model from '{}'".format(nemo_in))
try:
with torch.inference_mode():
# Restore instance from .nemo file using generic model restore_from
model = ModelPT.restore_from(restore_path=nemo_in, trainer=trainer)
except Exception as e:
logging.error(
"Failed to restore model from NeMo file : {}. Please make sure you have the latest NeMo package installed with [all] dependencies.".format(
nemo_in
)
)
raise e
logging.info("Model {} restored from '{}'".format(model.__class__.__name__, nemo_in))
if not isinstance(model, Exportable):
logging.error("Your NeMo model class ({}) is not Exportable.".format(model.__class__.__name__))
sys.exit(1)
#
# Add custom export parameters here
#
check_trace = args.runtime_check
in_args = {}
max_batch = 1
max_dim = None
if args.max_batch is not None:
in_args["max_batch"] = args.max_batch
max_batch = args.max_batch
if args.max_dim is not None:
in_args["max_dim"] = args.max_dim
max_dim = args.max_dim
if args.cache_support:
model.set_export_config({"cache_support": "True"})
if args.export_config:
kv = {}
for key_value in args.export_config:
lst = key_value.split("=")
if len(lst) != 2:
raise Exception("Use correct format for --export_config: k=v")
k, v = lst
kv[k] = v
model.set_export_config(kv)
autocast = nullcontext
if args.autocast:
autocast = torch.cuda.amp.autocast
try:
with autocast(), torch.no_grad(), torch.inference_mode():
model.to(device=args.device).freeze()
model.eval()
input_example = None
if check_trace and len(in_args) > 0:
input_example = model.input_module.input_example(**in_args)
check_trace = [input_example]
for key, arg in in_args.items():
in_args[key] = (arg + 1) // 2
input_example2 = model.input_module.input_example(**in_args)
check_trace.append(input_example2)
logging.info(f"Using additional check args: {in_args}")
_, descriptions = model.export(
out,
input_example=input_example,
check_trace=check_trace,
check_tolerance=args.check_tolerance,
onnx_opset_version=args.onnx_opset,
verbose=bool(args.verbose),
)
except Exception as e:
logging.error(
"Export failed. Please make sure your NeMo model class ({}) has working export() and that you have the latest NeMo package installed with [all] dependencies.".format(
model.__class__
)
)
raise e
if __name__ == '__main__':
nemo_export(sys.argv[1:])
| NeMo-main | scripts/export.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import os
import time
import librosa
import sox
from joblib import Parallel, delayed
def resample_file(resampled_dir, filepath, ext, sample_rate):
"""
    Resample an audio file to the target sample rate (default 16 kHz) and convert it to a single channel.
    Args:
        resampled_dir: Directory where the transformed files are written.
        filepath: Filepath of the audio file.
        ext: File type, e.g. "wav" or "flac".
        sample_rate: Target sample rate in Hz.
    Returns:
        None on success, or the filepath of a file that could not be converted.
"""
head, filename = os.path.split(filepath)
_, clsname = os.path.split(head)
filename, _ = os.path.splitext(filename)
new_dir = os.path.join(resampled_dir, clsname)
if not os.path.exists(new_dir):
os.makedirs(new_dir)
new_path = os.path.join(new_dir, filename + f'.{ext}')
# check if the resampled data exists.
if os.path.exists(new_path):
print(f"Resampled file {filepath} exists. Skip it.")
return None
transform = sox.Transformer()
transform.set_output_format(file_type='wav')
transform.convert(samplerate=sample_rate, n_channels=1)
try:
transform.build(filepath, new_path)
print(f"Finished converting file {filepath}.")
return None
except sox.core.SoxError as e:
try:
# Check if the file is readable
librosa.load(path=filepath)
# if it is, force input format and try again
transform.set_input_format(file_type=ext)
transform.build(filepath, new_path)
return None
except Exception:
return filepath
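# Example invocation (a sketch only; the directories are placeholders):
#   python freesound_resample.py --data_dir=<downloaded freesound dir> \
#       --resampled_dir=<output dir> --sample_rate=16000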
def main():
start = time.time()
parser = argparse.ArgumentParser(description='Freesound data resample')
parser.add_argument("--data_dir", required=True, default=None, type=str)
parser.add_argument('--resampled_dir', required=True, default=None, type=str)
parser.add_argument('--sample_rate', default=16000, type=int)
args = parser.parse_args()
data_dir = args.data_dir
resampled_dir = args.resampled_dir
sample_rate = args.sample_rate
wav_files = sorted(glob.glob(os.path.join(data_dir, '*/*.wav')))
flac_files = sorted(glob.glob(os.path.join(data_dir, '*/*.flac')))
with Parallel(n_jobs=-1, verbose=10) as parallel:
wav_files_failed = parallel(
delayed(resample_file)(resampled_dir, filepath, ext='wav', sample_rate=sample_rate)
for filepath in wav_files
)
flac_files_failed = parallel(
delayed(resample_file)(resampled_dir, filepath, ext='flac', sample_rate=sample_rate)
for filepath in flac_files
)
with open('dataset_conversion_logs.txt', 'w') as f:
for file in wav_files_failed:
if file is not None:
f.write(f"{file}\n")
for file in flac_files_failed:
if file is not None:
f.write(f"{file}\n")
end = time.time()
    print(f'Resampling data in {data_dir} and saving to {resampled_dir} took {end - start:.1f} seconds.')
if __name__ == '__main__':
main()
| NeMo-main | scripts/freesound_download_resample/freesound_resample.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import pickle
import time
try:
import librosa
import requests
import requests_oauthlib
from joblib import Parallel, delayed
from oauthlib.oauth2 import TokenExpiredError
except (ModuleNotFoundError, ImportError) as e:
raise e
try:
import freesound
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
"freesound is not installed. Execute `pip install --no-cache-dir git+https://github.com/MTG/freesound-python.git` in terminal"
)
"""
Instructions
1. We will need some requirements including freesound, requests, requests_oauthlib, joblib, librosa and sox. If they are not installed, please run `pip install -r freesound_requirements.txt`
2. Create an API key for freesound.org at https://freesound.org/help/developers/
3. Create a python file called `freesound_private_apikey.py` and add lines `api_key = <your Freesound api key>` and `client_id = <your Freesound client id>`
4. Authorize by running `python freesound_download.py --authorize`, visit the website, and paste the response code
5. Feel free to change any arguments in download_resample_freesound.sh such as max_samples and max_filesize
6. Run `bash download_resample_freesound.sh <number of files you want> <download data directory> <resampled data directory>`
"""
# Import the API Key
try:
from freesound_private_apikey import api_key, client_id
print("API Key found !")
except ImportError:
raise ImportError(
"Create a python file called `freesound_private_apikey.py` and add lined `api_key = <your Freesound api key>` and `client_id = <your Freesound client id>`"
)
auth_url = 'https://freesound.org/apiv2/oauth2/authorize/'
redirect_url = 'https://freesound.org/home/app_permissions/permission_granted/'
token_url = 'https://freesound.org/apiv2/oauth2/access_token/'
scope = ["read", "write"]
BACKGROUND_CLASSES = [
"Air brake",
"Static",
"Acoustic environment",
"Distortion",
"Tape hiss",
"Hubbub",
"Vibration",
"Cacophony",
"Throbbing",
"Reverberation",
"Inside, public space",
"Inside, small room",
"Echo",
"Outside, rural",
"Outside, natural",
"Outside, urban",
"Outside, manmade",
"Car",
"Bus",
"Traffic noise",
"Roadway noise",
"Truck",
"Emergency vehicle",
"Motorcycle",
"Aircraft engine",
"Aircraft",
"Helicopter",
"Bicycle",
"Skateboard",
"Subway, metro, underground",
"Railroad car",
"Train wagon",
"Train",
"Sailboat",
"Rowboat",
"Ship",
]
SPEECH_CLASSES = [
"Male speech",
"Female speech",
"Speech synthesizer",
"Babbling",
"Conversation",
"Child speech",
"Narration",
"Laughter",
"Yawn",
"Whispering",
"Whimper",
"Baby cry",
"Sigh",
"Groan",
"Humming",
"Male singing",
"Female singing",
"Child singing",
"Children shouting",
]
def initialize_oauth():
# If token already exists, then just load it
if os.path.exists('_token.pkl'):
token = unpickle_object('_token')
oauth = requests_oauthlib.OAuth2Session(client_id, redirect_uri=redirect_url, scope=scope, token=token)
else:
# Construct a new token after OAuth2 flow
# Initialize a OAuth2 session
oauth = requests_oauthlib.OAuth2Session(client_id, redirect_uri=redirect_url, scope=scope)
authorization_url, state = oauth.authorization_url(auth_url)
print(f"Visit below website and paste access token below : \n\n{authorization_url}\n")
authorization_response = input("Paste authorization response code here :\n")
token = oauth.fetch_token(
token_url,
authorization_response=authorization_response,
code=authorization_response,
client_secret=api_key,
)
# Save the token generated
pickle_object(token, '_token')
return oauth, token
def instantiate_session():
# Reconstruct session in process, and force singular execution thread to reduce session
# connections to server
token = unpickle_object('_token')
session = requests_oauthlib.OAuth2Session(client_id, redirect_uri=redirect_url, scope=scope, token=token)
adapter = requests.adapters.HTTPAdapter(pool_connections=1, pool_maxsize=1)
session.mount('http://', adapter)
return session
def refresh_token(session):
print("Refreshing tokens...")
# Token expired, perform token refresh
extras = {'client_id': client_id, 'client_secret': api_key}
token = session.refresh_token(token_url, **extras)
print("Token refresh performed...")
# Save the refreshed token
pickle_object(token, '_token')
return session
def pickle_object(token, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(token, f)
def unpickle_object(name):
fp = name + '.pkl'
if os.path.exists(fp):
with open(fp, 'rb') as f:
token = pickle.load(f)
return token
else:
raise FileNotFoundError('Token not found!')
def is_resource_limited(e: freesound.FreesoundException):
"""
Test if the reason for a freesound exception was either rate limit
or daily limit.
If it was for either reason, sleep for an appropriate delay and return
to try again.
Args:
e: Freesound Exception object
Returns:
A boolean which describes whether the error was due to some
api limit issue, or if it was some other reason.
If false is returned, then the user should carefully check the cause
and log it.
"""
detail = e.detail['detail']
if '2000' in detail:
# This is the request limit, hold off for 1 hour and try again
print(f"Hit daily limit, sleeping for 20 minutes.")
time.sleep(60 * 20)
return True
elif '60' in detail:
# This is the request limit per minute, hold off for 1 minute and try again
print(f"Hit rate limit, sleeping for 1 minute.")
time.sleep(60)
return True
else:
return False
def prepare_client(client: freesound.FreesoundClient, token) -> freesound.FreesoundClient:
# Initialize the client with token auth
client.set_token(token['access_token'], auth_type='oauth')
print("Client ready !")
return client
def get_text_query_with_resource_limit_checks(client, query: str, filters: list, fields: str, page_size: int):
"""
Performs a text query, checks for rate / api limits, and retries.
Args:
client: FreesoundAPI client
query: query string (either exact or inexact)
filters: list of string filters
fields: String of values to recover
page_size: samples per page returned
Returns:
"""
pages = None
attempts = 20
while pages is None:
try:
pages = client.text_search(query=query, filter=" ".join(filters), fields=fields, page_size=str(page_size),)
except freesound.FreesoundException as e:
# Most probably a rate limit or a request limit
            # Check if that was the case, and wait appropriate amount of time
# for retry
was_resource_limited = is_resource_limited(e)
# If result of test False, it means that failure was due to some other reason.
# Log it, then break loop
if not was_resource_limited:
print(e.with_traceback(None))
break
attempts -= 1
# Attempt to refresh tokens if it fails multiple times
if attempts % 5 == 0 and attempts > 0:
session = instantiate_session()
refresh_token(session)
session.close()
token = unpickle_object('_token')
client = prepare_client(client, token)
if attempts <= 0:
print(f"Failed to query pages for '{query}' after 10 attempts, skipping query")
break
if pages is None:
print(f"Query attempts remaining = {attempts}")
return client, pages
def get_resource_with_auto_refresh(session, download_url):
"""
Attempts download of audio with a token refresh if necessary.
"""
try:
result = session.get(download_url)
except TokenExpiredError as e:
session = refresh_token(session)
result = session.get(download_url)
except Exception as e:
result = None
print(f"Skipping file {download_url} due to exception below\n\n")
print(e)
return result.content
def download_song(basepath, id, name, download_url):
# Cleanup name
name = name.encode('ascii', 'replace').decode()
name = name.replace("?", "-")
name = name.replace(":", "-")
name = name.replace("(", "-")
name = name.replace(")", "-")
name = name.replace("'", "")
name = name.replace(",", "-")
name = name.replace("/", "-")
name = name.replace("\\", "-")
name = name.replace(".", "-")
name = name.replace(" ", "")
# Correct last `.` for filetype
name = name[:-4] + '.wav'
# Add file id to filename
name = f"id_{id}" + "_" + name
fp = os.path.join(basepath, name)
# Check if file, if exists already, can be loaded by librosa
# If it cannot be loaded, possibly corrupted file.
# Delete and then re-download
if os.path.exists(fp):
try:
_ = librosa.load(path=fp)
except Exception:
            # File is corrupted, delete and re-download.
os.remove(fp)
print(f"Pre-existing file {fp} was corrupt and was deleted, will be re-downloaded.")
if not os.path.exists(fp):
print("Downloading file :", name)
session = instantiate_session()
data = None
attempts = 10
try:
while data is None:
try:
# Get the sound data
data = get_resource_with_auto_refresh(session, download_url)
except freesound.FreesoundException as e:
# Most probably a rate limit or a request limit
# Check if that was the case, and wait appropriate amount of time
# for retry
was_resource_limited = is_resource_limited(e)
# If result of test False, it means that failure was due to some other reason.
# Log it, then break loop
if not was_resource_limited:
print(e)
break
attempts -= 1
if attempts <= 0:
print(f"Failed to download file {fp} after 10 attempts, skipping file")
break
if data is None:
print(f"Download attempts remaining = {attempts}")
finally:
session.close()
# Write the data to file
if data is not None:
print("Downloaded file :", name)
with open(fp, 'wb') as f:
f.write(data)
# If file size is less than 89, then this probably is a text format and not an actual audio file.
if os.path.getsize(fp) > 89:
print(f"File written : {fp}")
else:
os.remove(fp)
print(f"File corrupted and has been deleted: {fp}")
else:
print(f"File [{fp}] corrupted or faced some issue when downloading, skipped.")
# Sleep to avoid hitting rate limits
time.sleep(5)
else:
print(f"File [{fp}] already exists in dataset, skipping re-download.")
def get_songs_by_category(
client: freesound.FreesoundClient,
category: str,
data_dir: str,
max_num_samples=100,
page_size=100,
min_filesize_in_mb=0,
max_filesize_in_mb=10,
n_jobs=None,
):
"""
Download songs of a category with restrictions
Args:
client: FreesoundAPI client
category: category to be downloaded
data_dir: directory of downloaded songs
max_num_samples: maximum number of samples of this category
page_size: samples per page returned
min_filesize_in_mb: minimum filesize of the song in MB
max_filesize_in_mb: maximum filesize of the song in MB
n_jobs: number of jobs for parallel processing
Returns:
"""
# quote string to force exact match
query = f'"{category}"'
print(f"Query : {query}")
page_size = min(page_size, 150)
max_filesize = int(max_filesize_in_mb * (2 ** 20))
if min_filesize_in_mb == 0:
min_filesize_in_mb = 1
else:
min_filesize_in_mb = int(min_filesize_in_mb * (2 ** 20))
if max_num_samples < 0:
max_num_samples = int(1e6)
filters = [
'type:(wav OR flac)',
'license:("Attribution" OR "Creative Commons 0")',
f'filesize:[{min_filesize_in_mb} TO {max_filesize}]',
]
fields = "id,name,download,license"
client, pages = get_text_query_with_resource_limit_checks(
client, query=query, filters=filters, fields=fields, page_size=page_size
)
if pages is None:
print(f"Number of attempts exceeded limit, skipping query {query}")
return
num_pages = pages.count
# Check if returned empty result; if so, fallback to inexact category search
if num_pages == 0:
print(f"Found 0 samples of results for query '{query}'")
print(f"Trying less restricted query : {category}")
client, pages = get_text_query_with_resource_limit_checks(
client, query=category, filters=filters, fields=fields, page_size=page_size
)
if pages is None:
print(f"Number of attempts exceeded limit, skipping query {query}")
return
num_pages = pages.count
print(f"Found {num_pages} samples of results for query '{query}'")
category = category.replace(' ', '_')
basepath = os.path.join(data_dir, category)
if not os.path.exists(basepath):
os.makedirs(basepath)
sounds = []
sample_count = 0
# Retrieve sound license information
with open(os.path.join(basepath, 'licenses.txt'), 'w') as f:
f.write("ID,LICENSE\n")
f.flush()
while True:
for sound in pages:
if sample_count >= max_num_samples:
print(
f"Collected {sample_count} samples, which is >= max number of samples requested "
f"{max_num_samples}. Stopping for this category : {category}"
)
break
sounds.append(sound)
sample_count += 1
f.write(f"{sound.id},{sound.license}\n")
f.flush()
if sample_count >= max_num_samples:
break
try:
pages = pages.next_page()
except ValueError:
break
if n_jobs is None:
n_jobs = max(1, len(sounds))
# Parallel download all songs
with Parallel(n_jobs=n_jobs, verbose=10) as parallel:
_ = parallel(delayed(download_song)(basepath, sound.id, sound.name, sound.download) for sound in sounds)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Freesound download script")
parser.add_argument(
'--authorize', action='store_true', dest='auth', help='Flag to only perform OAuth2 authorization step'
)
parser.add_argument('-c', '--category', default='', type=str, help='Category required to download')
parser.add_argument('-d', '--data_dir', default='', type=str, help='Destination folder to store data')
parser.add_argument('--page_size', default=100, type=int, help='Number of sounds per page')
parser.add_argument('--max_samples', default=100, type=int, help='Maximum number of sound samples')
parser.add_argument('--min_filesize', default=0, type=int, help='Maximum filesize allowed (in MB)')
parser.add_argument('--max_filesize', default=20, type=int, help='Maximum filesize allowed (in MB)')
parser.set_defaults(auth=False)
args = parser.parse_args()
if args.auth:
""" Initialize oauth token to be used by all """
oauth, token = initialize_oauth()
oauth.close()
print("Authentication suceeded ! Token stored in `_token.pkl`")
exit(0)
if not os.path.exists('_token.pkl'):
raise FileNotFoundError(
"Please authorize the application first using " "`python freesound_download.py --authorize`"
)
if args.data_dir == '':
raise ValueError("Data dir must be passed as an argument using `--data_dir`")
data_dir = args.data_dir
page_size = args.page_size
max_num_samples = args.max_samples
min_filesize_in_mb = args.min_filesize
max_filesize_in_mb = args.max_filesize
# Initialize and authenticate client
token = unpickle_object('_token')
freesound_client = freesound.FreesoundClient()
client = prepare_client(freesound_client, token)
category = args.category
if category == '':
raise ValueError("Cannot pass empty string as it will select all of FreeSound data !")
print(f"Downloading category : {category}")
get_songs_by_category(
client,
category,
data_dir=data_dir,
max_num_samples=max_num_samples,
page_size=page_size,
min_filesize_in_mb=min_filesize_in_mb,
max_filesize_in_mb=max_filesize_in_mb,
n_jobs=30,
)
| NeMo-main | scripts/freesound_download_resample/freesound_download.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# USAGE: python process_asr_text_tokenizer.py --manifest=<path to train manifest files, separated by commas> \
# --data_root="<output directory>" \
# --vocab_size=<number of tokens in vocabulary> \
# --tokenizer=<"spe" or "wpe"> \
# --log
# where <manifest> can be: train_clean_100, train_clean_360, train_other_500
# You can also put more than one data_set comma-separated:
# --manifest="train_clean_100,train_clean_360,train_other_500"
# or
# python process_asr_text_tokenizer.py --data_file=<path to train text file> \
# --data_root="<output directory>" \
# --vocab_size=<number of tokens in vocabulary> \
# --tokenizer=<"spe" or "wpe"> \
# --log
# where <path to train text file> is a plain text file with one utterance per line.
# You can also put more than one data file comma-separated:
# --data_file="corpus_part1.txt,corpus_part2.txt"
#
# Args:
# --manifest or --data_file: If your text data lies inside of an ASR manifest file,
# then use the --manifest path. If instead the text data is inside a file with separate lines
# corresponding to different text lines, then use --data_file.
# In either case, you can add commas to concatenate different manifests or different data files.
#
# --data_root: The output directory (whose subdirectories will be created if not present) where
# the tokenizers will be placed.
#
# --vocab_size: The size of the tokenizer vocabulary. Larger vocabularies can accommodate almost entire
# words, but the decoder size of any model will grow proportionally.
#
# --tokenizer: Can be either spe or wpe . spe refers to the Google sentencepiece library tokenizer.
# wpe refers to the HuggingFace BERT Word Piece tokenizer.
#
# --no_lower_case: When this flag is passed, it will force the tokenizer to create separate tokens for
# upper and lower case characters. By default, the script will turn all the text to lower case
# before tokenization (and if upper case characters are passed during training/inference, the
# tokenizer will emit a token equivalent to Out-Of-Vocabulary). Used primarily for the
# English language.
#
# --spe_type: The sentencepiece library has a few implementations of the tokenization technique, and
# spe_type refers to these implementations. Currently supported types are unigram, bpe, char, word.
# Defaults to bpe.
#
# --spe_character_coverage: The sentencepiece library considers how much of the original vocabulary it
# should cover in its "base set" of tokens (akin to the lower and upper case characters of the
# English language). For almost all languages with small base token sets (<1000 tokens), this
# should be kept at its default of 1.0. For languages with larger vocabularies (say Japanese,
# Mandarin, Korean etc), the suggested value is 0.9995.
#
# --spe_sample_size: If the dataset is too large, consider using a sampled dataset indicated by a
# positive integer. By default, any negative value (default = -1) will use the entire dataset.
#
# --spe_train_extremely_large_corpus: When training a sentencepiece tokenizer on very large amounts of text,
# sometimes the tokenizer will run out of memory or won't be able to process so much data in RAM.
# At some point you might receive the following error - "Input corpus too large, try with
# train_extremely_large_corpus=true". If your machine has large amounts of RAM, it might still be possible
# to build the tokenizer using the above flag. Will silently fail if it runs out of RAM.
#
# --spe_max_sentencepiece_length: Limits the maximum length that any SentencePiece subword can be.
# Using this will change the subword tokens generated.
#
# --spe_pad: Adds <pad> as special token.
#
# --spe_bos: Adds <s> as Beginning-of-Sentence special token.
#
# --spe_eos: Adds </s> as End-of-Sentence special token.
#
# --log: Whether the script should display log messages
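# A minimal sketch of loading the resulting SPE tokenizer with the sentencepiece library
# (the directory name below is only an example of the `tokenizer_<type>_<spe_type>_v<vocab_size>`
# pattern produced by this script; adjust it to your actual output):
#
#   import sentencepiece as spm
#   sp = spm.SentencePieceProcessor(model_file="<data_root>/tokenizer_spe_bpe_v1024/tokenizer.model")
#   print(sp.encode("hello world", out_type=str))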
import argparse
import json
import logging
import os
import tokenizers
from nemo.collections.common.tokenizers.sentencepiece_tokenizer import create_spt_model
from nemo.utils.data_utils import DataStoreObject
parser = argparse.ArgumentParser(description='Create tokenizer')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--manifest", default=None, type=str, help='Comma separated list of manifest files')
group.add_argument("--data_file", default=None, help='data file from which to create tokenizer model')
parser.add_argument("--data_root", required=True, default=None, type=str, help='Output directory')
parser.add_argument("--vocab_size", default=1024, type=int, help='Vocabulary size')
parser.add_argument("--tokenizer", default="wpe", choices=["spe", "wpe"], help='Type of tokenization to perform')
parser.add_argument(
"--spe_type",
default="bpe",
choices=['bpe', 'unigram', 'char', 'word'],
help='Type of the SentencePiece model. Can be `bpe`, `unigram`, `char` or `word`.'
'Used only if --tokenizer == `spe`',
)
parser.add_argument(
'--spe_character_coverage',
type=float,
default=1.0,
help="Character coverage percentage for SentencePiece tokenization. For languages "
"with large vocabulary, should be close to 0.9995, otherwise kept as 1.0",
)
parser.add_argument('--spe_bos', action='store_true', help='Add <s> token to SentencePiece Tokenizer.')
parser.add_argument('--spe_eos', action='store_true', help='Add </s> token to SentencePiece Tokenizer.')
parser.add_argument('--spe_pad', action='store_true', help='Add <pad> token to SentencePiece Tokenizer.')
parser.add_argument(
'--spe_sample_size',
type=int,
default=-1,
help="Samples the dataset by `sample_size` if positive integer, otherwise uses whole dataset",
)
parser.add_argument('--spe_train_extremely_large_corpus', action='store_true', help='Enable SentencePiece train_extremely_large_corpus mode for very large text corpora.')
parser.add_argument(
'--spe_max_sentencepiece_length',
type=int,
default=-1,
    help='Limit the maximum length of each SentencePiece subword. '
    'Must be a positive integer > 0. By default places no limit on subword length.',
)
parser.add_argument(
'--spe_no_split_by_unicode_script',
dest='spe_split_by_unicode_script',
action='store_false',
help="Don't use Unicode script to split sentence pieces.",
)
parser.add_argument('--no_lower_case', dest='lower_case', action='store_false')
parser.add_argument("--log", action='store_true')
parser.set_defaults(log=False, lower_case=True, spe_train_extremely_large_corpus=False)
args = parser.parse_args()
def __build_document_from_manifests(
data_root: str, manifests: str,
):
if ',' in manifests:
manifests = manifests.split(',')
else:
manifests = [manifests]
document_dir = os.path.join(data_root, 'text_corpus')
if not os.path.exists(document_dir):
os.makedirs(document_dir)
document_path = os.path.join(document_dir, 'document.txt')
if os.path.exists(document_path):
logging.info('Corpus already exists at path : %s', document_path)
return document_path
num_lines = 0
with open(document_path, 'w') as out_writer:
for manifest in manifests:
with open(DataStoreObject(manifest).get(), 'r') as in_reader:
for line in in_reader:
item = json.loads(line)
text = item['text']
out_writer.write(text + '\n')
out_writer.flush()
num_lines += 1
logging.info(f"Finished extracting manifest : {manifest}")
logging.info("Finished extracting all manifests ! Number of sentences : {}".format(num_lines))
return document_path
def __process_data(
text_path: str,
dst_folder: str,
vocab_size: int,
tokenizer_type: str,
spe_type: str,
spe_character_coverage: float,
spe_train_extremely_large_corpus: bool,
spe_sample_size: int,
spe_max_sentencepiece_length: int,
spe_split_by_unicode_script: bool,
spe_bos: bool,
spe_eos: bool,
spe_pad: bool,
lower_case: bool,
):
"""
    Builds a tokenizer (SentencePiece or WordPiece) from the given text corpus and saves it under dst_folder.
    Args:
        text_path: source file with one text line per row
        dst_folder: directory where the tokenizer files will be stored
        vocab_size: vocabulary size used in encoding the text
tokenizer_type: type of tokenization to perform - wpe or spe
spe_type: type of tokenization model used for spe.
spe_character_coverage: float value between 0 and 1 (as a percentage). For languages with a vast charset,
can be < 1.0, but for all other languages, it should be set as 1.0
spe_sample_size: int, default of -1. If positive integer is used, samples the dataset
by given sample size.
spe_train_extremely_large_corpus: bool. If dataset is too large, and user has sufficient RAM,
this flag can be set to try to trained the tokenizer. Will silently fail if it runs out of RAM.
spe_max_sentencepiece_length: Limits the maximum length of the SentencePiece subword that can be constructed.
By default, no limit is placed.
spe_bos: Bool flag, whether to add <s> to SentencePiece tokenizer vocabulary.
spe_eos: Bool flag, whether to add </s> to SentencePiece tokenizer vocabulary.
spe_pad: Bool flag, whether to add <pad> to SentencePiece tokenizer vocabulary.
lower_case: whether to tokenize with lower case character set only (for english)
Returns:
"""
if tokenizer_type == 'spe':
# Prepare directory of tokenizer
if spe_max_sentencepiece_length > 0:
tokenizer_dir = os.path.join(dst_folder, 'tokenizer_{}_{}_v{}_max_{}').format(
tokenizer_type, spe_type, vocab_size, spe_max_sentencepiece_length
)
else:
tokenizer_dir = os.path.join(dst_folder, 'tokenizer_{}_{}_v{}').format(
tokenizer_type, spe_type, vocab_size
)
if spe_pad:
tokenizer_dir = f'{tokenizer_dir}_pad'
if spe_bos:
tokenizer_dir = f'{tokenizer_dir}_bos'
if spe_eos:
tokenizer_dir = f'{tokenizer_dir}_eos'
if not os.path.exists(tokenizer_dir):
os.makedirs(tokenizer_dir)
if os.path.exists(os.path.join(tokenizer_dir, 'tokenizer.model')):
logging.warning("Model file already exists, overriding old model file !")
os.remove(os.path.join(tokenizer_dir, 'tokenizer.model'))
# Build tokenizer
tokenizer_path, vocab_path = create_spt_model(
data_file=text_path,
vocab_size=vocab_size,
sample_size=spe_sample_size,
do_lower_case=lower_case,
output_dir=tokenizer_dir,
tokenizer_type=spe_type,
character_coverage=spe_character_coverage,
train_extremely_large_corpus=spe_train_extremely_large_corpus,
max_sentencepiece_length=spe_max_sentencepiece_length,
split_by_unicode_script=spe_split_by_unicode_script,
bos=spe_bos,
eos=spe_eos,
pad=spe_pad,
)
else:
tokenizer_dir = os.path.join(dst_folder, 'tokenizer_{}_v{}').format(tokenizer_type, vocab_size)
if not os.path.exists(tokenizer_dir):
os.makedirs(tokenizer_dir)
tokenizer = tokenizers.BertWordPieceTokenizer(lowercase=lower_case)
tokenizer.train(text_path, vocab_size=vocab_size)
tokenizer.save_model(tokenizer_dir)
return tokenizer_dir
def main():
data_root = args.data_root
manifests = args.manifest
data_file = args.data_file
vocab_size = args.vocab_size
tokenizer = args.tokenizer
spe_type = args.spe_type
spe_character_coverage = args.spe_character_coverage
spe_sample_size = args.spe_sample_size
spe_train_extremely_large_corpus = args.spe_train_extremely_large_corpus
spe_max_sentencepiece_length = args.spe_max_sentencepiece_length
spe_split_by_unicode_script = args.spe_split_by_unicode_script
spe_bos, spe_eos, spe_pad = args.spe_bos, args.spe_eos, args.spe_pad
lower_case = args.lower_case
if not os.path.exists(data_root):
os.makedirs(data_root)
if args.log:
logging.basicConfig(level=logging.INFO)
if manifests:
text_corpus_path = __build_document_from_manifests(data_root, manifests)
else:
text_corpus_path = data_file
tokenizer_path = __process_data(
text_corpus_path,
data_root,
vocab_size,
tokenizer,
spe_type,
lower_case=lower_case,
spe_character_coverage=spe_character_coverage,
spe_sample_size=spe_sample_size,
spe_train_extremely_large_corpus=spe_train_extremely_large_corpus,
spe_max_sentencepiece_length=spe_max_sentencepiece_length,
spe_split_by_unicode_script=spe_split_by_unicode_script,
spe_bos=spe_bos,
spe_eos=spe_eos,
spe_pad=spe_pad,
)
print("Serialized tokenizer at location :", tokenizer_path)
logging.info('Done!')
if __name__ == "__main__":
main()
| NeMo-main | scripts/tokenizers/process_asr_text_tokenizer.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import pandas as pd
from omegaconf import OmegaConf
from nemo.collections.common.tokenizers.column_coder import ColumnCodes
from nemo.core.config import hydra_runner
from nemo.utils import logging
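# Example invocation (a sketch only; values are placeholders, and `table_structure` is usually
# defined in conf/tabular_data_tokenizer.yaml rather than on the command line):
#   python train_tabular_data_tokenizer.py \
#       table_csv_file=/path/to/table.csv \
#       tokenizer_file=/path/to/tabular_tokenizer.pkl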
@hydra_runner(config_path="conf", config_name="tabular_data_tokenizer")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(OmegaConf.to_yaml(cfg))
table = pd.read_csv(cfg.table_csv_file)
example_arrays = {}
for col in cfg.table_structure:
col_name = col['name']
example_arrays[col_name] = table[col_name].dropna().unique()
cc = ColumnCodes.get_column_codes(cfg.table_structure, example_arrays)
with open(cfg.tokenizer_file, 'wb') as handle:
pickle.dump(cc, handle)
if __name__ == '__main__':
main()
| NeMo-main | scripts/tokenizers/train_tabular_data_tokenizer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from argparse import ArgumentParser
import sentencepiece as spm
try:
import sentencepiece_model_pb2 as spt
except (ImportError, ModuleNotFoundError):
raise Exception("Ensure that sentencepiece_model_pb2.py has been generated from the protoc compiler")
"""Utility to add special tokens to existing sentencepiece models.
Generate sentencepiece_model_pb2.py in the directory of this script before running
To generate run `protoc --python_out=<path_to_NeMo>/scripts/tokenizers/ sentencepiece_model.proto`
inside the src folder in sentencepiece repo
Refer: https://github.com/google/sentencepiece/issues/121
Usage:
python edit_spt_model.py \
--input_file <input_model_dir> \
--output_file <output_model_dir> \
--tokens <space separated special tokens>
Example:
python edit_spt_model.py \
--input_file test.model \
--output_file test.model \
--tokens [CLS] [SEP]
"""
def edit_spt_model():
parser = ArgumentParser()
parser.add_argument(
"--input_file", type=str, required=True, help="Path to sentencepiece model file",
)
parser.add_argument(
"--output_file", type=str, required=True, help="Path to sentencepiece model file",
)
parser.add_argument(
"--tokens", type=str, nargs='+', required=True, help="Special tokens to add to tokenizer",
)
parser.add_argument(
"--is_userdefined", action="store_true", help="When set, the new tokens are set as user_defined tokens",
)
args = parser.parse_args()
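    # SentencePiece piece types (see sentencepiece_model.proto): 3 == CONTROL, 4 == USER_DEFINED.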
token_type = 3
if args.is_userdefined:
token_type = 4
model = spt.ModelProto()
model.ParseFromString(open(args.input_file, 'rb').read())
for token in args.tokens:
piece = model.SentencePiece(piece=token, score=0.0, type=token_type)
if piece in model.pieces:
logging.error(f"Special Token '{token}' already exists in the input model!")
sys.exit(1)
model.pieces.append(piece)
sp = spm.SentencePieceProcessor()
try:
sp.LoadFromSerializedProto(model.SerializeToString())
for token in args.tokens:
            token_id = sp.piece_to_id(token)
            logging.info(f"Created token '{token}' at ID {token_id}")
logging.info(f"New tokenizer vocab size: {sp.get_piece_size()}")
    except Exception:
logging.error("Could not appropriately configure new tokenizer. Verify if the special tokens already exist.")
sys.exit(1)
with open(args.output_file, 'wb') as outf:
outf.write(model.SerializeToString())
logging.info(f"Created new tokenizer at: {args.output_file}")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
edit_spt_model()
| NeMo-main | scripts/tokenizers/add_special_tokens_to_sentencepiece.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script provides a functionality to create confidence-based ensembles
from a collection of pretrained models.
For more details see the paper https://arxiv.org/abs/2306.15824
or tutorial in tutorials/asr/Confidence_Ensembles.ipynb
You would typically use this script by providing a yaml config file or overriding
default options from command line.
Usage examples:
1. Building an ensemble of two monolingual models with default settings (no confidence tuning).
python build_ensemble.py --config-path=. --config-name=ensemble_config.yaml
ensemble.0.model=stt_it_conformer_ctc_large
ensemble.0.training_manifest=<path to the Italian data of 100+ utterances (no transcription required)>
ensemble.1.model=stt_es_conformer_ctc_large
ensemble.1.training_manifest=<path to the Spanish data of 100+ utterances (no transcription required)>
output_path=<path to the desired location of the .nemo checkpoint>
You can have more than 2 models and can control transcription settings (e.g., batch size)
with ``transcription.<any argument of examples/asr/transcribe_speech.py>`` parameters.
2. If you want to get improved results, you can enable tuning of the confidence and logistic regression (LR) parameters.
E.g.
python build_ensemble.py
<all arguments like in the previous example>
ensemble.0.dev_manifest=<path to the dev data that's required for tuning>
...
# IMPORTANT: see the note below if you use > 2 models!
ensemble.N.dev_manifest=<path to the dev data that's required for tuning>
tune_confidence=True # to allow confidence tuning. LR is tuned by default
As with any tuning, it is recommended to have a reasonably large validation set for each model,
otherwise you might overfit to the validation data.
Note that if you add additional models (> 2) you will need to modify ensemble_config.yaml
or create a new one with added models in there. While it's theoretically possible to
fully override such parameters from commandline, hydra is very unfriendly for such
use-cases, so it's strongly recommended to create new configs.
3. If you want to precisely control tuning grid search, you can do that with
python build_ensemble.py
<all arguments as in the previous examples>
tune_confidence_config.confidence_type='[entropy_renyi_exp,entropy_tsallis_exp]' # only tune over this set
tune_confidence_config.alpha='[0.1,0.5,1.0]' # only tune over this set
You can check the dataclasses in this file for the full list of supported
arguments and their default values.
"""
import atexit
# using default logging to be able to silence unnecessary messages from nemo
import logging
import os
import random
import sys
import tempfile
from copy import deepcopy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import joblib
import numpy as np
import pytorch_lightning as pl
from omegaconf import MISSING, DictConfig, OmegaConf
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from nemo.collections.asr.models.confidence_ensemble import (
ConfidenceEnsembleModel,
ConfidenceSpec,
compute_confidence,
get_filtered_logprobs,
)
from nemo.collections.asr.parts.utils.asr_confidence_utils import (
ConfidenceConfig,
ConfidenceMeasureConfig,
get_confidence_aggregation_bank,
get_confidence_measure_bank,
)
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
from nemo.core.config import hydra_runner
LOG = logging.getLogger(__file__)
# adding Python path. If not found, asking user to get the file
try:
sys.path.append(str(Path(__file__).parents[2] / "examples" / "asr"))
import transcribe_speech
except ImportError:
# if users run script normally from nemo repo, this shouldn't be triggered as
# we modify the path above. But if they downloaded the build_ensemble.py as
# an isolated script, we'd ask them to also download corresponding version
# of the transcribe_speech.py
print(
"Current script depends on 'examples/asr/transcribe_speech.py', but can't find it. "
"If it's not present, download it from the NeMo github manually and put inside this folder."
)
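# Illustrative sketch (not used by the training code below; names are hypothetical): what the
# saved ensemble does at inference time. Each member model transcribes the utterance and yields
# a confidence score; the fitted sklearn pipeline then picks which member's transcript to keep.
def _select_transcript_sketch(transcripts: List[str], confidences: List[float], model_selection_pipe: Pipeline) -> str:
    features = np.array([confidences])  # shape [1, num_models], same feature layout as used for training below
    best_model_idx = int(model_selection_pipe.predict(features)[0])
    return transcripts[best_model_idx]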
@dataclass
class EnsembleConfig:
# .nemo path or pretrained name
model: str = MISSING
# path to the training data manifest (non-tarred)
training_manifest: str = MISSING
# specify to limit the number of training samples
# 100 is most likely enough, but setting higher default just in case
max_training_samples: int = 1000
# specify to provide dev data manifest for HP tuning
dev_manifest: Optional[str] = None
@dataclass
class TuneConfidenceConfig:
# important parameter, so should always be tuned
exclude_blank: Tuple[bool] = (True, False)
# prod is pretty much always worse, so not including by default
aggregation: Tuple[str] = ("mean", "min", "max")
# not including max prob, as there is always an entropy-based metric
# that's better but otherwise including everything
confidence_type: Tuple[str] = (
"entropy_renyi_exp",
"entropy_renyi_lin",
"entropy_tsallis_exp",
"entropy_tsallis_lin",
"entropy_gibbs_lin",
"entropy_gibbs_exp",
)
# TODO: currently it's not possible to efficiently tune temperature, as we always
# apply log-softmax in the decoder, so to try different values it will be required
# to rerun the decoding, which is very slow. To support this for one-off experiments
# it's possible to modify the code of CTC decoder / Transducer joint to
# remove log-softmax and then apply it directly in this script with the temperature
#
# Alternatively, one can run this script multiple times with different values of
# temperature and pick the best performing ensemble. Note that this will increase
# tuning time by the number of temperature values tried. On the other hand,
# the above approach is a lot more efficient and will only slightly increase
# the total tuning runtime.
# very important to tune for max prob, but for entropy metrics 1.0 is almost always best
# temperature: Tuple[float] = (1.0,)
# not that important, but can sometimes make a small difference
alpha: Tuple[float] = (0.25, 0.33, 0.5, 1.0)
def get_grid_size(self) -> int:
"""Returns the total number of points in the search space."""
if "max_prob" in self.confidence_type:
return (
len(self.exclude_blank)
* len(self.aggregation)
* ((len(self.confidence_type) - 1) * len(self.alpha) + 1)
)
return len(self.exclude_blank) * len(self.aggregation) * len(self.confidence_type) * len(self.alpha)
@dataclass
class TuneLogisticRegressionConfig:
# will have log-uniform grid over this range with that many points
    # note that a value of 10000.0 (effectively no regularization) is always added
C_num_points: int = 10
C_min: float = 0.0001
C_max: float = 10.0
# not too important
multi_class: Tuple[str] = ("ovr", "multinomial")
# should try to include weights directly if the data is too imbalanced
class_weight: Tuple = (None, "balanced")
# increase if getting many warnings that algorithm didn't converge
max_iter: int = 1000
@dataclass
class BuildEnsembleConfig:
# where to save the resulting ensemble model
output_path: str = MISSING
# each model specification
ensemble: List[EnsembleConfig] = MISSING
random_seed: int = 0 # for reproducibility
# default confidence, can override
confidence: ConfidenceConfig = ConfidenceConfig(
# we keep frame confidences and apply aggregation manually to get full-utterance confidence
preserve_frame_confidence=True,
exclude_blank=True,
aggregation="mean",
measure_cfg=ConfidenceMeasureConfig(name="entropy", entropy_type="renyi", alpha=0.25, entropy_norm="lin",),
)
temperature: float = 1.0
# this is optional, but can be used to change any aspect of the transcription
# config, such as batch size or amp usage. Note that model, data and confidence
    # will be overridden by this script
transcription: transcribe_speech.TranscriptionConfig = transcribe_speech.TranscriptionConfig()
# set to True to tune the confidence.
# requires dev manifests to be specified for each model
tune_confidence: bool = False
# used to specify what to tune over. By default runs tuning over some
    # reasonable grid, so that it does not take forever.
# Can be changed as needed
tune_confidence_config: TuneConfidenceConfig = TuneConfidenceConfig()
# very fast to tune and can be important in case of imbalanced datasets
# will automatically set to False if dev data is not available
tune_logistic_regression: bool = True
tune_logistic_regression_config: TuneLogisticRegressionConfig = TuneLogisticRegressionConfig()
def __post_init__(self):
"""Checking that if any dev data is provided, all are provided.
Will also auto-set tune_logistic_regression to False if no dev data
is available.
If tune_confidence is set to True (user choice) and no dev data is
provided, will raise an error.
"""
num_dev_data = 0
for ensemble_cfg in self.ensemble:
num_dev_data += ensemble_cfg.dev_manifest is not None
if num_dev_data == 0:
if self.tune_confidence:
raise ValueError("tune_confidence is set to True, but no dev data is provided")
LOG.info("Setting tune_logistic_regression = False since no dev data is provided")
self.tune_logistic_regression = False
return
if num_dev_data < len(self.ensemble):
raise ValueError(
"Some ensemble configs specify dev data, but some don't. Either all have to specify it or none!"
)
def calculate_score(features: np.ndarray, labels: np.ndarray, pipe: Pipeline) -> Tuple[float, np.ndarray]:
"""Score is always calculated as mean of the per-class scores.
This is done to account for possible class imbalances.
Args:
features: numpy array of features of shape [N x D], where N is the
number of objects (typically a total number of utterances in
all datasets) and D is the total number of confidence scores
used to train the model (typically = number of models).
        labels: numpy array of shape [N] containing ground-truth model indices.
pipe: classification pipeline (currently, standardization + logistic
regression).
Returns:
tuple: score value in [0, 1] and full classification confusion matrix.
"""
predictions = pipe.predict(features)
conf_m = confusion_matrix(labels, predictions)
score = np.diag(conf_m).sum() / conf_m.sum()
return score, conf_m
def train_model_selection(
training_features: np.ndarray,
training_labels: np.ndarray,
dev_features: Optional[np.ndarray] = None,
dev_labels: Optional[np.ndarray] = None,
tune_lr: bool = False,
tune_lr_cfg: Optional[TuneLogisticRegressionConfig] = None,
verbose: bool = False,
) -> Tuple[Pipeline, float]:
"""Trains model selection block with an (optional) tuning of the parameters.
Returns a pipeline consisting of feature standardization and logistic
regression. If tune_lr is set to True, dev features/labels will be used
to tune the hyperparameters of the logistic regression with the grid
search that's defined via ``tune_lr_cfg``.
If no tuning is requested, uses the following parameters::
best_pipe = make_pipeline(
StandardScaler(),
LogisticRegression(
multi_class="multinomial",
C=10000.0,
max_iter=1000,
class_weight="balanced",
),
)
Args:
training_features: numpy array of features of shape [N x D], where N is
the number of objects (typically a total number of utterances in
all training datasets) and D is the total number of confidence
scores used to train the model (typically = number of models).
        training_labels: numpy array of shape [N] containing ground-truth
model indices.
dev_features: same as training, but for the validation subset.
dev_labels: same as training, but for the validation subset.
tune_lr: controls whether tuning of LR hyperparameters is performed.
If set to True, it's required to also provide dev features/labels.
tune_lr_cfg: specifies what values of LR hyperparameters to try.
verbose: if True, will output final training/dev scores.
Returns:
tuple: trained model selection pipeline, best score (or -1 if no tuning
was done).
"""
if not tune_lr:
# default parameters: C=10000.0 disables regularization
best_pipe = make_pipeline(
StandardScaler(),
LogisticRegression(multi_class="multinomial", C=10000.0, max_iter=1000, class_weight="balanced"),
)
max_score = -1
else:
C_pms = np.append(
np.exp(np.linspace(np.log(tune_lr_cfg.C_min), np.log(tune_lr_cfg.C_max), tune_lr_cfg.C_num_points)),
10000.0,
)
max_score = 0
best_pipe = None
for class_weight in tune_lr_cfg.class_weight:
for multi_class in tune_lr_cfg.multi_class:
for C in C_pms:
pipe = make_pipeline(
StandardScaler(),
LogisticRegression(
multi_class=multi_class, C=C, max_iter=tune_lr_cfg.max_iter, class_weight=class_weight
),
)
pipe.fit(training_features, training_labels)
score, confusion = calculate_score(dev_features, dev_labels, pipe)
if score > max_score:
max_score = score
best_pipe = pipe
best_pipe.fit(training_features, training_labels)
if verbose:
accuracy, confusion = calculate_score(training_features, training_labels, best_pipe)
LOG.info("Training fit accuracy: %.4f", accuracy * 100.0)
LOG.info("Training confusion matrix:\n%s", str(confusion))
if dev_features is not None and verbose:
accuracy, confusion = calculate_score(dev_features, dev_labels, best_pipe)
LOG.info("Dev fit accuracy: %.4f", accuracy * 100.0)
LOG.info("Dev confusion matrix:\n%s", str(confusion))
return best_pipe, max_score
def subsample_manifest(manifest_file: str, max_samples: int) -> str:
"""Will save a subsampled version of the manifest to the same folder.
Have to save to the same folder to support relative paths.
Args:
manifest_file: path to the manifest file that needs subsampling.
max_samples: how many samples to retain. Will randomly select that
many lines from the manifest.
Returns:
str: the path to the subsampled manifest file.
"""
with open(manifest_file, "rt", encoding="utf-8") as fin:
lines = fin.readlines()
if max_samples < len(lines):
lines = random.sample(lines, max_samples)
output_file = manifest_file + "-subsampled"
with open(output_file, "wt", encoding="utf-8") as fout:
fout.write("".join(lines))
return output_file
def cleanup_subsampled_manifests(subsampled_manifests: List[str]):
"""Removes all generated subsamples manifests."""
for manifest in subsampled_manifests:
os.remove(manifest)
def compute_all_confidences(
hypothesis: Hypothesis, tune_confidence_cfg: TuneConfidenceConfig
) -> Dict[ConfidenceSpec, float]:
"""Computes a set of confidence scores from a given hypothesis.
Works with the output of both CTC and Transducer decoding.
Args:
hypothesis: generated hypothesis as returned from the transcribe
method of the ASR model.
tune_confidence_cfg: config specifying what confidence scores to
compute.
Returns:
        dict: dictionary with confidence spec -> confidence score mapping.
"""
conf_values = {}
for exclude_blank in tune_confidence_cfg.exclude_blank:
filtered_logprobs = get_filtered_logprobs(hypothesis, exclude_blank)
vocab_size = filtered_logprobs.shape[1]
for aggregation in tune_confidence_cfg.aggregation:
aggr_func = get_confidence_aggregation_bank()[aggregation]
for conf_type in tune_confidence_cfg.confidence_type:
conf_func = get_confidence_measure_bank()[conf_type]
if conf_type == "max_prob": # skipping alpha in this case
conf_value = aggr_func(conf_func(filtered_logprobs, v=vocab_size, t=1.0)).cpu().item()
conf_values[ConfidenceSpec(exclude_blank, aggregation, conf_type, 1.0)] = conf_value
else:
for alpha in tune_confidence_cfg.alpha:
conf_value = aggr_func(conf_func(filtered_logprobs, v=vocab_size, t=alpha)).cpu().item()
conf_values[ConfidenceSpec(exclude_blank, aggregation, conf_type, alpha)] = conf_value
return conf_values
def find_best_confidence(
train_confidences: List[List[Dict[ConfidenceSpec, float]]],
train_labels: List[int],
dev_confidences: List[List[Dict[ConfidenceSpec, float]]],
dev_labels: List[int],
tune_lr: bool,
    tune_lr_config: TuneLogisticRegressionConfig,
) -> Tuple[ConfidenceConfig, Pipeline]:
"""Finds the best confidence configuration for model selection.
Will loop over all values in the confidence dictionary and fit the LR
model (optionally tuning its HPs). The best performing confidence (on the
dev set) will be used for the final LR model.
Args:
train_confidences: this is an object of type
``List[List[Dict[ConfidenceSpec, float]]]``. The shape of this
object is [M, N, S], where
M: number of models
N: number of utterances in all training sets
S: number of confidence scores to try
This argument will be used to construct np.array objects for each
of the confidence scores with the shape [M, N]
train_labels: ground-truth labels of the correct model for each data
points. This is a list of size [N]
dev_confidences: same as training, but for the validation subset.
dev_labels: same as training, but for the validation subset.
tune_lr: controls whether tuning of LR hyperparameters is performed.
        tune_lr_config: specifies what values of LR hyperparameters to try.
Returns:
tuple: best confidence config, best model selection pipeline
"""
max_score = 0
best_pipe = None
best_conf_spec = None
LOG.info("Evaluation all confidences. Total grid size: %d", len(train_confidences[0][0].keys()))
for conf_spec in tqdm(train_confidences[0][0].keys()):
cur_train_confidences = []
for model_confs in train_confidences:
cur_train_confidences.append([])
for model_conf in model_confs:
cur_train_confidences[-1].append(model_conf[conf_spec])
cur_dev_confidences = []
for model_confs in dev_confidences:
cur_dev_confidences.append([])
for model_conf in model_confs:
cur_dev_confidences[-1].append(model_conf[conf_spec])
# transposing with zip(*list)
training_features = np.array(list(zip(*cur_train_confidences)))
training_labels = np.array(train_labels)
dev_features = np.array(list(zip(*cur_dev_confidences)))
dev_labels = np.array(dev_labels)
pipe, score = train_model_selection(
training_features, training_labels, dev_features, dev_labels, tune_lr, tune_lr_config,
)
if max_score < score:
max_score = score
best_pipe = pipe
best_conf_spec = conf_spec
LOG.info("Found better parameters: %s. New score: %.4f", str(conf_spec), max_score)
return best_conf_spec.to_confidence_config(), best_pipe
@hydra_runner(config_name="BuildEnsembleConfig", schema=BuildEnsembleConfig)
def main(cfg: BuildEnsembleConfig):
# silencing all messages from nemo/ptl to avoid dumping tons of configs to the stdout
logging.getLogger('pytorch_lightning').setLevel(logging.CRITICAL)
logging.getLogger('nemo_logger').setLevel(logging.CRITICAL)
LOG.info(f'Build ensemble config:\n{OmegaConf.to_yaml(cfg)}')
# to ensure post init is called
cfg = BuildEnsembleConfig(**cfg)
pl.seed_everything(cfg.random_seed)
cfg.transcription.random_seed = None # seed is already applied
cfg.transcription.return_transcriptions = True
cfg.transcription.preserve_alignment = True
cfg.transcription.ctc_decoding.temperature = cfg.temperature
cfg.transcription.rnnt_decoding.temperature = cfg.temperature
# this ensures that generated output is after log-softmax for consistency with CTC
train_confidences = []
dev_confidences = []
train_labels = []
dev_labels = []
# registering clean-up function that will hold on to this list and
# should clean up even if there is partial error in some of the transcribe
# calls
subsampled_manifests = []
atexit.register(cleanup_subsampled_manifests, subsampled_manifests)
# note that we loop over the same config.
# This is intentional, as we need to run all models on all datasets
# this loop will do the following things:
# 1. Goes through each model X each training dataset
# 2. Computes predictions by directly calling transcribe_speech.main
# 3. Converts transcription to the confidence score(s) as specified in the config
# 4. If dev sets are provided, computes the same for them
# 5. Creates a list of ground-truth model indices by mapping each model
# to its own training dataset as specified in the config.
# 6. After the loop, we either run tuning over all confidence scores or
# directly use a single score to fit logistic regression and save the
# final ensemble model.
for model_idx, model_cfg in enumerate(cfg.ensemble):
train_model_confidences = []
dev_model_confidences = []
for data_idx, data_cfg in enumerate(cfg.ensemble):
if model_idx == 0: # generating subsampled manifests only one time
subsampled_manifests.append(
subsample_manifest(data_cfg.training_manifest, data_cfg.max_training_samples)
)
subsampled_manifest = subsampled_manifests[data_idx]
if model_cfg.model.endswith(".nemo"):
cfg.transcription.model_path = model_cfg.model
else: # assuming pretrained model
cfg.transcription.pretrained_name = model_cfg.model
cfg.transcription.dataset_manifest = subsampled_manifest
# training
with tempfile.NamedTemporaryFile() as output_file:
cfg.transcription.output_filename = output_file.name
LOG.info("Transcribing training dataset %d with model %d", data_idx, model_idx)
transcriptions = transcribe_speech.main(deepcopy(cfg.transcription))
LOG.info("Generating confidence scores")
# TODO: parallelize this loop?
for transcription in tqdm(transcriptions):
if cfg.tune_confidence:
train_model_confidences.append(
compute_all_confidences(transcription, cfg.tune_confidence_config)
)
else:
train_model_confidences.append(compute_confidence(transcription, cfg.confidence))
if model_idx == 0: # labels are the same for all models
train_labels.append(data_idx)
# optional dev
if data_cfg.dev_manifest is not None:
cfg.transcription.dataset_manifest = data_cfg.dev_manifest
with tempfile.NamedTemporaryFile() as output_file:
cfg.transcription.output_filename = output_file.name
LOG.info("Transcribing dev dataset %d with model %d", data_idx, model_idx)
transcriptions = transcribe_speech.main(deepcopy(cfg.transcription))
LOG.info("Generating confidence scores")
for transcription in tqdm(transcriptions):
if cfg.tune_confidence:
dev_model_confidences.append(
compute_all_confidences(transcription, cfg.tune_confidence_config)
)
else:
dev_model_confidences.append(compute_confidence(transcription, cfg.confidence))
if model_idx == 0: # labels are the same for all models
dev_labels.append(data_idx)
train_confidences.append(train_model_confidences)
if dev_model_confidences:
dev_confidences.append(dev_model_confidences)
if cfg.tune_confidence:
best_confidence, model_selection_block = find_best_confidence(
train_confidences,
train_labels,
dev_confidences,
dev_labels,
cfg.tune_logistic_regression,
cfg.tune_logistic_regression_config,
)
else:
best_confidence = cfg.confidence
# transposing with zip(*list)
training_features = np.array(list(zip(*train_confidences)))
training_labels = np.array(train_labels)
if dev_confidences:
dev_features = np.array(list(zip(*dev_confidences)))
dev_labels = np.array(dev_labels)
else:
dev_features = None
dev_labels = None
model_selection_block, _ = train_model_selection(
training_features,
training_labels,
dev_features,
dev_labels,
cfg.tune_logistic_regression,
cfg.tune_logistic_regression_config,
verbose=True,
)
with tempfile.TemporaryDirectory() as tmpdir:
model_selection_block_path = os.path.join(tmpdir, 'model_selection_block.pkl')
joblib.dump(model_selection_block, model_selection_block_path)
# creating ensemble checkpoint
ensemble_model = ConfidenceEnsembleModel(
cfg=DictConfig(
{
'model_selection_block': model_selection_block_path,
'confidence': best_confidence,
'temperature': cfg.temperature,
'load_models': [model_cfg.model for model_cfg in cfg.ensemble],
}
),
trainer=None,
)
ensemble_model.save_to(cfg.output_path)
if __name__ == '__main__':
main()
| NeMo-main | scripts/confidence_ensembles/build_ensemble.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# these tests are not included in CI, since they take moderate amount of time
# they are supposed to be run in the nightly pipeline instead
import os
import subprocess
import sys
from pathlib import Path
import pytest
from nemo.collections.asr.parts.utils.transcribe_utils import TextProcessingConfig
sys.path.append(str(Path(__file__).parents[2] / 'examples' / 'asr'))
import speech_to_text_eval
@pytest.mark.parametrize(
'build_args',
[
"ensemble.0.model=stt_es_conformer_ctc_large ensemble.1.model=stt_it_conformer_ctc_large",
"ensemble.0.model=stt_es_conformer_transducer_large ensemble.1.model=stt_it_conformer_transducer_large",
"ensemble.0.model=stt_es_fastconformer_hybrid_large_pc ensemble.1.model=stt_it_fastconformer_hybrid_large_pc",
(
"ensemble.0.model=stt_es_fastconformer_hybrid_large_pc "
"ensemble.1.model=stt_it_fastconformer_hybrid_large_pc "
"transcription.decoder_type=ctc "
),
"ensemble.0.model=stt_es_conformer_ctc_large ensemble.1.model=stt_it_conformer_transducer_large",
(
"ensemble.0.model=stt_es_conformer_ctc_large "
"ensemble.1.model=stt_it_conformer_ctc_large "
f"ensemble.0.dev_manifest={Path(os.getenv('TEST_DATA_PATH', '')) / 'es' / 'dev_manifest.json'} "
f"ensemble.1.dev_manifest={Path(os.getenv('TEST_DATA_PATH', '')) / 'it' / 'dev_manifest.json'} "
"tune_confidence=True "
),
(
"ensemble.0.model=stt_es_conformer_transducer_large "
"ensemble.1.model=stt_it_conformer_transducer_large "
f"ensemble.0.dev_manifest={Path(os.getenv('TEST_DATA_PATH', '')) / 'es' / 'dev_manifest.json'} "
f"ensemble.1.dev_manifest={Path(os.getenv('TEST_DATA_PATH', '')) / 'it' / 'dev_manifest.json'} "
"tune_confidence=True "
),
],
ids=(
[
"CTC models",
"Transducer models",
"Hybrid models (Transducer mode)",
"Hybrid models (CTC mode)",
"CTC + Transducer",
"CTC models + confidence tuning",
"Transducer models + confidence tuning",
]
),
)
def test_confidence_ensemble(tmp_path, build_args):
"""Integration tests for confidence-ensembles.
Tests building ensemble and running inference with the model.
To use, make sure to define TEST_DATA_PATH env variable with path to
the test data. The following structure is assumed:
$TEST_DATA_PATH
├── es
│ ├── dev
│ ├── dev_manifest.json
│ ├── test
│ ├── train
│ └── train_manifest.json
├── it
│ ├── dev
│ ├── dev_manifest.json
│ ├── test
│ ├── test_manifest.json
│ ├── train
│ └── train_manifest.json
└── test_manifest.json
"""
# checking for test data and failing right away if not defined
if not os.getenv("TEST_DATA_PATH"):
raise ValueError("TEST_DATA_PATH env variable has to be defined!")
test_data_path = Path(os.environ['TEST_DATA_PATH'])
build_ensemble_cmd = f"""
python {Path(__file__).parent / 'build_ensemble.py'} \
--config-name=ensemble_config.yaml \
output_path={tmp_path / 'ensemble.nemo'} \
{build_args}
"""
subprocess.run(build_ensemble_cmd, check=True, shell=True)
eval_cfg = speech_to_text_eval.EvaluationConfig(
dataset_manifest=str(test_data_path / 'test_manifest.json'),
output_filename=str(tmp_path / 'output.json'),
model_path=str(tmp_path / 'ensemble.nemo'),
text_processing=TextProcessingConfig(punctuation_marks=".,?", do_lowercase=True, rm_punctuation=True),
)
results = speech_to_text_eval.main(eval_cfg)
assert results.metric_value < 0.20 # relaxed check for better than 20% WER
| NeMo-main | scripts/confidence_ensembles/test_confidence_ensembles.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Execution :
python create_tarred_tokenized_text_lm_dataset.py \
    --text_path=<comma separated text filepaths> \
--data_root=<path to output directory> \
--tokenizer_name="bert-base-cased" \
--tokenizer_vocab_file=<path to vocab file for tokenizer> \
--num_shards=64 \
--chunk_size=8192 \
--chunk_write_buffer=512 \
--lower_case \
--log
"""
import argparse
import glob
import json
import logging
import os
import tarfile
import joblib
import numpy as np
from tqdm import tqdm
from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer
parser = argparse.ArgumentParser(description='Tarred Tokenized dataset for text language modelling')
# Data path arguments
parser.add_argument('--text_path', required=True, default=None, type=str, help='Text paths, separated by commas')
parser.add_argument('--data_root', required=True, default=None, type=str, help='Output directory')
# General arguments
parser.add_argument(
'--chunk_write_buffer',
default=128,
type=int,
help='Number of chunks of `chunk_size` to buffer for parallel tokenization and serial write to disk',
)
parser.add_argument('--lower_case', action='store_true', help='Whether to lower case the corpus')
parser.add_argument('--log', action='store_true', help='Whether to print logs to terminal')
# Tokenizer arguments
parser.add_argument('--tokenizer_name', required=False, default=None, type=str, help='Tokenizer name for resolution')
parser.add_argument(
'--tokenizer_model', required=False, default=None, type=str, help='Path to tokenizer model for sentencepiece'
)
parser.add_argument('--tokenizer_vocab_file', required=False, type=str, default=None, help='Path to a vocab file')
parser.add_argument(
'--tokenizer_special_tokens', default=None, type=str, nargs='+', help='List of special tokens for the tokenizer'
)
# Tarred dataset arguments
parser.add_argument('--num_shards', default=1, type=int, help='Number of shards for the tarfile')
parser.add_argument('--chunk_size', default=8192, type=int, help='Number of rows of data concatenated into a vector')
parser.set_defaults(log=False, lower_case=False)
args = parser.parse_args()
def __build_dataset_from_text(texts: str, lower_case: bool, chunk_size: int):
if ',' in texts:
texts = texts.split(',')
else:
texts = [texts]
num_lines = 0
text_dataset = []
for text in texts:
with open(text, 'r', encoding='utf-8') as in_reader:
reader = tqdm(iter(lambda: in_reader.readline(), ''), desc="Read 0 lines", unit=' lines')
for i, line in enumerate(reader):
# Clean text line
line = line.replace("\n", "").strip()
if lower_case:
line = line.lower()
if line:
text_dataset.append(line)
num_lines += 1
if num_lines % 100000 == 0:
reader.set_description(f"Read {num_lines} lines")
if num_lines % chunk_size == 0:
yield text_dataset, num_lines
# Empty cache
text_dataset = []
logging.info(f"Finished extracting manifest : {text}")
logging.info("Finished extracting all manifests ! Number of sentences : {}".format(num_lines))
if len(text_dataset) != 0:
yield text_dataset, num_lines
def __tokenize_str(texts, tokenizer):
tokenized_text = []
for text in texts:
tok_text = tokenizer.text_to_ids(text)
tokenized_text.extend(tok_text)
return tokenized_text
def __tokenize_text(
text_paths, tokenizer, tokenized_cachedir, lower_case: bool = False, chunk_size=8192, write_buffer: int = -1
):
if write_buffer < 1:
write_buffer = max(os.cpu_count() - write_buffer, 1)
logging.info(f"Using write chunk buffer of size {write_buffer}")
if not os.path.exists(tokenized_cachedir):
os.makedirs(tokenized_cachedir)
# global parameters
global_chunk_idx = 0
chunk_paths = []
chunk_lens = []
# buffer parameters
data_cache = []
chunk_idx = 0
text_generator = iter(__build_dataset_from_text(text_paths, lower_case=lower_case, chunk_size=chunk_size))
global_num_lines = 0
last_batch = False
with joblib.Parallel(n_jobs=-2, verbose=10) as parallel:
while True:
try:
data, num_lines = next(text_generator)
data_cache.append(data)
global_num_lines = num_lines
except StopIteration:
last_batch = True
# Update counters
chunk_idx += 1
if (chunk_idx == write_buffer) or last_batch:
# write the chunks into disk after parallel tokenization
tokenized_data_list = parallel(
joblib.delayed(__tokenize_str)(chunk, tokenizer) for chunk in data_cache
)
# Sequential write cache
for chunk in tokenized_data_list:
fp = os.path.join(tokenized_cachedir, f"chunk_{global_chunk_idx}.npy")
chunk = np.asarray(chunk, dtype=np.int64)
np.save(fp, chunk, allow_pickle=False)
chunk_paths.append(fp)
chunk_lens.append(len(chunk))
global_chunk_idx += 1
logging.info(f"Wrote a total of {global_chunk_idx} chunks to file...")
# reset buffers
data_cache.clear()
del data_cache
data_cache = []
chunk_idx = 0
if last_batch:
logging.info("Finished tokenizing last chunk")
break
logging.info(
f"Chunking {global_num_lines} rows into {global_num_lines // chunk_size} tasks (each chunk contains {chunk_size} elements)"
)
return chunk_paths, chunk_lens
def __create_chunk(data_root, chunk_path, shard_id, compute_metrics=False):
"""Creates a tarball containing the tokenized text chunks.
"""
tar = tarfile.open(os.path.join(data_root, f'text_{shard_id}.tar'), mode='a', encoding='utf-8')
# We squash the filename since we do not preserve directory structure of tokenized text in the tarball.
base, ext = os.path.splitext(chunk_path)
    base = base.replace(os.path.sep, '_')
# Need the following replacement as long as WebDataset splits on first period
base = base.replace('.', '_')
squashed_filename = f'{base}{ext}'
tar.add(chunk_path, arcname=squashed_filename)
tar.close()
if compute_metrics:
data = np.load(chunk_path, allow_pickle=False)
chunk_len = len(data)
return (chunk_len,)
else:
return None
def __write_tarred_tokenized_text_dataset(data_root, num_shards, chunk_paths, chunk_lens):
num_chunks = len(chunk_paths)
if chunk_lens is not None:
num_text = sum(chunk_lens)
shard_counts = {chunk_id: chunk_len for chunk_id, chunk_len in enumerate(chunk_lens)}
compute_metrics = False
else:
num_text = 0
shard_counts = {}
compute_metrics = True
for chunk_id, chunk_path in enumerate(tqdm(chunk_paths, desc='Writing chunk ', unit=' chunks')):
shard_id = chunk_id % num_shards
metrics = __create_chunk(data_root, chunk_path, shard_id, compute_metrics=compute_metrics)
if metrics is not None:
num_text += metrics[0]
shard_counts[chunk_id] = metrics[0]
# write metadata
metadata_path = os.path.join(data_root, 'metadata.json')
with open(metadata_path, 'w') as f:
metadata = {'num_chunks': num_chunks, 'num_text': num_text, 'shard_count': shard_counts}
json.dump(metadata, f, indent=4)
logging.info("Metadata writen..")
def main():
text_path = args.text_path
data_root = args.data_root
if args.log:
logging.basicConfig(level=logging.INFO)
tokenized_cachedir = os.path.join(data_root, '_tokenized_dataset_cachedir')
if os.path.exists(tokenized_cachedir):
logging.warning(
f'Tokenized cache directory {tokenized_cachedir} already potentially contains files.'
f'In such a case, please be aware that the tarfiles will be **appended** instead of overridden!'
)
if not os.path.exists(data_root):
os.makedirs(data_root)
chunk_paths = None
chunk_lens = None
if os.path.exists(tokenized_cachedir):
paths = glob.glob(os.path.join(tokenized_cachedir, "*.npy"))
if len(paths) > 0:
logging.info("Cached tokenized numpy files found, skipping re-tokenization of dataset")
chunk_paths = paths
chunk_lens = None
if chunk_paths is None:
if args.tokenizer_name is None:
raise ValueError("`tokenizer_name` name is required when tokenizing the dataset for the first time.")
if args.tokenizer_vocab_file is None:
raise ValueError("`tokenizer_vocab_file` is required when constructing the tokenized dataset")
tokenizer = get_tokenizer(
tokenizer_name=args.tokenizer_name,
tokenizer_model=args.tokenizer_model,
vocab_file=args.tokenizer_vocab_file,
special_tokens=args.tokenizer_special_tokens,
)
logging.info("Built tokenizer")
# tokenize text data into sub-words
chunk_paths, chunk_lens = __tokenize_text(
text_paths=text_path,
tokenizer=tokenizer,
tokenized_cachedir=tokenized_cachedir,
lower_case=args.lower_case,
chunk_size=args.chunk_size,
write_buffer=args.chunk_write_buffer,
)
logging.info(f"Tokenized dataset into sub-words and serialized cache at {tokenized_cachedir}")
# Write tarred dataset
__write_tarred_tokenized_text_dataset(
data_root, num_shards=args.num_shards, chunk_paths=chunk_paths, chunk_lens=chunk_lens
)
logging.info('Done preparing tokenized dataset!')
if __name__ == "__main__":
main()
| NeMo-main | scripts/asr_language_modeling/neural_rescorer/create_tarred_transformer_lm_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script would evaluate a neural language model (Transformer) trained with
`examples/nlp/language_modeling/transformer_lm.py' as a rescorer for ASR systems.
Given a trained TransformerLMModel `.nemo` file, this script can be used to re-score the beams obtained from a beam
search decoder of an ASR model.
USAGE:
1. Obtain `.tsv` file with beams and their corresponding scores. Scores can be from a regular beam search decoder or
in fusion with an N-gram LM scores. For a given beam size `beam_size` and a number of examples
for evaluation `num_eval_examples`, it should contain (`beam_size` x `num_eval_examples`) lines of
form `beam_candidate_text \t score`. This file can be generated by `scripts/asr_language_modeling/ngram_lm/eval_beamsearch_ngram.py`.
2. Rescore the candidates:
python eval_neural_rescorer.py
--lm_model=[path to .nemo file of the LM]
--beams_file=[path to beams .tsv file]
--beam_size=[size of the beams]
--eval_manifest=[path to eval manifest .json file]
--batch_size=[batch size used for inference on the LM model]
--alpha=[the value for the parameter rescorer_alpha]
--beta=[the value for the parameter rescorer_beta]
You may find more info on how to use this script at:
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/asr_language_modeling.html
"""
import contextlib
import inspect
import json
from argparse import ArgumentParser
import editdistance
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import tqdm
from transformers import AutoModelForCausalLM
from nemo.collections.nlp.models.language_modeling import TransformerLMModel
from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer
from nemo.utils import logging
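# Illustrative helper (not used below; the name is ours): the per-candidate fusion rule this script
# applies when rescoring, where `alpha` weights the neural LM score and `beta` rewards candidate
# length measured in characters.
def _fused_score_sketch(acoustic_score: float, lm_score: float, len_in_chars: int, alpha: float, beta: float) -> float:
    return acoustic_score + alpha * lm_score + beta * len_in_chars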
class BeamScoresDataset(torch.utils.data.Dataset):
"""
    Dataset to read the score file containing the beams and their corresponding scores
Args:
data_path: path to the beams file
tokenizer: tokenizer of the LM model
manifest_path: manifest `.json` file which contains the ground truths transcripts
beam_size: the number of beams per sample
max_seq_length: the maximum length of sequences
"""
def __init__(self, data_path, tokenizer, manifest_path, beam_size=128, max_seq_length=256):
self.data = pd.read_csv(data_path, delimiter="\t", header=None)
self.tokenizer = tokenizer
self.ground_truths = []
with open(manifest_path, 'r', encoding='utf-8') as f_orig:
for line in f_orig:
item = json.loads(line)
self.ground_truths.append(item['text'])
self.beam_size = beam_size
self.max_seq_length = max_seq_length
if self.tokenizer.pad_id is not None:
self.pad_id = self.tokenizer.pad_id
elif self.tokenizer.eos_id is not None:
self.pad_id = self.tokenizer.eos_id
else:
logging.warning(f"Using 0 as pad_id as the tokenizer has no pad_id or eos_id.")
self.pad_id = 0
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
text = str(self.data[0][idx])
tokens = self.tokenizer.text_to_ids(text)
if self.tokenizer.bos_id is not None:
tokens = [self.tokenizer.bos_id] + tokens
if self.tokenizer.eos_id is not None:
tokens = tokens + [self.tokenizer.eos_id]
input_ids = [self.pad_id] * self.max_seq_length
input_ids[: len(tokens)] = tokens
input_ids = np.array(input_ids)
input_mask = np.zeros(self.max_seq_length)
input_mask[: len(tokens)] = 1
acoustic_score = self.data[1][idx]
dist = editdistance.eval(text.split(), self.ground_truths[idx // self.beam_size].split())
ref_len = len(self.ground_truths[idx // self.beam_size].split())
len_in_chars = len(str(self.data[0][idx]))
return input_ids, input_mask, acoustic_score, dist, ref_len, len_in_chars, idx
def linear_search_wer(
dists, scores1, scores2, total_len, coef_range=[0, 10], coef_steps=10000, param_name='parameter'
):
"""
    Performs a linear search to find the best coefficient when two sets of scores are linearly fused.
    Args:
        dists: Tensor of the distances between the ground truth and the candidates with shape of [number of samples, beam size]
scores1: Tensor of the first set of scores with shape of [number of samples, beam size]
scores2: Tensor of the second set of scores with shape of [number of samples, beam size]
total_len: The total length of all samples
coef_range: the search range for the coefficient
coef_steps: the number of steps that the search range would get divided into
param_name: the name of the parameter to be used in the figure
Output:
(best coefficient found, best WER achieved)
"""
scale = scores1.mean().abs().item() / scores2.mean().abs().item()
left = coef_range[0] * scale
right = coef_range[1] * scale
coefs = np.linspace(left, right, coef_steps)
best_wer = 10000
best_coef = left
wers = []
for coef in coefs:
scores = scores1 + coef * scores2
wer = compute_wer(dists, scores, total_len)
wers.append(wer)
if wer < best_wer:
best_wer = wer
best_coef = coef
plt.plot(coefs, wers)
plt.title(f'WER% after rescoring with different values of {param_name}')
plt.ylabel('WER%')
plt.xlabel(param_name)
plt.show()
return best_coef, best_wer
def compute_wer(dists, scores, total_len):
"""
Sorts the candidates based on the scores and calculates the WER with the new top candidates.
Args:
dists: Tensor of the distances between the ground truth and the candidates with shape of [number of samples, beam size]
scores: Tensor of the scores for candidates with shape of [number of samples, beam size]
total_len: The total length of all samples
Output:
WER with the new scores
"""
indices = scores.max(dim=1, keepdim=True)[1]
wer = dists.gather(dim=1, index=indices).sum() / total_len
wer = wer.item()
return wer
def main():
parser = ArgumentParser()
parser.add_argument(
"--lm_model_file",
type=str,
required=True,
help="path to LM model .nemo file or the name of a HuggingFace pretrained models like 'transfo-xl-wt103' or 'gpt2'",
)
parser.add_argument("--beams_file", type=str, required=True, help="path to beams .tsv file")
parser.add_argument(
"--eval_manifest", type=str, required=True, help="path to the evaluation `.json` manifest file"
)
parser.add_argument("--beam_size", type=int, required=True, help="number of beams per candidate")
parser.add_argument("--batch_size", type=int, default=256, help="inference batch size")
parser.add_argument("--alpha", type=float, default=None, help="parameter alpha of the fusion")
parser.add_argument("--beta", type=float, default=None, help="parameter beta of the fusion")
parser.add_argument("--max_seq_length", default=512, help="Maximum sequence length (in tokens) for the input")
parser.add_argument(
"--scores_output_file", default=None, type=str, help="The optional path to store the rescored beams"
)
parser.add_argument(
"--device", default="cuda", type=str, help="The device to load the model onto to calculate the scores"
)
parser.add_argument(
"--use_amp", action="store_true", help="Whether to use AMP if available to calculate the scores"
)
args = parser.parse_args()
device = args.device
if device.startswith("cuda") and not torch.cuda.is_available():
logging.info(f"cuda is not available! switched to cpu.")
device = "cpu"
if args.lm_model_file.endswith(".nemo"):
nemo_model = True
logging.info("Attempting to initialize from .nemo file...")
model = TransformerLMModel.restore_from(
restore_path=args.lm_model_file, map_location=torch.device(device)
).eval()
model_tokenizer = model.tokenizer
else:
nemo_model = False
logging.info("Attempting to initialize from a pretrained model from HuggingFace...")
model = (
AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=args.lm_model_file, is_decoder=True)
.to(device)
.eval()
)
model_tokenizer = get_tokenizer(tokenizer_name=args.lm_model_file)
max_seq_length = args.max_seq_length
dataset = BeamScoresDataset(args.beams_file, model_tokenizer, args.eval_manifest, args.beam_size, max_seq_length)
data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=args.batch_size)
    if args.use_amp and torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
        logging.info("AMP is enabled!\n")
        autocast = torch.cuda.amp.autocast
    else:
        @contextlib.contextmanager
        def autocast():
            yield
if "attention_mask" in inspect.getfullargspec(model.forward).args:
support_att_mask = True
else:
support_att_mask = False
logging.info(f"Rescoring with beam_size: {args.beam_size}")
logging.info("Calculating the scores...")
with autocast():
with torch.no_grad():
am_scores, lm_scores, dists, ref_lens, lens_in_chars = [], [], [], [], []
for batch in tqdm.tqdm(data_loader):
input_ids, input_mask, acoustic_score, dist, ref_len, len_in_chars, idx = batch
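                # Truncate the batch to the first position where the mask count is minimal, i.e. where padding starts for every sequence.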
max_len_in_batch = input_mask.sum(dim=0).argmin().item()
input_ids, input_mask = input_ids[:, :max_len_in_batch], input_mask[:, :max_len_in_batch]
if torch.cuda.is_available():
input_ids, input_mask = input_ids.to(device), input_mask.to(device)
dist, acoustic_score, len_in_chars = (
dist.to(device),
acoustic_score.to(device),
len_in_chars.to(device),
)
# some models like Transformer-XL don't need attention_mask as input
if support_att_mask:
log_probs = model(input_ids=input_ids, attention_mask=input_mask)
else:
log_probs = model(input_ids=input_ids)
if not nemo_model:
log_probs = torch.nn.functional.log_softmax(log_probs.logits, dim=-1)
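                # Gather the log-probability of each next token (inputs shifted by one) and sum over non-padded positions to get a sentence-level LM score.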
target_log_probs = log_probs[:, :-1].gather(2, input_ids[:, 1:].unsqueeze(2)).squeeze(2)
neural_lm_score = torch.sum(target_log_probs * input_mask[:, 1:], dim=-1)
am_scores.append(acoustic_score)
lm_scores.append(neural_lm_score)
dists.append(dist)
ref_lens.append(ref_len)
lens_in_chars.append(len_in_chars)
am_scores = torch.cat(am_scores).view(-1, args.beam_size)
lm_scores = torch.cat(lm_scores).view(-1, args.beam_size)
dists = torch.cat(dists).view(-1, args.beam_size)
ref_lens = torch.cat(ref_lens).view(-1, args.beam_size)
lens_in_chars = torch.cat(lens_in_chars).view(-1, args.beam_size).to(am_scores.dtype)
total_len = ref_lens[:, 0].sum()
model_wer = dists[:, 0].sum() / total_len
ideal_wer = dists.min(dim=1)[0].sum() / total_len
if args.alpha is None:
logging.info("Linear search for alpha...")
coef1, _ = linear_search_wer(
dists=dists, scores1=am_scores, scores2=lm_scores, total_len=total_len, param_name='alpha'
)
coef1 = np.round(coef1, 3)
logging.info(f"alpha={coef1} achieved the best WER.")
logging.info(f"------------------------------------------------")
else:
coef1 = args.alpha
scores = am_scores + coef1 * lm_scores
if args.beta is None:
logging.info("Linear search for beta...")
coef2, _ = linear_search_wer(
dists=dists, scores1=scores, scores2=lens_in_chars, total_len=total_len, param_name='beta'
)
coef2 = np.round(coef2, 3)
logging.info(f"beta={coef2} achieved the best WER.")
logging.info(f"------------------------------------------------")
else:
coef2 = args.beta
new_scores = am_scores + coef1 * lm_scores + coef2 * lens_in_chars
rescored_wer = compute_wer(dists, new_scores, total_len)
logging.info(f"Input beams WER: {np.round(model_wer.item() * 100, 2)}%")
logging.info(f"------------------------------------------------")
logging.info(f" +LM rescoring WER: {np.round(rescored_wer * 100, 2)}%")
logging.info(f" with alpha={coef1}, beta={coef2}")
logging.info(f"------------------------------------------------")
logging.info(f"Oracle WER: {np.round(ideal_wer.item() * 100, 2)}%")
logging.info(f"------------------------------------------------")
new_scores_flatten = new_scores.flatten()
if args.scores_output_file is not None:
logging.info(f'Saving the candidates with their new scores at `{args.scores_output_file}`...')
with open(args.scores_output_file, "w", encoding='utf-8') as fout:
for sample_id in range(len(dataset)):
fout.write(f"{dataset.data[0][sample_id]}\t{new_scores_flatten[sample_id]}\n")
if __name__ == '__main__':
main()
| NeMo-main | scripts/asr_language_modeling/neural_rescorer/eval_neural_rescorer.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script would train an N-gram language model with KenLM library (https://github.com/kpu/kenlm) which can be used
# with the beam search decoders on top of the ASR models. This script supports both character level and BPE level
# encodings and models which is detected automatically from the type of the model.
# After the N-gram model is trained, and stored in the binary format, you may use
# 'scripts/ngram_lm/eval_beamsearch_ngram.py' to evaluate it on an ASR model.
#
# You need to install the KenLM library and also the beam search decoders to use this feature. Please refer
# to 'scripts/ngram_lm/install_beamsearch_decoders.sh' on how to install them.
#
# USAGE: python train_kenlm.py nemo_model_file=<path to the .nemo file of the model> \
# train_paths=<list of paths to the training text or JSON manifest file> \
# kenlm_bin_path=<path to the bin folder of KenLM library> \
# kenlm_model_file=<path to store the binary KenLM model> \
# ngram_length=<order of N-gram model> \
#
# After training is done, the binary LM model is stored at the path specified by '--kenlm_model_file'.
# You may find more info on how to use this script at:
# https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/asr_language_modeling.html
import logging
import os
import subprocess
import sys
from dataclasses import dataclass, field
from glob import glob
from typing import List
from omegaconf import MISSING
from scripts.asr_language_modeling.ngram_lm import kenlm_utils
from nemo.core.config import hydra_runner
from nemo.utils import logging
"""
NeMo's beam search decoders only support char-level encodings. In order to make it work with BPE-level encodings, we
use a trick to encode the sub-word tokens of the training data as unicode characters and train a char-level KenLM.
"""
@dataclass
class TrainKenlmConfig:
"""
Train an N-gram language model with KenLM to be used with beam search decoder of ASR models.
"""
train_paths: List[
str
] = MISSING # List of training files or folders. Files can be a plain text file or ".json" manifest or ".json.gz". Example: [/path/to/manifest/file,/path/to/folder]
nemo_model_file: str = MISSING # The path to '.nemo' file of the ASR model, or name of a pretrained NeMo model
kenlm_model_file: str = MISSING # The path to store the KenLM binary model file
ngram_length: int = MISSING # The order of N-gram LM
kenlm_bin_path: str = MISSING # The path to the bin folder of KenLM.
preserve_arpa: bool = False # Whether to preserve the intermediate ARPA file.
ngram_prune: List[int] = field(
default_factory=lambda: [0]
) # List of digits to prune Ngram. Example: [0,0,1]. See Pruning section on the https://kheafield.com/code/kenlm/estimation
cache_path: str = "" # Cache path to save tokenized files.
verbose: int = 1 # Verbose level, default is 1.
@hydra_runner(config_path=None, config_name='TrainKenlmConfig', schema=TrainKenlmConfig)
def main(args: TrainKenlmConfig):
train_paths = kenlm_utils.get_train_list(args.train_paths)
if isinstance(args.ngram_prune, str):
args.ngram_prune = [args.ngram_prune]
tokenizer, encoding_level, is_aggregate_tokenizer = kenlm_utils.setup_tokenizer(args.nemo_model_file)
if encoding_level == "subword":
discount_arg = "--discount_fallback" # --discount_fallback is needed for training KenLM for BPE-based models
else:
discount_arg = ""
arpa_file = f"{args.kenlm_model_file}.tmp.arpa"
""" LMPLZ ARGUMENT SETUP """
    kenlm_args = [
        os.path.join(args.kenlm_bin_path, 'lmplz'),
        "-o",
        str(args.ngram_length),
        "--arpa",
        arpa_file,
    ]
    # Only pass the discount flag when it is set, so that no empty argument reaches lmplz.
    if discount_arg:
        kenlm_args.append(discount_arg)
    kenlm_args += ["--prune"] + [str(n) for n in args.ngram_prune]
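    # For example (illustrative only), with ngram_length=3, ngram_prune=[0,0,1] and a BPE model,
    # the arguments assembled above correspond to a command line roughly like:
    #     <kenlm_bin_path>/lmplz -o 3 --arpa <kenlm_model_file>.tmp.arpa --discount_fallback --prune 0 0 1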
if args.cache_path:
if not os.path.exists(args.cache_path):
os.makedirs(args.cache_path, exist_ok=True)
""" DATASET SETUP """
encoded_train_files = []
for file_num, train_file in enumerate(train_paths):
logging.info(f"Encoding the train file '{train_file}' number {file_num+1} out of {len(train_paths)} ...")
cached_files = glob(os.path.join(args.cache_path, os.path.split(train_file)[1]) + "*")
encoded_train_file = os.path.join(args.cache_path, os.path.split(train_file)[1] + f"_{file_num}.tmp.txt")
if (
cached_files and cached_files[0] != encoded_train_file
): # cached_files exists but has another file name: f"_{file_num}.tmp.txt"
os.rename(cached_files[0], encoded_train_file)
logging.info("Rename", cached_files[0], "to", encoded_train_file)
encoded_train_files.append(encoded_train_file)
kenlm_utils.iter_files(
source_path=train_paths,
dest_path=encoded_train_files,
tokenizer=tokenizer,
encoding_level=encoding_level,
is_aggregate_tokenizer=is_aggregate_tokenizer,
verbose=args.verbose,
)
first_process_args = ["cat"] + encoded_train_files
first_process = subprocess.Popen(first_process_args, stdout=subprocess.PIPE, stderr=sys.stderr)
logging.info(f"Running lmplz command \n\n{' '.join(kenlm_args)}\n\n")
kenlm_p = subprocess.run(
kenlm_args,
stdin=first_process.stdout,
capture_output=False,
text=True,
stdout=sys.stdout,
stderr=sys.stderr,
)
first_process.wait()
else:
logging.info(f"Running lmplz command \n\n{' '.join(kenlm_args)}\n\n")
kenlm_p = subprocess.Popen(kenlm_args, stdout=sys.stdout, stdin=subprocess.PIPE, stderr=sys.stderr)
kenlm_utils.iter_files(
source_path=train_paths,
dest_path=kenlm_p.stdin,
tokenizer=tokenizer,
encoding_level=encoding_level,
is_aggregate_tokenizer=is_aggregate_tokenizer,
verbose=args.verbose,
)
kenlm_p.communicate()
if kenlm_p.returncode != 0:
raise RuntimeError("Training KenLM was not successful!")
""" BINARY BUILD """
kenlm_args = [
os.path.join(args.kenlm_bin_path, "build_binary"),
"trie",
arpa_file,
args.kenlm_model_file,
]
logging.info(f"Running binary_build command \n\n{' '.join(kenlm_args)}\n\n")
ret = subprocess.run(kenlm_args, capture_output=False, text=True, stdout=sys.stdout, stderr=sys.stderr)
if ret.returncode != 0:
raise RuntimeError("Training KenLM was not successful!")
if not args.preserve_arpa:
os.remove(arpa_file)
logging.info(f"Deleted the arpa file '{arpa_file}'.")
if __name__ == '__main__':
main()
| NeMo-main | scripts/asr_language_modeling/ngram_lm/train_kenlm.py |
#!/usr/bin/env python
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2016, Johns Hopkins University (Author: Daniel Povey).
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script was copied from https://github.com/kaldi-asr/kaldi/blob/master/egs/wsj/s5/utils/lang/make_phone_lm.py
# with minor python3 related changes.
import argparse
import math
import sys
from collections import defaultdict
# note, this was originally based
parser = argparse.ArgumentParser(
description="""
This script creates a language model that's intended to be used in modeling
phone sequences (either of sentences or of dictionary entries), although of
course it will work for any type of data. The easiest way
to describe it is as a Kneser-Ney language model (unmodified, with addition)
with a fixed discounting constant equal to 1, except with no smoothing of the
bigrams (and hence no unigram state). This is (a) because we want to keep the
graph after context expansion small, (b) because languages tend to have
constraints on which phones can follow each other, and (c) in order to get valid
sequences of word-position-dependent phones so that lattice-align-words can
work. It also includes a special entropy-based pruning technique that
backs off the statistics of pruned n-grams to lower-order states.
This script reads lines from its standard input, each
consisting of a sequence of integer symbol-ids (which should be > 0),
representing the phone sequences of a sentence or dictionary entry.
This script outputs a backoff language model in FST format""",
epilog="See also utils/lang/make_phone_bigram_lang.sh",
)
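# Illustrative I/O example (see also the test commands at the bottom of this file): an input line
# such as "6 7 8 4" is one phone sequence, and the default (non-ARPA) output is OpenFst text format,
# roughly one arc per line:
#     <src-state> <dst-state> <ilabel> <olabel> <cost>
# plus "<state> <final-cost>" lines for final states; backoff arcs carry the disambiguation symbol
# as their input label and epsilon (0) as their output label.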
parser.add_argument(
"--phone-disambig-symbol",
type=int,
required=False,
help="Integer corresponding to an otherwise-unused "
"phone-level disambiguation symbol (e.g. #5). This is "
"inserted at the beginning of the phone sequence and "
"whenever we back off.",
)
parser.add_argument(
"--ngram-order",
type=int,
default=4,
choices=[2, 3, 4, 5, 6, 7],
help="Order of n-gram to use (but see also --num-extra-states;" "the effective order after pruning may be less.",
)
parser.add_argument(
"--num-extra-ngrams",
type=int,
default=20000,
help="Target number of n-grams in addition to the n-grams in "
"the bigram LM states which can't be pruned away. n-grams "
"will be pruned to reach this target.",
)
parser.add_argument(
"--no-backoff-ngram-order",
type=int,
default=2,
choices=[1, 2, 3, 4, 5],
help="This specifies the n-gram order at which (and below which) "
"no backoff or pruning should be done. This is expected to normally "
"be bigram, but for testing purposes you may want to set it to "
"1.",
)
parser.add_argument(
"--print-as-arpa",
type=str,
default="false",
choices=["true", "false"],
help="If true, print LM in ARPA format (default is to print "
"as FST). You must also set --no-backoff-ngram-order=1 or "
"this is not allowed.",
)
parser.add_argument("--verbose", type=int, default=0, choices=[0, 1, 2, 3, 4, 5], help="Verbose level")
args = parser.parse_args()
if args.verbose >= 1:
print(" ".join(sys.argv), file=sys.stderr)
class CountsForHistory(object):
## This class (which is more like a struct) stores the counts seen in a
## particular history-state. It is used inside class NgramCounts.
## It really does the job of a dict from int to float, but it also
## keeps track of the total count.
def __init__(self):
        # word_to_count maps each predicted word (an integer) to its count;
        # missing words default to a count of 0.
self.word_to_count = defaultdict(int)
self.total_count = 0
def Words(self):
return list(self.word_to_count.keys())
def __str__(self):
# e.g. returns ' total=12 3->4 4->6 -1->2'
return " total={0} {1}".format(
str(self.total_count),
" ".join(["{0} -> {1}".format(word, count) for word, count in self.word_to_count.items()]),
)
## Adds a certain count (expected to be integer, but might be negative). If
## the resulting count for this word is zero, removes the dict entry from
## word_to_count.
## [note, though, that in some circumstances we 'add back' zero counts
## where the presence of n-grams would be structurally required by the arpa,
## specifically if a higher-order history state has a nonzero count,
## we need to structurally have the count there in the states it backs
## off to.
def AddCount(self, predicted_word, count):
self.total_count += count
assert self.total_count >= 0
old_count = self.word_to_count[predicted_word]
new_count = old_count + count
if new_count < 0:
print("predicted-word={0}, old-count={1}, count={2}".format(predicted_word, old_count, count))
assert new_count >= 0
if new_count == 0:
del self.word_to_count[predicted_word]
else:
self.word_to_count[predicted_word] = new_count
class NgramCounts(object):
## A note on data-structure. Firstly, all words are represented as
## integers. We store n-gram counts as an array, indexed by (history-length
## == n-gram order minus one) (note: python calls arrays "lists") of dicts
## from histories to counts, where histories are arrays of integers and
## "counts" are dicts from integer to float. For instance, when
## accumulating the 4-gram count for the '8' in the sequence '5 6 7 8', we'd
## do as follows: self.counts[3][[5,6,7]][8] += 1.0 where the [3] indexes an
## array, the [[5,6,7]] indexes a dict, and the [8] indexes a dict.
def __init__(self, ngram_order):
assert ngram_order >= 2
# Integerized counts will never contain negative numbers, so
# inside this program, we use -3 and -2 for the BOS and EOS symbols
# respectively.
# Note: it's actually important that the bos-symbol is the most negative;
# it helps ensure that we print the state with left-context <s> first
# when we print the FST, and this means that the start-state will have
# the correct value.
self.bos_symbol = -3
self.eos_symbol = -2
# backoff_symbol is kind of a pseudo-word, it's used in keeping track of
# the backoff counts in each state.
self.backoff_symbol = -1
self.total_num_words = 0 # count includes EOS but not BOS.
self.counts = []
for n in range(ngram_order):
self.counts.append(defaultdict(lambda: CountsForHistory()))
# adds a raw count (called while processing input data).
# Suppose we see the sequence '6 7 8 9' and ngram_order=4, 'history'
# would be (6,7,8) and 'predicted_word' would be 9; 'count' would be
# 1.
def AddCount(self, history, predicted_word, count):
self.counts[len(history)][history].AddCount(predicted_word, count)
# 'line' is a string containing a sequence of integer word-ids.
# This function adds the un-smoothed counts from this line of text.
def AddRawCountsFromLine(self, line):
try:
words = [self.bos_symbol] + [int(x) for x in line.split()] + [self.eos_symbol]
except Exception:
sys.exit("make_phone_lm.py: bad input line {0} (expected a sequence " "of integers)".format(line))
for n in range(1, len(words)):
predicted_word = words[n]
history_start = max(0, n + 1 - args.ngram_order)
history = tuple(words[history_start:n])
self.AddCount(history, predicted_word, 1)
self.total_num_words += 1
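    # Worked example for AddRawCountsFromLine (illustrative only): with args.ngram_order=3 the
    # input line "6 7 8" becomes [<s>, 6, 7, 8, </s>] and the counts added are
    #     (<s>,)   -> 6
    #     (<s>, 6) -> 7
    #     (6, 7)   -> 8
    #     (7, 8)   -> </s>
    # i.e. histories are truncated to at most ngram_order - 1 = 2 words, and total_num_words is
    # incremented by 4 (</s> is counted, <s> is not).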
def AddRawCountsFromStandardInput(self):
lines_processed = 0
while True:
line = sys.stdin.readline()
if line == "":
break
self.AddRawCountsFromLine(line)
lines_processed += 1
if lines_processed == 0 or args.verbose > 0:
print(
"make_phone_lm.py: processed {0} lines of input".format(lines_processed), file=sys.stderr,
)
# This backs off the counts by subtracting 1 and assigning the subtracted
# count to the backoff state. It's like a special case of Kneser-Ney with D
# = 1. The optimal D would likely be something like 0.9, but we plan to
# later do entropy-pruning, and the remaining small counts of 0.1 would
# essentially all get pruned away anyway, so we don't lose much by doing it
# like this.
def ApplyBackoff(self):
# note: in the normal case where args.no_backoff_ngram_order == 2 we
# don't do backoff for history-length = 1 (i.e. for bigrams)... this is
# a kind of special LM where we're not going to back off to unigram,
# there will be no unigram.
if args.verbose >= 1:
initial_num_ngrams = self.GetNumNgrams()
for n in reversed(list(range(args.no_backoff_ngram_order, args.ngram_order))):
this_order_counts = self.counts[n]
for hist, counts_for_hist in this_order_counts.items():
backoff_hist = hist[1:]
backoff_counts_for_hist = self.counts[n - 1][backoff_hist]
this_discount_total = 0
for word in counts_for_hist.Words():
counts_for_hist.AddCount(word, -1)
# You can interpret the following line as incrementing the
# count-of-counts for the next-lower order. Note, however,
# that later when we remove n-grams, we'll also add their
# counts to the next-lower-order history state, so the
# resulting counts won't strictly speaking be
# counts-of-counts.
backoff_counts_for_hist.AddCount(word, 1)
this_discount_total += 1
counts_for_hist.AddCount(self.backoff_symbol, this_discount_total)
if args.verbose >= 1:
# Note: because D == 1, we completely back off singletons.
print(
"make_phone_lm.py: ApplyBackoff() reduced the num-ngrams from "
"{0} to {1}".format(initial_num_ngrams, self.GetNumNgrams()),
file=sys.stderr,
)
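    # Worked example for ApplyBackoff (illustrative only), with the fixed discount D = 1: if the
    # state (6, 7) holds counts {8: 3, 9: 1}, then afterwards it holds {8: 2, backoff: 2} (the
    # count for 9 drops to 0 and is removed), while the backoff state (7,) receives +1 for word 8
    # and +1 for word 9.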
# This function prints out to stderr the n-gram counts stored in this
# object; it's used for debugging.
def Print(self, info_string):
print(info_string, file=sys.stderr)
# these are useful for debug.
total = 0.0
total_excluding_backoff = 0.0
for this_order_counts in self.counts:
for hist, counts_for_hist in this_order_counts.items():
print(str(hist) + str(counts_for_hist), file=sys.stderr)
total += counts_for_hist.total_count
total_excluding_backoff += counts_for_hist.total_count
if self.backoff_symbol in counts_for_hist.word_to_count:
total_excluding_backoff -= counts_for_hist.word_to_count[self.backoff_symbol]
print(
"total count = {0}, excluding backoff = {1}".format(total, total_excluding_backoff), file=sys.stderr,
)
def GetHistToStateMap(self):
# This function, called from PrintAsFst, returns a map from
# history to integer FST-state.
hist_to_state = dict()
fst_state_counter = 0
for n in range(0, args.ngram_order):
for hist in self.counts[n].keys():
hist_to_state[hist] = fst_state_counter
fst_state_counter += 1
return hist_to_state
# Returns the probability of word 'word' in history-state 'hist'.
# If 'word' is self.backoff_symbol, returns the backoff prob
# of this history-state.
# Returns None if there is no such word in this history-state, or this
# history-state does not exist.
def GetProb(self, hist, word):
if len(hist) >= args.ngram_order or not hist in self.counts[len(hist)]:
return None
counts_for_hist = self.counts[len(hist)][hist]
total_count = float(counts_for_hist.total_count)
if not word in counts_for_hist.word_to_count:
print(
"make_phone_lm.py: no prob for {0} -> {1} " "[no such count]".format(hist, word), file=sys.stderr,
)
return None
prob = float(counts_for_hist.word_to_count[word]) / total_count
if len(hist) > 0 and word != self.backoff_symbol and self.backoff_symbol in counts_for_hist.word_to_count:
prob_in_backoff = self.GetProb(hist[1:], word)
backoff_prob = float(counts_for_hist.word_to_count[self.backoff_symbol]) / total_count
try:
prob += backoff_prob * prob_in_backoff
except Exception:
sys.exit("problem, hist is {0}, word is {1}".format(hist, word))
return prob
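    # For reference, GetProb computes an interpolated probability: for a history h with backoff
    # count b(h) and total count T(h),
    #     P(w | h) = c(h, w) / T(h) + (b(h) / T(h)) * P(w | h'),
    # where h' drops the leftmost word of h. E.g. (illustrative numbers) if state (6, 7) holds
    # {8: 2, backoff: 2} with T = 4 and P(8 | (7,)) = 0.5, then P(8 | (6, 7)) = 2/4 + (2/4) * 0.5 = 0.75.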
def PruneEmptyStates(self):
# Removes history-states that have no counts.
# It's possible in principle for history-states to have no counts and
# yet they cannot be pruned away because a higher-order version of the
# state exists with nonzero counts, so we have to keep track of this.
protected_histories = set()
states_removed_per_hist_len = [0] * args.ngram_order
for n in reversed(list(range(args.no_backoff_ngram_order, args.ngram_order))):
num_states_removed = 0
            # Iterate over a snapshot of the items, since entries may be deleted below
            # (deleting from a dict while iterating over it raises a RuntimeError in Python 3).
            for hist, counts_for_hist in list(self.counts[n].items()):
l = len(counts_for_hist.word_to_count)
assert l > 0 and self.backoff_symbol in counts_for_hist.word_to_count
if l == 1 and not hist in protected_histories: # only the backoff symbol has a count.
del self.counts[n][hist]
num_states_removed += 1
else:
# if this state was not pruned away, then the state that
# it backs off to may not be pruned away either.
backoff_hist = hist[1:]
protected_histories.add(backoff_hist)
states_removed_per_hist_len[n] = num_states_removed
if args.verbose >= 1:
print(
"make_phone_lm.py: in PruneEmptyStates(), num states removed for "
"each history-length was: " + str(states_removed_per_hist_len),
file=sys.stderr,
)
def EnsureStructurallyNeededNgramsExist(self):
# makes sure that if an n-gram like (6, 7, 8) -> 9 exists,
# then counts exist for (7, 8) -> 9 and (8,) -> 9. It does so
# by adding zero counts where such counts were absent.
# [note: () -> 9 is guaranteed anyway by the backoff method, if
# we have a unigram state].
if args.verbose >= 1:
num_ngrams_initial = self.GetNumNgrams()
for n in reversed(list(range(args.no_backoff_ngram_order, args.ngram_order))):
for hist, counts_for_hist in self.counts[n].items():
# This loop ensures that if we have an n-gram like (6, 7, 8) -> 9,
# then, say, (7, 8) -> 9 and (8) -> 9 exist.
reduced_hist = hist
for m in reversed(list(range(args.no_backoff_ngram_order, n))):
reduced_hist = reduced_hist[1:] # shift an element off
# the history.
counts_for_backoff_hist = self.counts[m][reduced_hist]
for word in counts_for_hist.word_to_count.keys():
counts_for_backoff_hist.word_to_count[word] += 0
# This loop ensures that if we have an n-gram like (6, 7, 8) -> 9,
# then, say, (6, 7) -> 8 and (6) -> 7 exist. This will be needed
# for FST representations of the ARPA LM.
reduced_hist = hist
for m in reversed(list(range(args.no_backoff_ngram_order, n))):
this_word = reduced_hist[-1]
reduced_hist = reduced_hist[:-1] # pop an element off the
# history
counts_for_backoff_hist = self.counts[m][reduced_hist]
counts_for_backoff_hist.word_to_count[this_word] += 0
if args.verbose >= 1:
print(
"make_phone_lm.py: in EnsureStructurallyNeededNgramsExist(), "
"added {0} n-grams".format(self.GetNumNgrams() - num_ngrams_initial),
file=sys.stderr,
)
# This function prints the estimated language model as an FST.
def PrintAsFst(self, word_disambig_symbol):
# n is the history-length (== order + 1). We iterate over the
# history-length in the order 1, 0, 2, 3, and then iterate over the
# histories of each order in sorted order. Putting order 1 first
# and sorting on the histories
# ensures that the bigram state with <s> as the left context comes first.
# (note: self.bos_symbol is the most negative symbol)
# History will map from history (as a tuple) to integer FST-state.
hist_to_state = self.GetHistToStateMap()
for n in [1, 0] + list(range(2, args.ngram_order)):
this_order_counts = self.counts[n]
# For order 1, make sure the keys are sorted.
keys = this_order_counts.keys() if n != 1 else sorted(this_order_counts.keys())
for hist in keys:
word_to_count = this_order_counts[hist].word_to_count
this_fst_state = hist_to_state[hist]
for word in word_to_count.keys():
# work out this_cost. Costs in OpenFst are negative logs.
this_cost = -math.log(self.GetProb(hist, word))
if word > 0: # a real word.
next_hist = hist + (word,) # appending tuples
while not next_hist in hist_to_state:
next_hist = next_hist[1:]
next_fst_state = hist_to_state[next_hist]
print(this_fst_state, next_fst_state, word, word, this_cost)
elif word == self.eos_symbol:
# print final-prob for this state.
print(this_fst_state, this_cost)
else:
assert word == self.backoff_symbol
backoff_fst_state = hist_to_state[hist[1 : len(hist)]]
print(
this_fst_state, backoff_fst_state, word_disambig_symbol, 0, this_cost,
)
# This function returns a set of n-grams that cannot currently be pruned
# away, either because a higher-order form of the same n-gram already exists,
# or because the n-gram leads to an n-gram state that exists.
    # [Note: as we prune, we remove any states that can be removed; see how
    # PruneToIntermediateTarget() calls PruneEmptyStates().]
def GetProtectedNgrams(self):
ans = set()
for n in range(args.no_backoff_ngram_order + 1, args.ngram_order):
for hist, counts_for_hist in self.counts[n].items():
# If we have an n-gram (6, 7, 8) -> 9, the following loop will
# add the backed-off n-grams (7, 8) -> 9 and (8) -> 9 to
# 'protected-ngrams'.
reduced_hist = hist
for _ in reversed(list(range(args.no_backoff_ngram_order, n))):
reduced_hist = reduced_hist[1:] # shift an element off
# the history.
for word in counts_for_hist.word_to_count.keys():
if word != self.backoff_symbol:
ans.add(reduced_hist + (word,))
# The following statement ensures that if we are in a
# history-state (6, 7, 8), then n-grams (6, 7, 8) and (6, 7) are
# protected. This assures that the FST states are accessible.
reduced_hist = hist
for _ in reversed(list(range(args.no_backoff_ngram_order, n))):
ans.add(reduced_hist)
reduced_hist = reduced_hist[:-1] # pop an element off the
# history
return ans
def PruneNgram(self, hist, word):
counts_for_hist = self.counts[len(hist)][hist]
assert word != self.backoff_symbol and word in counts_for_hist.word_to_count
count = counts_for_hist.word_to_count[word]
del counts_for_hist.word_to_count[word]
counts_for_hist.word_to_count[self.backoff_symbol] += count
# the next call adds the count to the symbol 'word' in the backoff
# history-state, and also updates its 'total_count'.
self.counts[len(hist) - 1][hist[1:]].AddCount(word, count)
# The function PruningLogprobChange is the same as the same-named
# function in float-counts-prune.cc in pocolm. Note, it doesn't access
# any class members.
# This function computes the log-likelihood change (<= 0) from backing off
# a particular symbol to the lower-order state.
# The value it returns can be interpreted as a lower bound the actual log-likelihood
# change. By "the actual log-likelihood change" we mean of data generated by
# the model itself before making the change, then modeled with the changed model
# [and comparing the log-like with the log-like before changing the model]. That is,
# it's a K-L divergence, but with the caveat that we don't normalize by the
# overall count of the data, so it's a K-L divergence multiplied by the training-data
# count.
# 'count' is the count of the word (call it 'a') in this state. It's an integer.
# 'discount' is the discount-count in this state (represented as the count
# for the symbol self.backoff_symbol). It's an integer.
# [note: we don't care about the total-count in this state, it cancels out.]
# 'backoff_count' is the count of word 'a' in the lower-order state.
# [actually it is the augmented count, treating any
# extra probability from even-lower-order states as
# if it were a count]. It's a float.
# 'backoff_total' is the total count in the lower-order state. It's a float.
def PruningLogprobChange(self, count, discount, backoff_count, backoff_total):
if count == 0:
return 0.0
assert discount > 0 and backoff_total >= backoff_count and backoff_total >= 0.99 * discount
# augmented_count is like 'count', but with the extra count for symbol
# 'a' due to backoff included.
augmented_count = count + discount * backoff_count / backoff_total
# We imagine a phantom symbol 'b' that represents all symbols other than
# 'a' appearing in this history-state that are accessed via backoff. We
# treat these as being distinct symbols from the same symbol if accessed
# not-via-backoff. (Treating same symbols as distinct gives an upper bound
# on the divergence). We also treat them as distinct from the same symbols
# that are being accessed via backoff from other states. b_count is the
# observed count of symbol 'b' in this state (the backed-off count is
# zero). b_count is also the count of symbol 'b' in the backoff state.
# Note: b_count will not be negative because backoff_total >= backoff_count.
b_count = discount * ((backoff_total - backoff_count) / backoff_total)
assert b_count >= -0.001 * backoff_total
# We imagine a phantom symbol 'c' that represents all symbols other than
# 'a' and 'b' appearing in the backoff state, which got there from
# backing off other states (other than 'this' state). Again, we imagine
# the symbols are distinct even though they may not be (i.e. that c and
# b represent disjoint sets of symbol, even though they might not really
# be disjoint), and this gives us an upper bound on the divergence.
c_count = backoff_total - backoff_count - b_count
assert c_count >= -0.001 * backoff_total
# a_other is the count of 'a' in the backoff state that comes from
# 'other sources', i.e. it was backed off from history-states other than
# the current history state.
a_other_count = backoff_count - discount * backoff_count / backoff_total
assert a_other_count >= -0.001 * backoff_count
# the following sub-expressions are the 'new' versions of certain
# quantities after we assign the total count 'count' to backoff. it
# increases the backoff count in 'this' state, and also the total count
# in the backoff state, and the count of symbol 'a' in the backoff
# state.
new_backoff_count = backoff_count + count # new count of symbol 'a' in
# backoff state
new_backoff_total = backoff_total + count # new total count in
# backoff state.
new_discount = discount + count # new discount-count in 'this' state.
# all the loglike changes below are of the form
# count-of-symbol * log(new prob / old prob)
# which can be more conveniently written (by canceling the denominators),
# count-of-symbol * log(new count / old count).
# this_a_change is the log-like change of symbol 'a' coming from 'this'
# state. bear in mind that
# augmented_count = count + discount * backoff_count / backoff_total,
# and the 'count' term is zero in the numerator part of the log expression,
# because symbol 'a' is completely backed off in 'this' state.
this_a_change = augmented_count * math.log(
(new_discount * new_backoff_count / new_backoff_total) / augmented_count
)
# other_a_change is the log-like change of symbol 'a' coming from all
# other states than 'this'. For speed reasons we don't examine the
# direct (non-backoff) counts of symbol 'a' in all other states than
# 'this' that back off to the backoff state-- it would be slower.
# Instead we just treat the direct part of the prob for symbol 'a' as a
# distinct symbol when it comes from those other states... as usual,
# doing so gives us an upper bound on the divergence.
other_a_change = a_other_count * math.log(
(new_backoff_count / new_backoff_total) / (backoff_count / backoff_total)
)
# b_change is the log-like change of phantom symbol 'b' coming from
# 'this' state (and note: it only comes from this state, that's how we
# defined it).
# note: the expression below could be more directly written as a
# ratio of pseudo-counts as follows, by converting the backoff probabilities
# into pseudo-counts in 'this' state:
# b_count * logf((new_discount * b_count / new_backoff_total) /
# (discount * b_count / backoff_total),
# but we cancel b_count to give us the expression below.
b_change = b_count * math.log((new_discount / new_backoff_total) / (discount / backoff_total))
# c_change is the log-like change of phantom symbol 'c' coming from
# all other states that back off to the backoff sate (and all prob. mass of
# 'c' comes from those other states). The expression below could be more
# directly written as a ratio of counts, as c_count * logf((c_count /
# new_backoff_total) / (c_count / backoff_total)), but we simplified it to
# the expression below.
c_change = c_count * math.log(backoff_total / new_backoff_total)
ans = this_a_change + other_a_change + b_change + c_change
# the answer should not be positive.
assert ans <= 0.0001 * (count + discount + backoff_count + backoff_total)
if args.verbose >= 4:
print(
"pruning-logprob-change for {0},{1},{2},{3} is {4}".format(
count, discount, backoff_count, backoff_total, ans
),
file=sys.stderr,
)
return ans
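    # Worked example for PruningLogprobChange (illustrative only): with count=1, discount=1,
    # backoff_count=2.0 and backoff_total=10.0 we get augmented_count=1.2, b_count=0.8,
    # c_count=7.2 and a_other_count=1.8, and the four terms above sum to roughly -0.60 nats,
    # i.e. pruning this n-gram is predicted to cost about 0.6 nats of training-set log-likelihood.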
def GetLikeChangeFromPruningNgram(self, hist, word):
counts_for_hist = self.counts[len(hist)][hist]
counts_for_backoff_hist = self.counts[len(hist) - 1][hist[1:]]
assert word != self.backoff_symbol and word in counts_for_hist.word_to_count
count = counts_for_hist.word_to_count[word]
discount = counts_for_hist.word_to_count[self.backoff_symbol]
backoff_total = counts_for_backoff_hist.total_count
# backoff_count is a pseudo-count: it's like the count of 'word' in the
# backoff history-state, but adding something to account for further
# levels of backoff.
try:
backoff_count = self.GetProb(hist[1:], word) * backoff_total
except Exception:
print(
"problem getting backoff count: hist = {0}, word = {1}".format(hist, word), file=sys.stderr,
)
sys.exit(1)
return self.PruningLogprobChange(float(count), float(discount), backoff_count, float(backoff_total))
# note: returns loglike change per word.
def PruneToIntermediateTarget(self, num_extra_ngrams):
protected_ngrams = self.GetProtectedNgrams()
initial_num_extra_ngrams = self.GetNumExtraNgrams()
num_ngrams_to_prune = initial_num_extra_ngrams - num_extra_ngrams
assert num_ngrams_to_prune > 0
num_candidates_per_order = [0] * args.ngram_order
num_pruned_per_order = [0] * args.ngram_order
# like_change_and_ngrams this will be a list of tuples consisting
# of the likelihood change as a float and then the words of the n-gram
# that we're considering pruning,
# e.g. (-0.164, 7, 8, 9)
# meaning that pruning the n-gram (7, 8) -> 9 leads to
# a likelihood change of -0.164. We'll later sort this list
# so we can prune the n-grams that made the least-negative
# likelihood change.
like_change_and_ngrams = []
for n in range(args.no_backoff_ngram_order, args.ngram_order):
for hist, counts_for_hist in self.counts[n].items():
for word, count in counts_for_hist.word_to_count.items():
if word != self.backoff_symbol:
if not hist + (word,) in protected_ngrams:
like_change = self.GetLikeChangeFromPruningNgram(hist, word)
like_change_and_ngrams.append((like_change,) + hist + (word,))
num_candidates_per_order[len(hist)] += 1
like_change_and_ngrams.sort(reverse=True)
if num_ngrams_to_prune > len(like_change_and_ngrams):
print(
"make_phone_lm.py: aimed to prune {0} n-grams but could only "
"prune {1}".format(num_ngrams_to_prune, len(like_change_and_ngrams)),
file=sys.stderr,
)
num_ngrams_to_prune = len(like_change_and_ngrams)
total_loglike_change = 0.0
for i in range(num_ngrams_to_prune):
total_loglike_change += like_change_and_ngrams[i][0]
hist = like_change_and_ngrams[i][1:-1] # all but 1st and last elements
word = like_change_and_ngrams[i][-1] # last element
num_pruned_per_order[len(hist)] += 1
self.PruneNgram(hist, word)
like_change_per_word = total_loglike_change / self.total_num_words
if args.verbose >= 1:
            effective_threshold = (
                like_change_and_ngrams[num_ngrams_to_prune - 1][0] if num_ngrams_to_prune > 0 else 0.0
            )
print(
"Pruned from {0} ngrams to {1}, with threshold {2}. Candidates per order were {3}, "
"num-ngrams pruned per order were {4}. Like-change per word was {5}".format(
initial_num_extra_ngrams,
initial_num_extra_ngrams - num_ngrams_to_prune,
"%.4f" % effective_threshold,
num_candidates_per_order,
num_pruned_per_order,
like_change_per_word,
),
file=sys.stderr,
)
if args.verbose >= 3:
print(
"Pruning: like_change_and_ngrams is:\n"
+ "\n".join([str(x) for x in like_change_and_ngrams[:num_ngrams_to_prune]])
+ "\n-------- stop pruning here: ----------\n"
+ "\n".join([str(x) for x in like_change_and_ngrams[num_ngrams_to_prune:]]),
file=sys.stderr,
)
self.Print(
"Counts after pruning to num-extra-ngrams={0}".format(initial_num_extra_ngrams - num_ngrams_to_prune)
)
self.PruneEmptyStates()
if args.verbose >= 3:
ngram_counts.Print("Counts after removing empty states [inside pruning algorithm]:")
return like_change_per_word
def PruneToFinalTarget(self, num_extra_ngrams):
# prunes to a specified num_extra_ngrams. The 'extra_ngrams' refers to
# the count of n-grams of order higher than args.no_backoff_ngram_order.
# We construct a sequence of targets that gradually approaches
# this value. Doing it iteratively like this is a good way
# to deal with the fact that sometimes we can't prune a certain
# n-gram before certain other n-grams are pruned (because
# they lead to a state that must be kept, or an n-gram exists
# that backs off to this n-gram).
current_num_extra_ngrams = self.GetNumExtraNgrams()
if num_extra_ngrams >= current_num_extra_ngrams:
print(
"make_phone_lm.py: not pruning since target num-extra-ngrams={0} is >= "
"current num-extra-ngrams={1}".format(num_extra_ngrams, current_num_extra_ngrams),
file=sys.stderr,
)
return
target_sequence = [num_extra_ngrams]
# two final iterations where the targets differ by factors of 1.1,
# preceded by two iterations where the targets differ by factors of 1.2.
for this_factor in [1.1, 1.2]:
for n in range(0, 2):
if int((target_sequence[-1] + 1) * this_factor) < current_num_extra_ngrams:
target_sequence.append(int((target_sequence[-1] + 1) * this_factor))
# then change in factors of 1.3
while True:
this_factor = 1.3
if int((target_sequence[-1] + 1) * this_factor) < current_num_extra_ngrams:
target_sequence.append(int((target_sequence[-1] + 1) * this_factor))
else:
break
target_sequence = list(set(target_sequence)) # only keep unique targets.
target_sequence.sort(reverse=True)
print(
"make_phone_lm.py: current num-extra-ngrams={0}, pruning with "
"following sequence of targets: {1}".format(current_num_extra_ngrams, target_sequence),
file=sys.stderr,
)
total_like_change_per_word = 0.0
for target in target_sequence:
total_like_change_per_word += self.PruneToIntermediateTarget(target)
if args.verbose >= 1:
print(
"make_phone_lm.py: K-L divergence from pruning (upper bound) is " "%.4f" % total_like_change_per_word,
file=sys.stderr,
)
# returns the number of n-grams on top of those that can't be pruned away
# because their order is <= args.no_backoff_ngram_order.
def GetNumExtraNgrams(self):
ans = 0
for hist_len in range(args.no_backoff_ngram_order, args.ngram_order):
# note: hist_len + 1 is the actual order.
ans += self.GetNumNgrams(hist_len)
return ans
def GetNumNgrams(self, hist_len=None):
ans = 0
if hist_len is None:
for hist_len in range(args.ngram_order):
# note: hist_len + 1 is the actual order.
ans += self.GetNumNgrams(hist_len)
return ans
else:
for counts_for_hist in self.counts[hist_len].values():
ans += len(counts_for_hist.word_to_count)
if self.backoff_symbol in counts_for_hist.word_to_count:
ans -= 1 # don't count the backoff symbol, it doesn't produce
# its own n-gram line.
return ans
# this function, used in PrintAsArpa, converts an integer to
# a string by either printing it as a string, or for self.bos_symbol
# and self.eos_symbol, printing them as "<s>" and "</s>" respectively.
def IntToString(self, i):
if i == self.bos_symbol:
return "<s>"
elif i == self.eos_symbol:
return "</s>"
else:
assert i != self.backoff_symbol
return str(i)
def PrintAsArpa(self):
# Prints out the FST in ARPA format.
assert args.no_backoff_ngram_order == 1 # without unigrams we couldn't
# print as ARPA format.
print("\\data\\")
for hist_len in range(args.ngram_order):
# print the number of n-grams. Add 1 for the 1-gram
# section because of <s>, we print -99 as the prob so we
# have a place to put the backoff prob.
print("ngram {0}={1}".format(hist_len + 1, self.GetNumNgrams(hist_len) + (1 if hist_len == 0 else 0),))
print("")
for hist_len in range(args.ngram_order):
print("\\{0}-grams:".format(hist_len + 1))
# print fake n-gram for <s>, for its backoff prob.
if hist_len == 0:
backoff_prob = self.GetProb((self.bos_symbol,), self.backoff_symbol)
if backoff_prob != None:
print("-99\t<s>\t{0}".format("%.5f" % math.log10(backoff_prob)))
for hist in self.counts[hist_len].keys():
for word in self.counts[hist_len][hist].word_to_count.keys():
if word != self.backoff_symbol:
prob = self.GetProb(hist, word)
assert prob != None and prob > 0
backoff_prob = self.GetProb((hist) + (word,), self.backoff_symbol)
line = "{0}\t{1}".format(
"%.5f" % math.log10(prob), " ".join(self.IntToString(x) for x in hist + (word,)),
)
if backoff_prob != None:
line += "\t{0}".format("%.5f" % math.log10(backoff_prob))
print(line)
print("")
print("\\end\\")
ngram_counts = NgramCounts(args.ngram_order)
ngram_counts.AddRawCountsFromStandardInput()
if args.verbose >= 3:
ngram_counts.Print("Raw counts:")
ngram_counts.ApplyBackoff()
if args.verbose >= 3:
ngram_counts.Print("Counts after applying Kneser-Ney discounting:")
ngram_counts.EnsureStructurallyNeededNgramsExist()
if args.verbose >= 3:
ngram_counts.Print("Counts after adding structurally-needed n-grams (1st time):")
ngram_counts.PruneEmptyStates()
if args.verbose >= 3:
ngram_counts.Print("Counts after removing empty states:")
ngram_counts.PruneToFinalTarget(args.num_extra_ngrams)
ngram_counts.EnsureStructurallyNeededNgramsExist()
if args.verbose >= 3:
ngram_counts.Print("Counts after adding structurally-needed n-grams (2nd time):")
if args.print_as_arpa == "true":
ngram_counts.PrintAsArpa()
else:
if args.phone_disambig_symbol is None:
sys.exit("make_phone_lm.py: --phone-disambig-symbol must be provided (unless " "you are writing as ARPA")
ngram_counts.PrintAsFst(args.phone_disambig_symbol)
## Below are some little test commands that can be used to look at the detailed stats
## for a kind of sanity check.
# test command:
# (echo 6 7 8 4; echo 7 8 9; echo 7 8; echo 7 4; echo 8 4 ) | utils/lang/make_phone_lm.py --phone-disambig-symbol=400 --verbose=3
# (echo 6 7 8 4; echo 7 8 9; echo 7 8; echo 7 4; echo 8 4 ) | utils/lang/make_phone_lm.py --phone-disambig-symbol=400 --verbose=3 --num-extra-ngrams=0
# (echo 6 7 8 4; echo 6 7 ) | utils/lang/make_phone_lm.py --print-as-arpa=true --no-backoff-ngram-order=1 --verbose=3
## The following shows how we created some data suitable to do comparisons with
## other language modeling toolkits. Note: we're running in a configuration
## where --no-backoff-ngram-order=1 (i.e. we have a unigram LM state) because
## it's the only way to get perplexity calculations and to write an ARPA file.
##
# cd egs/tedlium/s5_r2
# . ./path.sh
# mkdir -p lm_test
# ali-to-phones exp/tri3/final.mdl "ark:gunzip -c exp/tri3/ali.*.gz|" ark,t:- | awk '{$1 = ""; print}' > lm_test/phone_seqs
# wc lm_test/phone_seqs
# 92464 8409563 27953288 lm_test/phone_seqs
# head -n 20000 lm_test/phone_seqs > lm_test/train.txt
# tail -n 1000 lm_test/phone_seqs > lm_test/test.txt
## This shows make_phone_lm.py with the default number of extra-lm-states (20k)
## You have to have SRILM on your path to get perplexities [note: it should be on the
## path if you installed it and you sourced the tedlium s5b path.sh, as above.]
# utils/lang/make_phone_lm.py --print-as-arpa=true --no-backoff-ngram-order=1 --verbose=1 < lm_test/train.txt > lm_test/arpa_pr20k
# ngram -order 4 -unk -lm lm_test/arpa_pr20k -ppl lm_test/test.txt
# file lm_test/test.txt: 1000 sentences, 86489 words, 3 OOVs
# 0 zeroprobs, logprob= -80130.1 ppl=*8.23985* ppl1= 8.44325
# on training data: 0 zeroprobs, logprob= -1.6264e+06 ppl= 7.46947 ppl1= 7.63431
## This shows make_phone_lm.py without any pruning (make --num-extra-ngrams very large).
# utils/lang/make_phone_lm.py --print-as-arpa=true --num-extra-ngrams=1000000 --no-backoff-ngram-order=1 --verbose=1 < lm_test/train.txt > lm_test/arpa
# ngram -order 4 -unk -lm lm_test/arpa -ppl lm_test/test.txt
# file lm_test/test.txt: 1000 sentences, 86489 words, 3 OOVs
# 0 zeroprobs, logprob= -74976 ppl=*7.19459* ppl1= 7.36064
# on training data: 0 zeroprobs, logprob= -1.44198e+06 ppl= 5.94659 ppl1= 6.06279
## This is SRILM without pruning (c.f. the 7.19 above, it's slightly better).
# ngram-count -text lm_test/train.txt -order 4 -kndiscount2 -kndiscount3 -kndiscount4 -interpolate -lm lm_test/arpa_srilm
# ngram -order 4 -unk -lm lm_test/arpa_srilm -ppl lm_test/test.txt
# file lm_test/test.txt: 1000 sentences, 86489 words, 3 OOVs
# 0 zeroprobs, logprob= -74742.2 ppl= *7.15044* ppl1= 7.31494
## This is SRILM with a pruning beam tuned to get 20k n-grams above unigram
## (c.f. the 8.23 above, it's a lot worse).
# ngram-count -text lm_test/train.txt -order 4 -kndiscount2 -kndiscount3 -kndiscount4 -interpolate -prune 1.65e-05 -lm lm_test/arpa_srilm.pr1.65e-5
# the model has 20249 n-grams above unigram [c.f. our 20k]
# ngram -order 4 -unk -lm lm_test/arpa_srilm.pr1.65e-5 -ppl lm_test/test.txt
# file lm_test/test.txt: 1000 sentences, 86489 words, 3 OOVs
# 0 zeroprobs, logprob= -86803.7 ppl=*9.82202* ppl1= 10.0849
## This is pocolm..
## Note: we have to hold out some of the training data as dev to
## estimate the hyperparameters, but we'll fold it back in before
## making the final LM. [--fold-dev-into=train]
# mkdir -p lm_test/data/text
# head -n 1000 lm_test/train.txt > lm_test/data/text/dev.txt
# tail -n +1001 lm_test/train.txt > lm_test/data/text/train.txt
## give it a 'large' num-words so it picks them all.
# export PATH=$PATH:../../../tools/pocolm/scripts
# train_lm.py --num-word=100000 --fold-dev-into=train lm_test/data/text 4 lm_test/data/lm_unpruned
# get_data_prob.py lm_test/test.txt lm_test/data/lm_unpruned/100000_4.pocolm
## compute-probs: average log-prob per word was -1.95956 (perplexity = *7.0962*) over 87489 words.
## Note: we can compare this perplexity with 7.15 with SRILM and 7.19 with make_phone_lm.py.
# pruned_lm_dir=${lm_dir}/${num_word}_${order}_prune${threshold}.pocolm
# prune_lm_dir.py --target-num-ngrams=20100 lm_test/data/lm_unpruned/100000_4.pocolm lm_test/data/lm_unpruned/100000_4_pr20k.pocolm
# get_data_prob.py lm_test/test.txt lm_test/data/lm_unpruned/100000_4_pr20k.pocolm
## compute-probs: average log-prob per word was -2.0409 (perplexity = 7.69757) over 87489 words.
## note: the 7.69 can be compared with 9.82 from SRILM and 8.23 from make_phone_lm.py.
## format_arpa_lm.py lm_test/data/lm_unpruned/100000_4_pr20k.pocolm | head
## .. it has 20488 n-grams above unigram. More than 20k but not enough to explain the difference
## .. in perplexity.
## OK... if I reran after modifying prune_lm_dir.py to comment out the line
## 'steps += 'EM EM'.split()' which adds the two EM stages per step, and got the
## perplexity again, I got the following:
## compute-probs: average log-prob per word was -2.09722 (perplexity = 8.14353) over 87489 words.
## .. so it turns out the E-M is actually important.
| NeMo-main | scripts/asr_language_modeling/ngram_lm/make_phone_lm.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Use this file to create a lexicon file for Flashlight decoding from an existing KenLM arpa file
# A lexicon file is required for Flashlight decoding in most cases, as it acts as a map from the words
# in your arpa file to the representation used by your ASR AM.
# For more details, see: https://github.com/flashlight/flashlight/tree/main/flashlight/app/asr#data-preparation
#
# Usage: python create_lexicon_from_arpa.py --arpa /path/to/english.arpa --model /path/to/model.nemo --lower
#
#
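# For reference, each output line maps a word to a space-separated token sequence. Illustrative
# examples only (the actual tokens depend on the model's tokenizer):
#     char-level model:  hello<TAB>h e l l o
#     BPE-level model:   hello<TAB>▁he llo
#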
import argparse
import os
import re
from nemo.utils import logging
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Utility script for generating lexicon file from a KenLM arpa file")
parser.add_argument("--arpa", required=True, help="path to your arpa file")
parser.add_argument("--dst", help="directory to store generated lexicon", default=None)
parser.add_argument("--lower", action='store_true', help="Whether to lowercase the arpa vocab")
parser.add_argument("--model", default=None, help="path to Nemo model for its tokeniser")
args = parser.parse_args()
if not os.path.exists(args.arpa):
logging.critical(f"ARPA file [ {args.arpa} ] not detected on disk, aborting!")
exit(255)
if args.dst is not None:
save_path = args.dst
else:
save_path = os.path.dirname(args.arpa)
os.makedirs(save_path, exist_ok=True)
tokenizer = None
if args.model is not None:
from nemo.collections.asr.models import ASRModel
model = ASRModel.restore_from(restore_path=args.model, map_location='cpu')
if hasattr(model, 'tokenizer'):
tokenizer = model.tokenizer
else:
logging.warning('Supplied Nemo model does not contain a tokenizer')
lex_file = os.path.join(save_path, os.path.splitext(os.path.basename(args.arpa))[0] + '.lexicon')
logging.info(f"Writing Lexicon file to: {lex_file}...")
with open(lex_file, "w", encoding='utf_8', newline='\n') as f:
with open(args.arpa, "r", encoding='utf_8') as arpa:
for line in arpa:
# verify if the line corresponds to unigram
if not re.match(r"[-]*[0-9\.]+\t\S+\t*[-]*[0-9\.]*$", line):
continue
word = line.split("\t")[1]
word = word.strip().lower() if args.lower else word.strip()
if word == "<UNK>" or word == "<unk>" or word == "<s>" or word == "</s>":
continue
if tokenizer is None:
f.write("{w}\t{s}\n".format(w=word, s=" ".join(word)))
else:
w_ids = tokenizer.text_to_ids(word)
if tokenizer.unk_id not in w_ids:
f.write("{w}\t{s}\n".format(w=word, s=" ".join(tokenizer.text_to_tokens(word))))
| NeMo-main | scripts/asr_language_modeling/ngram_lm/create_lexicon_from_arpa.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
# This script evaluates an N-gram language model trained with the KenLM library (https://github.com/kpu/kenlm) in
# fusion with beam search decoders on top of a trained ASR Transducer model. NeMo's beam search decoders are capable of using
# KenLM's N-gram models to find the best candidates. This script supports both character-level and BPE-level
# encodings and models, which are detected automatically from the type of the model.
# You may train the LM model with 'scripts/asr_language_modeling/ngram_lm/train_kenlm.py'.
# Config Help
To discover all arguments of the script, please run :
python eval_beamsearch_ngram.py --help
python eval_beamsearch_ngram.py --cfg job
# USAGE
python eval_beamsearch_ngram_transducer.py nemo_model_file=<path to the .nemo file of the model> \
    input_manifest=<path to the evaluation JSON manifest file> \
kenlm_model_file=<path to the binary KenLM model> \
beam_width=[<list of the beam widths, separated with commas>] \
beam_alpha=[<list of the beam alphas, separated with commas>] \
preds_output_folder=<optional folder to store the predictions> \
probs_cache_file=null \
decoding_strategy=<greedy_batch or maes decoding>
maes_prefix_alpha=[<list of the maes prefix alphas, separated with commas>] \
maes_expansion_gamma=[<list of the maes expansion gammas, separated with commas>] \
hat_subtract_ilm=<in case of HAT model: subtract internal LM or not> \
hat_ilm_weight=[<in case of HAT model: list of the HAT internal LM weights, separated with commas>] \
...
# Grid Search for Hyper parameters
For grid search, you can provide a list of arguments as follows -
beam_width=[4,8,16,....] \
beam_alpha=[-2.0,-1.0,...,1.0,2.0] \
# You may find more info on how to use this script at:
# https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/asr_language_modeling.html
"""
import contextlib
import json
import os
import pickle
import tempfile
from dataclasses import dataclass, field, is_dataclass
from pathlib import Path
from typing import List, Optional
import editdistance
import numpy as np
import torch
from omegaconf import MISSING, OmegaConf
from sklearn.model_selection import ParameterGrid
from tqdm.auto import tqdm
import nemo.collections.asr as nemo_asr
from nemo.collections.asr.parts.submodules import rnnt_beam_decoding
from nemo.core.config import hydra_runner
from nemo.utils import logging
# fmt: off
@dataclass
class EvalBeamSearchNGramConfig:
"""
Evaluate an ASR model with beam search decoding and n-gram KenLM language model.
"""
# # The path of the '.nemo' file of the ASR model or the name of a pretrained model (ngc / huggingface)
nemo_model_file: str = MISSING
# File paths
input_manifest: str = MISSING # The manifest file of the evaluation set
kenlm_model_file: Optional[str] = None # The path of the KenLM binary model file
preds_output_folder: Optional[str] = None # The optional folder where the predictions are stored
probs_cache_file: Optional[str] = None # The cache file for storing the logprobs of the model
# Parameters for inference
acoustic_batch_size: int = 128 # The batch size to calculate log probabilities
beam_batch_size: int = 128 # The batch size to be used for beam search decoding
device: str = "cuda" # The device to load the model onto to calculate log probabilities
use_amp: bool = False # Whether to use AMP if available to calculate log probabilities
num_workers: int = 1 # Number of workers for DataLoader
# The decoding scheme to be used for evaluation
decoding_strategy: str = "greedy_batch" # ["greedy_batch", "beam", "tsd", "alsd", "maes"]
# Beam Search hyperparameters
beam_width: List[int] = field(default_factory=lambda: [8]) # The width or list of the widths for the beam search decoding
beam_alpha: List[float] = field(default_factory=lambda: [0.2]) # The alpha parameter or list of the alphas for the beam search decoding
maes_prefix_alpha: List[int] = field(default_factory=lambda: [2]) # The maes_prefix_alpha or list of the maes_prefix_alpha for the maes decoding
maes_expansion_gamma: List[float] = field(default_factory=lambda: [2.3]) # The maes_expansion_gamma or list of the maes_expansion_gamma for the maes decoding
# HAT related parameters (only for internal lm subtraction)
hat_subtract_ilm: bool = False
hat_ilm_weight: List[float] = field(default_factory=lambda: [0.0])
decoding: rnnt_beam_decoding.BeamRNNTInferConfig = rnnt_beam_decoding.BeamRNNTInferConfig(beam_size=128)
# fmt: on
def decoding_step(
model: nemo_asr.models.ASRModel,
cfg: EvalBeamSearchNGramConfig,
all_probs: List[torch.Tensor],
target_transcripts: List[str],
preds_output_file: str = None,
beam_batch_size: int = 128,
progress_bar: bool = True,
):
level = logging.getEffectiveLevel()
logging.setLevel(logging.CRITICAL)
# Reset config
model.change_decoding_strategy(None)
cfg.decoding.hat_ilm_weight = cfg.decoding.hat_ilm_weight * cfg.hat_subtract_ilm
# Override the beam search config with current search candidate configuration
cfg.decoding.return_best_hypothesis = False
cfg.decoding.ngram_lm_model = cfg.kenlm_model_file
cfg.decoding.hat_subtract_ilm = cfg.hat_subtract_ilm
# Update model's decoding strategy config
model.cfg.decoding.strategy = cfg.decoding_strategy
model.cfg.decoding.beam = cfg.decoding
# Update model's decoding strategy
model.change_decoding_strategy(model.cfg.decoding)
logging.setLevel(level)
wer_dist_first = cer_dist_first = 0
wer_dist_best = cer_dist_best = 0
words_count = 0
chars_count = 0
sample_idx = 0
if preds_output_file:
out_file = open(preds_output_file, 'w', encoding='utf_8', newline='\n')
if progress_bar:
if cfg.decoding_strategy == "greedy_batch":
description = "Greedy_batch decoding.."
else:
description = f"{cfg.decoding_strategy} decoding with bw={cfg.decoding.beam_size}, ba={cfg.decoding.ngram_lm_alpha}, ma={cfg.decoding.maes_prefix_alpha}, mg={cfg.decoding.maes_expansion_gamma}, hat_ilmw={cfg.decoding.hat_ilm_weight}"
it = tqdm(range(int(np.ceil(len(all_probs) / beam_batch_size))), desc=description, ncols=120)
else:
it = range(int(np.ceil(len(all_probs) / beam_batch_size)))
for batch_idx in it:
# disabling type checking
probs_batch = all_probs[batch_idx * beam_batch_size : (batch_idx + 1) * beam_batch_size]
probs_lens = torch.tensor([prob.shape[-1] for prob in probs_batch])
with torch.no_grad():
packed_batch = torch.zeros(len(probs_batch), probs_batch[0].shape[0], max(probs_lens), device='cpu')
for prob_index in range(len(probs_batch)):
packed_batch[prob_index, :, : probs_lens[prob_index]] = torch.tensor(
probs_batch[prob_index].unsqueeze(0), device=packed_batch.device, dtype=packed_batch.dtype
)
best_hyp_batch, beams_batch = model.decoding.rnnt_decoder_predictions_tensor(
packed_batch, probs_lens, return_hypotheses=True,
)
if cfg.decoding_strategy == "greedy_batch":
beams_batch = [[x] for x in best_hyp_batch]
for beams_idx, beams in enumerate(beams_batch):
target = target_transcripts[sample_idx + beams_idx]
target_split_w = target.split()
target_split_c = list(target)
words_count += len(target_split_w)
chars_count += len(target_split_c)
wer_dist_min = cer_dist_min = 10000
for candidate_idx, candidate in enumerate(beams): # type: (int, rnnt_beam_decoding.rnnt_utils.Hypothesis)
pred_text = candidate.text
pred_split_w = pred_text.split()
wer_dist = editdistance.eval(target_split_w, pred_split_w)
pred_split_c = list(pred_text)
cer_dist = editdistance.eval(target_split_c, pred_split_c)
wer_dist_min = min(wer_dist_min, wer_dist)
cer_dist_min = min(cer_dist_min, cer_dist)
if candidate_idx == 0:
# first candidate
wer_dist_first += wer_dist
cer_dist_first += cer_dist
score = candidate.score
if preds_output_file:
out_file.write('{}\t{}\n'.format(pred_text, score))
wer_dist_best += wer_dist_min
cer_dist_best += cer_dist_min
sample_idx += len(probs_batch)
if cfg.decoding_strategy == "greedy_batch":
return wer_dist_first / words_count, cer_dist_first / chars_count
if preds_output_file:
out_file.close()
logging.info(f"Stored the predictions of {cfg.decoding_strategy} decoding at '{preds_output_file}'.")
if cfg.decoding.ngram_lm_model:
logging.info(
f"WER/CER with {cfg.decoding_strategy} decoding and N-gram model = {wer_dist_first / words_count:.2%}/{cer_dist_first / chars_count:.2%}"
)
else:
logging.info(
f"WER/CER with {cfg.decoding_strategy} decoding = {wer_dist_first / words_count:.2%}/{cer_dist_first / chars_count:.2%}"
)
logging.info(
f"Oracle WER/CER in candidates with perfect LM= {wer_dist_best / words_count:.2%}/{cer_dist_best / chars_count:.2%}"
)
logging.info(f"=================================================================================")
return wer_dist_first / words_count, cer_dist_first / chars_count
@hydra_runner(config_path=None, config_name='EvalBeamSearchNGramConfig', schema=EvalBeamSearchNGramConfig)
def main(cfg: EvalBeamSearchNGramConfig):
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg) # type: EvalBeamSearchNGramConfig
    valid_decoding_strategies = ["greedy_batch", "beam", "tsd", "alsd", "maes"]
    if cfg.decoding_strategy not in valid_decoding_strategies:
        raise ValueError(
            f"Given decoding_strategy={cfg.decoding_strategy} is invalid. Available options are:\n"
            f"{valid_decoding_strategies}"
        )
if cfg.nemo_model_file.endswith('.nemo'):
asr_model = nemo_asr.models.ASRModel.restore_from(cfg.nemo_model_file, map_location=torch.device(cfg.device))
else:
logging.warning(
"nemo_model_file does not end with .nemo, therefore trying to load a pretrained model with this name."
)
asr_model = nemo_asr.models.ASRModel.from_pretrained(
cfg.nemo_model_file, map_location=torch.device(cfg.device)
)
if cfg.kenlm_model_file:
if not os.path.exists(cfg.kenlm_model_file):
raise FileNotFoundError(f"Could not find the KenLM model file '{cfg.kenlm_model_file}'.")
if cfg.decoding_strategy != "maes":
raise ValueError(f"Decoding with kenlm model is supported only for maes decoding algorithm.")
lm_path = cfg.kenlm_model_file
else:
lm_path = None
cfg.beam_alpha = [0.0]
if cfg.hat_subtract_ilm:
assert lm_path, "kenlm must be set for hat internal lm subtraction"
if cfg.decoding_strategy != "maes":
cfg.maes_prefix_alpha, cfg.maes_expansion_gamma, cfg.hat_ilm_weight = [0], [0], [0]
target_transcripts = []
manifest_dir = Path(cfg.input_manifest).parent
with open(cfg.input_manifest, 'r', encoding='utf_8') as manifest_file:
audio_file_paths = []
for line in tqdm(manifest_file, desc=f"Reading Manifest {cfg.input_manifest} ...", ncols=120):
data = json.loads(line)
audio_file = Path(data['audio_filepath'])
if not audio_file.is_file() and not audio_file.is_absolute():
audio_file = manifest_dir / audio_file
target_transcripts.append(data['text'])
audio_file_paths.append(str(audio_file.absolute()))
if cfg.probs_cache_file and os.path.exists(cfg.probs_cache_file):
logging.info(f"Found a pickle file of probabilities at '{cfg.probs_cache_file}'.")
logging.info(f"Loading the cached pickle file of probabilities from '{cfg.probs_cache_file}' ...")
with open(cfg.probs_cache_file, 'rb') as probs_file:
all_probs = pickle.load(probs_file)
if len(all_probs) != len(audio_file_paths):
raise ValueError(
f"The number of samples in the probabilities file '{cfg.probs_cache_file}' does not "
f"match the manifest file. You may need to delete the probabilities cached file."
)
else:
@contextlib.contextmanager
def default_autocast():
yield
if cfg.use_amp:
if torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
logging.info("AMP is enabled!\n")
autocast = torch.cuda.amp.autocast
else:
autocast = default_autocast
else:
autocast = default_autocast
# manual calculation of encoder_embeddings
with autocast():
with torch.no_grad():
asr_model.eval()
asr_model.encoder.freeze()
device = next(asr_model.parameters()).device
all_probs = []
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'manifest.json'), 'w', encoding='utf-8') as fp:
for audio_file in audio_file_paths:
entry = {'audio_filepath': audio_file, 'duration': 100000, 'text': ''}
fp.write(json.dumps(entry) + '\n')
config = {
'paths2audio_files': audio_file_paths,
'batch_size': cfg.acoustic_batch_size,
'temp_dir': tmpdir,
'num_workers': cfg.num_workers,
'channel_selector': None,
'augmentor': None,
}
temporary_datalayer = asr_model._setup_transcribe_dataloader(config)
for test_batch in tqdm(temporary_datalayer, desc="Transcribing", disable=True):
encoded, encoded_len = asr_model.forward(
input_signal=test_batch[0].to(device), input_signal_length=test_batch[1].to(device)
)
# dump encoder embeddings per file
for idx in range(encoded.shape[0]):
encoded_no_pad = encoded[idx, :, : encoded_len[idx]]
all_probs.append(encoded_no_pad)
if cfg.probs_cache_file:
logging.info(f"Writing pickle files of probabilities at '{cfg.probs_cache_file}'...")
with open(cfg.probs_cache_file, 'wb') as f_dump:
pickle.dump(all_probs, f_dump)
if cfg.decoding_strategy == "greedy_batch":
asr_model = asr_model.to('cpu')
candidate_wer, candidate_cer = decoding_step(
asr_model,
cfg,
all_probs=all_probs,
target_transcripts=target_transcripts,
beam_batch_size=cfg.beam_batch_size,
progress_bar=True,
)
logging.info(f"Greedy batch WER/CER = {candidate_wer:.2%}/{candidate_cer:.2%}")
asr_model = asr_model.to('cpu')
# 'greedy_batch' decoding_strategy would skip the beam search decoding
if cfg.decoding_strategy in ["beam", "tsd", "alsd", "maes"]:
if cfg.beam_width is None or cfg.beam_alpha is None:
raise ValueError("beam_width and beam_alpha are needed to perform beam search decoding.")
params = {
'beam_width': cfg.beam_width,
'beam_alpha': cfg.beam_alpha,
'maes_prefix_alpha': cfg.maes_prefix_alpha,
'maes_expansion_gamma': cfg.maes_expansion_gamma,
'hat_ilm_weight': cfg.hat_ilm_weight,
}
hp_grid = ParameterGrid(params)
hp_grid = list(hp_grid)
best_wer_beam_size, best_cer_beam_size = None, None
best_wer_alpha, best_cer_alpha = None, None
best_wer, best_cer = 1e6, 1e6
logging.info(
f"==============================Starting the {cfg.decoding_strategy} decoding==============================="
)
logging.info(f"Grid search size: {len(hp_grid)}")
logging.info(f"It may take some time...")
logging.info(f"==============================================================================================")
if cfg.preds_output_folder and not os.path.exists(cfg.preds_output_folder):
os.mkdir(cfg.preds_output_folder)
for hp in hp_grid:
if cfg.preds_output_folder:
results_file = f"preds_out_{cfg.decoding_strategy}_bw{hp['beam_width']}"
if cfg.decoding_strategy == "maes":
results_file = f"{results_file}_ma{hp['maes_prefix_alpha']}_mg{hp['maes_expansion_gamma']}"
if cfg.kenlm_model_file:
results_file = f"{results_file}_ba{hp['beam_alpha']}"
if cfg.hat_subtract_ilm:
results_file = f"{results_file}_hat_ilmw{hp['hat_ilm_weight']}"
preds_output_file = os.path.join(cfg.preds_output_folder, f"{results_file}.tsv")
else:
preds_output_file = None
cfg.decoding.beam_size = hp["beam_width"]
cfg.decoding.ngram_lm_alpha = hp["beam_alpha"]
cfg.decoding.maes_prefix_alpha = hp["maes_prefix_alpha"]
cfg.decoding.maes_expansion_gamma = hp["maes_expansion_gamma"]
cfg.decoding.hat_ilm_weight = hp["hat_ilm_weight"]
candidate_wer, candidate_cer = decoding_step(
asr_model,
cfg,
all_probs=all_probs,
target_transcripts=target_transcripts,
preds_output_file=preds_output_file,
beam_batch_size=cfg.beam_batch_size,
progress_bar=True,
)
if candidate_cer < best_cer:
best_cer_beam_size = hp["beam_width"]
best_cer_alpha = hp["beam_alpha"]
best_cer_ma = hp["maes_prefix_alpha"]
best_cer_mg = hp["maes_expansion_gamma"]
best_cer_hat_ilm_weight = hp["hat_ilm_weight"]
best_cer = candidate_cer
if candidate_wer < best_wer:
best_wer_beam_size = hp["beam_width"]
best_wer_alpha = hp["beam_alpha"]
best_wer_ma = hp["maes_prefix_alpha"]
best_wer_ga = hp["maes_expansion_gamma"]
best_wer_hat_ilm_weight = hp["hat_ilm_weight"]
best_wer = candidate_wer
wer_hat_parameter = ""
if cfg.hat_subtract_ilm:
wer_hat_parameter = f"HAT ilm weight = {best_wer_hat_ilm_weight}, "
logging.info(
f'Best WER Candidate = {best_wer:.2%} :: Beam size = {best_wer_beam_size}, '
f'Beam alpha = {best_wer_alpha}, {wer_hat_parameter}'
f'maes_prefix_alpha = {best_wer_ma}, maes_expansion_gamma = {best_wer_ga} '
)
cer_hat_parameter = ""
if cfg.hat_subtract_ilm:
cer_hat_parameter = f"HAT ilm weight = {best_cer_hat_ilm_weight}"
logging.info(
f'Best CER Candidate = {best_cer:.2%} :: Beam size = {best_cer_beam_size}, '
f'Beam alpha = {best_cer_alpha}, {cer_hat_parameter} '
f'maes_prefix_alpha = {best_cer_ma}, maes_expansion_gamma = {best_cer_mg}'
)
logging.info(f"=================================================================================")
if __name__ == '__main__':
main()
| NeMo-main | scripts/asr_language_modeling/ngram_lm/eval_beamsearch_ngram_transducer.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script interpolates two ARPA N-gram language models (LMs),
calculates the perplexity of the resulting LM, and builds a binary KenLM model from it.
Minimum usage example to interpolate two N-gram language models with weights:
alpha * ngram_a + beta * ngram_b = 2 * ngram_a + 1 * ngram_b
python3 ngram_merge.py --kenlm_bin_path /workspace/nemo/decoders/kenlm/build/bin \
--arpa_a /path/ngram_a.kenlm.tmp.arpa \
--alpha 2 \
--arpa_b /path/ngram_b.kenlm.tmp.arpa \
--beta 1 \
--out_path /path/out
Merge two N-gram language models and calculate the perplexity of the resulting LM on test_file.
python3 ngram_merge.py --kenlm_bin_path /workspace/nemo/decoders/kenlm/build/bin \
--ngram_bin_path /workspace/nemo/decoders/ngram-1.3.14/src/bin \
--arpa_a /path/ngram_a.kenlm.tmp.arpa \
--alpha 0.5 \
--arpa_b /path/ngram_b.kenlm.tmp.arpa \
--beta 0.5 \
--out_path /path/out \
--nemo_model_file /path/to/model_tokenizer.nemo \
--test_file /path/to/test_manifest.json \
--force
"""
import argparse
import os
import subprocess
import sys
from typing import Tuple
import kenlm_utils
import torch
import nemo.collections.asr as nemo_asr
from nemo.collections.asr.modules.rnnt import RNNTDecoder
from nemo.collections.asr.parts.submodules.ctc_beam_decoding import DEFAULT_TOKEN_OFFSET
from nemo.utils import logging
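# A minimal sketch (hypothetical helper, not part of the original script) of what the interpolation
# weights described in the module docstring mean conceptually: with weights alpha and beta, the merged
# probability of a word given its history is the normalized weighted sum of the two models'
# probabilities. The real merge is performed by OpenGrm's ngrammerge via the class below.
def _interpolation_sketch(p_a: float, p_b: float, alpha: float = 2.0, beta: float = 1.0) -> float:
    """Return the interpolated probability (alpha * p_a + beta * p_b) / (alpha + beta)."""
    return (alpha * p_a + beta * p_b) / (alpha + beta)
# Example: _interpolation_sketch(0.10, 0.04) == (2 * 0.10 + 1 * 0.04) / 3 ≈ 0.08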
class NgramMerge:
def __init__(self, ngram_bin_path):
self.ngram_bin_path = ngram_bin_path
def ngrammerge(self, arpa_a: str, alpha: float, arpa_b: str, beta: float, arpa_c: str, force: bool) -> str:
"""
        Merge two ARPA n-gram language models using the ngrammerge command-line tool; the merged model is written in binary (.mod) format.
Args:
arpa_a (str): Path to the first input ARPA file.
alpha (float): Interpolation weight for the first model.
arpa_b (str): Path to the second input ARPA file.
beta (float): Interpolation weight for the second model.
arpa_c (str): Path to the output ARPA file.
force (bool): Whether to overwrite existing output files.
Returns:
            str: Path to the merged model file in binary .mod format.
"""
mod_a = arpa_a + ".mod"
mod_b = arpa_b + ".mod"
mod_c = arpa_c + ".mod"
if os.path.isfile(mod_c) and not force:
logging.info("File " + mod_c + " exists. Skipping.")
else:
sh_args = [
os.path.join(self.ngram_bin_path, "ngrammerge"),
"--alpha=" + str(alpha),
"--beta=" + str(beta),
"--normalize",
# "--use_smoothing",
mod_a,
mod_b,
mod_c,
]
logging.info(
"\n"
+ str(subprocess.run(sh_args, capture_output=False, text=True, stdout=sys.stdout, stderr=sys.stderr,))
+ "\n",
)
return mod_c
def arpa2mod(self, arpa_path: str, force: bool):
"""
        This function reads an ARPA n-gram model and converts it to a binary format. The binary model is
        saved to the same directory as the ARPA model with a ".mod" extension. If the binary model file
        already exists and the force argument is False, the function skips conversion and returns a
        message. Otherwise, it executes the command to create a binary model using subprocess.run.
Parameters:
arpa_path (string): The file path to the ARPA n-gram model.
force (bool): If True, the function will convert the ARPA model to binary even if the binary file already exists. If False and the binary file exists, the function will skip the conversion.
Returns:
If the binary model file already exists and force argument is False, returns a message indicating that the file exists and the conversion is skipped.
Otherwise, returns a subprocess.CompletedProcess object, which contains information about the executed command. The subprocess's output and error streams are redirected to stdout and stderr, respectively.
"""
mod_path = arpa_path + ".mod"
if os.path.isfile(mod_path) and not force:
return "File " + mod_path + " exists. Skipping."
else:
sh_args = [
os.path.join(self.ngram_bin_path, "ngramread"),
"--ARPA",
arpa_path,
mod_path,
]
return subprocess.run(sh_args, capture_output=False, text=True, stdout=sys.stdout, stderr=sys.stderr,)
def merge(
self, arpa_a: str, alpha: float, arpa_b: str, beta: float, out_path: str, force: bool
) -> Tuple[str, str]:
"""
Merges two ARPA language models using the ngrammerge tool.
Args:
arpa_a (str): Path to the first ARPA language model file.
alpha (float): Interpolation weight for the first model.
arpa_b (str): Path to the second ARPA language model file.
beta (float): Interpolation weight for the second model.
out_path (str): Path to the output directory for the merged ARPA model.
force (bool): Whether to force overwrite of existing files.
Returns:
Tuple[str, str]: A tuple containing the path to the merged binary language model file and the path to the
merged ARPA language model file.
"""
logging.info("\n" + str(self.arpa2mod(arpa_a, force)) + "\n")
logging.info("\n" + str(self.arpa2mod(arpa_b, force)) + "\n")
arpa_c = os.path.join(out_path, f"{os.path.split(arpa_a)[1]}-{alpha}-{os.path.split(arpa_b)[1]}-{beta}.arpa",)
mod_c = self.ngrammerge(arpa_a, alpha, arpa_b, beta, arpa_c, force)
return mod_c, arpa_c
def perplexity(self, ngram_mod: str, test_far: str) -> str:
"""
Calculates perplexity of a given ngram model on a test file.
Args:
ngram_mod (str): The path to the ngram model file.
test_far (str): The path to the test file.
Returns:
str: A string representation of the perplexity calculated.
Raises:
AssertionError: If the subprocess to calculate perplexity returns a non-zero exit code.
Example:
>>> perplexity("/path/to/ngram_model", "/path/to/test_file")
'Perplexity: 123.45'
"""
sh_args = [
os.path.join(self.ngram_bin_path, "ngramperplexity"),
"--v=1",
ngram_mod,
test_far,
]
ps = subprocess.Popen(sh_args, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = ps.communicate()
exit_code = ps.wait()
command = " ".join(sh_args)
assert (
exit_code == 0
), f"Exit_code must be 0.\n bash command: {command} \n stdout: {stdout} \n stderr: {stderr}"
perplexity_out = "\n".join(stdout.split("\n")[-6:-1])
return perplexity_out
def make_arpa(self, ngram_mod: str, ngram_arpa: str, force: bool):
"""
Converts an ngram model in binary format to ARPA format.
Args:
- ngram_mod (str): The path to the ngram model in binary format.
- ngram_arpa (str): The desired path for the ARPA format output file.
- force (bool): If True, the ARPA format file will be generated even if it already exists.
Returns:
- Tuple[bytes, bytes]
Raises:
- AssertionError: If the shell command execution returns a non-zero exit code.
- FileNotFoundError: If the binary ngram model file does not exist.
"""
if os.path.isfile(ngram_arpa) and not force:
logging.info("File " + ngram_arpa + " exists. Skipping.")
return None
else:
sh_args = [
os.path.join(self.ngram_bin_path, "ngramprint"),
"--ARPA",
ngram_mod,
ngram_arpa,
]
return subprocess.run(sh_args, capture_output=False, text=True, stdout=sys.stdout, stderr=sys.stderr,)
def test_perplexity(self, mod_c: str, symbols: str, test_txt: str, nemo_model_file: str, tmp_path: str) -> str:
"""
Tests the perplexity of a given ngram model on a test file.
Args:
mod_c (str): The path to the ngram model file.
symbols (str): The path to the symbol table file.
test_txt (str): The path to the test text file.
nemo_model_file (str): The path to the NeMo model file.
tmp_path (str): The path to the temporary directory where the test far file will be created.
Returns:
str: A string representation of the perplexity calculated.
Example:
        >>> test_perplexity("/path/to/ngram_model", "/path/to/symbol_table", "/path/to/test_file", "/path/to/tokenizer_model", "/path/to/tmp_dir")
'Perplexity: 123.45'
"""
test_far = farcompile(symbols, test_txt, tmp_path, nemo_model_file)
res_p = self.perplexity(mod_c, test_far)
return res_p
def farcompile(symbols: str, text_file: str, tmp_path: str, nemo_model_file: str) -> str:
"""
Compiles a text file into a FAR file using the given symbol table or tokenizer.
Args:
symbols (str): The path to the symbol table file.
text_file (str): The path to the text file to compile.
tmp_path (str): The path to the temporary directory where the test far file will be created.
nemo_model_file (str): The path to the NeMo model file (.nemo).
Returns:
test_far (str): The path to the resulting FAR file.
Example:
    >>> farcompile("/path/to/symbol_table", "/path/to/text_file", "/path/to/tmp_dir", "/path/to/nemo_model")
"""
test_far = os.path.join(tmp_path, os.path.split(text_file)[1] + ".far")
sh_args = [
"farcompilestrings",
"--generate_keys=10",
"--fst_type=compact",
"--symbols=" + symbols,
"--keep_symbols",
">",
test_far,
]
tokenizer, encoding_level, is_aggregate_tokenizer = kenlm_utils.setup_tokenizer(nemo_model_file)
ps = subprocess.Popen(" ".join(sh_args), shell=True, stdin=subprocess.PIPE, stdout=sys.stdout, stderr=sys.stderr,)
kenlm_utils.iter_files(
source_path=[text_file],
dest_path=ps.stdin,
tokenizer=tokenizer,
encoding_level=encoding_level,
is_aggregate_tokenizer=is_aggregate_tokenizer,
verbose=1,
)
stdout, stderr = ps.communicate()
exit_code = ps.returncode
command = " ".join(sh_args)
assert exit_code == 0, f"Exit_code must be 0.\n bash command: {command} \n stdout: {stdout} \n stderr: {stderr}"
return test_far
def make_kenlm(kenlm_bin_path: str, ngram_arpa: str, force: bool):
"""
Builds a language model from an ARPA format file using the KenLM toolkit.
Args:
- kenlm_bin_path (str): The path to the KenLM toolkit binary.
- ngram_arpa (str): The path to the ARPA format file.
- force (bool): If True, the KenLM language model will be generated even if it already exists.
Raises:
- AssertionError: If the shell command execution returns a non-zero exit code.
- FileNotFoundError: If the KenLM binary or ARPA format file does not exist.
"""
ngram_kenlm = ngram_arpa + ".kenlm"
if os.path.isfile(ngram_kenlm) and not force:
logging.info("File " + ngram_kenlm + " exists. Skipping.")
return None
else:
sh_args = [os.path.join(kenlm_bin_path, "build_binary"), "trie", "-i", ngram_arpa, ngram_kenlm]
return subprocess.run(sh_args, capture_output=False, text=True, stdout=sys.stdout, stderr=sys.stderr,)
def make_symbol_list(nemo_model_file, symbols, force):
"""
Function: make_symbol_list
Create a symbol table for the input tokenizer model file.
Args:
nemo_model_file (str): Path to the NeMo model file.
symbols (str): Path to the file where symbol list will be saved.
force (bool): Flag to force creation of symbol list even if it already exists.
Returns:
None
Raises:
None
"""
if os.path.isfile(symbols) and not force:
logging.info("File " + symbols + " exists. Skipping.")
else:
if nemo_model_file.endswith('.nemo'):
asr_model = nemo_asr.models.ASRModel.restore_from(nemo_model_file, map_location=torch.device('cpu'))
else:
logging.warning(
"nemo_model_file does not end with .nemo, therefore trying to load a pretrained model with this name."
)
asr_model = nemo_asr.models.ASRModel.from_pretrained(nemo_model_file, map_location=torch.device('cpu'))
if isinstance(asr_model.decoder, RNNTDecoder):
vocab_size = asr_model.decoder.blank_idx
else:
vocab_size = len(asr_model.decoder.vocabulary)
vocab = [chr(idx + DEFAULT_TOKEN_OFFSET) for idx in range(vocab_size)]
with open(symbols, "w", encoding="utf-8") as f:
for i, v in enumerate(vocab):
f.write(v + " " + str(i) + "\n")
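# A minimal sketch (hypothetical helper, not part of the original script) of the symbol-table layout
# produced by make_symbol_list above: row i holds the offset Unicode character for token id i followed
# by the id itself, matching the encoding used when the n-gram LM was trained.
def _symbol_rows_sketch(vocab_size: int):
    return [f"{chr(i + DEFAULT_TOKEN_OFFSET)} {i}" for i in range(vocab_size)]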
def main(
kenlm_bin_path: str,
ngram_bin_path: str,
arpa_a: str,
alpha: float,
arpa_b: str,
beta: float,
out_path: str,
test_file: str,
symbols: str,
nemo_model_file: str,
force: bool,
) -> None:
"""
Entry point function for merging ARPA format language models, testing perplexity, creating symbol list,
and making ARPA and Kenlm models.
Args:
- kenlm_bin_path (str): The path to the Kenlm binary.
- arpa_a (str): The path to the first ARPA format language model.
- alpha (float): The weight given to the first language model during merging.
- arpa_b (str): The path to the second ARPA format language model.
- beta (float): The weight given to the second language model during merging.
- out_path (str): The path where the output files will be saved.
- test_file (str): The path to the file on which perplexity needs to be calculated.
- symbols (str): The path to the file where symbol list for the tokenizer model will be saved.
- nemo_model_file (str): The path to the NeMo model file.
- force (bool): If True, overwrite existing files, otherwise skip the operations.
Returns:
- None
"""
nm = NgramMerge(ngram_bin_path)
mod_c, arpa_c = nm.merge(arpa_a, alpha, arpa_b, beta, out_path, force)
if test_file and nemo_model_file:
if not symbols:
symbols = os.path.join(out_path, os.path.split(nemo_model_file)[1] + ".syms")
make_symbol_list(nemo_model_file, symbols, force)
for test_f in test_file.split(","):
test_p = nm.test_perplexity(mod_c, symbols, test_f, nemo_model_file, out_path)
logging.info("Perplexity summary " + test_f + " : " + test_p)
logging.info("Making ARPA and Kenlm model " + arpa_c)
out = nm.make_arpa(mod_c, arpa_c, force)
if out:
logging.info("\n" + str(out) + "\n")
out = make_kenlm(kenlm_bin_path, arpa_c, force)
if out:
logging.info("\n" + str(out) + "\n")
def _parse_args():
parser = argparse.ArgumentParser(
description="Interpolate ARPA N-gram language models and make KenLM binary model to be used with beam search decoder of ASR models."
)
parser.add_argument(
"--kenlm_bin_path", required=True, type=str, help="The path to the bin folder of KenLM library.",
) # Use /workspace/nemo/decoders/kenlm/build/bin if installed it with scripts/asr_language_modeling/ngram_lm/install_beamsearch_decoders.sh
parser.add_argument(
"--ngram_bin_path", required=True, type=str, help="The path to the bin folder of OpenGrm Ngram library.",
) # Use /workspace/nemo/decoders/ngram-1.3.14/src/bin if installed it with scripts/installers/install_opengrm.sh
parser.add_argument("--arpa_a", required=True, type=str, help="Path to the arpa_a")
parser.add_argument("--alpha", required=True, type=float, help="Weight of arpa_a")
parser.add_argument("--arpa_b", required=True, type=str, help="Path to the arpa_b")
parser.add_argument("--beta", required=True, type=float, help="Weight of arpa_b")
parser.add_argument(
"--out_path", required=True, type=str, help="Path to write tmp and resulted files.",
)
parser.add_argument(
"--test_file",
required=False,
type=str,
default=None,
help="Path to test file to count perplexity if provided.",
)
parser.add_argument(
"--symbols",
required=False,
type=str,
default=None,
        help="Path to the symbols (.syms) file. It will be generated if not provided. Use as: --symbols /path/to/earnest.syms",
)
parser.add_argument(
"--nemo_model_file",
required=False,
type=str,
default=None,
help="The path to '.nemo' file of the ASR model, or name of a pretrained NeMo model",
)
parser.add_argument("--force", "-f", action="store_true", help="Whether to recompile and rewrite all files")
return parser.parse_args()
if __name__ == "__main__":
main(**vars(_parse_args()))
| NeMo-main | scripts/asr_language_modeling/ngram_lm/ngram_merge.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility methods to be used for training N-gram LM with KenLM in train_kenlm.py
The BPE sub-words are encoded using the Unicode table.
This encoding scheme reduces the required memory significantly, and the LM and its binary blob format require less storage space.
The value DEFAULT_TOKEN_OFFSET from nemo.collections.asr.parts.submodules.ctc_beam_decoding is utilized as the offset value.
"""
CHUNK_SIZE = 8192
CHUNK_BUFFER_SIZE = 512
import gzip
import json
import os
import numpy as np
import torch
from joblib import Parallel, delayed
from tqdm.auto import tqdm
import nemo.collections.asr as nemo_asr
from nemo.collections.asr.parts.submodules.ctc_beam_decoding import DEFAULT_TOKEN_OFFSET
from nemo.utils import logging
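# A minimal sketch (hypothetical helper, not part of the original utilities) of the Unicode encoding
# described in the module docstring: each BPE token id is shifted by DEFAULT_TOKEN_OFFSET and written
# as a single Unicode character, so KenLM sees short "words" and the ARPA file stays compact.
def _encode_token_ids_sketch(token_ids):
    encoded = [chr(tok + DEFAULT_TOKEN_OFFSET) for tok in token_ids]
    decoded = [ord(ch) - DEFAULT_TOKEN_OFFSET for ch in encoded]
    assert decoded == list(token_ids)  # the mapping is lossless
    return encoded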
# List of the supported models to be used with N-gram LM and beam search decoding
SUPPORTED_MODELS = {
'EncDecCTCModelBPE': 'subword',
'EncDecCTCModel': 'char',
'EncDecRNNTBPEModel': 'subword',
'EncDecRNNTModel': 'char',
'EncDecHybridRNNTCTCBPEModel': 'subword',
'EncDecHybridRNNTCTCModel': 'char',
}
def softmax(x):
e = np.exp(x - np.max(x))
return e / e.sum(axis=-1).reshape([x.shape[0], 1])
def get_train_list(args_train_path):
train_path = []
for train_item in args_train_path:
if os.path.isdir(train_item):
file_list = os.listdir(train_item)
train_path.extend([os.path.join(train_item, file) for file in file_list])
elif os.path.isfile(train_item):
train_path.append(train_item)
return sorted(train_path)
def setup_tokenizer(nemo_model_file):
""" TOKENIZER SETUP
nemo_model_file (str): The path to the NeMo model file (.nemo).
"""
logging.info(f"Loading nemo model '{nemo_model_file}' ...")
if nemo_model_file.endswith('.nemo'):
model = nemo_asr.models.ASRModel.restore_from(nemo_model_file, map_location=torch.device('cpu'))
else:
        logging.warning(
            "nemo_model_file does not end with .nemo, therefore trying to load a pretrained model with this name."
        )
model = nemo_asr.models.ASRModel.from_pretrained(nemo_model_file, map_location=torch.device('cpu'))
is_aggregate_tokenizer = False
tokenizer_nemo = None
encoding_level = SUPPORTED_MODELS.get(type(model).__name__, None)
if not encoding_level:
        logging.warning(
            f"Model type '{type(model).__name__}' may not be supported. Falling back to a char-level LM."
        )
encoding_level = 'char'
if encoding_level == 'subword':
if type(model.tokenizer).__name__ == 'AggregateTokenizer':
is_aggregate_tokenizer = True
tokenizer_nemo = model.tokenizer
del model
return tokenizer_nemo, encoding_level, is_aggregate_tokenizer
def iter_files(source_path, dest_path, tokenizer, encoding_level, is_aggregate_tokenizer, verbose):
if isinstance(dest_path, list):
paths = zip(dest_path, source_path)
else: # dest_path is stdin of KenLM
paths = [(dest_path, path) for path in source_path]
for dest_path, input_path in paths:
dataset = read_train_file(input_path, is_aggregate_tokenizer=is_aggregate_tokenizer, verbose=verbose)
if encoding_level == "subword":
tokenize_text(
data=dataset,
tokenizer=tokenizer,
path=dest_path,
chunk_size=CHUNK_SIZE,
buffer_size=CHUNK_BUFFER_SIZE,
)
else: # encoding_level == "char"
if isinstance(dest_path, str):
with open(dest_path, 'w', encoding='utf-8') as f:
for line in dataset:
f.write(line[0] + "\n")
else: # write to stdin of KenLM
for line in dataset:
dest_path.write((line[0] + '\n').encode())
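# read_train_file below accepts three input formats: gzipped Common Crawl files (*.json.gz) with a
# "text" field per line, NeMo-style manifests (*.json) with a "text" field and, for aggregate
# tokenizers, a "lang" field, and plain text files with one sentence per line. A hypothetical
# manifest line, for illustration only:
# {"audio_filepath": "/data/utt001.wav", "text": "hello world", "lang": "en"}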
def read_train_file(
path, is_aggregate_tokenizer: bool = False, verbose: int = 0,
):
lines_read = 0
text_dataset, lang_dataset = [], []
if path[-8:] == '.json.gz': # for Common Crawl dataset
fin = gzip.open(path, 'r')
else:
fin = open(path, 'r', encoding='utf-8')
if verbose > 0:
reader = tqdm(iter(lambda: fin.readline(), ''), desc="Read 0 lines", unit=' lines')
else:
reader = fin
for line in reader:
lang = None
if line:
if path[-8:] == '.json.gz': # for Common Crawl dataset
line = json.loads(line.decode('utf-8'))['text']
elif path.endswith('.json'):
jline = json.loads(line)
line = jline['text']
if is_aggregate_tokenizer:
lang = jline['lang']
line_list = line.split("\n")
line = " ".join(line_list)
if line:
text_dataset.append(line)
if lang:
lang_dataset.append(lang)
lines_read += 1
if verbose > 0 and lines_read % 100000 == 0:
reader.set_description(f"Read {lines_read} lines")
else:
break
fin.close()
if is_aggregate_tokenizer:
assert len(text_dataset) == len(
lang_dataset
), f"text_dataset length {len(text_dataset)} and lang_dataset length {len(lang_dataset)} must be the same!"
return list(zip(text_dataset, lang_dataset))
else:
return [[text] for text in text_dataset]
def tokenize_str(texts, tokenizer):
tokenized_text = []
for text in texts:
tok_text = tokenizer.text_to_ids(*text)
tok_text = [chr(token + DEFAULT_TOKEN_OFFSET) for token in tok_text]
tokenized_text.append(tok_text)
return tokenized_text
def tokenize_text(data, tokenizer, path, chunk_size=8192, buffer_size=32):
dataset_len = len(data)
current_step = 0
if isinstance(path, str) and os.path.exists(path):
os.remove(path)
with Parallel(n_jobs=-2, verbose=0) as parallel:
while True:
start = current_step * chunk_size
end = min((current_step + buffer_size) * chunk_size, dataset_len)
tokenized_data = parallel(
delayed(tokenize_str)(data[start : start + chunk_size], tokenizer)
for start in range(start, end, chunk_size)
)
# Write dataset
write_dataset(tokenized_data, path)
current_step += len(tokenized_data)
logging.info(
f"Finished writing {len(tokenized_data)} chunks to {path}. Current chunk index = {current_step}"
)
del tokenized_data
if end >= dataset_len:
break
def write_dataset(chunks, path):
if isinstance(path, str):
with open(path, 'a+', encoding='utf-8') as f:
for chunk_idx in tqdm(range(len(chunks)), desc='Chunk ', total=len(chunks), unit=' chunks'):
for text in chunks[chunk_idx]:
line = ' '.join(text)
f.write(f"{line}\n")
else: # write to stdin of KenLM
for chunk_idx in range(len(chunks)):
for text in chunks[chunk_idx]:
line = ' '.join(text)
path.write((line + '\n').encode())
| NeMo-main | scripts/asr_language_modeling/ngram_lm/kenlm_utils.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
# This script would evaluate an N-gram language model trained with KenLM library (https://github.com/kpu/kenlm) in
# fusion with beam search decoders on top of a trained ASR model with CTC decoder. To evaluate a model with
# Transducer (RNN-T) decoder use another script 'scripts/asr_language_modeling/ngram_lm/eval_beamsearch_ngram_transducer.py'.
# NeMo's beam search decoders are capable of using the KenLM's N-gram models
# to find the best candidates. This script supports both character level and BPE level
# encodings and models which is detected automatically from the type of the model.
# You may train the LM model with 'scripts/asr_language_modeling/ngram_lm/train_kenlm.py'.
# Config Help
To discover all arguments of the script, please run :
python eval_beamsearch_ngram.py --help
python eval_beamsearch_ngram.py --cfg job
# USAGE
python eval_beamsearch_ngram.py nemo_model_file=<path to the .nemo file of the model> \
input_manifest=<path to the evaluation JSON manifest file> \
kenlm_model_file=<path to the binary KenLM model> \
beam_width=[<list of the beam widths, separated with commas>] \
beam_alpha=[<list of the beam alphas, separated with commas>] \
beam_beta=[<list of the beam betas, separated with commas>] \
preds_output_folder=<optional folder to store the predictions> \
probs_cache_file=null \
decoding_mode=beamsearch_ngram
...
# Grid Search for Hyper parameters
For grid search, you can provide a list of arguments as follows -
beam_width=[4,8,16,....] \
beam_alpha=[-2.0,-1.0,...,1.0,2.0] \
beam_beta=[-1.0,-0.5,0.0,...,1.0] \
# You may find more info on how to use this script at:
# https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/asr_language_modeling.html
"""
import contextlib
import json
import os
import pickle
from dataclasses import dataclass, field, is_dataclass
from pathlib import Path
from typing import List, Optional
import editdistance
import numpy as np
import torch
from omegaconf import MISSING, OmegaConf
from sklearn.model_selection import ParameterGrid
from tqdm.auto import tqdm
import nemo.collections.asr as nemo_asr
from nemo.collections.asr.models import EncDecHybridRNNTCTCModel
from nemo.collections.asr.parts.submodules import ctc_beam_decoding
from nemo.collections.asr.parts.utils.transcribe_utils import PunctuationCapitalization, TextProcessingConfig
from nemo.core.config import hydra_runner
from nemo.utils import logging
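# A hypothetical sketch (not part of the original script) of how the comma-separated lists described
# in the module docstring expand into a hyper-parameter grid: every combination of beam_width,
# beam_alpha and beam_beta is evaluated once, mirroring what sklearn's ParameterGrid does in main().
def _grid_sketch(beam_width, beam_alpha, beam_beta):
    from itertools import product  # local import keeps the sketch self-contained
    return [
        {"beam_width": w, "beam_alpha": a, "beam_beta": b}
        for w, a, b in product(beam_width, beam_alpha, beam_beta)
    ]
# Example: len(_grid_sketch([4, 8], [1.0, 2.0], [0.0])) == 4 candidate configurations.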
# fmt: off
@dataclass
class EvalBeamSearchNGramConfig:
"""
Evaluate an ASR model with beam search decoding and n-gram KenLM language model.
"""
    # The path of the '.nemo' file of the ASR model or the name of a pretrained model (ngc / huggingface)
nemo_model_file: str = MISSING
# File paths
input_manifest: str = MISSING # The manifest file of the evaluation set
kenlm_model_file: Optional[str] = None # The path of the KenLM binary model file
preds_output_folder: Optional[str] = None # The optional folder where the predictions are stored
probs_cache_file: Optional[str] = None # The cache file for storing the logprobs of the model
# Parameters for inference
acoustic_batch_size: int = 16 # The batch size to calculate log probabilities
beam_batch_size: int = 128 # The batch size to be used for beam search decoding
device: str = "cuda" # The device to load the model onto to calculate log probabilities
use_amp: bool = False # Whether to use AMP if available to calculate log probabilities
# Beam Search hyperparameters
# The decoding scheme to be used for evaluation.
# Can be one of ["greedy", "beamsearch", "beamsearch_ngram"]
decoding_mode: str = "beamsearch_ngram"
beam_width: List[int] = field(default_factory=lambda: [128]) # The width or list of the widths for the beam search decoding
beam_alpha: List[float] = field(default_factory=lambda: [1.0]) # The alpha parameter or list of the alphas for the beam search decoding
beam_beta: List[float] = field(default_factory=lambda: [0.0]) # The beta parameter or list of the betas for the beam search decoding
decoding_strategy: str = "beam"
decoding: ctc_beam_decoding.BeamCTCInferConfig = ctc_beam_decoding.BeamCTCInferConfig(beam_size=128)
text_processing: Optional[TextProcessingConfig] = TextProcessingConfig(
punctuation_marks = ".,?",
separate_punctuation = False,
do_lowercase = False,
rm_punctuation = False,
)
# fmt: on
def beam_search_eval(
model: nemo_asr.models.ASRModel,
cfg: EvalBeamSearchNGramConfig,
all_probs: List[torch.Tensor],
target_transcripts: List[str],
preds_output_file: str = None,
lm_path: str = None,
beam_alpha: float = 1.0,
beam_beta: float = 0.0,
beam_width: int = 128,
beam_batch_size: int = 128,
progress_bar: bool = True,
punctuation_capitalization: PunctuationCapitalization = None,
):
level = logging.getEffectiveLevel()
logging.setLevel(logging.CRITICAL)
# Reset config
model.change_decoding_strategy(None)
# Override the beam search config with current search candidate configuration
cfg.decoding.beam_size = beam_width
cfg.decoding.beam_alpha = beam_alpha
cfg.decoding.beam_beta = beam_beta
cfg.decoding.return_best_hypothesis = False
cfg.decoding.kenlm_path = cfg.kenlm_model_file
# Update model's decoding strategy config
model.cfg.decoding.strategy = cfg.decoding_strategy
model.cfg.decoding.beam = cfg.decoding
# Update model's decoding strategy
if isinstance(model, EncDecHybridRNNTCTCModel):
model.change_decoding_strategy(model.cfg.decoding, decoder_type='ctc')
decoding = model.ctc_decoding
else:
model.change_decoding_strategy(model.cfg.decoding)
decoding = model.decoding
logging.setLevel(level)
wer_dist_first = cer_dist_first = 0
wer_dist_best = cer_dist_best = 0
words_count = 0
chars_count = 0
sample_idx = 0
if preds_output_file:
out_file = open(preds_output_file, 'w', encoding='utf_8', newline='\n')
if progress_bar:
it = tqdm(
range(int(np.ceil(len(all_probs) / beam_batch_size))),
desc=f"Beam search decoding with width={beam_width}, alpha={beam_alpha}, beta={beam_beta}",
ncols=120,
)
else:
it = range(int(np.ceil(len(all_probs) / beam_batch_size)))
for batch_idx in it:
# disabling type checking
probs_batch = all_probs[batch_idx * beam_batch_size : (batch_idx + 1) * beam_batch_size]
probs_lens = torch.tensor([prob.shape[0] for prob in probs_batch])
with torch.no_grad():
packed_batch = torch.zeros(len(probs_batch), max(probs_lens), probs_batch[0].shape[-1], device='cpu')
for prob_index in range(len(probs_batch)):
packed_batch[prob_index, : probs_lens[prob_index], :] = torch.tensor(
probs_batch[prob_index], device=packed_batch.device, dtype=packed_batch.dtype
)
_, beams_batch = decoding.ctc_decoder_predictions_tensor(
packed_batch, decoder_lengths=probs_lens, return_hypotheses=True,
)
for beams_idx, beams in enumerate(beams_batch):
target = target_transcripts[sample_idx + beams_idx]
target_split_w = target.split()
target_split_c = list(target)
words_count += len(target_split_w)
chars_count += len(target_split_c)
wer_dist_min = cer_dist_min = 10000
for candidate_idx, candidate in enumerate(beams): # type: (int, ctc_beam_decoding.rnnt_utils.Hypothesis)
pred_text = candidate.text
if cfg.text_processing.do_lowercase:
pred_text = punctuation_capitalization.do_lowercase([pred_text])[0]
if cfg.text_processing.rm_punctuation:
pred_text = punctuation_capitalization.rm_punctuation([pred_text])[0]
if cfg.text_processing.separate_punctuation:
pred_text = punctuation_capitalization.separate_punctuation([pred_text])[0]
pred_split_w = pred_text.split()
wer_dist = editdistance.eval(target_split_w, pred_split_w)
pred_split_c = list(pred_text)
cer_dist = editdistance.eval(target_split_c, pred_split_c)
wer_dist_min = min(wer_dist_min, wer_dist)
cer_dist_min = min(cer_dist_min, cer_dist)
if candidate_idx == 0:
# first candidate
wer_dist_first += wer_dist
cer_dist_first += cer_dist
score = candidate.score
if preds_output_file:
out_file.write('{}\t{}\n'.format(pred_text, score))
wer_dist_best += wer_dist_min
cer_dist_best += cer_dist_min
sample_idx += len(probs_batch)
if preds_output_file:
out_file.close()
logging.info(f"Stored the predictions of beam search decoding at '{preds_output_file}'.")
if lm_path:
logging.info(
'WER/CER with beam search decoding and N-gram model = {:.2%}/{:.2%}'.format(
wer_dist_first / words_count, cer_dist_first / chars_count
)
)
else:
logging.info(
'WER/CER with beam search decoding = {:.2%}/{:.2%}'.format(
wer_dist_first / words_count, cer_dist_first / chars_count
)
)
logging.info(
'Oracle WER/CER in candidates with perfect LM= {:.2%}/{:.2%}'.format(
wer_dist_best / words_count, cer_dist_best / chars_count
)
)
logging.info(f"=================================================================================")
return wer_dist_first / words_count, cer_dist_first / chars_count
@hydra_runner(config_path=None, config_name='EvalBeamSearchNGramConfig', schema=EvalBeamSearchNGramConfig)
def main(cfg: EvalBeamSearchNGramConfig):
logging.warning("This file will be renamed to eval_beamsearch_ngram_ctc.py in the future NeMo (1.21) release.")
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg) # type: EvalBeamSearchNGramConfig
valid_decoding_modes = ["greedy", "beamsearch", "beamsearch_ngram"]
if cfg.decoding_mode not in valid_decoding_modes:
raise ValueError(
f"Given decoding_mode={cfg.decoding_mode} is invalid. Available options are :\n" f"{valid_decoding_modes}"
)
if cfg.nemo_model_file.endswith('.nemo'):
asr_model = nemo_asr.models.ASRModel.restore_from(cfg.nemo_model_file, map_location=torch.device(cfg.device))
else:
logging.warning(
"nemo_model_file does not end with .nemo, therefore trying to load a pretrained model with this name."
)
asr_model = nemo_asr.models.ASRModel.from_pretrained(
cfg.nemo_model_file, map_location=torch.device(cfg.device)
)
target_transcripts = []
manifest_dir = Path(cfg.input_manifest).parent
with open(cfg.input_manifest, 'r', encoding='utf_8') as manifest_file:
audio_file_paths = []
for line in tqdm(manifest_file, desc=f"Reading Manifest {cfg.input_manifest} ...", ncols=120):
data = json.loads(line)
audio_file = Path(data['audio_filepath'])
if not audio_file.is_file() and not audio_file.is_absolute():
audio_file = manifest_dir / audio_file
target_transcripts.append(data['text'])
audio_file_paths.append(str(audio_file.absolute()))
punctuation_capitalization = PunctuationCapitalization(cfg.text_processing.punctuation_marks)
if cfg.text_processing.do_lowercase:
target_transcripts = punctuation_capitalization.do_lowercase(target_transcripts)
if cfg.text_processing.rm_punctuation:
target_transcripts = punctuation_capitalization.rm_punctuation(target_transcripts)
if cfg.text_processing.separate_punctuation:
target_transcripts = punctuation_capitalization.separate_punctuation(target_transcripts)
if cfg.probs_cache_file and os.path.exists(cfg.probs_cache_file):
logging.info(f"Found a pickle file of probabilities at '{cfg.probs_cache_file}'.")
logging.info(f"Loading the cached pickle file of probabilities from '{cfg.probs_cache_file}' ...")
with open(cfg.probs_cache_file, 'rb') as probs_file:
all_probs = pickle.load(probs_file)
if len(all_probs) != len(audio_file_paths):
raise ValueError(
f"The number of samples in the probabilities file '{cfg.probs_cache_file}' does not "
f"match the manifest file. You may need to delete the probabilities cached file."
)
else:
@contextlib.contextmanager
def default_autocast():
yield
if cfg.use_amp:
if torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
logging.info("AMP is enabled!\n")
autocast = torch.cuda.amp.autocast
else:
autocast = default_autocast
else:
autocast = default_autocast
with autocast():
with torch.no_grad():
if isinstance(asr_model, EncDecHybridRNNTCTCModel):
asr_model.cur_decoder = 'ctc'
all_logits = asr_model.transcribe(audio_file_paths, batch_size=cfg.acoustic_batch_size, logprobs=True)
all_probs = all_logits
if cfg.probs_cache_file:
logging.info(f"Writing pickle files of probabilities at '{cfg.probs_cache_file}'...")
with open(cfg.probs_cache_file, 'wb') as f_dump:
pickle.dump(all_probs, f_dump)
wer_dist_greedy = 0
cer_dist_greedy = 0
words_count = 0
chars_count = 0
for batch_idx, probs in enumerate(all_probs):
preds = np.argmax(probs, axis=1)
preds_tensor = torch.tensor(preds, device='cpu').unsqueeze(0)
if isinstance(asr_model, EncDecHybridRNNTCTCModel):
pred_text = asr_model.ctc_decoding.ctc_decoder_predictions_tensor(preds_tensor)[0][0]
else:
pred_text = asr_model._wer.decoding.ctc_decoder_predictions_tensor(preds_tensor)[0][0]
if cfg.text_processing.do_lowercase:
pred_text = punctuation_capitalization.do_lowercase([pred_text])[0]
if cfg.text_processing.rm_punctuation:
pred_text = punctuation_capitalization.rm_punctuation([pred_text])[0]
if cfg.text_processing.separate_punctuation:
pred_text = punctuation_capitalization.separate_punctuation([pred_text])[0]
pred_split_w = pred_text.split()
target_split_w = target_transcripts[batch_idx].split()
pred_split_c = list(pred_text)
target_split_c = list(target_transcripts[batch_idx])
wer_dist = editdistance.eval(target_split_w, pred_split_w)
cer_dist = editdistance.eval(target_split_c, pred_split_c)
wer_dist_greedy += wer_dist
cer_dist_greedy += cer_dist
words_count += len(target_split_w)
chars_count += len(target_split_c)
logging.info('Greedy WER/CER = {:.2%}/{:.2%}'.format(wer_dist_greedy / words_count, cer_dist_greedy / chars_count))
asr_model = asr_model.to('cpu')
if cfg.decoding_mode == "beamsearch_ngram":
if not os.path.exists(cfg.kenlm_model_file):
raise FileNotFoundError(f"Could not find the KenLM model file '{cfg.kenlm_model_file}'.")
lm_path = cfg.kenlm_model_file
else:
lm_path = None
# 'greedy' decoding_mode would skip the beam search decoding
if cfg.decoding_mode in ["beamsearch_ngram", "beamsearch"]:
if cfg.beam_width is None or cfg.beam_alpha is None or cfg.beam_beta is None:
raise ValueError("beam_width, beam_alpha and beam_beta are needed to perform beam search decoding.")
params = {'beam_width': cfg.beam_width, 'beam_alpha': cfg.beam_alpha, 'beam_beta': cfg.beam_beta}
hp_grid = ParameterGrid(params)
hp_grid = list(hp_grid)
best_wer_beam_size, best_cer_beam_size = None, None
best_wer_alpha, best_cer_alpha = None, None
best_wer_beta, best_cer_beta = None, None
best_wer, best_cer = 1e6, 1e6
logging.info(f"==============================Starting the beam search decoding===============================")
logging.info(f"Grid search size: {len(hp_grid)}")
logging.info(f"It may take some time...")
logging.info(f"==============================================================================================")
if cfg.preds_output_folder and not os.path.exists(cfg.preds_output_folder):
os.mkdir(cfg.preds_output_folder)
for hp in hp_grid:
if cfg.preds_output_folder:
preds_output_file = os.path.join(
cfg.preds_output_folder,
f"preds_out_width{hp['beam_width']}_alpha{hp['beam_alpha']}_beta{hp['beam_beta']}.tsv",
)
else:
preds_output_file = None
candidate_wer, candidate_cer = beam_search_eval(
asr_model,
cfg,
all_probs=all_probs,
target_transcripts=target_transcripts,
preds_output_file=preds_output_file,
lm_path=lm_path,
beam_width=hp["beam_width"],
beam_alpha=hp["beam_alpha"],
beam_beta=hp["beam_beta"],
beam_batch_size=cfg.beam_batch_size,
progress_bar=True,
punctuation_capitalization=punctuation_capitalization,
)
if candidate_cer < best_cer:
best_cer_beam_size = hp["beam_width"]
best_cer_alpha = hp["beam_alpha"]
best_cer_beta = hp["beam_beta"]
best_cer = candidate_cer
if candidate_wer < best_wer:
best_wer_beam_size = hp["beam_width"]
best_wer_alpha = hp["beam_alpha"]
best_wer_beta = hp["beam_beta"]
best_wer = candidate_wer
logging.info(
f'Best WER Candidate = {best_wer:.2%} :: Beam size = {best_wer_beam_size}, '
f'Beam alpha = {best_wer_alpha}, Beam beta = {best_wer_beta}'
)
logging.info(
f'Best CER Candidate = {best_cer:.2%} :: Beam size = {best_cer_beam_size}, '
f'Beam alpha = {best_cer_alpha}, Beam beta = {best_cer_beta}'
)
logging.info(f"=================================================================================")
if __name__ == '__main__':
main()
| NeMo-main | scripts/asr_language_modeling/ngram_lm/eval_beamsearch_ngram.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script converts a filelist file where each line contains
<absolute path of wav file> to a manifest json file.
Optionally post-processes the manifest file to create train and dev splits for speaker embedding
training, and optionally segments each audio file into fixed-duration segments (see DURATIONS) and writes
those wav files to the current working directory.
Args:
--filelist: path to file containing list of audio files
--manifest(optional): if you already have manifest file, but would like to process it for creating
segments and splitting then use manifest ignoring filelist
--id: index of speaker label in filename present in filelist file that is separated by '/'
--out: output manifest file name
--split: set this flag to split the manifest into train and dev sets for training purposes;
you may not need this for a test set. The files train.json and dev.json are written next to <out>, defaults to False
--create_segments: set this flag to segment each manifest line into segments of the durations listed in DURATIONS,
you may not need this for a test set, defaults to False
--min_spkrs_count: minimum number of samples a speaker must have to be kept, other speakers are ignored; defaults to 0 (all speakers)
"""
import argparse
import json
import os
import random
import librosa as l
import numpy as np
import soundfile as sf
import sox
from sklearn.model_selection import StratifiedShuffleSplit
from tqdm.contrib.concurrent import process_map
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest
random.seed(42)
DURATIONS = sorted([3], reverse=True)
MIN_ENERGY = 0.01
CWD = os.getcwd()
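# A minimal sketch (hypothetical helper, mirroring read_file below) of how one filelist line becomes
# a manifest entry: the speaker label is the path component selected by --id.
def _filelist_line_to_entry_sketch(line: str, id: int = -2) -> str:
    path = line.strip()
    speaker = path.split('/')[id]
    return json.dumps({"audio_filepath": path, "offset": 0, "duration": None, "label": speaker})
# Example: _filelist_line_to_entry_sketch("/data/spk1/utt001.wav", id=-2) labels the clip "spk1".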
def filter_manifest_line(manifest_line):
split_manifest = []
audio_path = manifest_line['audio_filepath']
start = manifest_line.get('offset', 0)
dur = manifest_line['duration']
label = manifest_line['label']
endname = os.path.splitext(audio_path.split(label, 1)[-1])[0]
to_path = os.path.join(CWD, 'segments', label)
to_path = os.path.join(to_path, endname[1:])
os.makedirs(os.path.dirname(to_path), exist_ok=True)
if dur >= min(DURATIONS):
signal, sr = sf.read(audio_path)
remaining_dur = dur - start
segments = DURATIONS.copy()
mode = int(remaining_dur // sum(DURATIONS))
rem = remaining_dur % sum(DURATIONS)
segments = mode * segments
for val in DURATIONS:
if rem >= val:
segments.append(val)
rem = rem - val
for temp_dur in segments:
segment_audio = signal[int(start * sr) : int(start * sr + temp_dur * sr)]
if l.feature.rms(y=segment_audio).mean() > MIN_ENERGY:
final_string = '_' + str(start) + '_' + str(temp_dur)
final_string = final_string.replace('.', '-')
to_file = to_path + final_string + '.wav'
c_start = int(float(start * sr))
c_end = c_start + int(float(temp_dur * sr))
segment = signal[c_start:c_end]
sf.write(to_file, segment, sr)
meta = manifest_line.copy()
meta['audio_filepath'] = to_file
meta['offset'] = 0
meta['duration'] = temp_dur
split_manifest.append(meta)
start = start + temp_dur
return split_manifest
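# A brief sketch (hypothetical helper, mirroring the arithmetic in filter_manifest_line above) of how
# a recording is carved into fixed-length segments: as many full passes over DURATIONS as fit,
# followed by any single durations that still fit into the leftover audio.
def _plan_segments_sketch(total_dur: float, durations=(3,)):
    durations = sorted(durations, reverse=True)
    segments = list(durations) * int(total_dur // sum(durations))
    rem = total_dur % sum(durations)
    for d in durations:
        if rem >= d:
            segments.append(d)
            rem -= d
    return segments
# Example: _plan_segments_sketch(10.5) -> [3, 3, 3]; the remaining 1.5 s is dropped.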
def count_and_consider_only(speakers, lines, min_count=10):
"""
consider speakers only if samples per speaker is at least min_count
"""
uniq_speakers, indices, counts = np.unique(speakers, return_index=True, return_counts=True)
print("speaker count before filtering minimum number of speaker counts: ", len(uniq_speakers))
required_speakers = {}
for idx, count in enumerate(counts):
if count >= min_count:
required_speakers[uniq_speakers[idx]] = count
print("speaker count after filtering minimum number of speaker counts: ", len(required_speakers))
required_lines = []
speakers_only = []
for idx, speaker in enumerate(speakers):
if speaker in required_speakers:
required_lines.append(lines[idx])
speakers_only.append(speaker)
return speakers_only, required_lines
def write_file(name, lines, idx):
with open(name, 'w', encoding='utf-8') as fout:
for i in idx:
dic = lines[i]
json.dump(dic, fout)
fout.write('\n')
print("wrote", name)
def read_file(filelist, id=-1):
json_lines = []
with open(filelist, 'r') as fo:
lines = fo.readlines()
lines = sorted(lines)
for line in lines:
line = line.strip()
speaker = line.split('/')[id]
speaker = list(speaker)
speaker = ''.join(speaker)
meta = {"audio_filepath": line, "offset": 0, "duration": None, "label": speaker}
json_lines.append(meta)
return json_lines
def get_duration(json_line):
dur = json_line['duration']
if dur is None:
wav_path = json_line['audio_filepath']
json_line['duration'] = sox.file_info.duration(wav_path)
return json_line
def get_labels(lines):
labels = []
for line in lines:
label = line['label']
labels.append(label)
return labels
def main(filelist, manifest, id, out, split=False, create_segments=False, min_count=10):
if os.path.exists(out):
os.remove(out)
if filelist:
lines = read_file(filelist=filelist, id=id)
lines = process_map(get_duration, lines, chunksize=100)
out_file = os.path.splitext(filelist)[0] + '_manifest.json'
write_file(out_file, lines, range(len(lines)))
else:
lines = read_manifest(manifest)
lines = process_map(get_duration, lines, chunksize=100)
if create_segments:
print(f"creating and writing segments to {CWD}")
lines = process_map(filter_manifest_line, lines, chunksize=100)
temp = []
for line in lines:
temp.extend(line)
del lines
lines = temp
speakers = [x['label'] for x in lines]
if min_count:
speakers, lines = count_and_consider_only(speakers, lines, abs(min_count))
write_file(out, lines, range(len(lines)))
path = os.path.dirname(out)
if split:
speakers = [x['label'] for x in lines]
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=42)
for train_idx, test_idx in sss.split(speakers, speakers):
print("number of train samples after split: ", len(train_idx))
out = os.path.join(path, 'train.json')
write_file(out, lines, train_idx)
out = os.path.join(path, 'dev.json')
write_file(out, lines, test_idx)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--filelist", help="path to filelist file", type=str, required=False, default=None)
parser.add_argument("--manifest", help="manifest file name", type=str, required=False, default=None)
parser.add_argument(
"--id",
        help="index of the '/'-separated path field to be used as the speaker label from the filelist file; can be ignored if the manifest file already provides labels",
type=int,
required=False,
default=None,
)
parser.add_argument("--out", help="manifest_file name", type=str, required=True)
parser.add_argument(
"--split",
help="bool if you would want to split the manifest file for training purposes",
required=False,
action='store_true',
)
parser.add_argument(
"--create_segments",
        help="set this flag to segment each manifest line into segments of the durations listed in DURATIONS",
required=False,
action='store_true',
)
parser.add_argument(
"--min_spkrs_count",
default=0,
type=int,
help="min number of samples per speaker to consider and ignore otherwise",
)
args = parser.parse_args()
main(
args.filelist, args.manifest, args.id, args.out, args.split, args.create_segments, args.min_spkrs_count,
)
| NeMo-main | scripts/speaker_tasks/filelist_to_manifest.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script creates a manifest file for diarization training. If you specify `pairwise_rttm_output_folder`, the script generates
two-speaker subsets of the original RTTM files. For example, an RTTM file with 4 speakers yields 6 different speaker pairs and
thus 6 RTTM files, each containing exactly two speakers.
Args:
--input_manifest_path: input json file name
--output_manifest_path: output manifest_file name
--pairwise_rttm_output_folder: Save two-speaker pair RTTM files
--window: Window length for segmentation
--shift: Shift length for segmentation
--decimals: Rounding decimals
"""
import argparse
import copy
import itertools
import os
import random
from tqdm import tqdm
from nemo.collections.asr.parts.utils.manifest_utils import (
get_input_manifest_dict,
get_subsegment_dict,
rreplace,
write_truncated_subsegments,
)
from nemo.collections.asr.parts.utils.speaker_utils import (
audio_rttm_map,
rttm_to_labels,
segments_manifest_to_subsegments_manifest,
write_rttm2manifest,
)
from nemo.utils import logging
random.seed(42)
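# A short illustration (hypothetical helper, not part of the original script) of the pairing described
# in the module docstring: itertools.combinations over the speaker set yields C(n, 2) two-speaker
# subsets, so a session with 4 speakers produces 6 pairwise RTTM files.
def _speaker_pairs_sketch(speakers):
    return list(itertools.combinations(speakers, 2))
# Example: len(_speaker_pairs_sketch(["spk0", "spk1", "spk2", "spk3"])) == 6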
def labels_to_rttmfile(labels, uniq_id, filename, out_rttm_dir):
"""
    Write the time stamps in labels to an RTTM file named <filename>.rttm in out_rttm_dir, using uniq_id as the session id.
"""
filename = os.path.join(out_rttm_dir, filename + '.rttm')
with open(filename, 'w') as f:
for line in labels:
line = line.strip()
start, end, speaker = line.split()
duration = float(end) - float(start)
start = float(start)
log = 'SPEAKER {} 1 {:.3f} {:.3f} <NA> <NA> {} <NA> <NA>\n'.format(uniq_id, start, duration, speaker)
f.write(log)
return filename
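# A sample line (hypothetical values) produced by the format string in labels_to_rttmfile above:
# SPEAKER session_abc 1 0.000 2.350 <NA> <NA> speaker_0 <NA> <NA>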
def split_into_pairwise_rttm(audio_rttm_map, input_manifest_path, output_dir):
"""
Create pairwise RTTM files and save it to `output_dir`. This function picks two speakers from the original RTTM files
then saves the two-speaker subset of RTTM to `output_dir`.
Args:
audio_rttm_map (dict):
A dictionary with keys of uniq id, which is being used to map audio files and corresponding rttm files
input_manifest_path (str):
Path of the input manifest file.
output_dir (str):
Path to the directory where the new RTTM files are saved.
"""
input_manifest_dict = get_input_manifest_dict(input_manifest_path)
rttmlist = []
rttm_split_manifest_dict = {}
split_audio_rttm_map = {}
logging.info("Creating split RTTM files.")
for uniq_id, line in tqdm(input_manifest_dict.items(), total=len(input_manifest_dict)):
audiopath = line['audio_filepath']
num_speakers = line['num_speakers']
rttm_filepath = line['rttm_filepath']
rttm = rttm_to_labels(rttm_filepath)
speakers = []
j = 0
while len(speakers) < num_speakers:
if rttm[j].split(' ')[2] not in speakers:
speakers.append(rttm[j].split(' ')[2])
j += 1
base_fn = audiopath.split('/')[-1].replace('.wav', '')
for pair in itertools.combinations(speakers, 2):
i, target_rttm = 0, []
while i < len(rttm):
entry = rttm[i]
sp_id = entry.split(' ')[2]
if sp_id in pair:
target_rttm.append(entry)
i += 1
pair_string = f".{pair[0]}_{pair[1]}"
uniq_id_pair = uniq_id + pair_string
filename = base_fn + pair_string
labels_to_rttmfile(target_rttm, base_fn, filename, output_dir)
rttm_path = output_dir + filename + ".rttm"
rttmlist.append(rttm_path)
line_mod = copy.deepcopy(line)
line_mod['rttm_filepath'] = rttm_path
meta = copy.deepcopy(audio_rttm_map[uniq_id])
meta['rttm_filepath'] = rttm_path
rttm_split_manifest_dict[uniq_id_pair] = line_mod
split_audio_rttm_map[uniq_id_pair] = meta
return rttm_split_manifest_dict, split_audio_rttm_map
def main(input_manifest_path, output_manifest_path, pairwise_rttm_output_folder, window, shift, step_count, decimals):
if '.json' not in input_manifest_path:
raise ValueError("input_manifest_path file should be .json file format")
if output_manifest_path and '.json' not in output_manifest_path:
raise ValueError("output_manifest_path file should be .json file format")
elif not output_manifest_path:
output_manifest_path = rreplace(input_manifest_path, '.json', f'.{step_count}seg.json')
if pairwise_rttm_output_folder is not None:
if not pairwise_rttm_output_folder.endswith('/'):
pairwise_rttm_output_folder = f"{pairwise_rttm_output_folder}/"
org_audio_rttm_map = audio_rttm_map(input_manifest_path)
input_manifest_dict, AUDIO_RTTM_MAP = split_into_pairwise_rttm(
audio_rttm_map=org_audio_rttm_map,
input_manifest_path=input_manifest_path,
output_dir=pairwise_rttm_output_folder,
)
else:
input_manifest_dict = get_input_manifest_dict(input_manifest_path)
AUDIO_RTTM_MAP = audio_rttm_map(input_manifest_path)
segment_manifest_path = rreplace(input_manifest_path, '.json', '_seg.json')
subsegment_manifest_path = rreplace(input_manifest_path, '.json', '_subseg.json')
# todo: do we need to expose this?
min_subsegment_duration = 0.05
step_count = int(step_count)
segments_manifest_file = write_rttm2manifest(AUDIO_RTTM_MAP, segment_manifest_path, decimals)
subsegments_manifest_file = subsegment_manifest_path
logging.info("Creating subsegments.")
segments_manifest_to_subsegments_manifest(
segments_manifest_file=segments_manifest_file,
subsegments_manifest_file=subsegments_manifest_file,
window=window,
shift=shift,
min_subsegment_duration=min_subsegment_duration,
include_uniq_id=True,
)
subsegments_dict = get_subsegment_dict(subsegments_manifest_file, window, shift, decimals)
write_truncated_subsegments(input_manifest_dict, subsegments_dict, output_manifest_path, step_count, decimals)
os.remove(segment_manifest_path)
os.remove(subsegment_manifest_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_manifest_path", help="input json file name", type=str, required=True)
parser.add_argument(
"--output_manifest_path", help="output manifest_file name", type=str, default=None, required=False
)
parser.add_argument(
"--pairwise_rttm_output_folder",
        help="Path to the output directory where two-speaker pair RTTM files are saved",
type=str,
default=None,
required=False,
)
parser.add_argument("--window", help="Window length for segmentation", type=float, required=True)
parser.add_argument("--shift", help="Shift length for segmentation", type=float, required=True)
parser.add_argument("--decimals", help="Rounding decimals", type=int, default=3, required=False)
parser.add_argument(
"--step_count", help="Number of the unit segments you want to create per utterance", required=True,
)
args = parser.parse_args()
main(
args.input_manifest_path,
args.output_manifest_path,
args.pairwise_rttm_output_folder,
args.window,
args.shift,
args.step_count,
args.decimals,
)
| NeMo-main | scripts/speaker_tasks/create_msdd_train_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import random
from nemo.collections.asr.parts.utils.manifest_utils import create_manifest
random.seed(42)
"""
This script creates a manifest file for speaker diarization inference.
It is useful when you have a list of audio files and, optionally, RTTM and UEM files for evaluation.
Note: make sure the basename of each file is unique and that the RTTM files share the corresponding basenames for mapping.
"""
def main(
wav_path, text_path=None, rttm_path=None, uem_path=None, ctm_path=None, manifest_filepath=None, add_duration=False
):
create_manifest(
wav_path,
manifest_filepath,
text_path=text_path,
rttm_path=rttm_path,
uem_path=uem_path,
ctm_path=ctm_path,
add_duration=add_duration,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--paths2audio_files", help="path to text file containing list of audio files", type=str, required=True
)
parser.add_argument("--paths2txt_files", help="path to text file containing list of transcription files", type=str)
parser.add_argument("--paths2rttm_files", help="path to text file containing list of rttm files", type=str)
parser.add_argument("--paths2uem_files", help="path to uem files", type=str)
parser.add_argument("--paths2ctm_files", help="path to ctm files", type=str)
parser.add_argument("--manifest_filepath", help="path to output manifest file", type=str, required=True)
parser.add_argument(
"--add_duration", help="add duration of audio files to output manifest files.", action='store_true',
)
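    # Example invocation (hypothetical paths; each *.list file holds one file path per line):
    #
    #     python pathfiles_to_diarize_manifest.py \
    #         --paths2audio_files /data/audio_files.list \
    #         --paths2rttm_files /data/rttm_files.list \
    #         --manifest_filepath /data/diar_manifest.json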
args = parser.parse_args()
main(
args.paths2audio_files,
args.paths2txt_files,
args.paths2rttm_files,
args.paths2uem_files,
args.paths2ctm_files,
args.manifest_filepath,
args.add_duration,
)
| NeMo-main | scripts/speaker_tasks/pathfiles_to_diarize_manifest.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import shutil
from pathlib import Path
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest, write_ctm, write_manifest
from nemo.utils import logging
def get_unaligned_files(unaligned_path):
"""
Get files without alignments in order to filter them out (as they cannot be used for data simulation).
    In the unaligned file, each line contains the file name and, optionally, the reason it could not be aligned.
Example: unaligned.txt
<utterance_id> <comment>
1272-128104-0000 (no such file)
2289-152257-0025 (no such file)
2289-152257-0026 (mapping failed)
...
Args:
unaligned_path (str): Path to the file containing unaligned examples
Returns:
skip_files (list): Unaligned file names to skip
"""
skip_files = []
with open(unaligned_path, 'r', encoding='utf-8') as f:
for line in f.readlines():
line = line.strip()
if not line:
continue
unaligned_file = line.split()[0]
skip_files.append(unaligned_file)
return skip_files
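# Illustrative example, following the docstring above: for an unaligned.txt containing
#     1272-128104-0000 (no such file)
#     2289-152257-0025 (mapping failed)
# get_unaligned_files("unaligned.txt") returns ['1272-128104-0000', '2289-152257-0025'].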
def create_new_ctm_entry(session_name, speaker_id, wordlist, alignments, output_precision=3):
"""
Create new CTM entry (to write to output ctm file)
Args:
session_name (str): Current session name.
speaker_id (int): LibriSpeech speaker ID for the current entry.
wordlist (list): List of words
alignments (list): List of alignments
output_precision (int): Precision for CTM outputs
Returns:
arr (list): List of ctm entries
"""
arr = []
for i in range(len(wordlist)):
word = wordlist[i]
if word != "":
# note that using the current alignments the first word is always empty, so there is no error from indexing the array with i-1
align1 = float(round(alignments[i - 1], output_precision))
align2 = float(round(alignments[i] - alignments[i - 1], output_precision,))
text = f"{session_name} {speaker_id} {align1} {align2} {word} 0\n"
arr.append((align1, text))
return arr
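# Illustrative example (hypothetical values): with session_name='1272-128104-0000', speaker_id='1272',
# wordlist=['', 'hello', 'world'] and alignments=[0.5, 1.0, 1.6], the function returns
#     [(0.5, '1272-128104-0000 1272 0.5 0.5 hello 0\n'), (1.0, '1272-128104-0000 1272 1.0 0.6 world 0\n')]
# i.e. a list of (start_time, ctm_line) tuples ready to be written out by write_ctm().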
def load_librispeech_alignment(alignment_filepath: str) -> dict:
"""
Load alignment data for librispeech
Args:
alignment_filepath (str): Path to the file containing alignments
Returns:
alignments (dict[tuple]): A dictionary containing file index and alignments
"""
alignments = {}
with open(alignment_filepath, "r") as fin:
for line in fin.readlines():
line = line.strip()
if not line:
continue
file_id, words, timestamps = line.split()
alignments[file_id] = (words, timestamps)
return alignments
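# Expected line format in the alignment files released at github.com/CorentinJ/librispeech-alignments:
#     <utterance_id> "<word1>,<word2>,..." "<end_time1>,<end_time2>,..."
# The returned (words, timestamps) strings are later stripped of quotes and split on commas
# by create_librispeech_ctm_alignments() below.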
def create_librispeech_ctm_alignments(
input_manifest_filepath, base_alignment_path, ctm_output_directory, libri_dataset_split
):
"""
Create new CTM alignments using input LibriSpeech word alignments.
Args:
input_manifest_filepath (str): Path to the input LibriSpeech manifest file
base_alignment_path (str): Path to the base directory containing the LibriSpeech word alignments
        ctm_output_directory (str): Directory to write the CTM files to
libri_dataset_split (str): Which split of the LibriSpeech dataset is being used
"""
manifest = read_manifest(input_manifest_filepath)
unaligned_path = os.path.join(base_alignment_path, "unaligned.txt")
if os.path.exists(unaligned_path):
unaligned_file_ids = set(get_unaligned_files(unaligned_path))
else:
unaligned_file_ids = set()
libri_dataset_split = libri_dataset_split.replace("_", "-")
# delete output directory if it exists or throw warning
if os.path.isdir(ctm_output_directory):
logging.info(f"Removing existing output directory: {ctm_output_directory}")
shutil.rmtree(ctm_output_directory)
if not os.path.exists(ctm_output_directory):
logging.info(f"Creating output directory: {ctm_output_directory}")
os.mkdir(ctm_output_directory)
if len(manifest) == 0:
raise Exception(f"Input manifest is empty: {input_manifest_filepath}")
for entry in manifest:
audio_file = entry['audio_filepath']
file_id = Path(audio_file).stem
if file_id in unaligned_file_ids:
continue
speaker_id = file_id.split('-')[0]
book_id = file_id.split('-')[1]
book_dir = os.path.join(base_alignment_path, "LibriSpeech", libri_dataset_split, speaker_id, book_id)
alignment_filepath = os.path.join(book_dir, f"{speaker_id}-{book_id}.alignment.txt")
alignment_data = load_librispeech_alignment(alignment_filepath)
if file_id not in alignment_data:
logging.warning(f"Cannot find alignment data for {audio_file} in {alignment_filepath}")
continue
words, end_times = alignment_data[file_id]
words = words.replace('\"', '').lower().split(',')
end_times = [float(e) for e in end_times.replace('\"', '').split(',')]
ctm_list = create_new_ctm_entry(file_id, speaker_id, words, end_times)
write_ctm(os.path.join(ctm_output_directory, file_id + '.ctm'), ctm_list)
def create_manifest_with_alignments(
input_manifest_filepath,
ctm_source_dir,
output_manifest_filepath,
data_format_style,
silence_dur_threshold=0.1,
output_precision=3,
):
"""
Create new manifest file with word alignments using CTM files
Args:
input_manifest_filepath (str): Path to the input manifest file
ctm_source_dir (str): Directory to read the CTM files from
output_manifest_filepath (str): Path to the output manifest file containing word alignments
        data_format_style (str): Data format style used to derive CTM file names from audio paths (e.g. 'voxceleb')
        silence_dur_threshold (float): Minimum gap in seconds between words for inserting a silence token
        output_precision (int): How many decimal places to keep in the manifest file
"""
manifest = read_manifest(input_manifest_filepath)
target_manifest = []
src_i = 0
tgt_i = 0
while src_i < len(manifest):
f = manifest[src_i]
fn = f['audio_filepath'].split('/')[-1]
filename = fn.split('.')[0] # assuming that there is only one period in the input filenames
        if "voxceleb" in data_format_style:
            fn_split = f['audio_filepath'].split('/')
            filename = fn_split[-3] + '-' + fn_split[-2] + '-' + fn_split[-1].split('.')[0]
        ctm_filepath = os.path.join(ctm_source_dir, filename + '.ctm')
if not os.path.isfile(ctm_filepath):
logging.info(f"Skipping {filename}.wav as there is no corresponding CTM file")
src_i += 1
continue
with open(ctm_filepath, 'r') as ctm_file:
lines = ctm_file.readlines()
# One-word samples should be filtered out.
if len(lines) <= 1:
src_i += 1
continue
words = []
end_times = []
prev_end = 0
for i in range(len(lines)):
ctm = lines[i].split(' ')
speaker_id = ctm[1]
start = float(ctm[2])
end = float(ctm[2]) + float(ctm[3])
start = round(start, output_precision)
end = round(end, output_precision)
interval = start - prev_end
if (i == 0 and interval > 0) or (i > 0 and interval > silence_dur_threshold):
words.append("")
end_times.append(start)
elif i > 0:
end_times[-1] = start
words.append(ctm[4])
end_times.append(end)
prev_end = end
# append last end
if f['duration'] > prev_end:
words.append("")
end_times.append(f['duration'])
# build target manifest entry
target_manifest.append({})
target_manifest[tgt_i]['audio_filepath'] = f['audio_filepath']
target_manifest[tgt_i]['duration'] = f['duration']
target_manifest[tgt_i]['text'] = f['text']
target_manifest[tgt_i]['words'] = words
target_manifest[tgt_i]['alignments'] = end_times
target_manifest[tgt_i]['speaker_id'] = speaker_id
src_i += 1
tgt_i += 1
logging.info(f"Writing output manifest file to {output_manifest_filepath}")
write_manifest(output_manifest_filepath, target_manifest)
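# Sketch of a single output manifest entry (values are hypothetical):
#     {
#         "audio_filepath": "/data/LibriSpeech/dev-clean/1272/128104/1272-128104-0000.wav",
#         "duration": 5.86,
#         "text": "mister quilter is the apostle ...",
#         "words": ["", "mister", "quilter", ...],
#         "alignments": [0.48, 0.84, 1.25, ...],
#         "speaker_id": "1272",
#     }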
def main():
"""
Create a combined manifest file including word alignments and speaker IDs
"""
input_manifest_filepath = args.input_manifest_filepath
base_alignment_path = args.base_alignment_path
output_manifest_filepath = args.output_manifest_filepath
ctm_output_directory = args.ctm_output_directory
libri_dataset_split = args.libri_dataset_split
use_ctm_alignment_source = args.use_ctm_alignment_source
output_precision = args.output_precision
    # Case 1: args.base_alignment_path contains the CTM files
if use_ctm_alignment_source:
ctm_source_dir = args.base_alignment_path
    # Case 2: args.base_alignment_path contains LibriSpeech-style word alignments for the dataset
else:
create_librispeech_ctm_alignments(
input_manifest_filepath, base_alignment_path, ctm_output_directory, libri_dataset_split
)
ctm_source_dir = ctm_output_directory
create_manifest_with_alignments(
input_manifest_filepath,
ctm_source_dir,
output_manifest_filepath,
data_format_style=args.data_format_style,
silence_dur_threshold=args.silence_dur_threshold,
output_precision=output_precision,
)
if __name__ == "__main__":
"""
This script creates a manifest file to be used for generating synthetic
multispeaker audio sessions. The script takes in the default manifest file
for a LibriSpeech dataset and corresponding word alignments and produces
a combined manifest file that contains word alignments and speaker IDs
per example. It can also be used to produce a manifest file for a different
dataset if alignments are passed in CTM files.
The alignments are obtained from: https://github.com/CorentinJ/librispeech-alignments
Args:
input_manifest_filepath (str): Path to input manifest file
base_alignment_path (str): Path to the base directory for the LibriSpeech alignment dataset
(specifically to the LibriSpeech-Alignments directory containing
both the LibriSpeech folder as well as the unaligned.txt file)
or to a directory containing the requisite CTM files
output_manifest_filepath (str): Path to output manifest file
ctm_output_directory (str): Path to output CTM directory (only used for LibriSpeech)
libri_dataset_split (str): Which dataset split to create a combined manifest file for
use_ctm_alignment_source (bool): If true, base_alignment_path points to a directory containing ctm files
"""
parser = argparse.ArgumentParser(description="LibriSpeech Alignment Manifest Creator")
parser.add_argument("--input_manifest_filepath", help="path to input manifest file", type=str, required=True)
parser.add_argument("--base_alignment_path", help="path to alignments (LibriSpeech)", type=str, required=False)
parser.add_argument("--output_manifest_filepath", help="path to output manifest file", type=str, required=True)
parser.add_argument(
"--ctm_output_directory",
help="path to output ctm directory for LibriSpeech (or to input CTM directory)",
type=str,
required=True,
)
parser.add_argument(
"--libri_dataset_split",
help="which test/dev/training set to create a manifest for (only used for LibriSpeech)",
type=str,
required=False,
default="",
)
parser.add_argument(
"--use_ctm_alignment_source",
help="if true, base_alignment_path points to a directory containing ctm files",
action='store_true',
required=False,
)
parser.add_argument(
"--data_format_style",
help="Use specific format for speaker IDs and utterance IDs. e.g. 'voxceleb', 'librispeech', 'swbd'",
default="",
type=str,
required=False,
)
parser.add_argument(
"--output_precision", help="precision for output alignments", type=int, required=False, default=3
)
parser.add_argument(
"--silence_dur_threshold", help="threshold for inserting silence", type=float, required=False, default=0.1
)
args = parser.parse_args()
main()
| NeMo-main | scripts/speaker_tasks/create_alignment_manifest.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from nemo.collections.asr.metrics.der import evaluate_der
from nemo.collections.asr.parts.utils.diarization_utils import OfflineDiarWithASR
from nemo.collections.asr.parts.utils.manifest_utils import read_file
from nemo.collections.asr.parts.utils.speaker_utils import (
get_uniqname_from_filepath,
labels_to_pyannote_object,
rttm_to_labels,
)
"""
Evaluation script for diarization with ASR.
Calculates Diarization Error Rate (DER) with RTTM files and WER and cpWER with CTM files.
In the output ctm_eval.csv file in the output folder,
session-level DER, WER, cpWER and speaker counting accuracies are evaluated.
- Evaluation mode
diar_eval_mode == "full":
DIHARD challenge style evaluation, the most strict way of evaluating diarization
(collar, ignore_overlap) = (0.0, False)
diar_eval_mode == "fair":
Evaluation setup used in VoxSRC challenge
(collar, ignore_overlap) = (0.25, False)
diar_eval_mode == "forgiving":
Traditional evaluation setup
(collar, ignore_overlap) = (0.25, True)
diar_eval_mode == "all":
Compute all three modes (default)
Use CTM files to calculate WER and cpWER
```
python eval_diar_with_asr.py \
--hyp_rttm_list="/path/to/hypothesis_rttm_filepaths.list" \
--ref_rttm_list="/path/to/reference_rttm_filepaths.list" \
--hyp_ctm_list="/path/to/hypothesis_ctm_filepaths.list" \
--ref_ctm_list="/path/to/reference_ctm_filepaths.list" \
--root_path="/path/to/output/directory"
```
Use .json files to calculate WER and cpWER
```
python eval_diar_with_asr.py \
--hyp_rttm_list="/path/to/hypothesis_rttm_filepaths.list" \
--ref_rttm_list="/path/to/reference_rttm_filepaths.list" \
--hyp_json_list="/path/to/hypothesis_json_filepaths.list" \
--ref_ctm_list="/path/to/reference_ctm_filepaths.list" \
--root_path="/path/to/output/directory"
```
Only use RTTMs to calculate DER
```
python eval_diar_with_asr.py \
--hyp_rttm_list="/path/to/hypothesis_rttm_filepaths.list" \
--ref_rttm_list="/path/to/reference_rttm_filepaths.list" \
--root_path="/path/to/output/directory"
```
"""
def get_pyannote_objs_from_rttms(rttm_file_path_list):
"""Generate PyAnnote objects from RTTM file list
"""
pyannote_obj_list = []
for rttm_file in rttm_file_path_list:
rttm_file = rttm_file.strip()
if rttm_file is not None and os.path.exists(rttm_file):
uniq_id = get_uniqname_from_filepath(rttm_file)
ref_labels = rttm_to_labels(rttm_file)
reference = labels_to_pyannote_object(ref_labels, uniq_name=uniq_id)
pyannote_obj_list.append([uniq_id, reference])
return pyannote_obj_list
def make_meta_dict(hyp_rttm_list, ref_rttm_list):
"""Create a temporary `audio_rttm_map_dict` for evaluation
"""
meta_dict = {}
for k, rttm_file in enumerate(ref_rttm_list):
uniq_id = get_uniqname_from_filepath(rttm_file)
meta_dict[uniq_id] = {"rttm_filepath": rttm_file.strip()}
if hyp_rttm_list is not None:
hyp_rttm_file = hyp_rttm_list[k]
meta_dict[uniq_id].update({"hyp_rttm_filepath": hyp_rttm_file.strip()})
return meta_dict
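# Illustrative output of make_meta_dict (hypothetical paths):
#     {'session1': {'rttm_filepath': '/ref/session1.rttm', 'hyp_rttm_filepath': '/hyp/session1.rttm'}}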
def make_trans_info_dict(hyp_json_list_path):
"""Create `trans_info_dict` from the `.json` files
"""
trans_info_dict = {}
for json_file in hyp_json_list_path:
json_file = json_file.strip()
with open(json_file) as jsf:
json_data = json.load(jsf)
uniq_id = get_uniqname_from_filepath(json_file)
trans_info_dict[uniq_id] = json_data
return trans_info_dict
def read_file_path(list_path):
    """Read a list of file paths from a file, strip trailing newlines and return them sorted
"""
return sorted([x.strip() for x in read_file(list_path)])
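# Each *.list argument below is a plain-text file with one path per line, for example:
#     /data/rttms/session1.rttm
#     /data/rttms/session2.rttm
# (paths are hypothetical). Since read_file_path() sorts the entries, hypothesis and reference
# lists are assumed to correspond element-wise after sorting.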
def main(
hyp_rttm_list_path: str,
ref_rttm_list_path: str,
hyp_ctm_list_path: str,
ref_ctm_list_path: str,
hyp_json_list_path: str,
diar_eval_mode: str = "all",
root_path: str = "./",
):
# Read filepath list files
hyp_rttm_list = read_file_path(hyp_rttm_list_path) if hyp_rttm_list_path else None
ref_rttm_list = read_file_path(ref_rttm_list_path) if ref_rttm_list_path else None
hyp_ctm_list = read_file_path(hyp_ctm_list_path) if hyp_ctm_list_path else None
ref_ctm_list = read_file_path(ref_ctm_list_path) if ref_ctm_list_path else None
hyp_json_list = read_file_path(hyp_json_list_path) if hyp_json_list_path else None
audio_rttm_map_dict = make_meta_dict(hyp_rttm_list, ref_rttm_list)
trans_info_dict = make_trans_info_dict(hyp_json_list) if hyp_json_list else None
all_hypothesis = get_pyannote_objs_from_rttms(hyp_rttm_list)
all_reference = get_pyannote_objs_from_rttms(ref_rttm_list)
diar_score = evaluate_der(
audio_rttm_map_dict=audio_rttm_map_dict,
all_reference=all_reference,
all_hypothesis=all_hypothesis,
diar_eval_mode=diar_eval_mode,
)
# Get session-level diarization error rate and speaker counting error
der_results = OfflineDiarWithASR.gather_eval_results(
diar_score=diar_score,
audio_rttm_map_dict=audio_rttm_map_dict,
trans_info_dict=trans_info_dict,
root_path=root_path,
)
if ref_ctm_list is not None:
# Calculate WER and cpWER if reference CTM files exist
if hyp_ctm_list is not None:
wer_results = OfflineDiarWithASR.evaluate(
audio_file_list=hyp_rttm_list,
hyp_trans_info_dict=None,
hyp_ctm_file_list=hyp_ctm_list,
ref_ctm_file_list=ref_ctm_list,
)
elif hyp_json_list is not None:
wer_results = OfflineDiarWithASR.evaluate(
audio_file_list=hyp_rttm_list,
hyp_trans_info_dict=trans_info_dict,
hyp_ctm_file_list=None,
ref_ctm_file_list=ref_ctm_list,
)
else:
raise ValueError("Hypothesis information is not provided in the correct format.")
else:
wer_results = {}
# Print average DER, WER and cpWER
OfflineDiarWithASR.print_errors(der_results=der_results, wer_results=wer_results)
# Save detailed session-level evaluation results in `root_path`.
OfflineDiarWithASR.write_session_level_result_in_csv(
der_results=der_results,
wer_results=wer_results,
root_path=root_path,
csv_columns=OfflineDiarWithASR.get_csv_columns(),
)
return None
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--hyp_rttm_list", help="path to the filelist of hypothesis RTTM files", type=str, required=True, default=None
)
parser.add_argument(
"--ref_rttm_list", help="path to the filelist of reference RTTM files", type=str, required=True, default=None
)
parser.add_argument(
"--hyp_ctm_list", help="path to the filelist of hypothesis CTM files", type=str, required=False, default=None
)
parser.add_argument(
"--ref_ctm_list", help="path to the filelist of reference CTM files", type=str, required=False, default=None
)
parser.add_argument(
"--hyp_json_list",
help="(Optional) path to the filelist of hypothesis JSON files",
type=str,
required=False,
default=None,
)
parser.add_argument(
"--diar_eval_mode",
help='evaluation mode: "all", "full", "fair", "forgiving"',
type=str,
required=False,
default="all",
)
parser.add_argument(
"--root_path", help='directory for saving result files', type=str, required=False, default="./"
)
args = parser.parse_args()
main(
args.hyp_rttm_list,
args.ref_rttm_list,
args.hyp_ctm_list,
args.ref_ctm_list,
args.hyp_json_list,
args.diar_eval_mode,
args.root_path,
)
| NeMo-main | scripts/speaker_tasks/eval_diar_with_asr.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import multiprocessing
import shutil
from collections import OrderedDict
from pathlib import Path
from pprint import pprint
from typing import Dict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import sox
from scipy.stats import expon
from tqdm import tqdm
from nemo.collections.asr.parts.utils.vad_utils import (
get_nonspeech_segments,
load_speech_overlap_segments_from_rttm,
plot_sample_from_rttm,
)
"""
This script analyzes multi-speaker speech dataset and generates statistics.
The input directory </path/to/rttm_and_wav_directory> is required to contain the following files:
- rttm files (*.rttm)
- wav files (*.wav)
Usage:
python <NEMO_ROOT>/scripts/speaker_tasks/multispeaker_data_analysis.py \
</path/to/rttm_and_wav_directory> \
--session_dur 20 \
--silence_mean 0.2 \
--silence_var 100 \
--overlap_mean 0.15 \
--overlap_var 50 \
--num_workers 8 \
--num_samples 10 \
--output_dir <path/to/output_directory>
"""
def process_sample(sess_dict: Dict) -> Dict:
"""
Process each synthetic sample
Args:
sess_dict (dict): dictionary containing the following keys
rttm_file (str): path to the rttm file
session_dur (float): duration of the session (specified by argument)
precise (bool): whether to measure the precise duration of the session using sox
Returns:
results (dict): dictionary containing the following keys
session_dur (float): duration of the session
silence_len_list (list): list of silence durations of each silence occurrence
silence_dur (float): total silence duration in a session
silence_ratio (float): ratio of silence duration to session duration
overlap_len_list (list): list of overlap durations of each overlap occurrence
overlap_dur (float): total overlap duration
overlap_ratio (float): ratio of overlap duration to speech (non-silence) duration
"""
rttm_file = sess_dict["rttm_file"]
session_dur = sess_dict["session_dur"]
precise = sess_dict["precise"]
if precise or session_dur is None:
wav_file = rttm_file.parent / Path(rttm_file.stem + ".wav")
session_dur = sox.file_info.duration(str(wav_file))
speech_seg, overlap_seg = load_speech_overlap_segments_from_rttm(rttm_file)
    speech_dur = sum([seg[1] - seg[0] for seg in speech_seg])
silence_seg = get_nonspeech_segments(speech_seg, session_dur)
    silence_len_list = [seg[1] - seg[0] for seg in silence_seg]
silence_dur = max(0, session_dur - speech_dur)
silence_ratio = silence_dur / session_dur
    overlap_len_list = [seg[1] - seg[0] for seg in overlap_seg]
overlap_dur = sum(overlap_len_list) if len(overlap_len_list) else 0
overlap_ratio = overlap_dur / speech_dur
results = {
"session_dur": session_dur,
"silence_len_list": silence_len_list,
"silence_dur": silence_dur,
"silence_ratio": silence_ratio,
"overlap_len_list": overlap_len_list,
"overlap_dur": overlap_dur,
"overlap_ratio": overlap_ratio,
}
return results
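# Illustrative return value for a 20-second session containing 2 s of silence and 1 s of overlap:
#     {'session_dur': 20.0, 'silence_len_list': [0.7, 1.3], 'silence_dur': 2.0, 'silence_ratio': 0.1,
#      'overlap_len_list': [1.0], 'overlap_dur': 1.0, 'overlap_ratio': 1.0 / 18.0}
# (values are hypothetical; overlap_ratio is relative to the 18 s of speech, not the full session)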
def run_multispeaker_data_analysis(
input_dir,
session_dur=None,
silence_mean=None,
silence_var=None,
overlap_mean=None,
overlap_var=None,
precise=False,
save_path=None,
num_workers=1,
) -> Dict:
    """
    Analyze the multispeaker data and plot the distribution of silence and overlap durations.
    Args:
        input_dir (str): path to the directory containing the rttm files
        session_dur (float): duration of the session (specified by argument)
        silence_mean (float): mean of the silence duration distribution
        silence_var (float): variance of the silence duration distribution
        overlap_mean (float): mean of the overlap duration distribution
        overlap_var (float): variance of the overlap duration distribution
        precise (bool): whether to measure the precise duration of the session using sox
        save_path (str): path to save the plots
        num_workers (int): number of CPU workers to use for processing
    Returns:
        stats (dict): dictionary containing the statistics of the analyzed data
    """
    rttm_list = list(Path(input_dir).glob("*.rttm"))
print(f"Found {len(rttm_list)} files to be processed")
if len(rttm_list) == 0:
raise ValueError(f"No rttm files found in {input_dir}")
silence_duration = 0.0
total_duration = 0.0
overlap_duration = 0.0
silence_ratio_all = []
overlap_ratio_all = []
silence_length_all = []
overlap_length_all = []
queue = []
for rttm_file in tqdm(rttm_list):
queue.append(
{"rttm_file": rttm_file, "session_dur": session_dur, "precise": precise,}
)
if num_workers <= 1:
results = [process_sample(sess_dict) for sess_dict in tqdm(queue)]
else:
with multiprocessing.Pool(processes=num_workers) as p:
results = list(tqdm(p.imap(process_sample, queue), total=len(queue), desc='Processing', leave=True,))
for item in results:
total_duration += item["session_dur"]
silence_duration += item["silence_dur"]
overlap_duration += item["overlap_dur"]
silence_length_all += item["silence_len_list"]
overlap_length_all += item["overlap_len_list"]
silence_ratio_all.append(item["silence_ratio"])
overlap_ratio_all.append(item["overlap_ratio"])
actual_silence_mean = silence_duration / total_duration
actual_silence_var = np.var(silence_ratio_all)
actual_overlap_mean = overlap_duration / (total_duration - silence_duration)
actual_overlap_var = np.var(overlap_ratio_all)
stats = OrderedDict()
stats["total duration (hours)"] = f"{total_duration / 3600:.2f}"
stats["number of sessions"] = len(rttm_list)
stats["average session duration (seconds)"] = f"{total_duration / len(rttm_list):.2f}"
stats["actual silence ratio mean/var"] = f"{actual_silence_mean:.4f}/{actual_silence_var:.4f}"
stats["actual overlap ratio mean/var"] = f"{actual_overlap_mean:.4f}/{actual_overlap_var:.4f}"
stats["expected silence ratio mean/var"] = f"{silence_mean}/{silence_var}"
stats["expected overlap ratio mean/var"] = f"{overlap_mean}/{overlap_var}"
stats["save_path"] = save_path
print("-----------------------------------------------")
print(" Results ")
print("-----------------------------------------------")
for k, v in stats.items():
print(k, ": ", v)
print("-----------------------------------------------")
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(14, 14))
fig.suptitle(
f"Average session={total_duration/len(rttm_list):.2f} seconds, num sessions={len(rttm_list)}, total={total_duration/3600:.2f} hours"
)
sns.histplot(silence_ratio_all, ax=ax1)
ax1.set_xlabel("Silence ratio in a session")
ax1.set_title(
f"Target silence mean={silence_mean}, var={silence_var}. \nActual silence ratio={actual_silence_mean:.4f}, var={actual_silence_var:.4f}"
)
_, scale = expon.fit(silence_length_all, floc=0)
sns.histplot(silence_length_all, ax=ax2)
ax2.set_xlabel("Per-silence length in seconds")
ax2.set_title(f"Per-silence length histogram, \nfitted exponential distribution with mean={scale:.4f}")
sns.histplot(overlap_ratio_all, ax=ax3)
ax3.set_title(
f"Target overlap mean={overlap_mean}, var={overlap_var}. \nActual ratio={actual_overlap_mean:.4f}, var={actual_overlap_var:.4f}"
)
ax3.set_xlabel("Overlap ratio in a session")
_, scale2 = expon.fit(overlap_length_all, floc=0)
sns.histplot(overlap_length_all, ax=ax4)
ax4.set_title(f"Per overlap length histogram, \nfitted exponential distribution with mean={scale2:.4f}")
ax4.set_xlabel("Duration in seconds")
if save_path:
fig.savefig(save_path)
print(f"Figure saved at: {save_path}")
return stats
def visualize_multispeaker_data(input_dir: str, output_dir: str, num_samples: int = 10) -> None:
"""
Visualize a set of randomly sampled data in the input directory
Args:
input_dir (str): Path to the input directory
output_dir (str): Path to the output directory
num_samples (int): Number of samples to visualize
"""
rttm_list = list(Path(input_dir).glob("*.rttm"))
idx_list = np.random.permutation(len(rttm_list))[:num_samples]
print(f"Visualizing {num_samples} random samples")
for idx in idx_list:
rttm_file = rttm_list[idx]
audio_file = rttm_file.parent / Path(rttm_file.stem + ".wav")
output_file = Path(output_dir) / Path(rttm_file.stem + ".png")
plot_sample_from_rttm(audio_file=audio_file, rttm_file=rttm_file, save_path=str(output_file), show=False)
print(f"Sample plots saved at: {output_dir}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input_dir", default="", help="Input directory")
parser.add_argument("-sd", "--session_dur", default=None, type=float, help="Duration per session in seconds")
parser.add_argument("-sm", "--silence_mean", default=None, type=float, help="Expected silence ratio mean")
parser.add_argument("-sv", "--silence_var", default=None, type=float, help="Expected silence ratio variance")
parser.add_argument("-om", "--overlap_mean", default=None, type=float, help="Expected overlap ratio mean")
parser.add_argument("-ov", "--overlap_var", default=None, type=float, help="Expected overlap ratio variance")
parser.add_argument("-w", "--num_workers", default=1, type=int, help="Number of CPU workers to use")
parser.add_argument("-s", "--num_samples", default=10, type=int, help="Number of random samples to plot")
parser.add_argument("-o", "--output_dir", default="analysis/", type=str, help="Directory for saving output figure")
parser.add_argument(
"--precise", action="store_true", help="Set to get precise duration, with significant time cost"
)
args = parser.parse_args()
print("Running with params:")
pprint(vars(args))
output_dir = Path(args.output_dir)
if output_dir.exists():
print(f"Removing existing output directory: {args.output_dir}")
shutil.rmtree(str(output_dir))
output_dir.mkdir(parents=True)
run_multispeaker_data_analysis(
input_dir=args.input_dir,
session_dur=args.session_dur,
silence_mean=args.silence_mean,
silence_var=args.silence_var,
overlap_mean=args.overlap_mean,
overlap_var=args.overlap_var,
precise=args.precise,
save_path=str(Path(args.output_dir, "statistics.png")),
num_workers=args.num_workers,
)
visualize_multispeaker_data(input_dir=args.input_dir, output_dir=args.output_dir, num_samples=args.num_samples)
print("The multispeaker data analysis has been completed.")
print(f"Please check the output directory: \n{args.output_dir}")
| NeMo-main | scripts/speaker_tasks/multispeaker_data_analysis.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import multiprocessing as mp
from itertools import repeat
from pathlib import Path
import librosa
from tqdm import tqdm
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest, write_manifest
from nemo.collections.asr.parts.utils.vad_utils import get_frame_labels, load_speech_segments_from_rttm
"""
This script generates a manifest file for synthetic data generated using the NeMo multispeaker speech data simulator.
The audio created by the simulator can be used to train a VAD model with the generated manifest file.
The manifest file contains the following fields:
audio_filepath (str): Path to audio file.
offset (float): Offset in seconds for the start of the audio file.
duration (float): Duration in seconds for the audio file.
text (str): Transcription of the audio file.
label (list): List of frame labels for the audio file.
orig_sample_rate (int): Original sample rate of the audio file.
vad_frame_unit_secs (float): Duration in seconds for each frame label.
Usage:
python create_synth_vad_manifest.py \
    /path/to/synthetic/data \
--frame_length 0.04 \
--output_file /path/to/output/manifest.json
"""
def generate_manifest_entry(inputs):
"""
Generates a manifest entry for a single audio file.
This function is parallelized using multiprocessing.Pool.
Args:
inputs (tuple): Tuple containing audio file path and frame length in seconds.
inputs[0]:
audio_filepath (str): Path to audio file.
inputs[1]:
vad_frame_unit_secs (float): Duration in seconds for each frame label.
Returns:
entry (dict): Dictionary containing manifest entry.
"""
audio_filepath, vad_frame_unit_secs = inputs
audio_filepath = Path(audio_filepath)
    # sr=None keeps the file's original sample rate instead of resampling to librosa's default
    y, sr = librosa.load(str(audio_filepath), sr=None)
dur = librosa.get_duration(y=y, sr=sr)
manifest_path = audio_filepath.parent / Path(f"{audio_filepath.stem}.json")
audio_manifest = read_manifest(manifest_path)
text = " ".join([x["text"] for x in audio_manifest])
rttm_path = audio_filepath.parent / Path(f"{audio_filepath.stem}.rttm")
segments = load_speech_segments_from_rttm(rttm_path)
labels = get_frame_labels(segments, vad_frame_unit_secs, 0.0, dur)
entry = {
"audio_filepath": str(audio_filepath.absolute()),
"offset": 0.0,
"duration": dur,
"text": text,
"label": labels,
"orig_sample_rate": sr,
"vad_frame_unit_secs": vad_frame_unit_secs,
}
return entry
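# Sketch of a produced manifest entry (values are hypothetical; `label` holds the per-frame
# speech/non-speech labels returned by get_frame_labels):
#     {'audio_filepath': '/data/synth/session_0.wav', 'offset': 0.0, 'duration': 20.0,
#      'text': 'hello world ...', 'label': <frame labels>, 'orig_sample_rate': 16000,
#      'vad_frame_unit_secs': 0.04}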
def main(args):
wav_list = list(Path(args.input_dir).glob("*.wav"))
print(f"Found {len(wav_list)} in directory: {args.input_dir}")
inputs = zip(wav_list, repeat(args.frame_length))
with mp.Pool(processes=mp.cpu_count()) as pool:
manifest_data = list(tqdm(pool.imap(generate_manifest_entry, inputs), total=len(wav_list)))
write_manifest(args.output_file, manifest_data)
print(f"Manifest saved to: {args.output_file}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input_dir", default=None, help="Path to directory containing synthetic data")
parser.add_argument(
"-l", "--frame_length", default=0.04, type=float, help="Duration in seconds for each frame label"
)
parser.add_argument("-o", "--output_file", default=None, help="Path to output manifest file")
args = parser.parse_args()
main(args)
| NeMo-main | scripts/speaker_tasks/create_synth_vad_manifest.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script generates a NeMo-Megatron compatible `.nemo` file for a Huggingface T5-v1_1 model.
List of Huggingface models that this script can convert:
1. google/t5-v1_1-small
2. google/t5-v1_1-base
3. google/t5-v1_1-large
4. google/t5-v1_1-xl
5. google/t5-v1_1-xxl
6. google/mt5-small
7. google/mt5-base
8. google/mt5-large
9. google/mt5-xl
10. google/mt5-xxl
11. google/ul2
12. bigscience/T0pp
13. google/t5-small-lm-adapt
14. google/t5-base-lm-adapt
15. google/t5-large-lm-adapt
16. google/t5-xl-lm-adapt
17. google/t5-xxl-lm-adapt
18. google/flan-t5-small
19. google/flan-t5-base
20. google/flan-t5-large
21. google/flan-t5-xl
22. google/flan-t5-xxl
Use instructions:
python hf_t5-v1_1_to_nemo.py \
--hf_model_name bigscience/T0pp \
--nemo_state_dict /path/to/nemo_state_dict.pt \
--nemo_file_path /path/to/nemo_file.nemo
"""
import collections
import os
import tempfile
from argparse import ArgumentParser
import torch
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from transformers import AutoTokenizer, T5ForConditionalGeneration
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy, NLPSaveRestoreConnector
try:
import accelerate
except ImportError:
raise ImportError("Please install accelerate package via `pip install accelerate` to use this script.")
def convert_weights(hf_model, nemo_state_dict_path):
if hf_model == 'google/ul2':
torch_dtype = torch.bfloat16
else:
torch_dtype = torch.float32
hf_model = T5ForConditionalGeneration.from_pretrained(hf_model, low_cpu_mem_usage=True, torch_dtype=torch_dtype)
hf_model_config = hf_model.config
with tempfile.TemporaryDirectory() as tmp:
torch.save(hf_model.state_dict(), os.path.join(tmp, 'model.pt'))
hf_weights = torch.load(os.path.join(tmp, 'model.pt'))
nemo_weights = collections.OrderedDict()
print(f'Found {len(hf_weights.keys())} keys in the checkpoint')
def _get_model_type_block_layer(k):
if k.startswith('encoder'):
model_type = 'encoder'
elif k.startswith('decoder'):
model_type = 'decoder'
else:
raise ValueError(f"Unknown model type for {k}")
return model_type, int(k.split('.')[2]), int(k.split('.')[4])
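    # Illustrative mapping performed by the helper above: a HF key such as
    # 'encoder.block.3.layer.0.SelfAttention.q.weight' yields ('encoder', 3, 0),
    # i.e. (model_type, HF block index, HF layer index within the block).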
for k, v in hf_weights.items():
#################################################
###### Enc-Dec Embeddings and Output Layer ######
#################################################
# Tied decoder embedding and decoder output layer.
if k == 'shared.weight':
pass
elif k == 'lm_head.weight':
nemo_weights['enc_dec_model.tokens_head.weight'] = v
print(
f'Mapped {k} to enc_dec_model.decoder_embedding.word_embeddings.weight and enc_dec_model.tokens_head.weight'
)
# Decoder embeddings
elif k == 'decoder.embed_tokens.weight':
nemo_weights['enc_dec_model.decoder_embedding.word_embeddings.weight'] = v
elif k == 'encoder.embed_tokens.weight':
nemo_weights['enc_dec_model.encoder_embedding.word_embeddings.weight'] = v
print(f'Mapped {k} to enc_dec_model.encoder_embedding.word_embeddings.weight')
#################################################
################# RPE Weights ###################
#################################################
elif k == 'encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight':
nemo_weights['enc_dec_model.encoder_relative_position_embedding.relative_position_embedding.weight'] = v
print(
f'Mapped {k} to enc_dec_model.encoder_relative_position_embedding.relative_position_embedding.weight'
)
elif k == 'decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight':
nemo_weights['enc_dec_model.decoder_relative_position_embedding.relative_position_embedding.weight'] = v
print(
f'Mapped {k} to enc_dec_model.decoder_relative_position_embedding.relative_position_embedding.weight'
)
# Block in HF corresponds to layer in NeMo.
# Layer in HF does not correspond to anything in NeMo. Layer 0 is self attn, layer 1 is cross-attn.
#################################################
############### Attention Layers ################
#################################################
# Self-Attention
# Q, k, V in NeMo-Megatron is bundled into a single matrix.
elif 'SelfAttention.q.weight' in k:
model_type, block_number, layer_number = _get_model_type_block_layer(k)
k_weight = hf_weights[k.replace('q.weight', 'k.weight')]
v_weight = hf_weights[k.replace('q.weight', 'v.weight')]
concat_weights = torch.cat([v, k_weight, v_weight], dim=0)
nemo_weights[
f'enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.self_attention.query_key_value.weight'
] = concat_weights
print(
f'Mapped {k} to enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.self_attention.query_key_value.weight'
)
# We can skip processing of k, v weights since we already concat them into qkv above.
elif 'SelfAttention.k.weight' in k or 'SelfAttention.v.weight' in k:
pass
# Output self-attn matrix.
elif 'SelfAttention.o.weight' in k:
model_type, block_number, layer_number = _get_model_type_block_layer(k)
nemo_weights[
f'enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.self_attention.dense.weight'
] = v
print(
f'Mapped {k} to enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.self_attention.dense.weight'
)
# Cross-Attention projection matrices are merged into K, V matrices in NeMo-Megatron
elif 'EncDecAttention.k.weight' in k:
model_type, block_number, layer_number = _get_model_type_block_layer(k)
v_weight = hf_weights[k.replace('k.weight', 'v.weight')]
concat_weights = torch.cat([v, v_weight], dim=0)
nemo_weights[
f'enc_dec_model.enc_dec_model.decoder.model.layers.{block_number}.inter_attention.key_value.weight'
] = concat_weights
print(
f'Mapped {k} to enc_dec_model.enc_dec_model.decoder.model.layers.{block_number}.inter_attention.key_value.weight'
)
# We can skip processing of v weights since we already concat them with k above.
elif 'EncDecAttention.v.weight' in k:
pass
# Cross-Attention Q matrix is separate in NeMo-Megatron
elif 'EncDecAttention.q.weight' in k:
model_type, block_number, layer_number = _get_model_type_block_layer(k)
nemo_weights[
f'enc_dec_model.enc_dec_model.decoder.model.layers.{block_number}.inter_attention.query.weight'
] = v
print(
f'Mapped {k} to enc_dec_model.enc_dec_model.decoder.model.layers.{block_number}.inter_attention.query.weight'
)
# Cross-Attention Q matrix is separate in NeMo-Megatron
elif 'EncDecAttention.o.weight' in k:
model_type, block_number, layer_number = _get_model_type_block_layer(k)
nemo_weights[
f'enc_dec_model.enc_dec_model.decoder.model.layers.{block_number}.inter_attention.dense.weight'
] = v
print(
f'Mapped {k} to enc_dec_model.enc_dec_model.decoder.model.layers.{block_number}.inter_attention.dense.weight'
)
#################################################
            ################## FFN Layers ###################
#################################################
elif 'DenseReluDense.wi_0.weight' in k:
model_type, block_number, layer_number = _get_model_type_block_layer(k)
nemo_weights[
f'enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.mlp.dense_h_to_4h.weight'
] = v
print(
f'Mapped {k} to enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.mlp.dense_h_to_4h.weight'
)
elif 'DenseReluDense.wi_1.weight' in k:
model_type, block_number, layer_number = _get_model_type_block_layer(k)
nemo_weights[
f'enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.mlp.dense_h_to_4h_2.weight'
] = v
print(
f'Mapped {k} to enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.mlp.dense_h_to_4h_2.weight'
)
elif 'DenseReluDense.wo.weight' in k:
model_type, block_number, layer_number = _get_model_type_block_layer(k)
nemo_weights[
f'enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.mlp.dense_4h_to_h.weight'
] = v
print(
f'Mapped {k} to enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.mlp.dense_4h_to_h.weight'
)
#################################################
            ################## LayerNorm ####################
#################################################
elif 'layer_norm' in k:
if 'final' in k:
model_type = 'encoder' if k.startswith('encoder') else 'decoder'
nemo_weights[f'enc_dec_model.enc_dec_model.{model_type}.model.final_layernorm.weight'] = v
print(f'Mapped {k} to enc_dec_model.enc_dec_model.{model_type}.model.final_layernorm.weight')
else:
model_type, block_number, layer_number = _get_model_type_block_layer(k)
if layer_number == 0 and model_type == 'encoder':
nemo_weights[
f'enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.input_layernorm.weight'
] = v
print(
f'Mapped {k} to enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.input_layernorm.weight'
)
elif layer_number == 1 and model_type == 'encoder':
nemo_weights[
f'enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.post_attention_layernorm.weight'
] = v
print(
f'Mapped {k} to enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.post_attention_layernorm.weight'
)
elif layer_number == 0 and model_type == 'decoder':
nemo_weights[
f'enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.input_layernorm.weight'
] = v
print(
f'Mapped {k} to enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.input_layernorm.weight'
)
elif layer_number == 1 and model_type == 'decoder':
nemo_weights[
f'enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.post_attention_layernorm.weight'
] = v
print(
f'Mapped {k} to enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.post_attention_layernorm.weight'
)
elif layer_number == 2 and model_type == 'decoder':
nemo_weights[
f'enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.post_inter_attention_layernorm.weight'
] = v
print(
f'Mapped {k} to enc_dec_model.enc_dec_model.{model_type}.model.layers.{block_number}.post_inter_attention_layernorm.weight'
)
else:
raise ValueError("Unknown layer_norm key: {}".format(k))
else:
raise ValueError(f"Unknown key: {k}")
torch.save(nemo_weights, nemo_state_dict_path)
print("Saved weights to {}".format(nemo_state_dict_path))
return hf_model_config
def package_into_nemo_file(
state_dict_path, base_yaml_config, hf_model_config, nemo_file_path, hf_model_name, megatron_amp_O2
):
"""
Packages the state dict, config file and tokenizer into a `.nemo` file.
"""
trainer = Trainer(devices=1, strategy=NLPDDPStrategy(), accelerator="cpu", precision=32)
base_cfg = OmegaConf.load(base_yaml_config)
if hf_model_config.dense_act_fn == "silu":
act_fn = "swiglu"
elif hf_model_config.dense_act_fn == "gelu_new":
act_fn = "geglu"
# FLAN-T5 models have things configured this way.
elif hf_model_config.dense_act_fn == "gelu" and hf_model_config.is_gated_act:
act_fn = "geglu"
else:
raise ValueError(f"Unknown dense_act_fn: {hf_model_config.dense_act_fn}")
with open_dict(base_cfg):
base_cfg.encoder.num_layers = hf_model_config.num_layers
base_cfg.encoder.hidden_size = hf_model_config.d_model
base_cfg.encoder.ffn_hidden_size = hf_model_config.d_ff
base_cfg.encoder.kv_channels = hf_model_config.d_kv
base_cfg.encoder.num_attention_heads = hf_model_config.num_heads
base_cfg.encoder.activation = act_fn
base_cfg.encoder.relative_attention_num_buckets = hf_model_config.relative_attention_num_buckets
base_cfg.decoder.num_layers = hf_model_config.num_decoder_layers
base_cfg.decoder.hidden_size = hf_model_config.d_model
base_cfg.decoder.ffn_hidden_size = hf_model_config.d_ff
base_cfg.decoder.kv_channels = hf_model_config.d_kv
base_cfg.decoder.num_attention_heads = hf_model_config.num_heads
base_cfg.decoder.activation = act_fn
base_cfg.decoder.relative_attention_num_buckets = hf_model_config.relative_attention_num_buckets
base_cfg.megatron_amp_O2 = megatron_amp_O2
with tempfile.TemporaryDirectory() as tmp:
tokenizer = AutoTokenizer.from_pretrained(hf_model_name)
tokenizer_path = tokenizer.save_vocabulary(tmp)[0]
base_cfg.tokenizer.model = tokenizer_path
model = MegatronT5Model(base_cfg, trainer).to('cpu')
model._save_restore_connector = NLPSaveRestoreConnector()
state_dict = torch.load(state_dict_path)
if megatron_amp_O2:
new_state_dict = {}
for key in state_dict.keys():
new_key = key.replace('model.', 'model.module.', 1)
new_state_dict[new_key] = state_dict[key]
state_dict = new_state_dict
model.load_state_dict(state_dict)
model.save_to(nemo_file_path)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument(
"--hf_model_name",
type=str,
required=True,
        help="Valid Huggingface T5v1_1 model name, e.g. google/t5-v1_1-large or google/ul2, i.e. anything that can be loaded with T5ForConditionalGeneration.from_pretrained()",
)
parser.add_argument(
"--nemo_state_dict_path",
type=str,
required=True,
help="Path to write the intermediate nemo state dict file ex: /path/to/nemo_state_dict.pt",
)
parser.add_argument(
"--nemo_file_path",
type=str,
required=True,
help="Path to write the converted .nemo file ex: /path/to/t5_base_converted_to_nemo.nemo",
)
parser.add_argument(
"--base_yaml_config",
type=str,
default="hf_t5v1_1_base_config.yaml",
help="Path to a base yaml config that we edit based on the provided model.",
)
parser.add_argument(
"--megatron_amp_O2",
action="store_true",
help="Whether to store O2 weights. This may be useful for models like ul2 where only pre-trained half precision weights were released.",
)
args = parser.parse_args()
if not os.path.exists(args.base_yaml_config):
raise FileNotFoundError(f"Base yaml config file {args.base_yaml_config} does not exist.")
hf_model_config = convert_weights(args.hf_model_name, args.nemo_state_dict_path)
package_into_nemo_file(
state_dict_path=args.nemo_state_dict_path,
base_yaml_config=args.base_yaml_config,
hf_model_config=hf_model_config,
nemo_file_path=args.nemo_file_path,
hf_model_name=args.hf_model_name,
megatron_amp_O2=args.megatron_amp_O2,
)
| NeMo-main | scripts/nlp_language_modeling/hf_t5-v1_1_to_nemo.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In order to build a regexp tokenizer model use the following command.
The script will create:
.vocab file - with learned vocabulary
.model file - with provided regex
To build vocabulary from text files:
python -- scripts/nlp_language_modeling/build_regex_tokenizer.py \
--regex '\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9]' \
--input_type text \
--output_file regex_tokenizer -- \
data_file1.txt data_file2.txt
To build vocabulary from CSV files ("smiles" column):
python -- scripts/nlp_language_modeling/build_regex_tokenizer.py \
--regex '\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9]' \
--input_type csv \
--input_csv_col smiles \
--output_file regex_tokenizer -- \
data_file1.csv data_file2.csv
"""
import argparse
from nemo.collections.common.tokenizers.regex_tokenizer import RegExTokenizer
from nemo.utils import logging
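# Illustrative behaviour (assuming the tokenizer splits text by matching the provided regex):
# with the SMILES regex shown above, a string such as "CC(=O)O" is tokenized into
# ['C', 'C', '(', '=', 'O', ')', 'O'], and the learned .vocab file collects the unique tokens
# observed across all input files.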
if __name__ == "__main__":
parser = argparse.ArgumentParser(
        description="Builds vocabulary from regex tokenizer. Outputs .model (regular expression) and .vocab (learned vocabulary)",
)
parser.add_argument(
'input_files', type=str, nargs='+', help='Input text/csv file',
)
parser.add_argument(
'--regex', type=str, required=True, help='Regular expression to split text',
)
parser.add_argument(
'--output_file',
type=str,
required=True,
help='Output base file name. Two files will be created: .vocab (learned vocabulary), .model (the regex)',
)
parser.add_argument(
'--input_type',
type=str,
required=False,
choices=["text", "csv"],
default="text",
help='Type of input file: text, csv',
)
parser.add_argument(
'--input_csv_col', type=str, required=False, default="smiles", help='Column of data in CSV file',
)
args = parser.parse_args()
tokenizer = RegExTokenizer(regex=args.regex)
# build vocabulary from all files
for input_file in args.input_files:
if args.input_type == "csv":
tokenizer.build_vocab_from_csv(data_csv_file=input_file, col=args.input_csv_col)
elif args.input_type == "text":
tokenizer.build_vocab_from_text(data_text_file=input_file)
else:
raise ValueError(f"Unknown input_type = {args.input_type}")
# save model
if not args.output_file.endswith(".model"):
args.output_file += ".model"
logging.info("Adding .model to output file")
tokenizer.save_tokenizer(args.output_file)
| NeMo-main | scripts/nlp_language_modeling/build_regex_tokenizer.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processing data for megatron pretraining."""
import argparse
import glob
from nemo.collections.nlp.data.language_modeling.text_memmap_dataset import build_index_files
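# Example invocation (hypothetical paths), matching the arguments parsed in main() below:
#
#     python build_index_memmap_data.py "/data/shard_*.txt" --newline_int 10 --workers 8
#
# Each matched text file gets a companion index file so that it can later be memory-mapped
# by the text memmap dataset loader without re-scanning the raw text.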
def main():
parser = argparse.ArgumentParser(description="Builds index files for a list of text files",)
parser.add_argument(
'dataset_paths', type=str, nargs='+', help='Input text files (support glob)',
)
parser.add_argument(
        '--newline_int', type=int, default=10, help='Int value to split text (default: newline "\\n")',
)
parser.add_argument(
'--workers',
type=int,
default=None,
        help='Number of workers to parse files in parallel (default: max(cpu num // 2, 1))',
)
args = parser.parse_args()
# expand all dataset_paths
dataset_paths = []
for ds in args.dataset_paths:
dataset_paths.extend(glob.glob(ds))
# build index files in parallel
build_index_files(
dataset_paths=dataset_paths, newline_int=args.newline_int, workers=args.workers,
)
if __name__ == '__main__':
main()
| NeMo-main | scripts/nlp_language_modeling/build_index_memmap_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A script to convert the Mosaic MPT-7B checkpoint on HuggingFace to a Megatron GPTModel.
This script is hardcoded specifically for the MPT-7B pretrained model and is not
generalisable to other models.
This script loads and converts the model entirely on CPU for OOM safety, but there
is an option to put the model onto GPU before saving, which sets the map_location
to cuda for the restore_from call. You can do this by adding --cuda to the script call.
This script requires that you have downloaded the 2 .bin weight files for MPT-7B from
HuggingFace located here: https://huggingface.co/mosaicml/mpt-7b/tree/main
These files MUST have the following file names and be saved somewhere where this script
can read them:
pytorch_model-00001-of-00002.bin
pytorch_model-00002-of-00002.bin
This script will generate a Megatron model with TP=1 and PP=1. If you need different TP/PP
values, then after running this script, please use the script located below to set whatever
TP/PP values you want:
NeMo/examples/nlp/language_modeling/megatron_change_num_partitions.py
* Please note: when using the above script, you MUST also pass the `--megatron_legacy` flag.
Failure to do this will result in a corrupt model! *
This script also requires a baseline config file from which to override default parameters.
You can specify the location of this file using the -c argument. You can use any NeMo config
file that is appropriate, but in the default case, we highly recommend you use the following:
NeMo/examples/nlp/language_modeling/conf/megatron_gpt_config.yaml
Here is an example usage command:
```python
python scripts/nlp_language_modeling/convert_mpt_7b_hf_to_nemo.py -c /path/to/megatron_gpt_config.yaml -i /path/to/mpt_7b -o /path/to/save
```
"""
import argparse
import os
import pytorch_lightning as pl
import torch
import yaml
from omegaconf import OmegaConf
from nemo.collections.nlp.models.language_modeling.megatron import GPTModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.utils import logging
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
        '-i', '--input', required=True, type=str, help='path to the directory containing the two MPT-7B .bin weight files from HuggingFace'
)
parser.add_argument(
'-c', '--config', required=True, type=str, help='the path to the megatron_gpt_config.yaml file'
)
parser.add_argument(
'-o', '--output', required=False, default=None, type=str, help='path to dir where to store output .nemo file'
)
    parser.add_argument('--cuda', action='store_true', help='put the NeMo model onto GPU prior to saving')
args = parser.parse_args()
if not os.path.exists(args.input):
logging.critical(f'Input directory [ {args.input} ] does not exist or cannot be found. Aborting.')
exit(255)
if not os.path.exists(args.config):
logging.critical(f'Path to config file [ {args.config} ] does not exist or cannot be found. Aborting.')
exit(255)
with open(args.config, 'r', encoding='utf_8') as fr:
orig_cfg = yaml.safe_load(fr)
model_dict = orig_cfg['model']
if 'tokenizer' in model_dict:
del model_dict['tokenizer']
if 'data' in model_dict:
del model_dict['data']
override_model_dict = {
'micro_batch_size': 1,
'global_batch_size': 4,
'rampup_batch_size': None,
'tensor_model_parallel_size': 1,
'pipeline_model_parallel_size': 1,
'virtual_pipeline_model_parallel_size': None,
'megatron_amp_O2': True,
'transformer_engine': False,
'use_cpu_initialization': False,
'hidden_size': 4096,
'encoder_seq_length': 2048,
'max_position_embeddings': 2048,
'num_layers': 32,
'num_attention_heads': 32,
'ffn_hidden_size': 4 * 4096,
'precision': 'bf16',
'layernorm_epsilon': 1e-5,
'pre_process': True,
'post_process': True,
'num_tokentypes': 0,
'apply_query_key_layer_scaling': False,
'parallel_output': False,
'bias': False,
'bias_dropout_add_fusion': False,
'bias_activation_fusion': False,
'transformer_block_type': 'pre_ln',
'normalization': 'low_precision_layernorm',
'fp32_residual_connection': False,
'hidden_dropout': 0,
'attention_dropout': 0,
'ffn_dropout': 0,
'megatron_legacy': True,
'share_embeddings_and_output_weights': True,
'sequence_parallel': False,
'position_embedding_type': 'alibi',
'normalize_attention_scores': True,
'use_flash_attention': False,
'override_vocab_size': 50432,
}
tokeniser_dict = {
'library': 'huggingface',
'type': 'EleutherAI/gpt-neox-20b',
'use_fast': True,
}
trainer_dict = {
'devices': 1,
'num_nodes': 1,
'accelerator': 'gpu' if args.cuda else 'cpu',
'precision': 'bf16',
'logger': False, # logger provided by exp_manager
'enable_checkpointing': False,
'replace_sampler_ddp': False,
'max_epochs': -1, # PTL default. In practice, max_steps will be reached first.
'max_steps': 100000, # consumed_samples = global_step * micro_batch_size * data_parallel_size * accumulate_grad_batches
'log_every_n_steps': 10,
'val_check_interval': 100,
'limit_val_batches': 50,
'limit_test_batches': 500,
'accumulate_grad_batches': 1,
'gradient_clip_val': 1.0,
'benchmark': False,
'enable_model_summary': False,
}
model_dict.update(override_model_dict)
model_dict['tokenizer'] = tokeniser_dict
omega_cfg = OmegaConf.create(model_dict)
trainer = pl.Trainer(**trainer_dict)
model = MegatronGPTModel(omega_cfg, trainer)
model_keys = list(model.state_dict().keys())
model_dtypes = list(set([model.state_dict()[x].dtype for x in model_keys]))
if not (len(model_dtypes) == 1 and model_dtypes[0] is torch.bfloat16):
model = model.bfloat16()
if args.cuda:
model = model.cuda()
mpt_1 = torch.load(os.path.join(args.input, 'pytorch_model-00001-of-00002.bin'), map_location="cpu")
mpt_2 = torch.load(os.path.join(args.input, 'pytorch_model-00002-of-00002.bin'), map_location="cpu")
mpt_dict = {**mpt_1, **mpt_2}
del mpt_1, mpt_2
def convert_state_dict(state_dict, amp=False):
def get_new_key(old_key):
if old_key == 'transformer.wte.weight':
return 'language_model.embedding.word_embeddings.weight'
elif old_key == 'transformer.norm_f.weight':
return 'language_model.encoder.final_layernorm.weight'
else:
p1 = old_key.replace('transformer.blocks.', 'language_model.encoder.layers.')
p2 = p1.replace('norm_1.weight', 'input_layernorm.weight')
p3 = p2.replace('attn.Wqkv.weight', 'self_attention.query_key_value.weight')
p4 = p3.replace('attn.out_proj.weight', 'self_attention.dense.weight')
p5 = p4.replace('norm_2.weight', 'post_attention_layernorm.weight')
p6 = p5.replace('ffn.up_proj.weight', 'mlp.dense_h_to_4h.weight')
p7 = p6.replace('ffn.down_proj.weight', 'mlp.dense_4h_to_h.weight')
return p7
new_dict = {}
for old_key, val in state_dict.items():
new_key = get_new_key(old_key)
if amp:
new_key = 'module.' + new_key
new_dict[new_key] = val
return new_dict
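    # Illustrative mapping for a single layer (layer index 0 chosen arbitrarily):
    #   'transformer.blocks.0.attn.Wqkv.weight'
    #       -> 'language_model.encoder.layers.0.self_attention.query_key_value.weight'
    # and, because megatron_amp_O2 is True in override_model_dict above, the final key is
    # additionally prefixed with 'module.'.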
convert_dict = convert_state_dict(mpt_dict, amp=model_dict['megatron_amp_O2'])
if model_dict['megatron_amp_O2']:
missing_keys, unexpected_keys = model.model.load_state_dict(convert_dict, strict=True)
else:
missing_keys, unexpected_keys = super(GPTModel, model.model).load_state_dict(convert_dict, strict=True)
if len(missing_keys) > 0:
logging.critical('Missing keys were detected during the load, something has gone wrong. Aborting.')
logging.critical(f'Missing keys: \n{missing_keys}')
exit(255)
if len(unexpected_keys) > 0:
        logging.warning('Unexpected keys were detected, which should not happen. Please investigate.')
logging.warning(f'Unexpected keys: \n{unexpected_keys}')
if args.output is None:
args.output = os.path.dirname(os.path.abspath(__file__))
model.save_to(os.path.join(args.output, 'megatron_mpt_7b_base_tp1_pp1.nemo'))
| NeMo-main | scripts/nlp_language_modeling/convert_mpt_7b_hf_to_nemo.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is the script to build the Faiss retrieval index for KNN lookup.
For more information about Faiss, check https://faiss.ai/
It requires the retrieval DB text data to be converted into `bin` and `idx` files by `preprocess_data_for_megatron.py` script.
Here is an example to using it:
```python
python scripts/nlp_language_modeling/build_retrieval_index.py \
--input_file=PATH_TO_DB_FILE \
--tokenizer-library=sentencepiece \
--tokenizer-model=tokenizer.model \
--train_index_size=128000 \
--train_chunk_size=51200 \
--devices=0,1,2,3 \
--batch_size=1280 \
--output_file=index.sav
```
It creates an index.sav file which can be loaded by Faiss. It can look up the KNN chunk ids of the
DB dataset given the input embedding vector.
To use it in multiple stages, follow the example shown in
https://github.com/facebookresearch/faiss/blob/main/demos/demo_ondisk_ivf.py
stage-0: train on the dataset, example,
```python
python scripts/nlp_language_modeling/build_retrieval_index.py \
--input_file=PATH_TO_DB_FILE \
--tokenizer-library=sentencepiece \
--tokenizer-model=tokenizer.model \
--train_index_size=128000 \
--train_chunk_size=51200 \
--workers=2 \
--devices=0,1,2,3 \
--percent=0.9 \
--stage=0 \
--output_file=index_learned.save
```
stage-1: build partial indexes, each containing a fraction of the dataset. This can be done in parallel on several machines. example,
```python
python scripts/nlp_language_modeling/build_retrieval_index.py \
--input_file=PATH_TO_DB_FILE \
--tokenizer-library=sentencepiece \
--tokenizer-model=tokenizer.model \
--train_index_size=128000 \
--train_chunk_size=51200 \
--workers=2 \
--devices=0,1,2,3 \
--percent=0.9 \
--stage=1 \
--shard_id=0 \
--total_shards=10 \
--learned_index=index_learned.save \
--output_file=index_shard2.save
```
stage-2: merge the shard indexes into one that is written directly to disk (it need not fit in RAM), example
```python
python scripts/nlp_language_modeling/build_retrieval_index.py \
--stage=2 \
--learned_index=index_learned.save \
--shard_index_input=index_shard \
--output_file=index_final.save
```
"""
import argparse
import multiprocessing
import pathlib
import sys
import time
from multiprocessing import Pool
from typing import Union
import faiss
import numpy as np
import torch
from faiss.contrib.ondisk import merge_ondisk
from sentence_transformers import SentenceTransformer
from nemo.collections.nlp.data.language_modeling.megatron.indexed_retrieval_dataset import MMapRetrievalIndexedDataset
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.utils import logging
QUEUE_SIZE = 30
queue = multiprocessing.Queue(QUEUE_SIZE)
emb_queue = multiprocessing.Queue(QUEUE_SIZE)
def get_tokenizer(args):
tokenizer = get_nmt_tokenizer(
library=args.tokenizer_library,
model_name=args.tokenizer_type,
tokenizer_model=args.tokenizer_model,
vocab_file=args.vocab_file,
merges_file=args.merge_file,
delimiter=args.delimiter,
)
if not hasattr(tokenizer, "pad_id"):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
elif hasattr(tokenizer, "pad_id") and (tokenizer.pad_id is None or tokenizer.pad_id < 0):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
return tokenizer
def process_sentence_chunks(
ds: MMapRetrievalIndexedDataset,
tokenizer,
chunk_size: int,
warm_up_size: int,
percent: float,
stage: Union[int, None],
workers: int,
shard_id: int,
total_shards: int,
):
"""
    This function takes chunked tokens from the retrieval dataset and maps them back to text.
    In stage None or stage 0, it loads `warm_up_size` randomly sampled chunks, which are used for training the Faiss index structure.
    In other stages, in addition to the warm-up chunks, it also sends the chunked text so that their embeddings can be added to the index.
    In stage 1, it divides the total work into `total_shards` and processes only the shard given by `shard_id`. If the stage is None, it processes all the chunks.
"""
total_chunks = ds.chunks
num_docs = len(ds._index.sizes)
assert len(ds._index.sizes) == len(ds._index._chunk_id_start)
if percent < 1.0:
use_num_docs = int(num_docs * percent)
logging.info(f"Use {use_num_docs} out of {num_docs} docs to build index")
total_chunks = ds._index._chunk_id_start[min(use_num_docs, num_docs - 1)]
logging.info(f"{total_chunks} chunks are used to build the index")
start = 0
if stage is None or stage == 0:
beg = time.time()
# only prepare the warmup batch for stage None and stage 0
assert warm_up_size < total_chunks
warm_chunk_ids = np.random.randint(0, total_chunks, warm_up_size)
warm_up_slices = []
for warm_up_id in warm_chunk_ids:
warm_up_slices.append(ds.get_chunk(warm_up_id, force_no_cont_ids=True))
with Pool(workers) as p:
sentences = p.map(tokenizer.ids_to_text, warm_up_slices)
end = time.time()
logging.info(f"token-to-text {total_chunks} chunks takes {end-beg}")
queue.put((sentences, None))
if stage == 0:
            # stage 0 only needs the warm-up batch, so signal the end of the queue and stop
queue.put((None, None))
return
elif stage == 1:
shard_size = total_chunks // total_shards
splits = list(range(0, total_chunks, shard_size))
if shard_id < total_shards - 1:
start = splits[shard_id]
total_chunks = splits[shard_id + 1]
elif shard_id == total_shards - 1:
start = splits[shard_id]
total_chunks = total_chunks
else:
            raise ValueError(f'shard_id {shard_id} is out of range for total_shards {total_shards}')
logging.info(f'shard_id {shard_id}, create index from chunk {start} to {total_chunks}')
threshold = 0.1
with Pool(workers) as p:
while start < total_chunks:
if start / total_chunks > threshold:
logging.info(f"sentence processing {start / total_chunks} is done")
threshold += 0.1
slice_id = (start, min(start + chunk_size, total_chunks))
beg = time.time()
id_slices = ds.get_chunk(slice(*slice_id), force_no_cont_ids=True)
end = time.time()
logging.info(f"load {chunk_size} chunks takes {end-beg}")
start = min(start + chunk_size, total_chunks)
sentences = p.map(tokenizer.ids_to_text, id_slices)
end2 = time.time()
logging.info(f"tokenize {chunk_size} chunks takes {end2-end}")
queue.put((sentences, slice_id))
queue.put((None, None))
def get_sentence_chunks():
return queue.get()
def calculate_embedding(pool, batch_size):
while True:
sentences, slice_id = get_sentence_chunks()
if sentences is None:
break
beg = time.time()
emb = model.encode_multi_process(sentences=sentences, pool=pool, batch_size=batch_size)
end = time.time()
logging.info(f"one embedding {len(emb)} batch size takes {end-beg}")
emb_queue.put((emb, slice_id))
emb_queue.put((None, None))
def get_emb():
return emb_queue.get()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="build Faiss index",)
parser.add_argument(
'--input_file', type=str, required=False, help='Input file',
)
parser.add_argument(
        '--train_index_size', type=int, required=False, help='The number of sentences used to train the index',
)
parser.add_argument(
        '--train_chunk_size', type=int, default=10000, help='Number of chunks converted to text and embedded at a time when adding to the index',
)
parser.add_argument(
'--sentence_transformer_model',
type=str,
default='bert-base-nli-mean-tokens',
help='sentence transformer to load',
)
parser.add_argument(
'--output_file', type=str, required=True, help='Output Faiss index file',
)
parser.add_argument(
'--percent', type=float, default=1.0, help='percent of documents used for building the search index',
)
parser.add_argument(
'--devices', type=str, default=None, help='delimited list input with cuda devices. Specify like 0,1,2'
)
parser.add_argument(
"--batch_size", type=int, default=4000, help="Batch size for encoding. Use max according to GPU MEM"
)
parser.add_argument("--subquantizers", type=int, default=8, help="Quantizer code size")
group = parser.add_argument_group(title='tokenizer')
group.add_argument(
'--tokenizer-library',
type=str,
required=False,
choices=['yttm', 'sentencepiece', 'megatron', 'huggingface', 'tabular'],
help='What tokenizer library to use.',
)
group.add_argument(
'--tokenizer-type', type=str, default=None, help='What type of tokenizer to use.',
)
group.add_argument(
'--tokenizer-model', type=str, default=None, help='Path to tokenizer model.',
)
group.add_argument('--no_pq', action='store_true', help="don't use the Product Quantizer")
group.add_argument('--vocab-file', type=str, default=None, help='Path to the vocab file')
group.add_argument('--workers', type=int, default=None, help='number of workers to run tokenizer')
group.add_argument(
'--stage',
type=int,
default=None,
help='used for building the large index in multiple stages',
choices=[0, 1, 2],
)
group.add_argument('--faiss_factory', type=str, default=None, help="faiss index factory str")
group.add_argument('--faiss_factory_metric', type=str, default='IP', help="faiss index factory metric, l2 or IP")
group.add_argument('--shard_id', type=int, default=None, help='run the job to create the shard_id index')
group.add_argument('--total_shards', type=int, default=None, help='total number of faiss index shards')
group.add_argument(
'--learned_index', type=str, default=None, help='the learned faiss index file, which is prepared at stage 0'
)
group.add_argument(
'--shard_index_input', type=str, default=None, help='the shard faiss index files, which are created at stage 1'
)
group.add_argument('--merge-file', type=str, default=None, help='Path to the BPE merge file (if necessary).')
group.add_argument('--delimiter', type=str, default=None, help='delimiter used for tabular tokenizer')
args = parser.parse_args()
has_gpu = torch.cuda.is_available() and hasattr(faiss, "index_gpu_to_cpu")
if not hasattr(faiss, "index_gpu_to_cpu"):
logging.warning(
"faiss doesn't support gpu index. Please check https://github.com/facebookresearch/faiss/blob/main/INSTALL.md"
)
if args.stage == 2:
# combine shard index files into one
logging.info('loading trained index')
# construct the output index
index = faiss.read_index(args.learned_index)
input_file = pathlib.Path(args.shard_index_input)
path = input_file.parent
fname = input_file.name
all_files = [str(i) for i in pathlib.Path(path).glob(fname + '*')]
merge_ondisk(index, all_files, str(path / 'merged.index'))
faiss.write_index(index, args.output_file)
logging.info(f'Write to {args.output_file}, Size of Index : {index.ntotal}')
# consolidate it as one index
if args.devices is None or not torch.cuda.is_available():
device_list = None
else:
device_list = ['cuda:' + str(device) for device in args.devices.split(',')]
index = faiss.read_index(args.output_file)
co = faiss.GpuMultipleClonerOptions()
co.useFloat16 = True
co.usePrecomputed = False
co.shard = True
index = faiss.index_cpu_to_all_gpus(index, co, ngpu=len(device_list))
index = faiss.index_gpu_to_cpu(index)
faiss.write_index(index, args.output_file)
sys.exit(0)
model = SentenceTransformer(args.sentence_transformer_model)
tokenizer = get_tokenizer(args)
ds = MMapRetrievalIndexedDataset(args.input_file, skip_warmup=True)
# make sure the dataset is padded as retrieval database
assert ds._index.retrieval_db
if args.stage is None or args.stage == 0:
if ds.chunks < args.train_index_size:
raise ValueError(
f"the train index size {args.train_index_size} is larger than the total number of chunks {ds.chunks} in the dataset"
)
# Where nlist is 4*sqrt(N) to 16*sqrt(N), with N the size of the dataset.
# This just clusters the vectors with k-means. You will need between 30*K and 256*K vectors for training (the more the better).
total_chunks = ds.chunks
if args.percent < 1.0:
num_docs = len(ds._index.sizes)
use_num_docs = int(num_docs * args.percent)
total_chunks = ds._index._chunk_id_start[min(use_num_docs, num_docs - 1)]
nlist = int(4 * np.sqrt(total_chunks))
assert 30 * nlist < args.train_index_size, f"need more training samples, at least {30 * nlist}"
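        # Rough illustration with made-up numbers: for a retrieval DB of 100M chunks,
        # nlist = 4 * sqrt(1e8) = 40,000, so train_index_size must be at least
        # 30 * 40,000 = 1,200,000 chunks for the k-means training to be reliable.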
process = multiprocessing.Process(
target=process_sentence_chunks,
args=(
ds,
tokenizer,
args.train_chunk_size,
args.train_index_size,
args.percent,
args.stage,
args.workers,
args.shard_id,
args.total_shards,
),
)
process.start()
if args.devices is None or not torch.cuda.is_available():
device_list = None
else:
device_list = ['cuda:' + str(device) for device in args.devices.split(',')]
pool = model.start_multi_process_pool(device_list)
emb_process = multiprocessing.Process(target=calculate_embedding, args=(pool, args.batch_size))
emb_process.start()
# get first batch of sentences to build up the index
# sentences = get_sentence_chunks()
if args.stage is None or args.stage == 0:
emb, slice_id = get_emb()
# initialize the Faiss index
# m is number of subquantizers. So vector of size D is broken into m sub-vectors of size D/m
m = args.subquantizers
k = 4 # num_nearest neighbors to get
quantizer = faiss.IndexFlatIP(emb.shape[1])
# 8 specifies that each sub-vector is encoded as 8 bits
if args.no_pq:
index = faiss.IndexIVFFlat(quantizer, emb.shape[1], nlist)
elif args.faiss_factory is not None:
if args.faiss_factory_metric == 'IP':
metric = faiss.METRIC_INNER_PRODUCT
else:
metric = faiss.METRIC_L2
index = faiss.index_factory(emb.shape[1], args.faiss_factory, metric)
else:
index = faiss.IndexIVFPQ(quantizer, emb.shape[1], nlist, m, 8)
if has_gpu:
co = faiss.GpuMultipleClonerOptions()
co.useFloat16 = True
co.usePrecomputed = False
co.shard = True
index = faiss.index_cpu_to_all_gpus(index, co, ngpu=len(device_list))
elif args.stage == 1:
# stage 1, need to load the index from file
index = faiss.read_index(args.learned_index)
if has_gpu:
co = faiss.GpuMultipleClonerOptions()
co.useFloat16 = True
co.usePrecomputed = False
co.shard = True
index = faiss.index_cpu_to_all_gpus(index, co, ngpu=len(device_list))
else:
raise ValueError(f'should not come here')
if args.stage is not None:
logging.info(f'build index at stage {args.stage}')
if args.stage is None or args.stage == 0:
# train the index
beg = time.time()
index.train(emb)
end = time.time()
logging.info(f'Trained Index takes {end-beg}')
# just need to have the learned index
if has_gpu:
index = faiss.index_gpu_to_cpu(index)
faiss.write_index(index, args.output_file)
model.stop_multi_process_pool(pool)
process.join()
emb_process.join()
sys.exit(0)
while True:
emb, slice_id = get_emb()
if emb is None:
break
beg = time.time()
index.add_with_ids(emb, np.arange(slice_id[0], slice_id[1]).astype(np.int64))
end = time.time()
logging.info(f'add index {slice_id[0]} - {slice_id[1]} takes {end-beg}')
model.stop_multi_process_pool(pool)
process.join()
emb_process.join()
logging.info('Writing Index file')
if has_gpu:
index = faiss.index_gpu_to_cpu(index)
faiss.write_index(index, args.output_file)
logging.info(f'Size of Index : {index.ntotal}')
| NeMo-main | scripts/nlp_language_modeling/build_retrieval_index.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to extract the final p-tuning representations used for inference.
Here is an example usage command:
```python
python scripts/nlp_language_modeling/extract_inference_only_weights.py p_tuning.nemo
```
"""
import argparse
import os
import torch
parser = argparse.ArgumentParser()
parser.add_argument("nemo", help="path to nemo file", type=str)
parser.add_argument("taskname", help="taskname for the nemo model", type=str, default="taskname", required=False)
args = parser.parse_args()
os.system(f"tar xvf {args.nemo}")
for p in '', 'mp_rank_00/', 'tp_rank_00_pp_rank_000/':
try:
a = torch.load(f'{p}model_weights.ckpt')
break
except FileNotFoundError:
pass
inf_weights = a['prompt_table'][f'prompt_table.{args.taskname}.prompt_embeddings.weight']
torch.save(inf_weights, "p_tuned.inf_only.ckpt")
| NeMo-main | scripts/nlp_language_modeling/extract_inference_only_weights.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processing data for megatron pretraining.
It can be used to convert text data into an indexed dataset for BERT, GPT, T5, RETRO models, etc.
Example script to preprocess the loose JSON file for BERT model
```python
python scripts/nlp_language_modeling/preprocess_data_for_megatron.py \
--input=PATH_TO_THE_RETRIEVAL_DB_LOOSE_JSON_FILE \
--json-keys=text \
--vocab-file=PATH_TO_VOCAB_FILE \
--dataset-impl=mmap \
--output-prefix=YOUR_DATA_PREFIX \
--tokenizer-library=megatron \
--tokenizer-type=BertWordPieceCase \
--split-sentences \
--workers=48
```
Example script to preprocess the loose JSON file for GPT model
```python
python scripts/nlp_language_modeling/preprocess_data_for_megatron.py \
--input=PATH_TO_THE_RETRIEVAL_DB_LOOSE_JSON_FILE \
--json-keys=text \
--tokenizer-library=megatron \
--tokenizer-type=GPT2BPETokenizer \
--dataset-impl=mmap \
--merge-file=YOUR_MERGE_FILE \
--vocab-file=YOUR_VOCAB_FILE \
--output-prefix=YOUR_DATA_PREFIX \
--append-eod \
--workers=48
```
Example script to preprocess the loose JSON file for retrieval DB Dataset
```python
python scripts/nlp_language_modeling/preprocess_data_for_megatron.py \
--input=PATH_TO_THE_RETRIEVAL_DB_LOOSE_JSON_FILE \
--json-keys=text \
--tokenizer-library=sentencepiece \
--dataset-impl=retmmap \
--tokenizer-model=tokenizer.model \
--output-prefix=retro_db \
--need-pad-id \
--append-eod \
--retrieval-db \
--chunk_size=64 \
--workers=64
```
Example script to preprocess the JSON file for retrieval training dataset
```python
python scripts/nlp_language_modeling/preprocess_data_for_megatron.py \
--input=PATH_TO_THE_RETRIEVAL_TRAIN_VAL_TEST_LOOSE_JSON_FILE \
--json-keys=text \
--tokenizer-library=sentencepiece \
--dataset-impl=retmmap \
--tokenizer-model=tokenizer.model \
--output-prefix=retro_data \
--need-pad-id \
--append-eod \
--chunk_size=64 \
--workers=64
```
"""
import argparse
import gzip
import json
import multiprocessing
import os
import pathlib
import sys
import time
import ftfy
import torch
from nemo.collections.nlp.data.language_modeling.megatron import indexed_dataset
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
try:
import nltk
nltk_available = True
except ImportError:
nltk_available = False
# https://stackoverflow.com/questions/33139531/preserve-empty-lines-with-nltks-punkt-tokenizer
class CustomLanguageVars(nltk.tokenize.punkt.PunktLanguageVars):
_period_context_fmt = r"""
\S* # some word material
%(SentEndChars)s # a potential sentence ending
\s* # <-- THIS is what I changed
(?=(?P<after_tok>
%(NonWord)s # either other punctuation
|
(?P<next_tok>\S+) # <-- Normally you would have \s+ here
))"""
class IdentitySplitter(object):
def tokenize(self, *text):
return text
def get_tokenizer(args):
tokenizer = get_nmt_tokenizer(
library=args.tokenizer_library,
model_name=args.tokenizer_type,
tokenizer_model=args.tokenizer_model,
vocab_file=args.vocab_file,
merges_file=args.merge_file,
delimiter=args.delimiter,
)
if args.need_pad_id:
if not hasattr(tokenizer, "pad_id"):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
elif hasattr(tokenizer, "pad_id") and (tokenizer.pad_id is None or tokenizer.pad_id < 0):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
return tokenizer
class Encoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
# Use Encoder class as a container for global data
Encoder.tokenizer = get_tokenizer(self.args)
if self.args.split_sentences:
if not nltk_available:
print("NLTK is not available to split sentences.")
exit()
splitter = nltk.load("tokenizers/punkt/english.pickle")
if self.args.keep_newlines:
# this prevents punkt from eating newlines after sentences
Encoder.splitter = nltk.tokenize.punkt.PunktSentenceTokenizer(
train_text=splitter._params, lang_vars=CustomLanguageVars()
)
else:
Encoder.splitter = splitter
else:
Encoder.splitter = IdentitySplitter()
def encode(self, json_line):
if not self.args.text_file:
data = json.loads(json_line)
ids = {}
for key in self.args.json_keys:
text = data[key]
if self.args.apply_ftfy:
text = ftfy.fix_text(text)
doc_ids = []
for sentence in Encoder.splitter.tokenize(text):
sentence_ids = Encoder.tokenizer.text_to_ids(sentence)
if len(sentence_ids) > 0:
doc_ids.append(sentence_ids)
if len(doc_ids) > 0 and self.args.append_eod:
doc_ids[-1].append(Encoder.tokenizer.eos_id)
ids[key] = doc_ids
else:
data = json_line
ids = {}
text = data.strip()
if self.args.apply_ftfy:
text = ftfy.fix_text(text)
doc_ids = []
for sentence in Encoder.splitter.tokenize(text):
sentence_ids = Encoder.tokenizer.text_to_ids(sentence)
if len(sentence_ids) > 0:
doc_ids.append(sentence_ids)
if len(doc_ids) > 0 and self.args.append_eod:
doc_ids[-1].append(Encoder.tokenizer.eos_id)
ids['text'] = doc_ids
return ids, len(json_line)
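    # Illustration of the returned structure (token ids are made up): for a json line
    # {"text": "Hello there. Bye."} with --split-sentences, `ids` would look like
    # {'text': [[17, 48, 9], [52, 9]]} -- one list of token ids per sentence, with the
    # tokenizer's eos id appended to the last sentence when --append-eod is set.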
def get_args():
parser = argparse.ArgumentParser()
group = parser.add_argument_group(title='input data')
group.add_argument(
'--input',
type=str,
required=True,
help='Path to the input json or json.gz file. If preprocessing an entire folder, set the --preproc-folder flag and provide the path to the folder in this arg.',
)
group.add_argument(
        '--json-keys', nargs='+', default=['text'], help='space-separated list of keys to extract from json'
)
group.add_argument('--split-sentences', action='store_true', help='Split documents into sentences.')
group.add_argument('--keep-newlines', action='store_true', help='Keep newlines between sentences when splitting.')
group.add_argument('--text_file', action='store_true', help='Use text file instead of json.')
group = parser.add_argument_group(title='tokenizer')
group.add_argument(
'--tokenizer-library',
type=str,
required=True,
choices=['yttm', 'sentencepiece', 'megatron', 'huggingface', 'tabular'],
help='What tokenizer library to use.',
)
group.add_argument(
'--tokenizer-type', type=str, default=None, help='What type of tokenizer to use.',
)
group.add_argument(
'--tokenizer-model', type=str, default=None, help='Path to tokenizer model.',
)
group.add_argument('--vocab-file', type=str, default=None, help='Path to the vocab file')
group.add_argument('--files-filter', type=str, default='**/*.json*', help='files filter str')
group.add_argument('--merge-file', type=str, default=None, help='Path to the BPE merge file (if necessary).')
group.add_argument('--delimiter', type=str, default=None, help='delimiter used for tabular tokenizer')
group.add_argument('--append-eod', action='store_true', help='Append an <eod> token to the end of a document.')
group.add_argument('--retrieval-db', action='store_true', help='Dataset used for retrieval.')
group.add_argument('--need-pad-id', action='store_true', help='Whether we need the pad id for the tokenizer')
group = parser.add_argument_group(title='output data')
group.add_argument('--output-prefix', type=str, required=True, help='Path to binary output file without suffix')
group.add_argument('--dataset-impl', type=str, default='mmap', choices=['lazy', 'cached', 'mmap', 'retmmap'])
group = parser.add_argument_group(title='runtime')
group.add_argument('--workers', type=int, default=1, help='Number of worker processes to launch')
group.add_argument('--chunk_size', type=int, default=64, help='chunk size used for retrieval')
group.add_argument(
'--chunk_stride_size', type=int, default=64, help='the stride size for neighbor chunks used for retrieval'
)
group.add_argument('--log-interval', type=int, default=100, help='Interval between progress updates')
group.add_argument(
'--preproc-folder',
action='store_true',
help='If set, will preprocess all .json or .json.gz files into a single .bin and .idx file. Folder path provided via the --input arg',
)
group.add_argument('--apply-ftfy', action='store_true', help='If set, will apply ftfy to the input text')
args = parser.parse_args()
args.keep_empty = False
if args.tokenizer_type is not None and args.tokenizer_type.lower().startswith('bert'):
if not args.split_sentences:
print("Bert tokenizer detected, are you sure you don't want to split sentences?")
# some default/dummy values for the tokenizer
args.rank = 0
args.make_vocab_size_divisible_by = 128
args.tensor_model_parallel_size = 1
args.vocab_extra_ids = 0
# TODO: There are dependencies b/w libraries and model files / tokenizer type strings to check.
assert args.tokenizer_type is not None or args.tokenizer_model is not None
return args
def main():
args = get_args()
startup_start = time.time()
if args.preproc_folder:
print('Searching folder for .json or .json.gz files...')
assert os.path.exists(args.input), f'Folder does not exist: {args.input}'
json_files = (str(f) for f in pathlib.Path(args.input).glob(args.files_filter))
json_files = [f for f in json_files if f.endswith('.json') or f.endswith('.json.gz')]
if len(json_files) == 0:
raise FileNotFoundError('No .json or .json.gz files found in folder.')
else:
print(f'Found {len(json_files)} .json or .json.gz files.')
else:
assert os.path.exists(args.input), f'File does not exist: {args.input}'
json_files = [args.input]
if nltk_available and args.split_sentences:
nltk.download("punkt", quiet=True)
encoder = Encoder(args)
if args.dataset_impl == 'retmmap':
assert args.need_pad_id, "retmmap need --need_pad_id flag"
tokenizer = get_tokenizer(args)
level = "document"
if args.split_sentences:
level = "sentence"
print(f"Vocab size: {tokenizer.vocab_size}")
print(f"Output prefix: {args.output_prefix}")
output_bin_files = {}
output_idx_files = {}
builders = {}
for key in args.json_keys:
output_bin_files[key] = "{}_{}_{}.bin".format(args.output_prefix, key, level)
output_idx_files[key] = "{}_{}_{}.idx".format(args.output_prefix, key, level)
builders[key] = indexed_dataset.make_builder(
output_bin_files[key],
impl=args.dataset_impl,
chunk_size=args.chunk_size,
pad_id=tokenizer.pad_id if hasattr(tokenizer, "pad_id") else 0,
retrieval_db=args.retrieval_db,
vocab_size=tokenizer.vocab_size,
stride=args.chunk_stride_size,
)
startup_end = time.time()
proc_start = time.time()
total_bytes_processed = 0
print("Time to startup:", startup_end - startup_start)
pool = multiprocessing.Pool(args.workers, initializer=encoder.initializer)
for idx, json_file in enumerate(json_files):
print(f'Processing file {json_file} {idx + 1}/{len(json_files)}')
if json_file.endswith('.gz'):
fin = gzip.open(json_file, 'r')
else:
            fin = open(json_file, 'r', encoding='utf-8')
encoded_docs = pool.imap(encoder.encode, fin, 25)
for i, (doc, bytes_processed) in enumerate(encoded_docs, start=1):
total_bytes_processed += bytes_processed
for key, sentences in doc.items():
if len(sentences) == 0:
continue
for sentence in sentences:
builders[key].add_item(torch.IntTensor(sentence))
builders[key].end_document()
if i % args.log_interval == 0:
current = time.time()
elapsed = current - proc_start
mbs = total_bytes_processed / elapsed / 1024 / 1024
print(f"Processed {i} documents", f"({i/elapsed} docs/s, {mbs} MB/s).", file=sys.stderr)
for key in args.json_keys:
builders[key].finalize(output_idx_files[key])
if __name__ == '__main__':
main()
| NeMo-main | scripts/nlp_language_modeling/preprocess_data_for_megatron.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import (
MegatronGPTPromptLearningModel,
)
from nemo.collections.nlp.models.language_modeling.megatron_t5_prompt_learning_model import (
MegatronT5PromptLearningModel,
)
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from nemo.core.config import hydra_runner
from nemo.utils.app_state import AppState
from nemo.utils.model_utils import inject_model_parallel_rank
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
"""
This is the script to convert a p-tuning PTL checkpoint file to a .nemo file for evaluation.
Example usage:
Assume the model has TP=2, PP=2 in the following use cases.
```
python scripts/nlp_language_modeling/convert_prompt_learning_ckpt_to_nemo.py \
trainer.devices=4 \
trainer.num_nodes=1 \
trainer.precision=bf16 \
tensor_model_parallel_size=2 \
pipeline_model_parallel_size=2 \
checkpoint_dir=/results/ptune_squad/checkpoints \
checkpoint_name='megatron_gpt_prompt_tune--val_loss=3.401-step=500.ckpt' \
hparams_file=/results/ptune_squad/version_1/hparams.yaml
```
Note: the hparams file can be found under the PyTorch Lightning experiment result directory. The filename is `hparams.yaml`.
"""
@hydra_runner(config_path="conf", config_name="prompt_learning_ckpt_to_nemo")
def main(cfg) -> None:
# trainer required for restoring model parallel models
trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer)
assert (
cfg.trainer.devices * cfg.trainer.num_nodes
== cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
), "devices * num_nodes should equal tensor_model_parallel_size * pipeline_model_parallel_size"
if cfg.checkpoint_dir:
app_state = AppState()
if cfg.tensor_model_parallel_size > 1 or cfg.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=cfg.tensor_model_parallel_size,
pipeline_model_parallel_size_=cfg.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=cfg.pipeline_model_parallel_split_rank,
)
app_state.tensor_model_parallel_size = cfg.tensor_model_parallel_size
app_state.pipeline_model_parallel_size = cfg.pipeline_model_parallel_size
checkpoint_path = inject_model_parallel_rank(os.path.join(cfg.checkpoint_dir, cfg.checkpoint_name))
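        # inject_model_parallel_rank rewrites the checkpoint path so that each rank loads its own
        # shard, e.g. .../checkpoints/<name>.ckpt may become .../checkpoints/mp_rank_00/<name>.ckpt
        # (illustrative; the exact subfolder depends on the TP/PP layout).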
# check model type
if cfg.model_type.lower() == 't5':
model: MegatronT5PromptLearningModel = MegatronT5PromptLearningModel.load_from_checkpoint(
checkpoint_path, hparams_file=cfg.hparams_file, trainer=trainer
)
elif cfg.model_type.lower() == 'gpt':
model: MegatronGPTPromptLearningModel = MegatronGPTPromptLearningModel.load_from_checkpoint(
checkpoint_path, hparams_file=cfg.hparams_file, trainer=trainer
)
else:
raise ValueError("Model Type Not Supported!")
else:
raise ValueError("need at least a nemo file or checkpoint dir")
# check whether the DDP is initialized
if parallel_state.is_unitialized():
def dummy():
return
if trainer.strategy.launcher is not None:
trainer.strategy.launcher.launch(dummy, trainer=trainer)
trainer.strategy.setup_environment()
model = model.cuda()
model.on_train_end()
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | scripts/nlp_language_modeling/convert_prompt_learning_ckpt_to_nemo.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is the script to build the KNN index map from the training dataset to the retrieval dataset.
For example, it maps chunk_id i from the training dataset to its K nearest-neighbor chunk ids in the retrieval dataset.
It requires the training text data to be converted into `bin` and `idx` files by `preprocess_data_for_megatron.py` script.
It also requires the Faiss Index file for the Retrieval dataset built by `build_retrieval_index.py` script.
Here is an example to using it:
```python
python scripts/nlp_language_modeling/build_knn_map_index.py \
--input_file=PATH_TO_INPUT_TRAINING_DATA \
--tokenizer-library=sentencepiece \
--tokenizer-model=tokenizer.model \
--process_chunk_size=51200 \
--K_neighbors=16 \
--faiss_index=PATH_TO_FAISS_INDEX_FILE \
--devices=0,1,2,3 \
--batch_size=1280 \
--remove_duplicate \
--output_file=knn_map.idx
```
Use `--remove_duplicate` flag if the data and retrieval dataset are the same. It will remove the neighbors from the same document.
It creates a knn_map.idx KNNIndex file.
During training of RETRO model, it can look up the KNN chunk ids of the
DB dataset given the input training data chunk id.
For a large dataset, we can build the KNN index in multiple stages.
stage-1: build sharding indexes, each containing a fraction of the dataset. This can be done in parallel on several machines. example,
```python
python scripts/nlp_language_modeling/build_knn_map_index.py \
--input_file=PATH_TO_INPUT_TRAINING_DATA \
--tokenizer-library=megatron \
--tokenizer-type=GPT2BPETokenizer \
--merge-file=/dataset/gpt2-merges.txt \
--vocab-file=/dataset/gpt2-vocab.json \
--process_chunk_size=10000 \
--K_neighbors=16 \
--remove_duplicate \
--workers=2 \
--shard_id=0 \
--total_shards=2 \
--devices=0,1,2 \
--stage=1 \
--nprobe=10 \
--output_file=knn_shard0.save \
--faiss_index=faiss.index
```
stage-2: merge the sharding indexes into one that is written directly to disk, example
```python
python scripts/nlp_language_modeling/build_knn_map_index.py \
--stage=2 \
--output_file=knn_final.save \
--shard_index_input=knn_shard
```
"""
import argparse
import multiprocessing
import pathlib
import sys
import time
from multiprocessing import Pool
import faiss
import numpy as np
import torch
from numba import njit, prange
from sentence_transformers import SentenceTransformer
from nemo.collections.nlp.data.language_modeling.megatron.indexed_retrieval_dataset import (
KNNIndex,
MMapRetrievalIndexedDataset,
merge_knn_files,
)
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.utils import logging
QUEUE_SIZE = 30
queue = multiprocessing.Queue(QUEUE_SIZE)
emb_queue = multiprocessing.Queue(QUEUE_SIZE)
@njit(parallel=True)
def build_map(chunk_start, result, total_chunks, start_id, end_id):
"""
Build the map from chunk_id to a range of chunk ids that are from the same document.
The chunk_id is in range [start_id, end_id)
"""
size = len(chunk_start)
for i in prange(size):
beg = chunk_start[i]
end = chunk_start[i + 1] if i < size - 1 else total_chunks
if start_id < end and beg < end_id: # [beg, end) intersect [start_id, end_id)
result[max(beg - start_id, 0) : (end - start_id), 0] = beg
result[max(beg - start_id, 0) : (end - start_id), 1] = end
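# Illustration with made-up numbers: if chunk_start = [0, 5] and total_chunks = 9 (document 0 owns
# chunks [0, 5) and document 1 owns [5, 9)), then for start_id=3, end_id=7 the rows of `result`
# become [[0, 5], [0, 5], [5, 9], [5, 9]].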
@njit(parallel=True)
def _dedup(chunk_id_to_range, I, tmp_neighbors, chunk_id_start, offset):
for cid in prange(len(I)):
if chunk_id_start + cid - offset >= 0 and chunk_id_start + cid - offset < len(chunk_id_to_range):
beg, end = chunk_id_to_range[chunk_id_start + cid - offset]
position = 0
for target_chunk_id in I[cid]:
if beg <= target_chunk_id < end:
# target chunk is from the same document
continue
tmp_neighbors[cid, position] = target_chunk_id
position += 1
def dedup(chunk_id_to_range, I, tmp_neighbors, chunk_id_start, offset):
"""
    Deduplicate the KNN neighbors that are from the same document as the data chunks.
    chunk_id_to_range is calculated by the build_map function, which maps chunk_id - offset to the range of chunk ids of the same document.
    I is the original KNN search result from Faiss.
    chunk_id_start is the chunk_id offset.
    offset is the map offset.
    The filtered KNN results are stored in tmp_neighbors.
"""
if chunk_id_start < offset or chunk_id_start + len(I) - offset > len(chunk_id_to_range):
raise ValueError('chunk_id_start out side the range')
_dedup(chunk_id_to_range, I, tmp_neighbors, chunk_id_start, offset)
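# Illustration (made-up chunk ids): if data chunk 10 belongs to a document spanning chunks [8, 12)
# and its raw Faiss neighbors are [11, 203, 9, 77], the same-document hits 11 and 9 are skipped and
# the row becomes [203, 77, -1, -1]; the -1 padding is there because the caller pre-fills
# tmp_neighbors with -1 before calling dedup.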
def get_tokenizer(args):
tokenizer = get_nmt_tokenizer(
library=args.tokenizer_library,
model_name=args.tokenizer_type,
tokenizer_model=args.tokenizer_model,
vocab_file=args.vocab_file,
merges_file=args.merge_file,
delimiter=args.delimiter,
)
if not hasattr(tokenizer, "pad_id"):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
elif hasattr(tokenizer, "pad_id") and (tokenizer.pad_id is None or tokenizer.pad_id < 0):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
return tokenizer
def calculate_start_end(total_chunks, total_shards, shard_id):
shard_size = total_chunks // total_shards
splits = list(range(0, total_chunks, shard_size))
if shard_id < total_shards - 1:
start = splits[shard_id]
total_chunks = splits[shard_id + 1]
elif shard_id == total_shards - 1:
start = splits[shard_id]
total_chunks = total_chunks
else:
        raise ValueError(f'shard_id {shard_id} is out of range for total_shards {total_shards}')
return start, total_chunks
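# Example: with total_chunks=100 and total_shards=4, shard 0 covers chunks [0, 25),
# shard 1 covers [25, 50), and the last shard covers [75, 100).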
def process_sentence_chunks(
ds: MMapRetrievalIndexedDataset,
tokenizer,
chunk_size: int,
stage: int,
workers: int,
shard_id: int,
total_shards: int,
):
"""
    This function takes chunked tokens from the retrieval dataset and maps them back to text.
    In stage 1, it divides the total work into `total_shards` and processes only the shard given by `shard_id`.
    If the stage is None, it processes all the chunks.
"""
total_chunks = ds.chunks
start = 0
threshold = 0
if stage == 1:
start, total_chunks = calculate_start_end(
total_chunks=total_chunks, total_shards=total_shards, shard_id=shard_id
)
logging.info(f'shard_id {shard_id}, create index from chunk {start} to {total_chunks}')
with Pool(workers) as p:
while start < total_chunks:
if start / total_chunks > threshold:
logging.info(f"sentence processing {start / total_chunks} is done")
threshold += 0.1
slice_id = (start, min(start + chunk_size, total_chunks))
beg = time.time()
id_slices = ds.get_chunk(slice(*slice_id), force_no_cont_ids=True)
end = time.time()
logging.info(f"load {chunk_size} chunks takes {end-beg}")
start = min(start + chunk_size, total_chunks)
sentences = p.map(tokenizer.ids_to_text, id_slices)
end2 = time.time()
logging.info(f"tokenize {chunk_size} chunks takes {end2-end}")
queue.put((sentences, slice_id))
queue.put((None, None))
def get_sentence_chunks():
return queue.get()
def calculate_embedding(pool, batch_size):
while True:
sentences, slice_id = get_sentence_chunks()
if sentences is None:
break
beg = time.time()
emb = model.encode_multi_process(sentences=sentences, pool=pool, batch_size=batch_size)
end = time.time()
logging.info(f"one embedding {len(emb)} batch size takes {end-beg}")
emb_queue.put((emb, slice_id))
emb_queue.put((None, None))
def get_emb():
return emb_queue.get()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="build Faiss index",)
parser.add_argument(
'--input_file', type=str, required=False, help='Input file',
)
parser.add_argument("--faiss_index", type=str, required=False, help='faiss index file for retrieval dataset')
parser.add_argument(
'--process_chunk_size',
type=int,
default=10000,
        help='Number of chunks converted to text and used as queries at a time when building the map index',
)
parser.add_argument(
'--remove_duplicate',
action='store_true',
        help='Remove the KNN neighbors that are from the same document as the data.',
)
parser.add_argument(
'--K_neighbors', type=int, default=16, help='The number of neighbors to query',
)
parser.add_argument(
'--dedup_margin',
type=int,
default=2,
        help='extra neighbors retrieved to backfill the slots removed by same-document deduplication',
)
parser.add_argument(
'--sentence_transformer_model',
type=str,
default='bert-base-nli-mean-tokens',
help='sentence transformer to load',
)
parser.add_argument('--shard_id', type=int, default=None, help='run the job to create the shard_id index')
parser.add_argument('--total_shards', type=int, default=None, help='total number of knn index shards')
parser.add_argument(
'--output_file', type=str, required=True, help='Output KNN Map index file',
)
parser.add_argument(
'--devices', type=str, default=None, help='delimited list input with cuda devices. Specify like 0,1,2'
)
parser.add_argument(
"--batch_size", type=int, default=4000, help="Batch size for encoding. Use max according to GPU MEM"
)
group = parser.add_argument_group(title='tokenizer')
group.add_argument(
'--tokenizer-library',
type=str,
required=False,
choices=['yttm', 'sentencepiece', 'megatron', 'huggingface', 'tabular'],
help='What tokenizer library to use.',
)
group.add_argument(
'--tokenizer-type', type=str, default=None, help='What type of tokenizer to use.',
)
group.add_argument(
'--tokenizer-model', type=str, default=None, help='Path to tokenizer model.',
)
group.add_argument('--vocab-file', type=str, default=None, help='Path to the vocab file')
group.add_argument('--merge-file', type=str, default=None, help='Path to the BPE merge file (if necessary).')
group.add_argument('--delimiter', type=str, default=None, help='delimiter used for tabular tokenizer')
group.add_argument(
'--stage',
type=int,
default=None,
help='used for building the large knn index in multiple stages',
choices=[1, 2],
)
group.add_argument('--workers', type=int, default=None, help='number of workers to run tokenizer')
group.add_argument(
'--nprobe',
type=int,
default=10,
help='number of probes, higher number of probes renders better results but runs slower',
)
group.add_argument(
'--shard_index_input',
type=str,
default=None,
help='the knn sharding index files, which are created at stage 1',
)
args = parser.parse_args()
has_gpu = torch.cuda.is_available() and hasattr(faiss, "index_gpu_to_cpu")
if not hasattr(faiss, "index_gpu_to_cpu"):
logging.warning(
"faiss doesn't support gpu index. Please check https://github.com/facebookresearch/faiss/blob/main/INSTALL.md"
)
if args.stage == 2:
# combine shard index files into one
input_file = pathlib.Path(args.shard_index_input)
path = input_file.parent
fname = input_file.name
all_files = [str(i) for i in pathlib.Path(path).glob(fname + '*')]
merge_knn_files(all_files, args.output_file)
f = KNNIndex(args.output_file)
logging.info(f'Write to {args.output_file}, Size of Index : {f.len}')
logging.info(f'Index neighbors: {f.K}')
logging.info(f'Index chunk start id: {f.chunk_start_id}')
logging.info(f'Index chunk end id: {f.chunk_end_id}')
sys.exit(0)
model = SentenceTransformer(args.sentence_transformer_model)
tokenizer = get_tokenizer(args)
ds = MMapRetrievalIndexedDataset(args.input_file)
if args.devices is None or not torch.cuda.is_available():
device_list = None
else:
device_list = ['cuda:' + str(device) for device in args.devices.split(',')]
index = faiss.read_index(args.faiss_index)
if has_gpu:
co = faiss.GpuMultipleClonerOptions()
co.useFloat16 = True
co.usePrecomputed = False
co.shard = True
index = faiss.index_cpu_to_all_gpus(index, co, ngpu=len(device_list))
index.nprobe = args.nprobe
start = 0
total_chunks = ds.chunks
if args.stage == 1:
start, total_chunks = calculate_start_end(
total_chunks=total_chunks, total_shards=args.total_shards, shard_id=args.shard_id
)
process = multiprocessing.Process(
target=process_sentence_chunks,
args=(ds, tokenizer, args.process_chunk_size, args.stage, args.workers, args.shard_id, args.total_shards),
)
process.start()
pool = model.start_multi_process_pool(device_list)
emb_process = multiprocessing.Process(target=calculate_embedding, args=(pool, args.batch_size))
emb_process.start()
if ds._index.retrieval_db and args.remove_duplicate:
neighbors = args.K_neighbors + args.dedup_margin
# build the id maps for quick dedup
id_start = np.array(ds._index._chunk_id_start)
chunk_id_to_doc_id_map = np.zeros((total_chunks - start, 2), dtype=np.int64)
build_map(id_start, chunk_id_to_doc_id_map, ds.chunks, start, total_chunks)
else:
neighbors = args.K_neighbors
chunk_id_start = start
with KNNIndex.writer(args.output_file, args.K_neighbors, offset=start) as w:
while True:
emb, slice_id = get_emb()
if emb is None:
break
beg = time.time()
D, I = index.search(emb, neighbors)
end = time.time()
logging.info(f'search {slice_id[0]} - {slice_id[1]} takes {end-beg}')
assert chunk_id_start == slice_id[0]
if ds._index.retrieval_db and args.remove_duplicate:
beg = time.time()
tmp_neighbors = np.ones_like(I) * -1
dedup(chunk_id_to_doc_id_map, I, tmp_neighbors, chunk_id_start, start)
I = tmp_neighbors[:, : args.K_neighbors]
end = time.time()
logging.info(f'dedup {slice_id[0]} - {slice_id[1]} takes {end-beg}')
beg = time.time()
w.write(I)
end = time.time()
logging.info(f'write {slice_id[0]} - {slice_id[1]} takes {end-beg}')
chunk_id_start += len(I)
process.join()
emb_process.join()
model.stop_multi_process_pool(pool)
| NeMo-main | scripts/nlp_language_modeling/build_knn_map_index.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is the script to examine the KNN mapping quality between the indexed data and the indexed retrieval database.
It requires the training text data to be converted into `bin` and `idx` files by `preprocess_data_for_megatron.py` script.
It also requires KNNIndex built by `build_retrieval_index.py` script.
Here is an example to using it:
```python
python scripts/nlp_language_modeling/exam_knn_map_quality.py \
--input_data_prefix=PATH_TO_DATA \
--input_retrieval_prefix=PATH_TO_RETRIEVAL_DATA \
--knn_index=PATH_TO_KNN_MAP_INDEX \
--chunk_ids 2 3000 4000 5000 6000 \
--tokenizer-library=sentencepiece \
--tokenizer-model=tokenizer.model
```
"""
import argparse
from nemo.collections.nlp.data.language_modeling.megatron.indexed_retrieval_dataset import (
KNNIndex,
MMapRetrievalIndexedDataset,
)
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.utils import logging
def get_tokenizer(args):
tokenizer = get_nmt_tokenizer(
library=args.tokenizer_library,
model_name=args.tokenizer_type,
tokenizer_model=args.tokenizer_model,
vocab_file=args.vocab_file,
merges_file=args.merge_file,
delimiter=args.delimiter,
)
if not hasattr(tokenizer, "pad_id"):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
elif hasattr(tokenizer, "pad_id") and (tokenizer.pad_id is None or tokenizer.pad_id < 0):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
return tokenizer
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="build Faiss index",)
parser.add_argument(
'--input_data_prefix', type=str, required=True, help='Input data prefix',
)
parser.add_argument(
'--input_retrieval_prefix', type=str, required=True, help='Input retrieval data prefix',
)
parser.add_argument(
'--knn_index', type=str, required=True, help='Input knn map index file',
)
parser.add_argument(
'--neighbors', type=int, default=None, help='number of neighbors',
)
parser.add_argument(
'--chunk_ids',
nargs='+',
default=[1, 3, 5, 7],
type=int,
        help='space-separated list of chunk ids in the input data',
)
group = parser.add_argument_group(title='tokenizer')
group.add_argument(
'--tokenizer-library',
type=str,
required=True,
choices=['yttm', 'sentencepiece', 'megatron', 'huggingface', 'tabular'],
help='What tokenizer library to use.',
)
group.add_argument(
'--tokenizer-type', type=str, default=None, help='What type of tokenizer to use.',
)
group.add_argument(
'--tokenizer-model', type=str, default=None, help='Path to tokenizer model.',
)
group.add_argument('--vocab-file', type=str, default=None, help='Path to the vocab file')
group.add_argument('--merge-file', type=str, default=None, help='Path to the BPE merge file (if necessary).')
group.add_argument('--delimiter', type=str, default=None, help='delimiter used for tabular tokenizer')
args = parser.parse_args()
tokenizer = get_tokenizer(args)
data_ds = MMapRetrievalIndexedDataset(args.input_data_prefix)
retrieval_ds = MMapRetrievalIndexedDataset(args.input_retrieval_prefix)
knn_index = KNNIndex(args.knn_index)
assert knn_index.len == data_ds.chunks
logging.info(f'Data index has {data_ds.chunks} chunks')
logging.info(f'Retrieval Data index has {retrieval_ds.chunks} chunks')
logging.info(f'KNN index has {knn_index.K} neighbors')
assert data_ds._index.chunk_size == retrieval_ds._index.chunk_size
print_num_neighbors = knn_index.K
if args.neighbors is not None:
assert args.neighbors <= knn_index.K
print_num_neighbors = args.neighbors
for chunk_id in args.chunk_ids:
token_ids = data_ds.get_chunk(chunk_id, force_no_cont_ids=True)
assert token_ids.shape[0] == data_ds._index.chunk_size
query_text = tokenizer.ids_to_text(token_ids)
neighbor_chunk_ids = knn_index.get_KNN_chunk_ids(chunk_id)
neighbor_chunk_ids = neighbor_chunk_ids[:print_num_neighbors]
print(f'Query: {query_text}')
for i, neighbor in enumerate(neighbor_chunk_ids):
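            # each retrieval-DB chunk stores 2 * chunk_size tokens: the matched chunk followed by its continuation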
token_ids = retrieval_ds.get_chunk(neighbor)
half = token_ids.shape[0] // 2
assert half == data_ds._index.chunk_size
neighbor_match = tokenizer.ids_to_text(token_ids[:half])
neighbor_extend = tokenizer.ids_to_text(token_ids[half:])
print(f' ->K{i}: {neighbor_match} --- {neighbor_extend}')
print(' --------------- ')
| NeMo-main | scripts/nlp_language_modeling/exam_knn_map_quality.py |
#!/usr/bin/env python
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Augment text by corrupting words in a human-like manner.
Supports letter swaps/drops, and AugLy <https://github.com/facebookresearch/AugLy>.
"""
from argparse import ArgumentParser
import numpy as np
try:
import augly.text as txtaugs
except Exception as e:
txtaugs = None
# =============================================================================#
# Augmentations
# =============================================================================#
def aug_switch_near_letters(word, p=0.0):
"""
Switch two consecutive letters in a word
"""
if np.random.rand() < p:
if len(word) > 1:
i = np.random.randint(len(word) - 1)
j = i + 1
word = word[:i] + word[j] + word[i] + word[j + 1 :]
return word
def aug_drop_letter(word, p=0.0):
"""
    Drop a random letter from a word
"""
if np.random.rand() < p:
if len(word) > 1:
i = np.random.randint(len(word))
word = word[:i] + word[i + 1 :]
return word
# =============================================================================#
# Main
# =============================================================================#
def main():
parser = ArgumentParser()
parser.add_argument("--source", type=str, required=True, help="Input file")
parser.add_argument("--target", type=str, required=True, help="Output file")
parser.add_argument(
"--p_switch_near_letters_order",
type=float,
default=0.0,
help="Probability of switching two consecutive letters in a word",
)
parser.add_argument("--p_drop_letter", type=float, default=0.0, help="Probability of dropping a letter in a word")
# AugLy
parser.add_argument(
"--p_augly", type=float, default=0.0, help="Probability of augly to apply a transformation (per word)"
)
args = parser.parse_args()
if (args.p_augly > 0) and (txtaugs is None):
raise ImportError("Cannot use AugLy, module failed to import. Did you install it? (pip install augly)")
# collect ops
ops = []
if args.p_switch_near_letters_order > 0:
ops.append(lambda w: aug_switch_near_letters(w, p=args.p_switch_near_letters_order))
if args.p_drop_letter > 0:
ops.append(lambda w: aug_drop_letter(w, p=args.p_drop_letter))
# apply ops
with open(args.target, 'w') as target_f:
for line in open(args.source).readlines():
line = line.strip()
words = line.split(" ")
for op in ops:
words = list(map(op, words))
# clean double spaces from dropped words
line = " ".join(words).replace(" ", " ")
if args.p_augly > 0:
line = txtaugs.simulate_typos(
[line], aug_char_p=args.p_augly, aug_word_p=args.p_augly, aug_char_min=0, aug_word_min=0,
)[0]
target_f.write(line + "\n")
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | scripts/nlp_language_modeling/augment-text.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Conversion script to convert Huggingface LLaMA checkpoints into a NeMo checkpoint.
Example to run this conversion script:
python convert_hf_llama_to_nemo.py \
--in-file <path_to_hf_checkpoints_folder> \
--out-file <path_to_output_nemo_file> \
     [--fast-swiglu]
"""
import os
from argparse import ArgumentParser
from collections import OrderedDict
import torch
from omegaconf import OmegaConf
from pytorch_lightning.core.saving import _load_state as ptl_load_state
from pytorch_lightning.trainer.trainer import Trainer
from transformers import LlamaForCausalLM, LlamaTokenizer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.parts.nlp_overrides import (
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.utils import logging
def get_args():
parser = ArgumentParser()
parser.add_argument(
"--in-file", type=str, default=None, required=True, help="Path to Huggingface LLaMA checkpoints",
)
parser.add_argument("--out-file", type=str, default=None, required=True, help="Path to output .nemo file.")
parser.add_argument("--precision", type=str, default="32", help="Model precision")
args = parser.parse_args()
return args
def load_model(cls, checkpoint, strict, **kwargs):
try:
if 'cfg' in kwargs:
model = ptl_load_state(cls, checkpoint, strict=strict, **kwargs)
else:
# model = ptl_load_state(
# cls, checkpoint, strict=strict, cfg=checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY], **kwargs
# )
model = cls(cfg=checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY], **kwargs)
for name, module in model.named_parameters():
if name in checkpoint['state_dict']:
module.data = checkpoint['state_dict'][name]
checkpoint['state_dict'].pop(name)
else:
print(f"Unexpected key: {name} not in checkpoint but in model.")
for name, buffer in model.named_buffers():
if name in checkpoint['state_dict']:
buffer.data = checkpoint['state_dict'][name]
checkpoint['state_dict'].pop(name)
if len(checkpoint['state_dict'].keys()) != 0:
raise RuntimeError(
f"Additional keys: {checkpoint['state_dict'].keys()} in checkpoint but not in model."
)
# register the artifacts
cfg = checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY]
if cfg.tokenizer.model is not None:
model.register_artifact("tokenizer.tokenizer_model", cfg.tokenizer.model)
if cfg.tokenizer.vocab_file is not None:
model.register_artifact("tokenizer.vocab_file", cfg.tokenizer.vocab_file)
if cfg.tokenizer.merge_file is not None:
model.register_artifact("tokenizer.merge_file", cfg.tokenizer.merge_file)
finally:
cls._set_model_restore_state(is_being_restored=False)
return model
def load_config(args, llama_config):
nemo_config = OmegaConf.load(
os.path.join(os.path.dirname(__file__), '../../examples/nlp/language_modeling/conf/megatron_llama_config.yaml')
).model
nemo_config.encoder_seq_length = llama_config['max_position_embeddings']
nemo_config.num_layers = int(llama_config['num_hidden_layers'])
nemo_config.hidden_size = llama_config['hidden_size']
nemo_config.ffn_hidden_size = llama_config['intermediate_size']
nemo_config.num_attention_heads = llama_config['num_attention_heads']
nemo_config.max_position_embeddings = llama_config['max_position_embeddings']
nemo_config.init_method_std = llama_config['initializer_range']
nemo_config.layernorm_epsilon = llama_config['rms_norm_eps']
if 'num_key_value_heads' in llama_config:
nemo_config.num_query_groups = llama_config['num_key_value_heads']
nemo_config.use_cpu_initialization = True
nemo_config.activation = 'fast-swiglu'
nemo_config.tokenizer.model = llama_config['tokenizer_model']
if llama_config['rope_scaling'] is not None:
if llama_config['rope_scaling']['type'] == 'linear':
nemo_config['seq_len_interpolation_factor'] = llama_config['rope_scaling']['factor']
else:
raise ValueError("Only linear rope scaling type is supported now")
base = 128
while llama_config['vocab_size'] % base != 0:
base //= 2
nemo_config.make_vocab_size_divisible_by = base
return nemo_config
def convert(args):
logging.info(f"loading checkpoint {args.in_file}")
model = LlamaForCausalLM.from_pretrained(args.in_file)
tokenizer = LlamaTokenizer.from_pretrained(args.in_file)
hf_config = vars(model.config)
hf_config['tokenizer_model'] = str(tokenizer.vocab_file)
print(f"hf_config: {hf_config}")
print("named parameters:")
for name, param in model.named_parameters():
print(f"- {name}")
nemo_config = load_config(args, hf_config)
if args.precision in ["32", "16"]:
precision = int(float(args.precision))
elif args.precision in ["bf16", "bf16-mixed"]:
if torch.cuda.is_available() and torch.cuda.is_bf16_supported():
precision = args.precision
else:
logging.warning("BF16 is not supported on this device. Using FP16 instead.")
precision = args.precision[2:] # prune bf in string
else:
precision = args.precision
plugins = []
if precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=nemo_config.get('native_amp_init_scale', 2 ** 32),
growth_interval=nemo_config.get('native_amp_growth_interval', 1000),
hysteresis=nemo_config.get('hysteresis', 2),
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if nemo_config.get('megatron_amp_O2', False):
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if precision == 32:
dtype = torch.float32
elif precision in [16, "16", "16-mixed"]:
dtype = torch.float16
elif precision in ["bf16", "bf16-mixed"]:
dtype = torch.bfloat16
else:
dtype = torch.float32 # fallback
nemo_config.precision = precision
print(f"nemo_config: {nemo_config}")
trainer = Trainer(plugins=plugins, accelerator='cpu', precision=precision, strategy=NLPDDPStrategy())
hidden_size = hf_config["hidden_size"]
head_num = hf_config["num_attention_heads"]
head_size = hidden_size // head_num
num_layers = hf_config["num_hidden_layers"]
mcore_gpt = nemo_config.mcore_gpt
assert mcore_gpt == nemo_config.get(
'transformer_engine', False
), "mcore_gpt transformer_engine must be enabled (or disabled) together."
param_to_weights = lambda param: param.float()
checkpoint = OrderedDict()
checkpoint['state_dict'] = OrderedDict()
embed_weight = model.state_dict()[f'model.embed_tokens.weight']
if mcore_gpt:
embed_weights_base_name = f'model.embedding.word_embeddings.weight'
else:
embed_weights_base_name = f'model.language_model.embedding.word_embeddings.weight'
checkpoint['state_dict'][embed_weights_base_name] = param_to_weights(embed_weight)
# in hf, this is defined as register_buffer(..., persistent=False) so it won't be in the state dict
if f'model.layers.0.self_attn.rotary_emb.inv_freq' in model.state_dict():
rotary_embed_weight = model.state_dict()[f'model.layers.0.self_attn.rotary_emb.inv_freq']
if mcore_gpt:
rotary_embed_weight_base_name = f'model.rotary_pos_emb.inv_freq'
else:
rotary_embed_weight_base_name = f'model.language_model.rotary_pos_emb.inv_freq'
checkpoint['state_dict'][rotary_embed_weight_base_name] = param_to_weights(rotary_embed_weight)
if nemo_config.num_query_groups is None or nemo_config.num_query_groups == head_num:
num_query_groups = head_num
else:
num_query_groups = nemo_config.num_query_groups
assert head_num % num_query_groups == 0, 'head_num must be divisible by num_query_groups'
if mcore_gpt:
assert nemo_config.activation.startswith('fast-'), 'mcore only supports fast version of gated linear unit.'
for l in range(int(num_layers)):
print(f"converting layer {l}")
old_tensor_shape = model.state_dict()[f'model.layers.{l}.self_attn.q_proj.weight'].size()
new_q_tensor_shape = (head_num, head_size) + old_tensor_shape[1:]
new_kv_tensor_shape = (num_query_groups, head_size) + old_tensor_shape[1:]
q = model.state_dict()[f'model.layers.{l}.self_attn.q_proj.weight'].view(*new_q_tensor_shape)
k = model.state_dict()[f'model.layers.{l}.self_attn.k_proj.weight'].view(*new_kv_tensor_shape)
v = model.state_dict()[f'model.layers.{l}.self_attn.v_proj.weight'].view(*new_kv_tensor_shape)
qkv_weights = torch.empty((0, head_size) + old_tensor_shape[1:])
heads_per_group = head_num // num_query_groups
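        # build the fused QKV tensor in the grouped layout NeMo expects: for each query group,
        # all of its query heads come first, followed by that group's single K and V head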
for i in range(num_query_groups):
qkv_weights = torch.cat((qkv_weights, q[i * heads_per_group : (i + 1) * heads_per_group, :, :]))
qkv_weights = torch.cat((qkv_weights, k[i : i + 1, :, :]))
qkv_weights = torch.cat((qkv_weights, v[i : i + 1, :, :]))
qkv_weights = qkv_weights.reshape([head_size * (head_num + 2 * num_query_groups), hidden_size])
if mcore_gpt:
qkv_weights_base_name = f'model.decoder.layers.{l}.self_attention.linear_qkv.weight'
else:
qkv_weights_base_name = f'model.language_model.encoder.layers.{l}.self_attention.query_key_value.weight'
checkpoint['state_dict'][qkv_weights_base_name] = param_to_weights(qkv_weights)
# attention dense
o_weight = model.state_dict()[f'model.layers.{l}.self_attn.o_proj.weight']
if mcore_gpt:
o_weight_base_name = f'model.decoder.layers.{l}.self_attention.linear_proj.weight'
else:
o_weight_base_name = f'model.language_model.encoder.layers.{l}.self_attention.dense.weight'
checkpoint['state_dict'][o_weight_base_name] = param_to_weights(o_weight)
# MLP
mlp_down_weight = model.state_dict()[f'model.layers.{l}.mlp.gate_proj.weight']
mlp_gate_weight = model.state_dict()[f'model.layers.{l}.mlp.up_proj.weight']
if mcore_gpt:
mlp_down_base_name = f'model.decoder.layers.{l}.mlp.linear_fc1.weight'
else:
mlp_down_base_name = f'model.language_model.encoder.layers.{l}.mlp.dense_h_to_4h.weight'
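        # the gated MLP's fc1 stores HF's gate_proj and up_proj stacked along the output dimension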
mlp_down_weight = torch.cat((mlp_down_weight, mlp_gate_weight), axis=0)
checkpoint['state_dict'][mlp_down_base_name] = param_to_weights(mlp_down_weight)
mlp_up_weight = model.state_dict()[f'model.layers.{l}.mlp.down_proj.weight']
if mcore_gpt:
mlp_up_base_name = f'model.decoder.layers.{l}.mlp.linear_fc2.weight'
else:
mlp_up_base_name = f'model.language_model.encoder.layers.{l}.mlp.dense_4h_to_h.weight'
checkpoint['state_dict'][mlp_up_base_name] = param_to_weights(mlp_up_weight)
# LayerNorm
input_ln_weight = model.state_dict()[f'model.layers.{l}.input_layernorm.weight']
if mcore_gpt:
input_ln_base_name = f'model.decoder.layers.{l}.self_attention.linear_qkv.layer_norm_weight'
else:
input_ln_base_name = f'model.language_model.encoder.layers.{l}.input_layernorm.weight'
checkpoint['state_dict'][input_ln_base_name] = param_to_weights(input_ln_weight)
post_attn_ln_weight = model.state_dict()[f'model.layers.{l}.post_attention_layernorm.weight']
if mcore_gpt:
post_attn_ln_base_name = f'model.decoder.layers.{l}.mlp.linear_fc1.layer_norm_weight'
else:
post_attn_ln_base_name = f'model.language_model.encoder.layers.{l}.post_attention_layernorm.weight'
checkpoint['state_dict'][post_attn_ln_base_name] = param_to_weights(post_attn_ln_weight)
print(f"done layer {l}")
final_ln_weight = model.state_dict()[f'model.norm.weight']
if mcore_gpt:
final_ln_base_name = f'model.decoder.final_layernorm.weight'
else:
final_ln_base_name = f'model.language_model.encoder.final_layernorm.weight'
checkpoint['state_dict'][final_ln_base_name] = param_to_weights(final_ln_weight)
output_layer_weight = model.state_dict()[f'lm_head.weight']
if mcore_gpt:
output_layer_base_name = f'model.output_layer.weight'
else:
output_layer_base_name = f'model.language_model.output_layer.weight'
checkpoint['state_dict'][output_layer_base_name] = param_to_weights(output_layer_weight)
checkpoint[MegatronGPTModel.CHECKPOINT_HYPER_PARAMS_KEY] = nemo_config
del model
if nemo_config.get('megatron_amp_O2', False):
keys = list(checkpoint['state_dict'].keys())
for key in keys:
checkpoint['state_dict'][key.replace('model.', 'model.module.', 1)] = checkpoint['state_dict'].pop(key)
model = load_model(MegatronGPTModel, checkpoint, strict=False, trainer=trainer)
model._save_restore_connector = NLPSaveRestoreConnector()
# cast to target precision and disable cpu init
model = model.to(dtype=dtype)
model.cfg.use_cpu_initialization = False
model.save_to(args.out_file)
logging.info(f'NeMo model saved to: {args.out_file}')
if __name__ == '__main__':
args = get_args()
convert(args)
| NeMo-main | scripts/nlp_language_modeling/convert_hf_llama_to_nemo.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
from nemo.collections.nlp.modules.common.megatron.retrieval_services.static_retrieval_server import RetrievalServer
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.core.config import hydra_runner
def get_tokenizer(args):
tokenizer = get_nmt_tokenizer(
library=args.library,
model_name=args.type,
tokenizer_model=args.model,
vocab_file=args.vocab_file,
merges_file=args.merge_file,
delimiter=args.delimiter,
)
if not hasattr(tokenizer, "pad_id"):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
elif hasattr(tokenizer, "pad_id") and (tokenizer.pad_id is None or tokenizer.pad_id < 0):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
return tokenizer
@hydra_runner(config_path="conf", config_name="static_retrieval_service")
def main(cfg) -> None:
tokenizer = get_tokenizer(cfg.tokenizer)
server = RetrievalServer(
cfg.service.faiss_index,
cfg.service.faiss_devices,
cfg.service.nprobe,
cfg.service.retrieval_index,
tokenizer,
cfg.service.query_bert_ip,
cfg.service.query_bert_port,
)
server.run("0.0.0.0", cfg.service.port)
if __name__ == "__main__":
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | scripts/nlp_language_modeling/service_launch_scripts/start_static_retrieval_service.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
from nemo.collections.nlp.modules.common.megatron_web_server import RetroDemoWebApp
from nemo.core.config import hydra_runner
@hydra_runner(config_path="conf", config_name="retro_web_server")
def main(cfg) -> None:
demo = RetroDemoWebApp(cfg.text_service_ip, cfg.text_service_port, cfg.combo_service_ip, cfg.combo_service_port)
demo.run_demo(cfg.share, cfg.username, cfg.password, cfg.port)
if __name__ == "__main__":
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | scripts/nlp_language_modeling/service_launch_scripts/start_web_service.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
from nemo.collections.nlp.modules.common.megatron.retrieval_services.dynamic_retrieval_server import (
DynamicRetrievalServer,
)
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.core.config import hydra_runner
def get_tokenizer(args):
tokenizer = get_nmt_tokenizer(
library=args.library,
model_name=args.type,
tokenizer_model=args.model,
vocab_file=args.vocab_file,
merges_file=args.merge_file,
delimiter=args.delimiter,
)
if not hasattr(tokenizer, "pad_id"):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
elif hasattr(tokenizer, "pad_id") and (tokenizer.pad_id is None or tokenizer.pad_id < 0):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
return tokenizer
@hydra_runner(config_path="conf", config_name="dynamic_retrieval_service")
def main(cfg) -> None:
tokenizer = get_tokenizer(cfg.tokenizer)
server = DynamicRetrievalServer(
cfg.service.faiss_devices,
tokenizer,
cfg.service.chunk_size,
cfg.service.stride,
cfg.service.faiss_index,
cfg.service.store_file,
cfg.service.ctx_bert_ip,
cfg.service.ctx_bert_port,
cfg.service.query_bert_ip,
cfg.service.query_bert_port,
cfg.service.output_filename,
)
server.run("0.0.0.0", cfg.service.port)
if __name__ == "__main__":
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | scripts/nlp_language_modeling/service_launch_scripts/start_dynamic_retrieval_service.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
from nemo.collections.nlp.modules.common.megatron.retrieval_services.combo_retrieval_server import ComboRetrievalServer
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.core.config import hydra_runner
def get_tokenizer(args):
tokenizer = get_nmt_tokenizer(
library=args.library,
model_name=args.type,
tokenizer_model=args.model,
vocab_file=args.vocab_file,
merges_file=args.merge_file,
delimiter=args.delimiter,
)
if not hasattr(tokenizer, "pad_id"):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
elif hasattr(tokenizer, "pad_id") and (tokenizer.pad_id is None or tokenizer.pad_id < 0):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
return tokenizer
@hydra_runner(config_path="conf", config_name="combo_retrieval_service")
def main(cfg) -> None:
tokenizer = get_tokenizer(cfg.tokenizer)
server = ComboRetrievalServer(tokenizer, cfg.service.child_services)
server.run("0.0.0.0", cfg.service.port)
if __name__ == "__main__":
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | scripts/nlp_language_modeling/service_launch_scripts/start_combo_retrieval_service.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
from nemo.collections.nlp.modules.common.megatron.retrieval_services.bert_service import start_sentence_bert_server
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.core.config import hydra_runner
def get_tokenizer(args):
tokenizer = get_nmt_tokenizer(
library=args.library,
model_name=args.type,
tokenizer_model=args.model,
vocab_file=args.vocab_file,
merges_file=args.merge_file,
delimiter=args.delimiter,
)
if not hasattr(tokenizer, "pad_id"):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
elif hasattr(tokenizer, "pad_id") and (tokenizer.pad_id is None or tokenizer.pad_id < 0):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
return tokenizer
@hydra_runner(config_path="conf", config_name="bert_service")
def main(cfg) -> None:
tokenizer = get_tokenizer(cfg.tokenizer)
start_sentence_bert_server(
cfg.name,
cfg.sentence_bert.devices,
tokenizer,
cfg.sentence_bert.sentence_bert,
cfg.sentence_bert.sentence_bert_batch,
port=cfg.sentence_bert.port,
)
if __name__ == "__main__":
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | scripts/nlp_language_modeling/service_launch_scripts/start_bert_service.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_retrieval_model import MegatronRetrievalModel
from nemo.collections.nlp.modules.common.text_generation_server import MegatronServer
from nemo.collections.nlp.modules.common.text_generation_utils import generate
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy, NLPSaveRestoreConnector
from nemo.core.config import hydra_runner
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
"""
This is the script to launch RETRO Model text generation server.
Usage:
Assume the model has TP=1, PP=1
run greedy inference from a nemo file:
python megatron_retro_eval.py \
trainer.devices=1 \
trainer.num_nodes=1 \
trainer.accelerator=gpu \
trainer.precision=16 \
inference.tokens_to_generate=128 \
inference.greedy=True \
retro_model_file=path_to_retro_nemo_file \
tensor_model_parallel_size=1 \
pipeline_model_parallel_size=1 \
retrieval_service.faiss_devices='0' \
retrieval_service.faiss_index=path_to_faiss_index \
retrieval_service.retrieval_index=path_to_retrieval_dataset \
retrieval_service.neighbors=20
"""
@hydra_runner(config_path="conf", config_name="retro_text_generation_server")
def main(cfg) -> None:
trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer)
model_path = cfg.retro_model_file
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(model_path):
save_restore_connector.model_extracted_dir = model_path
model_cfg = MegatronRetrievalModel.restore_from(
model_path, trainer=trainer, return_config=True, save_restore_connector=save_restore_connector,
)
with open_dict(model_cfg):
model_cfg.precision = trainer.precision
model_cfg.sequence_parallel = False
model_cfg.activations_checkpoint_granularity = None
model_cfg.activations_checkpoint_method = None
model = MegatronRetrievalModel.restore_from(
model_path, trainer=trainer, save_restore_connector=save_restore_connector, override_config_path=model_cfg,
)
# check whether the DDP is initialized
if parallel_state.is_unitialized():
def dummy():
return
if model.trainer.strategy.launcher is not None:
model.trainer.strategy.launcher.launch(dummy, trainer=model.trainer)
model.trainer.strategy.setup_environment()
retrieval_service = OmegaConf.to_container(cfg.retrieval_service)
model.set_inference_config(None, retrieval_service)
# running text generation, use inference server
if parallel_state.is_pipeline_first_stage() and parallel_state.get_tensor_model_parallel_rank() == 0:
server = MegatronServer(model.cuda(), inference_strategy=model.inference_strategy)
server.run("0.0.0.0", port=cfg.port)
while True:
choice = torch.cuda.LongTensor(1)
torch.distributed.broadcast(choice, 0)
if choice[0].item() == 0:
generate(model.cuda(), strategy=model.inference_strategy)
if __name__ == '__main__':
main()
| NeMo-main | scripts/nlp_language_modeling/service_launch_scripts/start_retro_model_service.py |
#!/usr/bin/env python
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Merge lora weights into a base GPT LM. Only PP=1 supported so far.
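
Example usage (a sketch; the exact option names are defined in conf/merge_lora_weights.yaml and the
config fields read below):

    python merge.py \
        trainer.accelerator=gpu trainer.devices=1 \
        gpt_model_file=<path_to_base_gpt.nemo> \
        lora_model_path=<path_to_lora.nemo> \
        merged_model_path=<path_to_merged_output.nemo>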
"""
import os
import tempfile
from typing import Any, Dict
import torch
from omegaconf import OmegaConf, open_dict
from pytorch_lightning.trainer.trainer import Trainer
from torch.utils.data import DataLoader, Dataset
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_peft_models import MegatronGPTLoRAModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_sft_model import MegatronGPTSFTModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy, NLPSaveRestoreConnector
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.app_state import AppState
from nemo.utils.model_utils import inject_model_parallel_rank
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
class RequestDataSet(Dataset):
def __init__(self, sentences):
super().__init__()
self.sentences = sentences
def __len__(self,):
return len(self.sentences)
def __getitem__(self, idx):
return self.sentences[idx]
def load_lora(lora_nemo, tp):
lora_state_dict = {}
with tempfile.TemporaryDirectory() as tmpdir:
NLPSaveRestoreConnector._unpack_nemo_file(lora_nemo, tmpdir)
# assert os.path.isdir(lora_extracted_dir), "requires the untar'ed the lora .nemo file"
for i in range(tp):
if tp == 1:
ckpt_file = f"{tmpdir}/model_weights.ckpt"
else:
ckpt_file = f"{tmpdir}/mp_rank_0{i}/model_weights.ckpt"
l = torch.load(ckpt_file, map_location=torch.device('cpu'))
lora_state_dict[i] = l
return lora_state_dict
def fix_for_O2(state_dict):
new_state_dict = {}
for k, v in state_dict.items():
new_state_dict[k.replace('model.language_model', 'model.module.language_model')] = v
return new_state_dict
def merge(
base_model_state_dict: Dict[str, Any],
lora_state_dict: Dict[int, Any],
tp: int,
num_layers: int,
curr_rank: int,
mcore: bool,
):
"""
    Iterates through the self_attention.query_key_value projection weights in all the layers.
    Collects the corresponding lora weights for each layer and across tp ranks.
    Computes the "full rank" weight from the two low-rank weights and adds it to the self_attention.query_key_value weight.
Args:
base_model_state_dict: A state_dict for the base model for the current rank.
        lora_state_dict: A complete set of weights for the lora model across all tp ranks. The key for this dict is the int tp rank.
tp: the tensor_model_parallel_size for the base_model (and the lora model)
num_layers: the number of layers in the base_model to iterate over.
curr_rank: current tp rank of the base model which is being merged with Lora.
mcore: whether the model uses megatron core.
"""
for nl in range(num_layers):
if mcore:
key_self_attn_kqv = f'model.decoder.layers.{nl}.self_attention.linear_qkv.weight'
key_lora_in = f'model.decoder.layers.{nl}.self_attention.adapter_layer.lora_kqv_adapter.linear_in.weight'
key_lora_out = f'model.decoder.layers.{nl}.self_attention.adapter_layer.lora_kqv_adapter.linear_out.weight'
else:
key_self_attn_kqv = f'model.language_model.encoder.layers.{nl}.self_attention.query_key_value.weight'
key_lora_in = f'model.language_model.encoder.layers.{nl}.self_attention.adapter_layer.lora_kqv_adapter.linear_in.weight'
key_lora_out = f'model.language_model.encoder.layers.{nl}.self_attention.adapter_layer.lora_kqv_adapter.linear_out.weight'
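        # collect the low-rank input projection (linear_in) from every TP rank (it is split along dim 0)
        # and keep only this rank's output projection (linear_out), matching the sharding of the QKV weight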
wt_lora_in = torch.cat([lora_state_dict[_tp][key_lora_in] for _tp in range(tp)], dim=0)
wt_lora_out = lora_state_dict[curr_rank][key_lora_out]
wt_self_attn = base_model_state_dict[key_self_attn_kqv]
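        # full-rank LoRA update: delta_W = linear_out @ linear_in, added onto the frozen QKV projection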
wt_lora = wt_lora_out @ wt_lora_in
base_model_state_dict[key_self_attn_kqv] = wt_self_attn + wt_lora.type_as(wt_self_attn)
print("mergeing for weight", key_self_attn_kqv)
return base_model_state_dict
@hydra_runner(config_path="conf", config_name="merge_lora_weights")
def main(cfg) -> None:
# trainer required for restoring model parallel models
trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer)
if (
cfg.tensor_model_parallel_size < 0
or cfg.pipeline_model_parallel_size < 0
or cfg.get('pipeline_model_parallel_split_rank', -1) < 0
):
model_config = MegatronGPTModel.restore_from(
restore_path=cfg.gpt_model_file, trainer=trainer, return_config=True,
)
with open_dict(cfg):
cfg.tensor_model_parallel_size = model_config.get('tensor_model_parallel_size', 1)
cfg.pipeline_model_parallel_size = model_config.get('pipeline_model_parallel_size', 1)
cfg.pipeline_model_parallel_split_rank = model_config.get('pipeline_model_parallel_split_rank', 0)
assert (
cfg.trainer.devices * cfg.trainer.num_nodes
== cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
), "devices * num_nodes should equal tensor_model_parallel_size * pipeline_model_parallel_size"
if cfg.gpt_model_file:
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.gpt_model_file):
save_restore_connector.model_extracted_dir = cfg.gpt_model_file
pretrained_cfg = MegatronGPTModel.restore_from(
restore_path=cfg.gpt_model_file,
trainer=trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
OmegaConf.set_struct(pretrained_cfg, True)
with open_dict(pretrained_cfg):
pretrained_cfg.sequence_parallel = False
pretrained_cfg.activations_checkpoint_granularity = None
pretrained_cfg.activations_checkpoint_method = None
pretrained_cfg.precision = trainer.precision
model = MegatronGPTModel.restore_from(
restore_path=cfg.gpt_model_file,
trainer=trainer,
override_config_path=pretrained_cfg,
map_location=torch.device("cpu"),
save_restore_connector=save_restore_connector,
)
elif cfg.checkpoint_dir:
app_state = AppState()
if cfg.tensor_model_parallel_size > 1 or cfg.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
app_state.tensor_model_parallel_size = cfg.tensor_model_parallel_size
app_state.pipeline_model_parallel_size = cfg.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=cfg.tensor_model_parallel_size,
pipeline_model_parallel_size_=cfg.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=cfg.pipeline_model_parallel_split_rank,
)
checkpoint_path = inject_model_parallel_rank(os.path.join(cfg.checkpoint_dir, cfg.checkpoint_name))
model = MegatronGPTModel.load_from_checkpoint(checkpoint_path, hparams_file=cfg.hparams_file, trainer=trainer)
else:
raise ValueError("need at least a nemo file or checkpoint dir")
lora_model_cfg = MegatronGPTLoRAModel.restore_from(
restore_path=cfg.lora_model_path, trainer=trainer, return_config=True, mcore=model.mcore_gpt,
)
# load the lora weights on cpu for all ranks of the lora model
lora_weights = load_lora(cfg.lora_model_path, model.cfg.tensor_model_parallel_size)
# merge the lora weights with the base model, for this current rank.
    merged_weights = merge(
        model.state_dict(),
        lora_weights,
        tp=model.cfg.tensor_model_parallel_size,
        num_layers=model.cfg.num_layers,
        curr_rank=model.global_rank,
        mcore=model.mcore_gpt,
    )
# load the merged_weights back into the base model, for this current rank.
if model.cfg.megatron_amp_O2:
merged_weights = fix_for_O2(merged_weights)
model.load_state_dict(merged_weights)
# Going to go through the motions of inference to force PTL to run subprocess for loading all base model's ranks.
input = "Context: In 2004, philosopher and psychologist Michel ter Hark (Groningen, The Netherlands) published a book, called Popper, Otto Selz and the rise of evolutionary epistemology, in which he claimed that Popper took some of his ideas from his tutor, the German psychologist Otto Selz. Selz never published his ideas, partly because of the rise of Nazism, which forced him to quit his work in 1933, and the prohibition of referring to Selz' work. Popper, the historian of ideas and his scholarship, is criticised in some academic quarters for his rejection of Plato, Hegel and Marx. Question: Who claimed Otto Selz deserved credit for ideas published by Popper? Answer:"
ds = RequestDataSet([input])
request_dl = DataLoader(dataset=ds, batch_size=1)
config = {'greedy': True, 'compute_logprob': False, 'tokens_to_generate': 5, 'add_BOS': False}
model.set_inference_config(config)
response = trainer.predict(model, request_dl)
print(response)
with open_dict(model.cfg):
model.cfg.restore_from_path = cfg.merged_model_path
model.cfg.data = lora_model_cfg.data
model.cfg.target = f"{MegatronGPTSFTModel.__module__}.{MegatronGPTSFTModel.__name__}"
model.save_to(cfg.merged_model_path)
logging.info(f"saved merged model to {cfg.merged_model_path}")
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | scripts/nlp_language_modeling/merge_lora_weights/merge.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
from argparse import ArgumentParser
from multiprocessing import Pool
from sacremoses import MosesDetokenizer
from nemo.collections.common.tokenizers import AutoTokenizer
"""
This script converts the NaturalInstructions v2 dataset into individual JSONL files.
Use instructions:
1. Download the NaturalInstructions dataset by cloning it from allenai:
git clone https://github.com/allenai/natural-instructions. The raw data should be in the tasks folder.
2. Run this script:
python preprocess_niv2.py \
--niv2_dataset_path natural-instructions/tasks \
--jsonl_output_path natural-instructions/train_tasks_default_jsonl \
--splits_file_path natural-instructions/splits/default/train_tasks.txt
3. The output will be in the jsonl_output_path directory.
4. Each JSONL file is compatible with NeMo's T0JSONLMemMapDataset (https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/data/language_modeling/t0_dataset.py)
"""
def remove_newline_and_detokenize(x, detokenizer):
x = re.sub(r'\\n+', ' ', x)
x = re.sub(r'\n+', ' ', x)
x = re.sub(r'\\r+', ' ', x)
x = re.sub(r'\r+', ' ', x)
x = x.strip()
x = detokenizer.detokenize([x])
return x
def detokenize(x, detokenizer):
x = x.strip()
# NOTE: Commenting this out since sacremoses seems to remove \n as part of detokenization.
# x = detokenizer.detokenize([x])
return x
def is_empty(x, tokenizer):
return len(tokenizer.text_to_tokens(x.strip())) < 1
def write_dataset_to_file(file_name, output_file_name, detokenizer, tokenizer, idx, total_num_files, remove_newline):
print(f'Processing file {idx + 1}/{total_num_files} : {file_name} -> {output_file_name}')
dataset = json.load(open(file_name, 'r'))
with open(output_file_name, 'w') as f:
instances = dataset['Instances']
definitions = dataset['Definition']
for definition in definitions:
if is_empty(definition, tokenizer):
continue
for instance in instances:
id = instance['id']
input = instance['input']
outputs = instance['output']
# On rare occasions, the same instance can have multiple outputs. We add all of them as examples.
if is_empty(input, tokenizer):
continue
for output in outputs:
if is_empty(output, tokenizer):
continue
if remove_newline:
prompted_input = definition + ' ' + input
else:
prompted_input = definition + '\n\n' + input
proc_func = remove_newline_and_detokenize if remove_newline else detokenize
prompted_input = proc_func(prompted_input, detokenizer)
output = proc_func(output, detokenizer)
instance_object = {
'id': id,
'input': prompted_input,
'output': output,
}
f.write(json.dumps(instance_object) + '\n')
def process_folder(data_folder, output_folder, splits_file, remove_newline):
detokenizer = MosesDetokenizer('en')
tokenizer = AutoTokenizer("gpt2")
assert os.path.isdir(data_folder)
assert os.path.exists(splits_file)
if not os.path.exists(output_folder):
os.system(f'mkdir -p {output_folder}')
if not os.path.exists(os.path.join(output_folder, 'train')):
os.system(f'mkdir -p {os.path.join(output_folder, "train")}')
if not os.path.exists(os.path.join(output_folder, 'test')):
os.system(f'mkdir -p {os.path.join(output_folder, "test")}')
splits_file_names = [line.strip() + '.json' for line in open(splits_file, 'r')]
print(f'Found {len(os.listdir(data_folder))} files in the data folder ...')
    print(f'Found {len(splits_file_names)} tasks listed in the splits file ...')
print(f'Processing {len(splits_file_names)}/{len(os.listdir(data_folder))} files ...')
pool_args = []
for idx, file_name in enumerate(splits_file_names):
print(f'Processing file {idx}/{len(splits_file_names)}: {file_name}')
if not os.path.exists(os.path.join(data_folder, file_name)):
raise FileNotFoundError(f'Could not find {os.path.join(data_folder, file_name)}')
        if not file_name.endswith('.json'):
            print(f'Skipping {file_name} because it is not a JSON file')
            continue
output_file_name = os.path.join(output_folder, file_name.replace('.json', '.jsonl'))
pool_args.append(
(
os.path.join(data_folder, file_name),
output_file_name,
detokenizer,
tokenizer,
idx,
len(splits_file_names),
remove_newline,
)
)
write_dataset_to_file(
os.path.join(data_folder, file_name),
output_file_name,
detokenizer,
tokenizer,
idx,
len(splits_file_names),
remove_newline,
)
pool = Pool(42)
pool.starmap(write_dataset_to_file, pool_args)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument(
"--niv2_dataset_path",
type=str,
required=True,
help="Path to raw P3 data. Should be a folder containing folders for each task. After cloning the repo this should correspond to P3/data",
)
parser.add_argument(
"--jsonl_output_path",
type=str,
required=True,
help="Path to output folder where JSONL files will be written.",
)
parser.add_argument(
"--splits_file_path", type=str, default="default", help="Path to the file that contains splits. ex: ",
)
parser.add_argument(
"--remove_newline", action="store_true", help="Whether to remove newlines from the input and output.",
)
args = parser.parse_args()
process_folder(args.niv2_dataset_path, args.jsonl_output_path, args.splits_file_path, args.remove_newline)
| NeMo-main | scripts/nlp_language_modeling/niv2/preprocess_niv2.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""script to clean the data for sft chat dataset. It will remove the records if all the tokens are masked due to truncation by sequence length
Example usage:
MPT-7B:
python data_clean.py --dataset_file /dataset/INPUT.jsonl --output_file /dataset/OUTPUT.jsonl --library huggingface --model_name EleutherAI/gpt-neox-20b --seq_len 4096
NeMo GPT:
python data_clean.py --dataset_file /dataset/INPUT.jsonl --output_file /dataset/OUTPUT.jsonl --library sentencepiece --model_file sentencepiece.model --seq_len 4096
"""
import argparse
import json
from nemo.collections.nlp.data.language_modeling.megatron.gpt_sft_chat_dataset import GPTSFTChatDataset
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
def data_clean(
dataset_file,
output_file,
seq_len=4096,
library='huggingface',
model_name='EleutherAI/gpt-neox-20b',
tokenizer_model=None,
):
tokenizer = get_nmt_tokenizer(
library=library, model_name=model_name, tokenizer_model=tokenizer_model, use_fast=True
)
if library == 'huggingface':
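        # the chat formatting relies on <extra_id_*> marker tokens, so make sure the tokenizer knows them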
tokenizer.add_special_tokens({'additional_special_tokens': ['<extra_id_0>', '<extra_id_1>', '<extra_id_2>']})
d = GPTSFTChatDataset(dataset_file, tokenizer, seq_len, 1)
total_records = len(d)
removed_ids = set()
for i in range(total_records):
if i % 1000 == 0:
print(i)
try:
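            # drop records whose loss mask is all zeros within the first seq_len + 1 tokens,
            # since truncation has removed every trainable token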
if d[i]['mask'][: seq_len + 1].sum().item() == 0:
removed_ids.add(i)
print(f'removed {i}')
continue
except:
removed_ids.add(i)
print(f'Exception removed {i}')
with open(dataset_file, 'r', encoding='utf-8') as f:
with open(output_file, 'w', encoding='utf-8') as o:
for i, line in enumerate(f):
if i in removed_ids:
continue
obj = json.loads(line)
o.write(json.dumps(obj, ensure_ascii=False) + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_file", type=str, required=True, default='/dataset/input.jsonl')
parser.add_argument(
"--model_file", type=str, required=False, default=None, help="Path to the sentence piece model file."
)
parser.add_argument(
"--library",
type=str,
required=False,
default='huggingface',
help="tokenizer library, huggingface or sentencepiece",
)
parser.add_argument(
"--model_name",
type=str,
required=False,
default='EleutherAI/gpt-neox-20b',
help="huggingface tokenizer model name",
)
parser.add_argument("--output_file", type=str, required=True)
parser.add_argument("--seq_len", type=int, required=False, default=4096)
args = parser.parse_args()
data_clean(
dataset_file=args.dataset_file,
output_file=args.output_file,
seq_len=args.seq_len,
library=args.library,
model_name=args.model_name,
tokenizer_model=args.model_file,
)
| NeMo-main | scripts/nlp_language_modeling/sft/data_clean.py |