python_code | repo_name | file_path
---|---|---
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.zero_shot_intent_recognition.zero_shot_intent_model import ZeroShotIntentModel
| NeMo-main | nemo/collections/nlp/models/zero_shot_intent_recognition/__init__.py |
# Copyright 2018 The HuggingFace Inc. team.
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, List, Optional, Union
import numpy as np
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.nlp.data.zero_shot_intent_recognition.zero_shot_intent_dataset import (
ZeroShotIntentDataset,
ZeroShotIntentInferenceDataset,
calc_class_weights_from_dataloader,
)
from nemo.collections.nlp.models import TextClassificationModel
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging
__all__ = ['ZeroShotIntentModel']
class ZeroShotIntentModel(TextClassificationModel):
"""TextClassificationModel to be trained on two- or three-class textual entailment data, to be used for zero shot intent recognition."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
super().__init__(cfg=cfg, trainer=trainer)
def _setup_dataloader_from_config(self, cfg: DictConfig) -> 'torch.utils.data.DataLoader':
data_dir = self._cfg.dataset.data_dir
file_name = cfg.file_name
input_file = os.path.join(data_dir, file_name)
if not os.path.exists(input_file):
raise FileNotFoundError(
f"File {input_file} not found. Please check file paths and file names in the config."
)
dataset = ZeroShotIntentDataset(
file_path=input_file,
tokenizer=self.tokenizer,
max_seq_length=self._cfg.dataset.max_seq_length,
sent1_col=self._cfg.dataset.sentence_1_column,
sent2_col=self._cfg.dataset.sentence_2_column,
label_col=self._cfg.dataset.label_column,
num_classes=self.cfg.dataset.num_classes,
use_cache=self._cfg.dataset.use_cache,
)
return torch.utils.data.DataLoader(
dataset=dataset,
collate_fn=dataset.collate_fn,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.get("num_workers", 0),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
def setup_training_data(self, train_data_config: Optional[DictConfig]):
if not train_data_config or not train_data_config.file_name:
            logging.info(
                "Dataloader config or file_name for the training set is missing, so no data loader for training is created!"
            )
            self._train_dl = None
return
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config)
# calculate the class weights to be used in the loss function
if self.cfg.dataset.class_balancing == 'weighted_loss':
self.class_weights = calc_class_weights_from_dataloader(
self._train_dl, self.cfg.dataset.num_classes, self.cfg.dataset.data_dir
)
else:
self.class_weights = None
# we need to create/update the loss module by using the weights calculated from the training data
self.create_loss_module()
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if not val_data_config or not val_data_config.file_name:
            logging.info(
                "Dataloader config or file_name for the validation set is missing, so no data loader for validation is created!"
            )
            self._validation_dl = None
return
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
if not test_data_config or not test_data_config.file_name:
            logging.info(
                "Dataloader config or file_name for the test set is missing, so no data loader for test is created!"
            )
self._test_dl = None
return
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config)
def _setup_infer_dataloader(
self,
queries: List[str],
candidate_labels: List[str],
        hypothesis_template: str,
batch_size=1,
max_seq_length: int = -1,
) -> 'torch.utils.data.DataLoader':
"""
Setup method for inference data loader. Here the premise-hypothesis pairs are made from queries and candidate labels.
Args:
queries: the queries to classify
candidate_labels: strings to be used as labels
hypothesis_template: the template used to turn each label into an NLI-style hypothesis. Must include a {}
or similar syntax for the candidate label to be inserted.
batch_size: batch size to use during inference
max_seq_length: maximum length of queries, default is -1 for no limit
Returns:
A pytorch DataLoader.
"""
dataset = ZeroShotIntentInferenceDataset(
queries=queries,
candidate_labels=candidate_labels,
tokenizer=self.tokenizer,
max_seq_length=max_seq_length,
hypothesis_template=hypothesis_template,
)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=2,
pin_memory=False,
drop_last=False,
collate_fn=dataset.collate_fn,
)
def predict(
self,
queries: Union[str, List[str]],
candidate_labels: Union[str, List[str]],
hypothesis_template='This example is {}.',
batch_size=1,
multi_label=True,
entailment_idx=1,
contradiction_idx=0,
) -> List[Dict]:
"""
Given a list of queries and a list of candidate labels, return a ranked list of labels and scores for each query.
Example usage:
queries = ["I'd like a veggie burger, fries, and a coke", "Turn off the lights in the living room",]
candidate_labels = ["Food order", "Change lighting"]
model.predict(queries, candidate_labels)
Example output:
[{'sentence': "I'd like a veggie burger, fries, and a coke",
'labels': ['Food order', 'Change lighting'],
'scores': [0.8557153344154358, 0.12036784738302231]},
{'sentence': 'Turn off the lights in the living room',
'labels': ['Change lighting', 'Food order'],
'scores': [0.8506497144699097, 0.06594637036323547]}]
Args:
queries: the query or list of queries to classify
candidate_labels: string or list of strings to be used as labels
hypothesis_template: the template used to turn each label into an NLI-style hypothesis. Must include a {}
or similar syntax for the candidate label to be inserted.
batch_size: the batch size to use for inference.
multi_label: whether or not multiple candidate labels can be true. If False, the scores are normalized
such that all class probabilities sum to 1. If True, the labels are
considered independent and probabilities are normalized for each candidate by doing a softmax of
the entailment score vs. the contradiction score.
entailment_idx: the index of the "entailment" class in the trained model; models trained on MNLI
using NeMo's glue_benchmark.py or zero_shot_intent_model.py use an index of 1 by default.
contradiction_idx: the index of the "contradiction" class in the trained model; models trained on MNLI
using NeMo's glue_benchmark.py or zero_shot_intent_model.py use an index of 0 by default.
Returns:
list of dictionaries; one dict per input query. Each dict has keys "sentence", "labels", "scores".
labels and scores are parallel lists (with each score corresponding to the label at the same index),
sorted from highest to lowest score.
"""
if not queries:
raise ValueError("No queries were passed for classification!")
if not candidate_labels:
raise ValueError("No candidate labels were provided!")
queries = [queries] if isinstance(queries, str) else queries
candidate_labels = [candidate_labels] if isinstance(candidate_labels, str) else candidate_labels
if len(candidate_labels) == 1:
multi_label = True
mode = self.training
try:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Switch model to evaluation mode
self.eval()
self.to(device)
infer_datalayer = self._setup_infer_dataloader(
queries,
candidate_labels,
hypothesis_template=hypothesis_template,
batch_size=batch_size,
max_seq_length=self._cfg.dataset.max_seq_length,
)
all_batch_logits = []
for batch in infer_datalayer:
input_ids, input_type_ids, input_mask, _ = batch
logits = self.forward(
input_ids=input_ids.to(device),
token_type_ids=input_type_ids.to(device),
attention_mask=input_mask.to(device),
)
all_batch_logits.append(logits.detach().cpu().numpy())
all_logits = np.concatenate(all_batch_logits)
outputs = all_logits.reshape((len(queries), len(candidate_labels), -1))
if not multi_label:
# softmax the "entailment" logits over all candidate labels
entail_logits = outputs[..., entailment_idx]
scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
else:
# softmax over the entailment vs. contradiction dim for each label independently
entail_contr_logits = outputs[..., [contradiction_idx, entailment_idx]]
scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
scores = scores[..., 1]
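                # Worked example with hypothetical logits: contradiction=1.0, entailment=3.0
                # gives softmax [0.12, 0.88], and taking index 1 keeps 0.88 as the label score.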
result = []
for i in range(len(queries)):
sorted_idxs = list(reversed(scores[i].argsort()))
result.append(
{
"sentence": queries[i],
"labels": [candidate_labels[j] for j in sorted_idxs],
"scores": scores[i][sorted_idxs].tolist(),
}
)
finally:
# set mode back to its original value
self.train(mode=mode)
return result
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
result.append(
PretrainedModelInfo(
pretrained_model_name="zeroshotintent_en_bert_base_uncased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/zeroshotintent_en_bert_base_uncased/versions/1.4.1/files/zeroshotintent_en_bert_base_uncased.nemo",
description="ZeroShotIntentModel trained by fine tuning BERT-base-uncased on the MNLI (Multi-Genre Natural Language Inference) dataset, which achieves an accuracy of 84.9% and 84.8% on the matched and mismatched dev sets, respectively.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="zeroshotintent_en_megatron_uncased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/zeroshotintent_en_megatron_uncased/versions/1.4.1/files/zeroshotintent_en_megatron_uncased.nemo",
description="ZeroShotIntentModel trained by fine tuning Megatron-BERT-345m=M-uncased on the MNLI (Multi-Genre Natural Language Inference) dataset, which achieves an accuracy of 90.0% and 89.9% on the matched and mismatched dev sets, respectively",
)
)
return result
| NeMo-main | nemo/collections/nlp/models/zero_shot_intent_recognition/zero_shot_intent_model.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from transformers import AutoTokenizer
from nemo.collections.common.losses import MultiSimilarityLoss
from nemo.collections.nlp.data import EntityLinkingDataset
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.core.classes.common import typecheck
from nemo.core.classes.exportable import Exportable
from nemo.core.neural_types import LogitsType, NeuralType
from nemo.utils import logging
__all__ = ['EntityLinkingModel']
class EntityLinkingModel(NLPModel, Exportable):
"""
    Second-stage pretraining of a BERT-based language model
    for the entity linking task. An implementation of Liu et al.'s
    NAACL 2021 paper, Self-Alignment Pretraining for Biomedical Entity Representations.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"logits": NeuralType(('B', 'D'), LogitsType())}
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
"""Initializes the SAP-BERT model for entity linking."""
# tokenizer needed before super().__init__() so dataset and loader can process data
self._setup_tokenizer(cfg.tokenizer)
super().__init__(cfg=cfg, trainer=trainer)
# Token to use for the self-alignment loss, typically the first token, [CLS]
self._idx_conditioned_on = 0
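        # Mentions sharing a concept ID act as positives for the multi-similarity loss,
        # which pulls their embeddings together and pushes different concepts apart.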
self.loss = MultiSimilarityLoss()
def _setup_tokenizer(self, cfg: DictConfig):
tokenizer = AutoTokenizer.from_pretrained(
cfg.tokenizer_name, vocab_file=cfg.vocab_file, do_lower_case=cfg.do_lower_case
)
self.tokenizer = tokenizer
@typecheck()
def forward(self, input_ids, token_type_ids, attention_mask):
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)
if isinstance(hidden_states, tuple):
hidden_states = hidden_states[0]
# normalize to unit sphere
logits = torch.nn.functional.normalize(hidden_states[:, self._idx_conditioned_on], p=2, dim=1)
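        # After L2 normalization the embeddings lie on the unit sphere, so a dot product
        # between any two of them equals their cosine similarity.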
return logits
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
input_ids, token_type_ids, attention_mask, concept_ids = batch
logits = self.forward(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
train_loss = self.loss(logits=logits, labels=concept_ids)
# No hard examples found in batch,
# shouldn't use this batch to update model weights
if train_loss == 0:
train_loss = None
lr = None
else:
lr = self._optimizer.param_groups[0]["lr"]
self.log("train_loss", train_loss)
self.log("lr", lr, prog_bar=True)
return {"loss": train_loss, "lr": lr}
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
input_ids, input_type_ids, input_mask, concept_ids = batch
with torch.no_grad():
logits = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
val_loss = self.loss(logits=logits, labels=concept_ids)
# No hard examples found in batch,
# val loss not used to update model weights
if val_loss == 0:
val_loss = None
else:
self.log("val_loss", val_loss)
logging.info(f"val loss: {val_loss}")
loss = {"val_loss": val_loss}
self.validation_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self):
"""
Called at the end of validation to aggregate outputs.
Args:
outputs: list of individual outputs of each validation step.
Returns:
"""
if self.validation_step_outputs:
            avg_loss = torch.stack(
                [x["val_loss"] for x in self.validation_step_outputs if x["val_loss"] is not None]
            ).mean()
self.log(f"val_loss", avg_loss, prog_bar=True)
self.validation_step_outputs.clear() # free memory
return {"val_loss": avg_loss}
def setup_training_data(self, train_data_config: Optional[DictConfig]):
if not train_data_config or not train_data_config.data_file:
logging.info(
f"Dataloader config or file_path or processed data path for the train dataset is missing, \
so no data loader for train is created!"
)
self._train_dl = None
return
self._train_dl = self.setup_dataloader(cfg=train_data_config)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if not val_data_config or not val_data_config.data_file:
logging.info(
f"Dataloader config or file_path or processed data path for the val dataset is missing, \
so no data loader for validation is created!"
)
self._validation_dl = None
return
self._validation_dl = self.setup_dataloader(cfg=val_data_config)
def setup_dataloader(self, cfg: Dict, is_index_data: bool = False) -> 'torch.utils.data.DataLoader':
dataset = EntityLinkingDataset(
tokenizer=self.tokenizer,
data_file=cfg.data_file,
max_seq_length=cfg.max_seq_length,
is_index_data=is_index_data,
)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
collate_fn=dataset.collate_fn,
shuffle=cfg.get("shuffle", True),
num_workers=cfg.get("num_workers", 2),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
@classmethod
def list_available_models(cls) -> Optional[Dict[str, str]]:
pass
@classmethod
def from_pretrained(cls, name: str):
pass
| NeMo-main | nemo/collections/nlp/models/entity_linking/entity_linking_model.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.entity_linking.entity_linking_model import EntityLinkingModel
| NeMo-main | nemo/collections/nlp/models/entity_linking/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.question_answering.qa_base_model import BaseQAModel
from nemo.collections.nlp.models.question_answering.qa_bert_model import BERTQAModel
from nemo.collections.nlp.models.question_answering.qa_gpt_model import GPTQAModel
from nemo.collections.nlp.models.question_answering.qa_model import QAModel
from nemo.collections.nlp.models.question_answering.qa_s2s_model import S2SQAModel
| NeMo-main | nemo/collections/nlp/models/question_answering/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import Optional
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from torch.cuda.amp import autocast
from nemo.collections.common.losses import SpanningLoss
from nemo.collections.nlp.data import SquadDataset
from nemo.collections.nlp.data.question_answering_squad.qa_squad_processing import (
EVALUATION_MODE,
INFERENCE_MODE,
TRAINING_MODE,
)
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common import TokenClassifier
from nemo.collections.nlp.parts.utils_funcs import tensor2list
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.utils import logging
__all__ = ['QAModel']
class QAModel(NLPModel):
"""
BERT encoder with QA head training.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
super().__init__(cfg=cfg, trainer=trainer)
self.classifier = TokenClassifier(
hidden_size=self.hidden_size,
num_classes=cfg.token_classifier.num_classes,
num_layers=cfg.token_classifier.num_layers,
activation=cfg.token_classifier.activation,
log_softmax=cfg.token_classifier.log_softmax,
dropout=cfg.token_classifier.dropout,
use_transformer_init=cfg.token_classifier.use_transformer_init,
)
self.loss = SpanningLoss()
@typecheck()
def forward(self, input_ids, attention_mask, token_type_ids):
with autocast():
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)
if isinstance(hidden_states, tuple):
hidden_states = hidden_states[0]
logits = self.classifier(hidden_states=hidden_states)
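            # Assuming num_classes=2, logits have shape [batch, seq_len, 2]; SpanningLoss
            # splits the last dim into start- and end-position logits.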
return logits
def training_step(self, batch, batch_idx):
input_ids, input_type_ids, input_mask, unique_ids, start_positions, end_positions = batch
logits = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
loss, _, _ = self.loss(logits=logits, start_positions=start_positions, end_positions=end_positions)
lr = self._optimizer.param_groups[0]['lr']
self.log('train_loss', loss)
self.log('lr', lr, prog_bar=True)
return {'loss': loss, 'lr': lr}
def validation_step(self, batch, batch_idx):
if self.trainer.testing:
prefix = 'test'
else:
prefix = 'val'
input_ids, input_type_ids, input_mask, unique_ids, start_positions, end_positions = batch
logits = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
loss, start_logits, end_logits = self.loss(
logits=logits, start_positions=start_positions, end_positions=end_positions
)
tensors = {
'unique_ids': unique_ids,
'start_logits': start_logits,
'end_logits': end_logits,
}
loss = {f'{prefix}_loss': loss, f'{prefix}_tensors': tensors}
        if prefix == 'val':
            self.validation_step_outputs.append(loss)
        else:
            self.test_step_outputs.append(loss)
return loss
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def on_validation_epoch_end(self):
if self.trainer.testing:
prefix = 'test'
outputs = self.test_step_outputs
else:
prefix = 'val'
outputs = self.validation_step_outputs
avg_loss = torch.stack([x[f'{prefix}_loss'] for x in outputs]).mean()
unique_ids = torch.cat([x[f'{prefix}_tensors']['unique_ids'] for x in outputs])
start_logits = torch.cat([x[f'{prefix}_tensors']['start_logits'] for x in outputs])
end_logits = torch.cat([x[f'{prefix}_tensors']['end_logits'] for x in outputs])
all_unique_ids = []
all_start_logits = []
all_end_logits = []
if torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
for ind in range(world_size):
all_unique_ids.append(torch.empty_like(unique_ids))
all_start_logits.append(torch.empty_like(start_logits))
all_end_logits.append(torch.empty_like(end_logits))
torch.distributed.all_gather(all_unique_ids, unique_ids)
torch.distributed.all_gather(all_start_logits, start_logits)
torch.distributed.all_gather(all_end_logits, end_logits)
else:
all_unique_ids.append(unique_ids)
all_start_logits.append(start_logits)
all_end_logits.append(end_logits)
exact_match, f1, all_predictions, all_nbest = -1, -1, [], []
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
unique_ids = []
start_logits = []
end_logits = []
for u in all_unique_ids:
unique_ids.extend(tensor2list(u))
for u in all_start_logits:
start_logits.extend(tensor2list(u))
for u in all_end_logits:
end_logits.extend(tensor2list(u))
eval_dataset = self._test_dl.dataset if self.trainer.testing else self._validation_dl.dataset
exact_match, f1, all_predictions, all_nbest = eval_dataset.evaluate(
unique_ids=unique_ids,
start_logits=start_logits,
end_logits=end_logits,
n_best_size=self._cfg.dataset.n_best_size,
max_answer_length=self._cfg.dataset.max_answer_length,
version_2_with_negative=self._cfg.dataset.version_2_with_negative,
null_score_diff_threshold=self._cfg.dataset.null_score_diff_threshold,
do_lower_case=self._cfg.dataset.do_lower_case,
)
logging.info(f"{prefix} exact match {exact_match}")
logging.info(f"{prefix} f1 {f1}")
self.log(f'{prefix}_loss', avg_loss)
self.log(f'{prefix}_exact_match', exact_match)
self.log(f'{prefix}_f1', f1)
        # free memory
        if prefix == 'val':
            self.validation_step_outputs.clear()
        else:
            self.test_step_outputs.clear()
def on_test_epoch_end(self):
return self.on_validation_epoch_end()
@torch.no_grad()
def inference(
self,
file: str,
batch_size: int = 1,
num_samples: int = -1,
output_nbest_file: Optional[str] = None,
output_prediction_file: Optional[str] = None,
):
"""
Get prediction for unlabeled inference data
Args:
file: inference data
batch_size: batch size to use during inference
            num_samples: number of samples of the inference data to use; the default of -1 uses all data
output_nbest_file: optional output file for writing out nbest list
output_prediction_file: optional output file for writing out predictions
Returns:
model predictions, model nbest list
"""
# store predictions for all queries in a single list
all_predictions = []
all_nbest = []
mode = self.training
device = 'cuda' if torch.cuda.is_available() else 'cpu'
try:
# Switch model to evaluation mode
self.eval()
self.to(device)
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
dataloader_cfg = {
"batch_size": batch_size,
"file": file,
"shuffle": False,
"num_samples": num_samples,
'num_workers': 2,
'pin_memory': False,
'drop_last': False,
}
dataloader_cfg = OmegaConf.create(dataloader_cfg)
infer_datalayer = self._setup_dataloader_from_config(cfg=dataloader_cfg, mode=INFERENCE_MODE)
all_logits = []
all_unique_ids = []
for i, batch in enumerate(infer_datalayer):
input_ids, token_type_ids, attention_mask, unique_ids = batch
logits = self.forward(
input_ids=input_ids.to(device),
token_type_ids=token_type_ids.to(device),
attention_mask=attention_mask.to(device),
)
all_logits.append(logits)
all_unique_ids.append(unique_ids)
logits = torch.cat(all_logits)
unique_ids = tensor2list(torch.cat(all_unique_ids))
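            # split the concatenated logits along the last dim into start and end logits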
s, e = logits.split(dim=-1, split_size=1)
start_logits = tensor2list(s.squeeze(-1))
end_logits = tensor2list(e.squeeze(-1))
(all_predictions, all_nbest, scores_diff) = infer_datalayer.dataset.get_predictions(
unique_ids=unique_ids,
start_logits=start_logits,
end_logits=end_logits,
n_best_size=self._cfg.dataset.n_best_size,
max_answer_length=self._cfg.dataset.max_answer_length,
version_2_with_negative=self._cfg.dataset.version_2_with_negative,
null_score_diff_threshold=self._cfg.dataset.null_score_diff_threshold,
do_lower_case=self._cfg.dataset.do_lower_case,
)
with open(file, 'r') as test_file_fp:
test_data = json.load(test_file_fp)["data"]
id_to_question_mapping = {}
for title in test_data:
for par in title["paragraphs"]:
for question in par["qas"]:
id_to_question_mapping[question["id"]] = question["question"]
for question_id in all_predictions:
all_predictions[question_id] = (id_to_question_mapping[question_id], all_predictions[question_id])
if output_nbest_file is not None:
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest, indent=4) + "\n")
if output_prediction_file is not None:
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
finally:
# set mode back to its original value
self.train(mode=mode)
logging.set_verbosity(logging_level)
return all_predictions, all_nbest
def setup_training_data(self, train_data_config: Optional[DictConfig]):
if not train_data_config or not train_data_config.file:
            logging.info(
                "Dataloader config or file_path for the training set is missing, so no data loader for training is created!"
            )
            self._train_dl = None
return
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, mode=TRAINING_MODE)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if not val_data_config or not val_data_config.file:
            logging.info(
                "Dataloader config or file_path for the validation set is missing, so no data loader for validation is created!"
            )
            self._validation_dl = None
return
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config, mode=EVALUATION_MODE)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
if not test_data_config or test_data_config.file is None:
            logging.info(
                "Dataloader config or file_path for the test set is missing, so no data loader for test is created!"
            )
self._test_dl = None
return
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, mode=EVALUATION_MODE)
def _setup_dataloader_from_config(self, cfg: DictConfig, mode: str):
dataset = SquadDataset(
tokenizer=self.tokenizer,
data_file=cfg.file,
keep_doc_spans='all', # self._cfg.dataset.keep_doc_spans,
doc_stride=self._cfg.dataset.doc_stride,
max_query_length=self._cfg.dataset.max_query_length,
max_seq_length=self._cfg.dataset.max_seq_length,
version_2_with_negative=self._cfg.dataset.version_2_with_negative,
num_samples=cfg.num_samples,
mode=mode,
use_cache=self._cfg.dataset.use_cache,
)
dl = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
collate_fn=dataset.collate_fn,
drop_last=cfg.drop_last,
shuffle=cfg.shuffle,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
)
return dl
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv1.1_bertbase",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv1_1_bertbase/versions/1.0.0rc1/files/qa_squadv1.1_bertbase.nemo",
description="Question answering model finetuned from NeMo BERT Base Uncased on SQuAD v1.1 dataset which obtains an exact match (EM) score of 82.78% and an F1 score of 89.97%.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv2.0_bertbase",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv2_0_bertbase/versions/1.0.0rc1/files/qa_squadv2.0_bertbase.nemo",
description="Question answering model finetuned from NeMo BERT Base Uncased on SQuAD v2.0 dataset which obtains an exact match (EM) score of 75.04% and an F1 score of 78.08%.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv1_1_bertlarge",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv1_1_bertlarge/versions/1.0.0rc1/files/qa_squadv1.1_bertlarge.nemo",
description="Question answering model finetuned from NeMo BERT Large Uncased on SQuAD v1.1 dataset which obtains an exact match (EM) score of 85.44% and an F1 score of 92.06%.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv2.0_bertlarge",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv2_0_bertlarge/versions/1.0.0rc1/files/qa_squadv2.0_bertlarge.nemo",
description="Question answering model finetuned from NeMo BERT Large Uncased on SQuAD v2.0 dataset which obtains an exact match (EM) score of 80.22% and an F1 score of 83.05%.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv1_1_megatron_cased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv1_1_megatron_cased/versions/1.0.0rc1/files/qa_squadv1.1_megatron_cased.nemo",
description="Question answering model finetuned from Megatron Cased on SQuAD v1.1 dataset which obtains an exact match (EM) score of 88.18% and an F1 score of 94.07%.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv2.0_megatron_cased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv2_0_megatron_cased/versions/1.0.0rc1/files/qa_squadv2.0_megatron_cased.nemo",
description="Question answering model finetuned from Megatron Cased on SQuAD v2.0 dataset which obtains an exact match (EM) score of 84.73% and an F1 score of 87.89%.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv1.1_megatron_uncased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv1_1_megatron_uncased/versions/1.0.0rc1/files/qa_squadv1.1_megatron_uncased.nemo",
description="Question answering model finetuned from Megatron Unased on SQuAD v1.1 dataset which obtains an exact match (EM) score of 87.61% and an F1 score of 94.00%.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv2.0_megatron_uncased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv2_0_megatron_uncased/versions/1.0.0rc1/files/qa_squadv2.0_megatron_uncased.nemo",
description="Question answering model finetuned from Megatron Uncased on SQuAD v2.0 dataset which obtains an exact match (EM) score of 84.48% and an F1 score of 87.65%.",
)
)
return result
| NeMo-main | nemo/collections/nlp/models/question_answering/qa_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from typing import List, Optional
import numpy as np
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from transformers.models.bert.tokenization_bert import BasicTokenizer
from nemo.collections.common.losses import SpanningLoss
from nemo.collections.common.parts.utils import _compute_softmax
from nemo.collections.nlp.data.question_answering.data_processor.qa_processing import QAProcessor
from nemo.collections.nlp.data.question_answering.dataset.qa_bert_dataset import BERTQADataset
from nemo.collections.nlp.metrics.qa_metrics import QAMetrics
from nemo.collections.nlp.models.question_answering.qa_base_model import BaseQAModel
from nemo.collections.nlp.modules.common import TokenClassifier
from nemo.collections.nlp.parts.utils_funcs import tensor2list
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.utils import logging
class BERTQAModel(BaseQAModel):
""" BERT model with a QA (token classification) head """
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
super().__init__(cfg=cfg, trainer=trainer, no_lm_init=False)
self.classifier = TokenClassifier(
hidden_size=self.hidden_size,
num_classes=cfg.token_classifier.num_classes,
num_layers=cfg.token_classifier.num_layers,
activation=cfg.token_classifier.activation,
log_softmax=cfg.token_classifier.log_softmax,
dropout=cfg.token_classifier.dropout,
use_transformer_init=cfg.token_classifier.use_transformer_init,
)
self.loss = SpanningLoss()
def training_step(self, batch, batch_idx):
input_ids, input_type_ids, input_mask, unique_ids, start_positions, end_positions = batch
logits = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
loss, _, _ = self.loss(logits=logits, start_positions=start_positions, end_positions=end_positions)
lr = self._optimizer.param_groups[0]['lr']
self.log('lr', lr, prog_bar=True)
self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return {'loss': loss, 'lr': lr}
def validation_step(self, batch, batch_idx):
prefix = "test" if self.trainer.testing else "val"
input_ids, input_type_ids, input_mask, unique_ids, start_positions, end_positions = batch
logits = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
loss, start_logits, end_logits = self.loss(
logits=logits, start_positions=start_positions, end_positions=end_positions
)
tensors = {
'unique_ids': unique_ids,
'start_logits': start_logits,
'end_logits': end_logits,
}
loss = {f'{prefix}_loss': loss, f'{prefix}_tensors': tensors}
if prefix == "val":
self.validation_step_outputs.append(loss)
else:
self.test_step_outputs.append(loss)
return loss
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def on_validation_epoch_end(self):
prefix = "test" if self.trainer.testing else "val"
if prefix == 'val':
avg_loss = torch.stack([x[f'{prefix}_loss'] for x in self.validation_step_outputs]).mean()
unique_ids = torch.cat([x[f'{prefix}_tensors']['unique_ids'] for x in self.validation_step_outputs])
start_logits = torch.cat([x[f'{prefix}_tensors']['start_logits'] for x in self.validation_step_outputs])
end_logits = torch.cat([x[f'{prefix}_tensors']['end_logits'] for x in self.validation_step_outputs])
self.validation_step_outputs.clear() # free memory
else:
avg_loss = torch.stack([x[f'{prefix}_loss'] for x in self.test_step_outputs]).mean()
unique_ids = torch.cat([x[f'{prefix}_tensors']['unique_ids'] for x in self.test_step_outputs])
start_logits = torch.cat([x[f'{prefix}_tensors']['start_logits'] for x in self.test_step_outputs])
end_logits = torch.cat([x[f'{prefix}_tensors']['end_logits'] for x in self.test_step_outputs])
self.test_step_outputs.clear() # free memory
all_unique_ids = []
all_start_logits = []
all_end_logits = []
if torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
for ind in range(world_size):
all_unique_ids.append(torch.empty_like(unique_ids))
all_start_logits.append(torch.empty_like(start_logits))
all_end_logits.append(torch.empty_like(end_logits))
torch.distributed.all_gather(all_unique_ids, unique_ids)
torch.distributed.all_gather(all_start_logits, start_logits)
torch.distributed.all_gather(all_end_logits, end_logits)
else:
all_unique_ids.append(unique_ids)
all_start_logits.append(start_logits)
all_end_logits.append(end_logits)
eval_results, all_predictions, all_nbest = {}, [], []
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
unique_ids = []
start_logits = []
end_logits = []
for u in all_unique_ids:
unique_ids.extend(tensor2list(u))
for u in all_start_logits:
start_logits.extend(tensor2list(u))
for u in all_end_logits:
end_logits.extend(tensor2list(u))
eval_dataset = self._test_dl.dataset if self.trainer.testing else self._validation_dl.dataset
eval_results, _, _ = self.evaluate(
eval_dataset.features,
eval_dataset.examples,
eval_dataset.processor,
unique_ids=unique_ids,
start_logits=start_logits,
end_logits=end_logits,
n_best_size=self._cfg.dataset.n_best_size,
max_answer_length=self._cfg.dataset.max_answer_length,
version_2_with_negative=self._cfg.dataset.version_2_with_negative,
null_score_diff_threshold=self._cfg.dataset.null_score_diff_threshold,
do_lower_case=self._cfg.dataset.do_lower_case,
)
self.log(f'{prefix}_loss', avg_loss)
for eval_key in eval_results:
logging.info(f"{prefix} {eval_key}: {eval_results[eval_key]}")
self.log(f"{prefix}_{eval_key}", eval_results[eval_key])
def on_test_epoch_end(self):
return self.on_validation_epoch_end()
@typecheck()
def forward(self, input_ids, attention_mask, token_type_ids):
with torch.cuda.amp.autocast():
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)
if isinstance(hidden_states, tuple):
hidden_states = hidden_states[0]
logits = self.classifier(hidden_states=hidden_states)
return logits
@torch.no_grad()
def inference(
self,
file: str,
batch_size: int = 1,
num_samples: int = -1,
output_nbest_file: Optional[str] = None,
output_prediction_file: Optional[str] = None,
):
"""
Get prediction for unlabeled inference data
Args:
file: inference data
batch_size: batch size to use during inference
            num_samples: number of samples of the inference data to use; the default of -1 uses all data
output_nbest_file: optional output file for writing out nbest list
output_prediction_file: optional output file for writing out predictions
Returns:
model predictions, model nbest list
"""
# store predictions for all queries in a single list
all_predictions = []
all_nbest = []
mode = self.training
device = "cuda" if isinstance(self.trainer.device_ids, list) else "cpu"
try:
# Switch model to evaluation mode
self.eval()
self.to(device)
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
infer_datalayer = self.setup_inference_data(
file, batch_size=batch_size, num_samples=num_samples, num_workers=2,
)
all_logits = []
all_unique_ids = []
for i, batch in enumerate(infer_datalayer):
input_ids, token_type_ids, attention_mask, unique_ids = batch
logits = self.forward(
input_ids=input_ids.to(device),
token_type_ids=token_type_ids.to(device),
attention_mask=attention_mask.to(device),
)
all_logits.append(logits)
all_unique_ids.append(unique_ids)
logits = torch.cat(all_logits)
unique_ids = tensor2list(torch.cat(all_unique_ids))
s, e = logits.split(dim=-1, split_size=1)
start_logits = tensor2list(s.squeeze(-1))
end_logits = tensor2list(e.squeeze(-1))
(all_predictions, all_nbest, scores_diff) = self.get_predictions(
infer_datalayer.dataset.features,
infer_datalayer.dataset.examples,
infer_datalayer.dataset.processor,
unique_ids=unique_ids,
start_logits=start_logits,
end_logits=end_logits,
n_best_size=self._cfg.dataset.n_best_size,
max_answer_length=self._cfg.dataset.max_answer_length,
version_2_with_negative=self._cfg.dataset.version_2_with_negative,
null_score_diff_threshold=self._cfg.dataset.null_score_diff_threshold,
do_lower_case=self._cfg.dataset.do_lower_case,
)
if output_prediction_file:
QAMetrics.dump_predicted_answers_to_file(
output_prediction_file, infer_datalayer.dataset.examples, all_predictions,
)
if output_nbest_file:
QAMetrics.dump_nbest_predictions_to_file(
output_nbest_file,
infer_datalayer.dataset.examples,
all_nbest,
keys_to_dump=["text", "probability"],
)
finally:
# set mode back to its original value
self.train(mode=mode)
logging.set_verbosity(logging_level)
return all_predictions, all_nbest
def evaluate(
self,
features: List,
examples: List,
processor: object,
unique_ids: List[str],
start_logits: List[List[float]],
end_logits: List[List[float]],
n_best_size: int,
max_answer_length: int,
do_lower_case: bool,
version_2_with_negative: bool,
null_score_diff_threshold: float,
):
(all_predictions, all_nbest_json, scores_diff_json) = self.get_predictions(
features,
examples,
processor,
unique_ids,
start_logits,
end_logits,
n_best_size,
max_answer_length,
do_lower_case,
version_2_with_negative,
null_score_diff_threshold,
)
eval_results = QAMetrics.evaluate_predictions(examples, all_predictions)
return eval_results, all_predictions, all_nbest_json
def get_predictions(
self,
features: List,
examples: List,
processor: object,
unique_ids: List[int],
start_logits: List[List[float]],
end_logits: List[List[float]],
n_best_size: int,
max_answer_length: int,
do_lower_case: bool,
version_2_with_negative: bool,
null_score_diff_threshold: float,
):
example_index_to_features = collections.defaultdict(list)
unique_id_to_pos = {}
for index, unique_id in enumerate(unique_ids):
unique_id_to_pos[unique_id] = index
for feature in features:
example_index_to_features[feature.example_index].append(feature)
_PrelimPrediction = collections.namedtuple(
"PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]
)
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(examples):
# finish this loop if we went through all batch examples
if example_index >= len(unique_ids):
break
curr_features = example_index_to_features[example_index]
doc_tokens, _, _, _, _ = BERTQADataset.get_doc_tokens_and_offset_from_context_id(
example.context_id,
example.start_position_character,
example.is_impossible,
example.answer_text,
processor.doc_id_to_context_text,
)
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
# large and positive
score_null = 1000000
# the paragraph slice with min null score
min_null_feature_index = 0
# start logit at the slice with min null score
null_start_logit = 0
# end logit at the slice with min null score
null_end_logit = 0
for (feature_index, feature) in enumerate(curr_features):
pos = unique_id_to_pos[feature.unique_id]
start_indexes = self._get_best_indexes(start_logits[pos], n_best_size)
end_indexes = self._get_best_indexes(end_logits[pos], n_best_size)
# if we could have irrelevant answers,
# get the min score of irrelevant
if version_2_with_negative:
feature_null_score = start_logits[pos][0] + end_logits[pos][0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = start_logits[pos][0]
null_end_logit = end_logits[pos][0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions,
# e.g., predict that the start of the span is in the
# question. We throw out all invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=start_logits[pos][start_index],
end_logit=end_logits[pos][end_index],
)
)
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit,
)
)
prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)
_NbestPrediction = collections.namedtuple("NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = curr_features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = doc_tokens[orig_doc_start : (orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = self._get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit))
# In very rare edge cases we could only
# have single null pred. We just create a nonce prediction
# in this case to avoid failure.
if len(nbest) == 1:
nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["question"] = example.question_text
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = (
entry.start_logit
if (isinstance(entry.start_logit, float) or isinstance(entry.start_logit, int))
else list(entry.start_logit)
)
output["end_logit"] = (
entry.end_logit
if (isinstance(entry.end_logit, float) or isinstance(entry.end_logit, int))
else list(entry.end_logit)
)
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score -
# the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - best_non_null_entry.end_logit
scores_diff_json[example.qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
return all_predictions, all_nbest_json, scores_diff_json
def _setup_dataloader_from_config(self, cfg: DictConfig, mode: str):
processor = QAProcessor(cfg.file, mode)
dataset = BERTQADataset(
data_file=cfg.file,
processor=processor,
tokenizer=self.tokenizer,
keep_doc_spans=self._cfg.dataset.keep_doc_spans,
doc_stride=self._cfg.dataset.doc_stride,
max_query_length=self._cfg.dataset.max_query_length,
max_seq_length=self._cfg.dataset.max_seq_length,
version_2_with_negative=self._cfg.dataset.version_2_with_negative,
num_samples=cfg.num_samples,
mode=mode,
use_cache=self._cfg.dataset.use_cache,
)
data_loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
collate_fn=dataset.collate_fn,
drop_last=cfg.drop_last,
shuffle=cfg.shuffle,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
)
return data_loader
def _get_best_indexes(self, logits, n_best_size):
""" Get the n-best logits from a list """
best_indices = np.argsort(logits)[::-1]
return best_indices[:n_best_size]
def _get_final_text(self, pred_text: str, orig_text: str, do_lower_case: bool, verbose_logging: bool = False):
"""
Project the tokenized prediction back to the original text.
When we created the data, we kept track of the alignment between original
(whitespace tokenized) tokens and our WordPiece tokenized tokens. So
now `orig_text` contains the span of our original text corresponding to
the span that we predicted.
However, `orig_text` may contain extra characters that we don't want in
our prediction.
For example, let's say:
pred_text = steve smith
orig_text = Steve Smith's
We don't want to return `orig_text` because it contains the extra "'s".
We don't want to return `pred_text` because it's already been normalized
(the SQuAD eval script also does punctuation stripping/lower casing but
our tokenizer does additional normalization like stripping accent
characters).
What we really want to return is "Steve Smith".
Therefore, we have to apply a semi-complicated alignment heuristic
between `pred_text` and `orig_text` to get a character-to-character
alignment. This can fail in certain cases in which case we just return
`orig_text`
"""
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return ns_text, ns_to_s_map
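        # e.g. _strip_spaces("Steve Smith's") returns ("SteveSmith's", {0: 0, ..., 4: 4, 5: 6, 6: 7, ...}),
        # mapping each index in the stripped string back to its index in the original text.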
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logging.warning("Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logging.warning(
"Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text,
)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logging.warning("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logging.warning("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position : (orig_end_position + 1)]
return output_text
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv1.1_bertbase",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv1_1_bertbase/versions/1.0.0rc1/files/qa_squadv1.1_bertbase.nemo",
description="Question answering model finetuned from NeMo BERT Base Uncased on SQuAD v1.1 dataset which obtains an exact match (EM) score of 82.78% and an F1 score of 89.97%.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv2.0_bertbase",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv2_0_bertbase/versions/1.0.0rc1/files/qa_squadv2.0_bertbase.nemo",
description="Question answering model finetuned from NeMo BERT Base Uncased on SQuAD v2.0 dataset which obtains an exact match (EM) score of 75.04% and an F1 score of 78.08%.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv1_1_bertlarge",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv1_1_bertlarge/versions/1.0.0rc1/files/qa_squadv1.1_bertlarge.nemo",
description="Question answering model finetuned from NeMo BERT Large Uncased on SQuAD v1.1 dataset which obtains an exact match (EM) score of 85.44% and an F1 score of 92.06%.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv2.0_bertlarge",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv2_0_bertlarge/versions/1.0.0rc1/files/qa_squadv2.0_bertlarge.nemo",
description="Question answering model finetuned from NeMo BERT Large Uncased on SQuAD v2.0 dataset which obtains an exact match (EM) score of 80.22% and an F1 score of 83.05%.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv1_1_megatron_cased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv1_1_megatron_cased/versions/1.0.0rc1/files/qa_squadv1.1_megatron_cased.nemo",
description="Question answering model finetuned from Megatron Cased on SQuAD v1.1 dataset which obtains an exact match (EM) score of 88.18% and an F1 score of 94.07%.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv2.0_megatron_cased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv2_0_megatron_cased/versions/1.0.0rc1/files/qa_squadv2.0_megatron_cased.nemo",
description="Question answering model finetuned from Megatron Cased on SQuAD v2.0 dataset which obtains an exact match (EM) score of 84.73% and an F1 score of 87.89%.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv1.1_megatron_uncased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv1_1_megatron_uncased/versions/1.0.0rc1/files/qa_squadv1.1_megatron_uncased.nemo",
description="Question answering model finetuned from Megatron Unased on SQuAD v1.1 dataset which obtains an exact match (EM) score of 87.61% and an F1 score of 94.00%.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="qa_squadv2.0_megatron_uncased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/qa_squadv2_0_megatron_uncased/versions/1.0.0rc1/files/qa_squadv2.0_megatron_uncased.nemo",
description="Question answering model finetuned from Megatron Uncased on SQuAD v2.0 dataset which obtains an exact match (EM) score of 84.48% and an F1 score of 87.65%.",
)
)
return result
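# Usage sketch (illustrative only, not executed as part of this module): any of
# the pretrained models listed above can be pulled from NGC with the standard
# NeMo API, e.g.
#
#     model = BERTQAModel.from_pretrained("qa_squadv1.1_bertbase")
#
# `BERTQAModel` is assumed to be the QA model class defined earlier in this
# file; substitute the actual class name if it differs.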
| NeMo-main | nemo/collections/nlp/models/question_answering/qa_bert_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from typing import List, Optional
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from transformers import AutoModelForCausalLM
from nemo.collections.nlp.data.question_answering.data_processor.qa_processing import QAProcessor
from nemo.collections.nlp.data.question_answering.dataset.qa_gpt_dataset import GPTQADataset
from nemo.collections.nlp.metrics.qa_metrics import QAMetrics
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.models.question_answering.qa_base_model import BaseQAModel
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.utils import logging
class GPTQAModel(BaseQAModel):
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self.cfg = cfg
self.setup_tokenizer(cfg.tokenizer)
self.tokenizer.tokenizer.pad_token = self.tokenizer.tokenizer.eos_token
self.epoch_number = 0
super().__init__(cfg=cfg, trainer=trainer, no_lm_init=True)
if self.cfg.library == "huggingface":
self.language_model = AutoModelForCausalLM.from_pretrained(cfg.language_model.pretrained_model_name)
self.language_model.resize_token_embeddings(len(self.tokenizer.tokenizer))
if self.cfg.language_model.lm_checkpoint:
self.language_model.load_state_dict(torch.load(self.cfg.language_model.lm_checkpoint))
elif self.cfg.library == "megatron":
self.language_model = MegatronGPTModel.restore_from(cfg.language_model.lm_checkpoint, trainer=trainer)
def training_step(self, batch, batch_idx):
input_ids, input_attn_mask, _, _, labels = batch
loss, _ = self(input_ids, input_attn_mask, labels)
lr = self._optimizer.param_groups[0]['lr']
self.log('lr', lr, prog_bar=True)
self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return {'loss': loss}
def validation_step(self, batch, batch_idx):
prefix = "test" if self.trainer.testing else "val"
input_ids, input_attn_mask, unique_ids, training_mask_end, labels = batch
loss, per_sample_perplexity = self.forward(input_ids, input_attn_mask, labels)
generated_answers = self._generate_candidates(input_ids, input_attn_mask, training_mask_end)
labels[labels == -100] = self.tokenizer.tokenizer.pad_token_id
loss = {
"unique_ids": unique_ids,
f"{prefix}_loss": loss,
"per_sample_perplexity": per_sample_perplexity,
"input": self.tokenizer.tokenizer.batch_decode(input_ids, skip_special_tokens=True),
"ground_truth_answers": self.tokenizer.tokenizer.batch_decode(labels, skip_special_tokens=True),
"generated_answers": generated_answers,
}
if prefix == 'val':
self.validation_step_outputs.append(loss)
else:
self.test_step_outputs.append(loss)
return loss
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def on_validation_epoch_end(self):
prefix = "test" if self.trainer.testing else "val"
if prefix == 'val':
loss_terms = [x[f"{prefix}_loss"] for x in self.validation_step_outputs]
generated_answers, unique_ids, per_sample_perplexity = QAMetrics.convert_dict_outputs_to_lists(
self.validation_step_outputs, ["generated_answers", "unique_ids", "per_sample_perplexity"]
)
self.validation_step_outputs.clear() # free memory
else:
loss_terms = [x[f"{prefix}_loss"] for x in self.test_step_outputs]
generated_answers, unique_ids, per_sample_perplexity = QAMetrics.convert_dict_outputs_to_lists(
self.test_step_outputs, ["generated_answers", "unique_ids", "per_sample_perplexity"]
)
self.test_step_outputs.clear() # free memory
avg_loss = torch.stack(loss_terms).mean()
eval_dataset = self._test_dl.dataset if self.trainer.testing else self._validation_dl.dataset
eval_results, _, _ = self.evaluate(
eval_dataset.features, eval_dataset.examples, unique_ids, per_sample_perplexity, generated_answers,
)
self.log(f'{prefix}_loss', avg_loss)
for eval_key in eval_results:
logging.info(f"{prefix} {eval_key}: {eval_results[eval_key]}")
self.log(f"{prefix}_{eval_key}", eval_results[eval_key])
def on_test_epoch_end(self):
self.on_validation_epoch_end()
@typecheck()
def forward(self, input_ids, input_attn_mask, labels):
loss, per_sample_perplexity = None, None
if self.cfg.library == "huggingface":
output = self.language_model(input_ids=input_ids, attention_mask=input_attn_mask, labels=labels)
loss, lm_logits = output['loss'], output['logits']
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
per_sample_perplexity = self._get_per_sample_perplexity(shift_logits, shift_labels)
elif self.cfg.library == "megatron":
raise NotImplementedError()
return loss, per_sample_perplexity
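    # Note on the shift above: for a causal LM the logit at position t predicts the
    # token at position t + 1, so logits are truncated by one on the right and labels
    # by one on the left before the per-sample perplexity is computed.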
@torch.no_grad()
def inference(
self,
file: str,
batch_size: int = 1,
num_samples: int = -1,
output_prediction_file: Optional[str] = None,
output_nbest_file: Optional[str] = None,
):
all_predictions = []
mode = self.training
device = "cuda" if isinstance(self.trainer.device_ids, list) else "cpu"
if self.cfg.library == "huggingface":
try:
self.eval()
self.to(device)
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
inference_dl = self.setup_inference_data(file, batch_size=batch_size, num_samples=num_samples)
outputs = self._inference(inference_dl, device)
generated_answers, unique_ids, per_sample_perplexity = QAMetrics.convert_dict_outputs_to_lists(
outputs, ["generated_answers", "unique_ids", "per_sample_perplexity"]
)
                all_predictions, all_nbest_predictions = self._get_predictions(
inference_dl.dataset.features,
inference_dl.dataset.examples,
unique_ids,
per_sample_perplexity,
generated_answers,
)
if output_prediction_file:
QAMetrics.dump_predicted_answers_to_file(
output_prediction_file, inference_dl.dataset.examples, all_predictions
)
if output_nbest_file:
QAMetrics.dump_nbest_predictions_to_file(
output_nbest_file,
inference_dl.dataset.examples,
                        all_nbest_predictions,
keys_to_dump=["generated_text", "perplexity"],
)
finally:
# set mode back to its original value
self.train(mode=mode)
logging.set_verbosity(logging_level)
elif self.cfg.library == 'megatron':
raise ValueError("Megatron Inference is not supported by GPTQAModel")
        return all_predictions, all_nbest_predictions
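    # Example use of `inference` (a minimal sketch; the checkpoint and file paths
    # below are assumptions for illustration only):
    #
    #     model = GPTQAModel.restore_from("gpt_qa.nemo")      # hypothetical .nemo checkpoint
    #     predictions, nbest = model.inference(
    #         file="dev-v2.0.json",                           # SQuAD-format data file
    #         batch_size=8,
    #         output_prediction_file="predictions.json",
    #         output_nbest_file="nbest.json",
    #     )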
def evaluate(
self, features, examples, unique_ids, per_sample_perplexity, generated_texts,
):
all_predictions, all_nbest_predictions = self._get_predictions(
features, examples, unique_ids, per_sample_perplexity, generated_texts,
)
eval_results = QAMetrics.evaluate_predictions(examples, all_predictions)
return eval_results, all_predictions, all_nbest_predictions
def _setup_dataloader_from_config(self, cfg: DictConfig, mode: str):
processor = QAProcessor(cfg.file, mode)
dataset = GPTQADataset(
data_file=cfg.file,
processor=processor,
tokenizer=self.tokenizer,
keep_doc_spans=self._cfg.dataset.keep_doc_spans,
doc_stride=self._cfg.dataset.doc_stride,
max_query_length=self._cfg.dataset.max_query_length,
max_seq_length=self._cfg.dataset.max_seq_length,
max_answer_length=self._cfg.dataset.max_answer_length,
check_if_answer_in_context=self._cfg.dataset.check_if_answer_in_context,
num_samples=cfg.num_samples,
mode=mode,
use_cache=self._cfg.dataset.use_cache,
)
data_loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
collate_fn=dataset.collate_fn,
drop_last=cfg.drop_last,
shuffle=cfg.shuffle,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
)
return data_loader
def _get_predictions(
self, features, examples: List, unique_ids: List[int], per_sample_perplexity: List, generated_texts: List,
):
unique_id_to_pos = {}
for index, unique_id in enumerate(unique_ids):
unique_id_to_pos[unique_id] = index
example_index_to_features = collections.defaultdict(list)
for feature in features:
example_index_to_features[feature.example_index].append(feature)
_PrelimPrediction = collections.namedtuple(
"PrelimPrediction", ["feature_index", "perplexity", "generated_text"]
)
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
for (example_index, example) in enumerate(examples):
# finish this loop if we went through all batch examples
if example_index >= len(unique_ids):
break
curr_features = example_index_to_features[example_index]
prelim_predictions = []
for (feature_index, feature) in enumerate(curr_features):
pos = unique_id_to_pos[feature.unique_id]
curr_perplexity = per_sample_perplexity[pos]
curr_generated_text = generated_texts[pos]
prelim_prediction = _PrelimPrediction(feature_index, curr_perplexity, curr_generated_text)
prelim_predictions.append(prelim_prediction)
prelim_predictions = sorted(prelim_predictions, key=lambda x: x.perplexity)
all_predictions[example.qas_id] = prelim_predictions[0].generated_text
all_nbest_json[example.qas_id] = [pred._asdict() for pred in prelim_predictions]
return all_predictions, all_nbest_json
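    # Shape of the structures returned above (values are made up for illustration):
    # candidates for each question are ranked by ascending perplexity and the
    # lowest-perplexity generation becomes the prediction.
    #
    #     all_predictions -> {"qid_0": "Denver Broncos"}
    #     all_nbest_json  -> {"qid_0": [{"feature_index": 0, "perplexity": 1.8,
    #                                    "generated_text": "Denver Broncos"}, ...]}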
def _inference(self, inference_dl, device):
outputs = []
for i, batch in enumerate(inference_dl):
input_ids, input_attn_mask, unique_ids, training_mask_end = batch
input_ids, input_attn_mask, training_mask_end = (
tensor.to(device) for tensor in [input_ids, input_attn_mask, training_mask_end]
)
input_ids, input_attn_mask, labels, generated_texts = self._prep_inference_labels(
input_ids, input_attn_mask, training_mask_end, device
)
_, per_sample_perplexity = self.forward(input_ids, input_attn_mask, labels)
labels[labels == -100] = self.tokenizer.tokenizer.pad_token_id
outputs.append(
{
"unique_ids": unique_ids,
"per_sample_perplexity": per_sample_perplexity,
"generated_answers": generated_texts,
}
)
return outputs
def _prep_inference_labels(self, input_ids, input_attn_mask, training_mask_end, device):
        # generate answers by decoding the inputs and formatting them into the input template
decoded_inputs = self.tokenizer.tokenizer.batch_decode(input_ids, skip_special_tokens=True)
generated_texts = self._generate_candidates(input_ids, input_attn_mask, training_mask_end)
inputs_with_answer = [
f"{inp}{ans}{self.tokenizer.tokenizer.eos_token}" if ans else f"{inp}{self.tokenizer.tokenizer.eos_token}"
for inp, ans in zip(decoded_inputs, generated_texts)
]
# encode template with generated answers
encoded_dict = self.tokenizer.tokenizer(
inputs_with_answer,
truncation=True,
max_length=self._cfg.dataset.max_seq_length,
padding="max_length",
return_tensors="pt",
)
input_ids, input_attn_mask = (
tensor.to(device) for tensor in [encoded_dict["input_ids"], encoded_dict["attention_mask"]]
)
labels = GPTQADataset.update_labels_for_no_pad_loss(input_ids, training_mask_end, input_attn_mask)
if len(labels.shape) == 1:
labels = torch.unsqueeze(labels, 0)
labels = labels.to(device)
return input_ids, input_attn_mask, labels, generated_texts
def _generate_candidates(self, input_ids, input_attn_mask, training_mask_end):
num_tokens_to_generate = self.cfg.tokens_to_generate
if self.cfg.library == "huggingface":
generated_token_ids = []
max_length = 0
for i in range(input_ids.size(0)):
param_dict = {
"input_ids": input_ids[i : i + 1, : training_mask_end[i]],
"max_length": training_mask_end[i] + num_tokens_to_generate,
"pad_token_id": self.tokenizer.tokenizer.pad_token_id,
}
generated_token_ids.append(self.language_model.generate(**param_dict))
max_length = max(max_length, generated_token_ids[-1].size(1))
            # pad every generated sequence to the same length in dim 1 so they can be concatenated
generated_token_ids = [
torch.cat(
[i, torch.ones((1, max_length - i.size(1))).to(i.device) * self.tokenizer.tokenizer.pad_token_id],
axis=-1,
)
for i in generated_token_ids
]
generated_token_ids = torch.cat(generated_token_ids, axis=0)
generated_answers = self._get_answers_from_generated_tokens(
generated_token_ids, training_mask_end=training_mask_end
)
elif self.cfg.library == 'megatron':
raise ValueError("Megatron Generation is not supported by GPTQAModel")
return generated_answers
def _get_answers_from_generated_tokens(self, token_ids, training_mask_end=None):
answers = []
for i in range(token_ids.size(0)):
start_point = 0 if training_mask_end is None else training_mask_end[i].item()
stop_point = token_ids.size(1)
for j in range(start_point, stop_point):
if token_ids.data[i, j] == self.tokenizer.tokenizer.pad_token_id:
stop_point = j
break
curr_answer = self.tokenizer.tokenizer.decode(
token_ids[i, start_point:stop_point], skip_special_tokens=True
).strip()
answers.append(curr_answer)
return answers
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
return result
| NeMo-main | nemo/collections/nlp/models/question_answering/qa_gpt_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from typing import List, Optional
import torch
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning import Trainer
from torch.cuda.amp import autocast
from transformers import AutoModelForSeq2SeqLM
from nemo.collections.nlp.data.question_answering.data_processor.qa_processing import QAProcessor
from nemo.collections.nlp.data.question_answering.dataset.qa_s2s_dataset import S2SQADataset
from nemo.collections.nlp.metrics.qa_metrics import QAMetrics
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.models.question_answering.qa_base_model import BaseQAModel
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.utils import logging
class S2SQAModel(BaseQAModel):
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self.cfg = cfg
if self.cfg.library == "huggingface":
self.setup_tokenizer(cfg.tokenizer)
elif self.cfg.library == "megatron":
            # support restoring MegatronT5Model with fp16 precision
t5_cfg = MegatronT5Model.restore_from(
restore_path=cfg.language_model.lm_checkpoint, trainer=trainer, return_config=True
)
# Override the T5 configuration with the one from the config file.
OmegaConf.set_struct(t5_cfg, True)
with open_dict(t5_cfg):
t5_cfg.masked_softmax_fusion = False
t5_cfg.precision = 16
language_model = MegatronT5Model.restore_from(
restore_path=cfg.language_model.lm_checkpoint, trainer=trainer, override_config_path=t5_cfg
)
self.tokenizer = language_model.tokenizer
super().__init__(cfg=cfg, trainer=trainer, no_lm_init=True)
if self.cfg.library == "huggingface":
self.language_model = AutoModelForSeq2SeqLM.from_pretrained(cfg.language_model.pretrained_model_name)
self.language_model.resize_token_embeddings(len(self.tokenizer.tokenizer))
if self.cfg.language_model.lm_checkpoint:
self.language_model.load_state_dict(torch.load(self.cfg.language_model.lm_checkpoint))
elif self.cfg.library == "megatron":
self.language_model = language_model
def training_step(self, batch, batch_idx):
input_ids, input_attn_mask, unique_ids, labels = batch
loss, _ = self.forward(input_ids, input_attn_mask, labels)
lr = self._optimizer.param_groups[0]['lr']
self.log('lr', lr, prog_bar=True)
self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return {'loss': loss}
def validation_step(self, batch, batch_idx):
prefix = "test" if self.trainer.testing else "val"
input_ids, input_attn_mask, unique_ids, labels = batch
loss, per_sample_perplexity = self.forward(input_ids, input_attn_mask, labels)
generated_answers = self._generate_candidates(input_ids, input_attn_mask)
labels[labels == -100] = self.tokenizer.tokenizer.pad_token_id
loss = {
"unique_ids": unique_ids,
f"{prefix}_loss": loss,
"per_sample_perplexity": per_sample_perplexity,
"input": self.tokenizer.tokenizer.batch_decode(input_ids, skip_special_tokens=True),
"ground_truth_answers": self.tokenizer.tokenizer.batch_decode(labels, skip_special_tokens=True),
"generated_answers": generated_answers,
}
if prefix == 'val':
self.validation_step_outputs.append(loss)
else:
self.test_step_outputs.append(loss)
return loss
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def on_validation_epoch_end(self):
prefix = "test" if self.trainer.testing else "val"
if prefix == 'val':
loss_terms = [x[f"{prefix}_loss"] for x in self.validation_step_outputs]
generated_answers, unique_ids, per_sample_perplexity = QAMetrics.convert_dict_outputs_to_lists(
self.validation_step_outputs, ["generated_answers", "unique_ids", "per_sample_perplexity"]
)
self.validation_step_outputs.clear() # free memory
else:
loss_terms = [x[f"{prefix}_loss"] for x in self.test_step_outputs]
generated_answers, unique_ids, per_sample_perplexity = QAMetrics.convert_dict_outputs_to_lists(
self.test_step_outputs, ["generated_answers", "unique_ids", "per_sample_perplexity"]
)
self.test_step_outputs.clear() # free memory
avg_loss = torch.stack(loss_terms).mean()
eval_dataset = self._test_dl.dataset if self.trainer.testing else self._validation_dl.dataset
eval_results, _, _ = self.evaluate(
eval_dataset.features, eval_dataset.examples, unique_ids, per_sample_perplexity, generated_answers,
)
self.log(f'{prefix}_loss', avg_loss)
for eval_key in eval_results:
logging.info(f"{prefix} {eval_key}: {eval_results[eval_key]}")
self.log(f"{prefix}_{eval_key}", eval_results[eval_key])
def on_test_epoch_end(self):
self.on_validation_epoch_end()
@typecheck()
def forward(self, input_ids, input_attn_mask, labels):
loss, per_sample_perplexity = None, None
if self.cfg.library == "huggingface":
with autocast(enabled=False):
output = self.language_model(input_ids=input_ids, attention_mask=input_attn_mask, labels=labels)
loss = output['loss']
lm_logits = output['logits']
per_sample_perplexity = self._get_per_sample_perplexity(lm_logits, labels)
elif self.cfg.library == "megatron":
labels = torch.where(labels != -100, labels, torch.zeros_like(labels))
output_attn_masks = torch.where(labels > 0, torch.ones_like(labels), torch.zeros_like(labels))
unmasked_unreduced_loss = self.language_model(
input_ids, labels[:, :-1], input_attn_mask, output_attn_masks[:, :-1], lm_labels=labels[:, 1:],
)
loss = self.language_model.loss_func(output_attn_masks[:, 1:], unmasked_unreduced_loss)
per_sample_perplexity = torch.exp(unmasked_unreduced_loss)
return loss, per_sample_perplexity
@torch.no_grad()
def inference(
self,
file: str,
batch_size: int = 1,
num_samples: int = -1,
output_prediction_file: Optional[str] = None,
output_nbest_file: Optional[str] = None,
):
all_predictions = []
mode = self.training
device = "cuda" if isinstance(self.trainer.device_ids, list) else "cpu"
if self.cfg.library == "huggingface":
try:
# switch model to evaluation mode
self.eval()
self.to(device)
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
inference_dl = self.setup_inference_data(file, batch_size=batch_size, num_samples=num_samples)
outputs = self._inference(inference_dl, device)
generated_answers, unique_ids, per_sample_perplexity = QAMetrics.convert_dict_outputs_to_lists(
outputs, ["generated_answers", "unique_ids", "per_sample_perplexity"]
)
all_predictions, all_nbest_predictions = self._get_predictions(
inference_dl.dataset.features,
inference_dl.dataset.examples,
unique_ids,
per_sample_perplexity,
generated_answers,
)
if output_prediction_file:
QAMetrics.dump_predicted_answers_to_file(
output_prediction_file, inference_dl.dataset.examples, all_predictions
)
if output_nbest_file:
QAMetrics.dump_nbest_predictions_to_file(
output_nbest_file,
inference_dl.dataset.examples,
all_nbest_predictions,
keys_to_dump=["generated_text", "perplexity"],
)
finally:
# set mode back to its original value
self.train(mode=mode)
logging.set_verbosity(logging_level)
elif self.cfg.library == 'megatron':
raise ValueError("Megatron Inference is not supported by S2SQAModel")
return all_predictions, all_nbest_predictions
def evaluate(
self, features, examples, unique_ids, per_sample_perplexity, generated_texts,
):
all_predictions, all_nbest_json = self._get_predictions(
features, examples, unique_ids, per_sample_perplexity, generated_texts,
)
eval_results = QAMetrics.evaluate_predictions(examples, all_predictions)
return eval_results, all_predictions, all_nbest_json
def _setup_dataloader_from_config(self, cfg: DictConfig, mode: str):
processor = QAProcessor(cfg.file, mode)
dataset = S2SQADataset(
data_file=cfg.file,
processor=processor,
tokenizer=self.tokenizer,
keep_doc_spans=self._cfg.dataset.keep_doc_spans,
doc_stride=self._cfg.dataset.doc_stride,
max_query_length=self._cfg.dataset.max_query_length,
max_seq_length=self._cfg.dataset.max_seq_length,
max_answer_length=self._cfg.dataset.max_answer_length,
check_if_answer_in_context=self._cfg.dataset.check_if_answer_in_context,
num_samples=cfg.num_samples,
mode=mode,
use_cache=self._cfg.dataset.use_cache,
)
data_loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
collate_fn=dataset.collate_fn,
drop_last=cfg.drop_last,
shuffle=cfg.shuffle,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
)
return data_loader
def _get_predictions(
self, features, examples: List, unique_ids: List[int], per_sample_perplexity: List, generated_texts: List,
):
unique_id_to_pos = {}
for index, unique_id in enumerate(unique_ids):
unique_id_to_pos[unique_id] = index
example_index_to_features = collections.defaultdict(list)
for feature in features:
example_index_to_features[feature.example_index].append(feature)
_PrelimPrediction = collections.namedtuple(
"PrelimPrediction", ["feature_index", "perplexity", "generated_text"]
)
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
for (example_index, example) in enumerate(examples):
# finish this loop if we went through all batch examples
if example_index >= len(unique_ids):
break
curr_features = example_index_to_features[example_index]
prelim_predictions = []
for (feature_index, feature) in enumerate(curr_features):
pos = unique_id_to_pos[feature.unique_id]
curr_perplexity = per_sample_perplexity[pos]
curr_generated_text = generated_texts[pos]
prelim_prediction = _PrelimPrediction(feature_index, curr_perplexity, curr_generated_text)
prelim_predictions.append(prelim_prediction)
prelim_predictions = sorted(prelim_predictions, key=lambda x: x.perplexity)
all_predictions[example.qas_id] = prelim_predictions[0].generated_text
all_nbest_json[example.qas_id] = [pred._asdict() for pred in prelim_predictions]
return all_predictions, all_nbest_json
def _inference(self, inference_dl, device):
outputs = []
for i, batch in enumerate(inference_dl):
# get predictions
input_ids, input_attn_mask, unique_ids = batch
input_ids, input_attn_mask = (tensor.to(device) for tensor in [input_ids, input_attn_mask])
generated_texts = self._generate_candidates(input_ids, input_attn_mask)
labels = self._prep_inference_labels(generated_texts, device)
_, per_sample_perplexity = self.forward(input_ids, input_attn_mask, labels)
labels[labels == -100] = self.tokenizer.tokenizer.pad_token_id
outputs.append(
{
"unique_ids": unique_ids,
"per_sample_perplexity": per_sample_perplexity,
"generated_answers": generated_texts,
}
)
return outputs
def _prep_inference_labels(self, generated_texts, device):
encoded_output_dict = self.tokenizer.tokenizer(
generated_texts,
truncation=True,
max_length=self._cfg.dataset.max_answer_length,
padding="max_length",
return_tensors="pt",
)
input_ids = encoded_output_dict["input_ids"].to(device)
labels = torch.squeeze(input_ids)
labels[labels == self.tokenizer.tokenizer.pad_token_id] = -100
if len(labels.shape) == 1:
labels = torch.unsqueeze(labels, 0)
labels = labels.to(device)
return labels
def _generate_candidates(self, input_ids, input_attn_mask):
num_tokens_to_generate = self.cfg.tokens_to_generate
if self.cfg.library == "huggingface":
param_dict = {
"input_ids": input_ids,
"attention_mask": input_attn_mask,
"max_length": num_tokens_to_generate,
}
generated_tokens = self.language_model.generate(**param_dict)
generated_answers = self.tokenizer.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True,)
generated_answers = [ans.strip() for ans in generated_answers]
elif self.cfg.library == 'megatron':
raise ValueError("Megatron Generation is not supported by S2SQAModel")
return generated_answers
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
return result
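# Minimal model-config sketch for S2SQAModel (keys inferred from the attribute
# accesses above; the authoritative schema lives in the NeMo example configs, so
# the values below are illustrative assumptions only):
#
#     library: huggingface              # or "megatron"
#     tokens_to_generate: 32
#     language_model:
#       pretrained_model_name: t5-base
#       lm_checkpoint: null
#     tokenizer:
#       tokenizer_name: t5-base
#     dataset:
#       doc_stride: 128
#       max_query_length: 64
#       max_seq_length: 512
#       max_answer_length: 30
#       check_if_answer_in_context: true
#       keep_doc_spans: all
#       use_cache: true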
| NeMo-main | nemo/collections/nlp/models/question_answering/qa_s2s_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from nemo.collections.nlp.data.question_answering.data_processor.qa_processing import (
EVALUATION_MODE,
INFERENCE_MODE,
TRAINING_MODE,
)
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.utils import logging
class BaseQAModel(NLPModel):
def __init__(self, cfg: DictConfig, trainer: Trainer = None, no_lm_init=True):
self.cfg = cfg
super().__init__(cfg=cfg, trainer=trainer, no_lm_init=no_lm_init)
def setup_training_data(self, train_data_config: Optional[DictConfig]):
if not train_data_config or not train_data_config.file:
logging.info(
                f"Dataloader config or file_path for the training set is missing, so no data loader for training is created!"
            )
            self._train_dl = None
return
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, mode=TRAINING_MODE)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if not val_data_config or not val_data_config.file:
logging.info(
                f"Dataloader config or file_path for the validation set is missing, so no data loader for validation is created!"
            )
            self._validation_dl = None
return
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config, mode=EVALUATION_MODE)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
if not test_data_config or test_data_config.file is None:
logging.info(
                f"Dataloader config or file_path for the test set is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, mode=EVALUATION_MODE)
def setup_inference_data(self, input_file, batch_size=1, num_samples=-1, num_workers=2):
dataloader_cfg = {
"batch_size": batch_size,
"file": input_file,
"shuffle": False,
"num_samples": num_samples,
'num_workers': num_workers,
'pin_memory': False,
'drop_last': False,
}
dataloader_cfg = OmegaConf.create(dataloader_cfg)
inference_dl = self._setup_dataloader_from_config(cfg=dataloader_cfg, mode=INFERENCE_MODE)
return inference_dl
def _setup_dataloader_from_config(self, cfg: DictConfig, mode: str):
raise NotImplementedError()
@torch.no_grad()
def _get_per_sample_perplexity(self, logits, labels):
""" Returns average perplexity for each sample in the batch """
loss_fct = torch.nn.CrossEntropyLoss(ignore_index=-100, reduction='none')
unreduced_loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1),)
unreduced_loss = unreduced_loss.reshape(labels.shape)
mask_0 = unreduced_loss != 0
per_sample_perplexity = torch.exp((unreduced_loss * mask_0).sum(axis=1) / mask_0.sum(axis=1))
return per_sample_perplexity
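    # Worked toy example (illustrative numbers): for a batch of two samples whose
    # per-token cross-entropy losses come out as
    #     [[0.0, 1.0, 3.0],       # 0.0 marks an ignored/padded position
    #      [2.0, 2.0, 0.0]]
    # the masked means are (1.0 + 3.0) / 2 = 2.0 and (2.0 + 2.0) / 2 = 2.0, so the
    # per-sample perplexities are exp(2.0) ~= 7.39 for both samples.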
| NeMo-main | nemo/collections/nlp/models/question_answering/qa_base_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
from typing import Dict, List, Optional
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from nemo.collections.common.losses import AggregatorLoss, CrossEntropyLoss
from nemo.collections.nlp.data.intent_slot_classification import (
IntentSlotClassificationDataset,
IntentSlotDataDesc,
IntentSlotInferenceDataset,
)
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common import SequenceTokenClassifier
from nemo.collections.nlp.parts.utils_funcs import tensor2list
from nemo.core.classes import typecheck
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging
class IntentSlotClassificationModel(NLPModel):
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
""" Initializes BERT Joint Intent and Slot model.
"""
self.max_seq_length = cfg.language_model.max_seq_length
        # Check the presence of data_dir.
if not cfg.data_dir or not os.path.exists(cfg.data_dir):
# Set default values of data_desc.
self._set_defaults_data_desc(cfg)
else:
self.data_dir = cfg.data_dir
# Update configuration of data_desc.
self._set_data_desc_to_cfg(cfg, cfg.data_dir, cfg.train_ds, cfg.validation_ds)
        # init superclass
        super().__init__(cfg=cfg, trainer=trainer)
# Initialize Classifier.
self._reconfigure_classifier()
def _set_defaults_data_desc(self, cfg):
"""
Method makes sure that cfg.data_desc params are set.
        If not, sets them to "dummy" defaults.
"""
if not hasattr(cfg, "data_desc"):
OmegaConf.set_struct(cfg, False)
cfg.data_desc = {}
# Intents.
cfg.data_desc.intent_labels = " "
cfg.data_desc.intent_label_ids = {" ": 0}
cfg.data_desc.intent_weights = [1]
# Slots.
cfg.data_desc.slot_labels = " "
cfg.data_desc.slot_label_ids = {" ": 0}
cfg.data_desc.slot_weights = [1]
cfg.data_desc.pad_label = "O"
OmegaConf.set_struct(cfg, True)
def _set_data_desc_to_cfg(self, cfg, data_dir, train_ds, validation_ds):
""" Method creates IntentSlotDataDesc and copies generated values to cfg.data_desc. """
# Save data from data desc to config - so it can be reused later, e.g. in inference.
data_desc = IntentSlotDataDesc(data_dir=data_dir, modes=[train_ds.prefix, validation_ds.prefix])
OmegaConf.set_struct(cfg, False)
if not hasattr(cfg, "data_desc") or cfg.data_desc is None:
cfg.data_desc = {}
# Intents.
cfg.data_desc.intent_labels = list(data_desc.intents_label_ids.keys())
cfg.data_desc.intent_label_ids = data_desc.intents_label_ids
cfg.data_desc.intent_weights = data_desc.intent_weights
# Slots.
cfg.data_desc.slot_labels = list(data_desc.slots_label_ids.keys())
cfg.data_desc.slot_label_ids = data_desc.slots_label_ids
cfg.data_desc.slot_weights = data_desc.slot_weights
cfg.data_desc.pad_label = data_desc.pad_label
# for older(pre - 1.0.0.b3) configs compatibility
if not hasattr(cfg, "class_labels") or cfg.class_labels is None:
cfg.class_labels = {}
cfg.class_labels = OmegaConf.create(
{'intent_labels_file': 'intent_labels.csv', 'slot_labels_file': 'slot_labels.csv'}
)
slot_labels_file = os.path.join(data_dir, pathlib.Path(cfg.class_labels.slot_labels_file).name)
intent_labels_file = os.path.join(data_dir, pathlib.Path(cfg.class_labels.intent_labels_file).name)
self._save_label_ids(data_desc.slots_label_ids, slot_labels_file)
self._save_label_ids(data_desc.intents_label_ids, intent_labels_file)
self.register_artifact('class_labels.intent_labels_file', intent_labels_file)
self.register_artifact('class_labels.slot_labels_file', slot_labels_file)
OmegaConf.set_struct(cfg, True)
def _save_label_ids(self, label_ids: Dict[str, int], filename: str) -> None:
""" Saves label ids map to a file """
with open(filename, 'w') as out:
labels, _ = zip(*sorted(label_ids.items(), key=lambda x: x[1]))
out.write('\n'.join(labels))
logging.info(f'Labels: {label_ids}')
logging.info(f'Labels mapping saved to : {out.name}')
def _reconfigure_classifier(self):
""" Method reconfigures the classifier depending on the settings of model cfg.data_desc """
self.classifier = SequenceTokenClassifier(
hidden_size=self.hidden_size,
num_intents=len(self.cfg.data_desc.intent_labels),
num_slots=len(self.cfg.data_desc.slot_labels),
dropout=self.cfg.head.fc_dropout,
num_layers=self.cfg.head.num_output_layers,
log_softmax=False,
)
# define losses
if self.cfg.class_balancing == 'weighted_loss':
# You may need to increase the number of epochs for convergence when using weighted_loss
self.intent_loss = CrossEntropyLoss(logits_ndim=2, weight=self.cfg.data_desc.intent_weights)
self.slot_loss = CrossEntropyLoss(logits_ndim=3, weight=self.cfg.data_desc.slot_weights)
else:
self.intent_loss = CrossEntropyLoss(logits_ndim=2)
self.slot_loss = CrossEntropyLoss(logits_ndim=3)
self.total_loss = AggregatorLoss(
num_inputs=2, weights=[self.cfg.intent_loss_weight, 1.0 - self.cfg.intent_loss_weight]
)
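        # With these weights the combined objective is
        #     total = intent_loss_weight * intent_loss + (1 - intent_loss_weight) * slot_loss
        # e.g. intent_loss_weight = 0.6 weighs intents at 0.6 and slots at 0.4
        # (0.6 is only an illustrative value, not a recommended setting).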
# setup to track metrics
self.intent_classification_report = ClassificationReport(
num_classes=len(self.cfg.data_desc.intent_labels),
label_ids=self.cfg.data_desc.intent_label_ids,
dist_sync_on_step=True,
mode='micro',
)
self.slot_classification_report = ClassificationReport(
num_classes=len(self.cfg.data_desc.slot_labels),
label_ids=self.cfg.data_desc.slot_label_ids,
dist_sync_on_step=True,
mode='micro',
)
def update_data_dir_for_training(self, data_dir: str, train_ds, validation_ds) -> None:
"""
Update data directory and get data stats with Data Descriptor.
Also, reconfigures the classifier - to cope with data with e.g. different number of slots.
Args:
data_dir: path to data directory
"""
logging.info(f'Setting data_dir to {data_dir}.')
self.data_dir = data_dir
# Update configuration with new data.
self._set_data_desc_to_cfg(self.cfg, data_dir, train_ds, validation_ds)
# Reconfigure the classifier for different settings (number of intents, slots etc.).
self._reconfigure_classifier()
def update_data_dir_for_testing(self, data_dir) -> None:
"""
Update data directory.
Args:
data_dir: path to data directory
"""
logging.info(f'Setting data_dir to {data_dir}.')
self.data_dir = data_dir
@typecheck()
def forward(self, input_ids, attention_mask, token_type_ids):
"""
No special modification required for Lightning, define it as you normally would
in the `nn.Module` in vanilla PyTorch.
"""
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)
if isinstance(hidden_states, tuple):
hidden_states = hidden_states[0]
intent_logits, slot_logits = self.classifier(hidden_states=hidden_states)
return intent_logits.float(), slot_logits.float()
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
# forward pass
input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask, intent_labels, slot_labels = batch
intent_logits, slot_logits = self(
input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask
)
# calculate combined loss for intents and slots
intent_loss = self.intent_loss(logits=intent_logits, labels=intent_labels)
slot_loss = self.slot_loss(logits=slot_logits, labels=slot_labels, loss_mask=loss_mask)
train_loss = self.total_loss(loss_1=intent_loss, loss_2=slot_loss)
lr = self._optimizer.param_groups[0]['lr']
self.log('train_loss', train_loss)
self.log('lr', lr, prog_bar=True)
return {
'loss': train_loss,
'lr': lr,
}
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask, intent_labels, slot_labels = batch
intent_logits, slot_logits = self(
input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask
)
# calculate combined loss for intents and slots
intent_loss = self.intent_loss(logits=intent_logits, labels=intent_labels)
slot_loss = self.slot_loss(logits=slot_logits, labels=slot_labels, loss_mask=loss_mask)
val_loss = self.total_loss(loss_1=intent_loss, loss_2=slot_loss)
# calculate accuracy metrics for intents and slot reporting
# intents
preds = torch.argmax(intent_logits, axis=-1)
self.intent_classification_report.update(preds, intent_labels)
# slots
subtokens_mask = subtokens_mask > 0.5
preds = torch.argmax(slot_logits, axis=-1)[subtokens_mask]
slot_labels = slot_labels[subtokens_mask]
self.slot_classification_report.update(preds, slot_labels)
loss = {
'val_loss': val_loss,
'intent_tp': self.intent_classification_report.tp,
'intent_fn': self.intent_classification_report.fn,
'intent_fp': self.intent_classification_report.fp,
'slot_tp': self.slot_classification_report.tp,
'slot_fn': self.slot_classification_report.fn,
'slot_fp': self.slot_classification_report.fp,
}
self.validation_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self):
"""
Called at the end of validation to aggregate outputs.
:param outputs: list of individual outputs of each validation step.
"""
prefix = "test" if self.trainer.testing else "val"
if prefix == "val":
outputs = self.validation_step_outputs
else:
outputs = self.test_step_outputs
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
# calculate metrics and log classification report (separately for intents and slots)
intent_precision, intent_recall, intent_f1, intent_report = self.intent_classification_report.compute()
logging.info(f'Intent report: {intent_report}')
slot_precision, slot_recall, slot_f1, slot_report = self.slot_classification_report.compute()
logging.info(f'Slot report: {slot_report}')
self.log(f'{prefix}_loss', avg_loss)
self.log('intent_precision', intent_precision)
self.log('intent_recall', intent_recall)
self.log('intent_f1', intent_f1)
self.log('slot_precision', slot_precision)
self.log('slot_recall', slot_recall)
self.log('slot_f1', slot_f1)
self.intent_classification_report.reset()
self.slot_classification_report.reset()
        if prefix == 'val':
            self.validation_step_outputs.clear()
        else:
            self.test_step_outputs.clear()
return {
f'{prefix}_loss': avg_loss,
'intent_precision': intent_precision,
'intent_recall': intent_recall,
'intent_f1': intent_f1,
'slot_precision': slot_precision,
'slot_recall': slot_recall,
'slot_f1': slot_f1,
}
def test_step(self, batch, batch_idx):
"""
Lightning calls this inside the test loop with the data from the test dataloader
passed in as `batch`.
"""
loss = self.validation_step(batch, batch_idx)
self.test_step_outputs.append(loss)
return loss
def on_test_epoch_end(self):
"""
Called at the end of test to aggregate outputs.
:param outputs: list of individual outputs of each test step.
"""
return self.on_validation_epoch_end()
def setup_training_data(self, train_data_config: Optional[DictConfig]):
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config)
def _setup_dataloader_from_config(self, cfg: DictConfig):
input_file = f'{self.data_dir}/{cfg.prefix}.tsv'
slot_file = f'{self.data_dir}/{cfg.prefix}_slots.tsv'
if not (os.path.exists(input_file) and os.path.exists(slot_file)):
            raise FileNotFoundError(
                f'{input_file} or {slot_file} not found. Please refer to the documentation for the right format '
                'of Intents and Slots files.'
            )
dataset = IntentSlotClassificationDataset(
input_file=input_file,
slot_file=slot_file,
tokenizer=self.tokenizer,
max_seq_length=self.max_seq_length,
num_samples=cfg.num_samples,
pad_label=self.cfg.data_desc.pad_label,
ignore_extra_tokens=self.cfg.ignore_extra_tokens,
ignore_start_end=self.cfg.ignore_start_end,
)
return DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
drop_last=cfg.drop_last,
collate_fn=dataset.collate_fn,
)
def _setup_infer_dataloader(self, queries: List[str], test_ds) -> 'torch.utils.data.DataLoader':
"""
        Setup function for an inference data loader.
        Args:
            queries: text sequences to run inference on
            test_ds: dataset config section providing batch size and other loader settings
Returns:
A pytorch DataLoader.
"""
dataset = IntentSlotInferenceDataset(
tokenizer=self.tokenizer, queries=queries, max_seq_length=-1, do_lower_case=False
)
return torch.utils.data.DataLoader(
dataset=dataset,
collate_fn=dataset.collate_fn,
batch_size=test_ds.batch_size,
shuffle=test_ds.shuffle,
num_workers=test_ds.num_workers,
pin_memory=test_ds.pin_memory,
drop_last=test_ds.drop_last,
)
def predict_from_examples(self, queries: List[str], test_ds) -> List[List[str]]:
"""
Get prediction for the queries (intent and slots)
Args:
queries: text sequences
test_ds: Dataset configuration section.
Returns:
predicted_intents, predicted_slots: model intent and slot predictions
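        Example (an illustrative sketch only: the intent and slot label strings
        depend on the data the model was trained on, and `cfg.model.test_ds`
        stands in for whichever dataset config section is available):
            queries = ["set an alarm for 7 am"]
            intents, slots = model.predict_from_examples(queries, cfg.model.test_ds)
            # intents -> ['alarm_set']
            # slots   -> ['O O O O B-time I-time']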
"""
predicted_intents = []
predicted_slots = []
mode = self.training
try:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Retrieve intent and slot vocabularies from configuration.
intent_labels = self.cfg.data_desc.intent_labels
slot_labels = self.cfg.data_desc.slot_labels
# Initialize tokenizer.
# if not hasattr(self, "tokenizer"):
# self._setup_tokenizer(self.cfg.tokenizer)
# Initialize modules.
# self._reconfigure_classifier()
# Switch model to evaluation mode
self.eval()
self.to(device)
# Dataset.
infer_datalayer = self._setup_infer_dataloader(queries, test_ds)
for batch in infer_datalayer:
input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask = batch
intent_logits, slot_logits = self.forward(
input_ids=input_ids.to(device),
token_type_ids=input_type_ids.to(device),
attention_mask=input_mask.to(device),
)
# predict intents and slots for these examples
# intents
intent_preds = tensor2list(torch.argmax(intent_logits, axis=-1))
# convert numerical outputs to Intent and Slot labels from the dictionaries
for intent_num in intent_preds:
if intent_num < len(intent_labels):
predicted_intents.append(intent_labels[int(intent_num)])
else:
# should not happen
predicted_intents.append("Unknown Intent")
# slots
slot_preds = torch.argmax(slot_logits, axis=-1)
for slot_preds_query, mask_query in zip(slot_preds, subtokens_mask):
query_slots = ''
for slot, mask in zip(slot_preds_query, mask_query):
if mask == 1:
if slot < len(slot_labels):
query_slots += slot_labels[int(slot)] + ' '
else:
query_slots += 'Unknown_slot '
predicted_slots.append(query_slots.strip())
finally:
# set mode back to its original value
self.train(mode=mode)
return predicted_intents, predicted_slots
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
model = PretrainedModelInfo(
pretrained_model_name="Joint_Intent_Slot_Assistant",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemonlpmodels/versions/1.0.0a5/files/Joint_Intent_Slot_Assistant.nemo",
description="This models is trained on this https://github.com/xliuhw/NLU-Evaluation-Data dataset which includes 64 various intents and 55 slots. Final Intent accuracy is about 87%, Slot accuracy is about 89%.",
)
result.append(model)
return result
| NeMo-main | nemo/collections/nlp/models/intent_slot_classification/intent_slot_classification_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.intent_slot_classification.intent_slot_classification_model import (
IntentSlotClassificationModel,
)
from nemo.collections.nlp.models.intent_slot_classification.multi_label_intent_slot_classification_model import (
MultiLabelIntentSlotClassificationModel,
)
| NeMo-main | nemo/collections/nlp/models/intent_slot_classification/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Optional, Tuple
import numpy as np
import numpy.typing as npt
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from sklearn.metrics import f1_score, precision_score, recall_score
from torch.utils.data import DataLoader
from nemo.collections.common.losses import AggregatorLoss, BCEWithLogitsLoss, CrossEntropyLoss
from nemo.collections.nlp.data.intent_slot_classification import (
MultiLabelIntentSlotClassificationDataset,
MultiLabelIntentSlotDataDesc,
)
from nemo.collections.nlp.metrics.classification_report import ClassificationReport, MultiLabelClassificationReport
from nemo.collections.nlp.models.intent_slot_classification import IntentSlotClassificationModel
from nemo.collections.nlp.modules.common import SequenceTokenClassifier
from nemo.collections.nlp.parts.utils_funcs import tensor2list
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging
class MultiLabelIntentSlotClassificationModel(IntentSlotClassificationModel):
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
"""
Initializes BERT Joint Intent and Slot model.
Args:
cfg: configuration object
trainer: trainer for Pytorch Lightning
"""
self.max_seq_length = cfg.language_model.max_seq_length
# Optimal Threshold
self.threshold = 0.5
self.max_f1 = 0
# Check the presence of data_dir.
if not cfg.data_dir or not os.path.exists(cfg.data_dir):
# Set default values of data_desc.
self._set_defaults_data_desc(cfg)
else:
self.data_dir = cfg.data_dir
# Update configuration of data_desc.
self._set_data_desc_to_cfg(cfg, cfg.data_dir, cfg.train_ds, cfg.validation_ds)
# init superclass
super().__init__(cfg=cfg, trainer=trainer)
# Initialize Classifier.
self._reconfigure_classifier()
def _set_data_desc_to_cfg(
self, cfg: DictConfig, data_dir: str, train_ds: DictConfig, validation_ds: DictConfig
) -> None:
"""
Creates MultiLabelIntentSlotDataDesc and copies generated values to Configuration object's data descriptor.
Args:
cfg: configuration object
data_dir: data directory
train_ds: training dataset file name
validation_ds: validation dataset file name
Returns:
None
"""
# Save data from data desc to config - so it can be reused later, e.g. in inference.
data_desc = MultiLabelIntentSlotDataDesc(data_dir=data_dir, modes=[train_ds.prefix, validation_ds.prefix])
OmegaConf.set_struct(cfg, False)
if not hasattr(cfg, "data_desc") or cfg.data_desc is None:
cfg.data_desc = {}
# Intents.
cfg.data_desc.intent_labels = list(data_desc.intents_label_ids.keys())
cfg.data_desc.intent_label_ids = data_desc.intents_label_ids
cfg.data_desc.intent_weights = data_desc.intent_weights
# Slots.
cfg.data_desc.slot_labels = list(data_desc.slots_label_ids.keys())
cfg.data_desc.slot_label_ids = data_desc.slots_label_ids
cfg.data_desc.slot_weights = data_desc.slot_weights
cfg.data_desc.pad_label = data_desc.pad_label
# for older(pre - 1.0.0.b3) configs compatibility
if not hasattr(cfg, "class_labels") or cfg.class_labels is None:
cfg.class_labels = {}
cfg.class_labels = OmegaConf.create(
{"intent_labels_file": "intent_labels.csv", "slot_labels_file": "slot_labels.csv",}
)
slot_labels_file = os.path.join(data_dir, cfg.class_labels.slot_labels_file)
intent_labels_file = os.path.join(data_dir, cfg.class_labels.intent_labels_file)
self._save_label_ids(data_desc.slots_label_ids, slot_labels_file)
self._save_label_ids(data_desc.intents_label_ids, intent_labels_file)
self.register_artifact("class_labels.intent_labels_file", intent_labels_file)
self.register_artifact("class_labels.slot_labels_file", slot_labels_file)
OmegaConf.set_struct(cfg, True)
def _reconfigure_classifier(self) -> None:
""" Method reconfigures the classifier depending on the settings of model cfg.data_desc """
self.classifier = SequenceTokenClassifier(
hidden_size=self.bert_model.config.hidden_size,
num_intents=len(self.cfg.data_desc.intent_labels),
num_slots=len(self.cfg.data_desc.slot_labels),
dropout=self.cfg.head.fc_dropout,
num_layers=self.cfg.head.num_output_layers,
log_softmax=False,
)
# define losses
if self.cfg.class_balancing == "weighted_loss":
# You may need to increase the number of epochs for convergence when using weighted_loss
self.intent_loss = BCEWithLogitsLoss(logits_ndim=2, pos_weight=self.cfg.data_desc.intent_weights)
self.slot_loss = CrossEntropyLoss(logits_ndim=3, weight=self.cfg.data_desc.slot_weights)
else:
self.intent_loss = BCEWithLogitsLoss(logits_ndim=2)
self.slot_loss = CrossEntropyLoss(logits_ndim=3)
self.total_loss = AggregatorLoss(
num_inputs=2, weights=[self.cfg.intent_loss_weight, 1.0 - self.cfg.intent_loss_weight],
)
# setup to track metrics
self.intent_classification_report = MultiLabelClassificationReport(
num_classes=len(self.cfg.data_desc.intent_labels),
label_ids=self.cfg.data_desc.intent_label_ids,
dist_sync_on_step=True,
mode="micro",
)
self.slot_classification_report = ClassificationReport(
num_classes=len(self.cfg.data_desc.slot_labels),
label_ids=self.cfg.data_desc.slot_label_ids,
dist_sync_on_step=True,
mode="micro",
)
def validation_step(self, batch, batch_idx) -> None:
"""
Validation Loop. Pytorch Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
Args:
batch: batches of data from DataLoader
batch_idx: batch idx from DataLoader
Returns:
None
"""
(input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask, intent_labels, slot_labels,) = batch
intent_logits, slot_logits = self(
input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask,
)
# calculate combined loss for intents and slots
intent_loss = self.intent_loss(logits=intent_logits, labels=intent_labels)
slot_loss = self.slot_loss(logits=slot_logits, labels=slot_labels, loss_mask=loss_mask)
val_loss = self.total_loss(loss_1=intent_loss, loss_2=slot_loss)
intent_probabilities = torch.round(torch.sigmoid(intent_logits))
self.intent_classification_report.update(intent_probabilities, intent_labels)
# slots
subtokens_mask = subtokens_mask > 0.5
preds = torch.argmax(slot_logits, axis=-1)[subtokens_mask]
slot_labels = slot_labels[subtokens_mask]
self.slot_classification_report.update(preds, slot_labels)
loss = {
"val_loss": val_loss,
"intent_tp": self.intent_classification_report.tp,
"intent_fn": self.intent_classification_report.fn,
"intent_fp": self.intent_classification_report.fp,
"slot_tp": self.slot_classification_report.tp,
"slot_fn": self.slot_classification_report.fn,
"slot_fp": self.slot_classification_report.fp,
}
self.validation_step_outputs.append(loss)
return loss
def _setup_dataloader_from_config(self, cfg: DictConfig) -> DataLoader:
"""
Creates the DataLoader from the configuration object
Args:
cfg: configuration object
Returns:
DataLoader for model's data
"""
input_file = f"{self.data_dir}/{cfg.prefix}.tsv"
slot_file = f"{self.data_dir}/{cfg.prefix}_slots.tsv"
intent_dict_file = self.data_dir + "/dict.intents.csv"
        with open(intent_dict_file, "r") as f:
            lines = f.readlines()
lines = [line.strip() for line in lines if line.strip()]
num_intents = len(lines)
if not (os.path.exists(input_file) and os.path.exists(slot_file)):
            raise FileNotFoundError(
                f"{input_file} or {slot_file} not found. Please refer to the documentation for the right format "
                "of Intents and Slots files."
            )
dataset = MultiLabelIntentSlotClassificationDataset(
input_file=input_file,
slot_file=slot_file,
num_intents=num_intents,
tokenizer=self.tokenizer,
max_seq_length=self.max_seq_length,
num_samples=cfg.num_samples,
pad_label=self.cfg.data_desc.pad_label,
ignore_extra_tokens=self.cfg.ignore_extra_tokens,
ignore_start_end=self.cfg.ignore_start_end,
)
return DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
drop_last=cfg.drop_last,
collate_fn=dataset.collate_fn,
)
def prediction_probabilities(self, queries: List[str], test_ds: DictConfig) -> npt.NDArray:
"""
Get prediction probabilities for the queries (intent and slots)
Args:
queries: text sequences
test_ds: Dataset configuration section.
Returns:
numpy array of intent probabilities
"""
probabilities = []
mode = self.training
try:
device = "cuda" if torch.cuda.is_available() else "cpu"
# Switch model to evaluation mode
self.eval()
self.to(device)
# Dataset.
infer_datalayer = self._setup_infer_dataloader(queries, test_ds)
for batch in infer_datalayer:
input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask = batch
intent_logits, slot_logits = self.forward(
input_ids=input_ids.to(device),
token_type_ids=input_type_ids.to(device),
attention_mask=input_mask.to(device),
)
# predict intents for these examples
probabilities.append(torch.sigmoid(intent_logits).detach().cpu().numpy())
probabilities = np.concatenate(probabilities)
finally:
# set mode back to its original value
self.train(mode=mode)
return probabilities
def optimize_threshold(self, test_ds: DictConfig, file_name: str) -> None:
"""
Set the optimal threshold of the model from performance on validation set. This threshold is used to round the
logits to 0 or 1.
Args:
test_ds: location of test dataset
file_name: name of input file to retrieve validation set
Returns:
None
"""
input_file = f"{self.data_dir}/{file_name}.tsv"
with open(input_file, "r") as f:
input_lines = f.readlines()[1:] # Skipping headers at index 0
dataset = list(input_lines)
metrics_labels, sentences = [], []
for input_line in dataset:
sentence = input_line.strip().split("\t")[0]
sentences.append(sentence)
parts = input_line.strip().split("\t")[1:][0]
parts = list(map(int, parts.split(",")))
parts = [1 if label in parts else 0 for label in range(len(self.cfg.data_desc.intent_labels))]
metrics_labels.append(parts)
# Retrieve class probabilities for each sentence
intent_probabilities = self.prediction_probabilities(sentences, test_ds)
metrics_dict = {}
# Find optimal logits rounding threshold for intents
for i in np.arange(0.5, 0.96, 0.01):
predictions = (intent_probabilities >= i).tolist()
precision = precision_score(metrics_labels, predictions, average='micro')
recall = recall_score(metrics_labels, predictions, average='micro')
f1 = f1_score(metrics_labels, predictions, average='micro')
metrics_dict[i] = [precision, recall, f1]
max_precision = max(metrics_dict, key=lambda x: metrics_dict[x][0])
max_recall = max(metrics_dict, key=lambda x: metrics_dict[x][1])
max_f1_score = max(metrics_dict, key=lambda x: metrics_dict[x][2])
logging.info(
f'Best Threshold for F1-Score: {max_f1_score}, [Precision, Recall, F1-Score]: {metrics_dict[max_f1_score]}'
)
logging.info(
f'Best Threshold for Precision: {max_precision}, [Precision, Recall, F1-Score]: {metrics_dict[max_precision]}'
)
logging.info(
f'Best Threshold for Recall: {max_recall}, [Precision, Recall, F1-Score]: {metrics_dict[max_recall]}'
)
if metrics_dict[max_f1_score][2] > self.max_f1:
self.max_f1 = metrics_dict[max_f1_score][2]
logging.info(f'Setting Threshold to: {max_f1_score}')
self.threshold = max_f1_score
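# Illustrative walk-through of the sweep above (hypothetical numbers, not executed):
# if metrics_dict were {0.50: [0.70, 0.90, 0.79], 0.60: [0.80, 0.82, 0.81], 0.70: [0.88, 0.60, 0.71]},
# max_f1_score would be 0.60 because its third entry (F1) is the largest, and self.threshold would be
# updated to 0.60 only if that F1 of 0.81 improves on the previously recorded self.max_f1.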
def predict_from_examples(
self, queries: List[str], test_ds: DictConfig, threshold: float = None
) -> Tuple[List[List[Tuple[str, float]]], List[str], List[List[int]]]:
"""
Get prediction for the queries (intent and slots)
Args:
queries: text sequences
test_ds: Dataset configuration section.
threshold: Threshold for rounding prediction logits
Returns:
predicted_intents: model intent predictions with their probabilities
Example: [[('flight', 0.84)], [('airfare', 0.54),
('flight', 0.73), ('meal', 0.24)]]
predicted_slots: model slot predictions
Example: ['O B-depart_date.month_name B-depart_date.day_number',
'O O B-flight_stop O O O']
predicted_vector: model intent predictions for each individual query. Binary values within each list
indicate whether a class is predicted for the given query (1 for True, 0 for False)
Example: [[1,0,0,0,0,0], [0,0,1,0,0,0]]
"""
predicted_intents = []
if threshold is None:
threshold = self.threshold
logging.info(f'Using threshold = {threshold}')
predicted_slots = []
predicted_vector = []
mode = self.training
try:
device = "cuda" if torch.cuda.is_available() else "cpu"
# Retrieve intent and slot vocabularies from configuration.
intent_labels = self.cfg.data_desc.intent_labels
slot_labels = self.cfg.data_desc.slot_labels
# Switch model to evaluation mode
self.eval()
self.to(device)
# Dataset.
infer_datalayer = self._setup_infer_dataloader(queries, test_ds)
for batch in infer_datalayer:
input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask = batch
intent_logits, slot_logits = self.forward(
input_ids=input_ids.to(device),
token_type_ids=input_type_ids.to(device),
attention_mask=input_mask.to(device),
)
# predict intents and slots for these examples
# intents
intent_preds = tensor2list(torch.sigmoid(intent_logits))
# convert numerical outputs to Intent and Slot labels from the dictionaries
for intents in intent_preds:
intent_lst = []
temp_list = []
for intent_num, probability in enumerate(intents):
if probability >= threshold:
intent_lst.append((intent_labels[int(intent_num)], round(probability, 2)))
temp_list.append(1)
else:
temp_list.append(0)
predicted_vector.append(temp_list)
predicted_intents.append(intent_lst)
# slots
slot_preds = torch.argmax(slot_logits, axis=-1)
temp_slots_preds = []
for slot_preds_query, mask_query in zip(slot_preds, subtokens_mask):
temp_slots = ""
query_slots = ""
for slot, mask in zip(slot_preds_query, mask_query):
if mask == 1:
if slot < len(slot_labels):
query_slots += slot_labels[int(slot)] + " "
temp_slots += f"{slot} "
else:
query_slots += "Unknown_slot "
temp_slots += "0 "
predicted_slots.append(query_slots.strip())
temp_slots_preds.append(temp_slots)
finally:
# set mode back to its original value
self.train(mode=mode)
return predicted_intents, predicted_slots, predicted_vector
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
No pretrained checkpoints are available for this model yet.
"""
result = []
return result
| NeMo-main | nemo/collections/nlp/models/intent_slot_classification/multi_label_intent_slot_classification_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import perf_counter
from typing import Dict, Optional
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.common.losses import CrossEntropyLoss
from nemo.collections.nlp.data.spellchecking_asr_customization import (
SpellcheckingAsrCustomizationDataset,
SpellcheckingAsrCustomizationTestDataset,
TarredSpellcheckingAsrCustomizationDataset,
bert_example,
)
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import read_label_map
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common.token_classifier import TokenClassifier
from nemo.collections.nlp.parts.utils_funcs import tensor2list
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types import LogitsType, NeuralType
from nemo.utils import logging
from nemo.utils.decorators import experimental
__all__ = ["SpellcheckingAsrCustomizationModel"]
@experimental
class SpellcheckingAsrCustomizationModel(NLPModel):
"""
https://arxiv.org/abs/2306.02317
BERT-based model for Spellchecking ASR Customization.
It takes as input ASR hypothesis and candidate customization entries.
It labels the hypothesis with correct entry index or 0.
Example input: [CLS] a s t r o n o m e r s _ d i d i e _ s o m o n _ a n d _ t r i s t i a n _ g l l o [SEP] d i d i e r _ s a u m o n [SEP] a s t r o n o m i e [SEP] t r i s t a n _ g u i l l o t [SEP] ...
Input segments: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 4
Example output: 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 3 3 3 3 3 3 3 3 3 3 3 3 3 0 ...
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"logits": NeuralType(('B', 'T', 'D'), LogitsType()),
}
@property
def input_module(self):
return self
@property
def output_module(self):
return self
def __init__(self, cfg: DictConfig, trainer: Trainer = None) -> None:
super().__init__(cfg=cfg, trainer=trainer)
# Label map contains 11 labels: 0 for nothing, 1..10 for target candidate ids
label_map_file = self.register_artifact("label_map", cfg.label_map, verify_src_exists=True)
# Semiotic classes for this model consist only of classes CUSTOM(means fragment containing custom candidate) and PLAIN (any other single-character fragment)
# They are used only during validation step, to calculate accuracy for CUSTOM and PLAIN classes separately
semiotic_classes_file = self.register_artifact(
"semiotic_classes", cfg.semiotic_classes, verify_src_exists=True
)
self.label_map = read_label_map(label_map_file)
self.semiotic_classes = read_label_map(semiotic_classes_file)
self.num_labels = len(self.label_map)
self.num_semiotic_labels = len(self.semiotic_classes)
self.id_2_tag = {tag_id: tag for tag, tag_id in self.label_map.items()}
self.id_2_semiotic = {semiotic_id: semiotic for semiotic, semiotic_id in self.semiotic_classes.items()}
self.max_sequence_len = cfg.get('max_sequence_len', self.tokenizer.tokenizer.model_max_length)
# Setup to track metrics
# We will have (len(self.semiotic_classes) + 1) labels.
# Last one stands for WRONG (span in which the predicted tags don't match the labels)
# This is needed to feed the sequence of classes to classification_report during validation
label_ids = self.semiotic_classes.copy()
label_ids["WRONG"] = len(self.semiotic_classes)
self.tag_classification_report = ClassificationReport(
len(self.semiotic_classes) + 1, label_ids=label_ids, mode='micro', dist_sync_on_step=True
)
self.hidden_size = cfg.hidden_size
# hidden size is doubled because in forward we concatenate embeddings for characters and embeddings for subwords
self.logits = TokenClassifier(
self.hidden_size * 2, num_classes=self.num_labels, num_layers=1, log_softmax=False, dropout=0.1
)
self.loss_fn = CrossEntropyLoss(logits_ndim=3)
self.builder = bert_example.BertExampleBuilder(
self.label_map, self.semiotic_classes, self.tokenizer.tokenizer, self.max_sequence_len
)
@typecheck()
def forward(
self,
input_ids,
input_mask,
segment_ids,
input_ids_for_subwords,
input_mask_for_subwords,
segment_ids_for_subwords,
character_pos_to_subword_pos,
):
"""
Same BERT-based model is used to calculate embeddings for sequence of single characters and for sequence of subwords.
Then we concatenate subword embeddings to each character corresponding to this subword.
We return logits for each character x 11 labels: 0 - character doesn't belong to any candidate, 1..10 - character belongs to candidate with this id.
# Arguments
input_ids: token_ids for single characters; .shape = [batch_size, char_seq_len]; .dtype = int64
input_mask: mask for input_ids(1 - real, 0 - padding); .shape = [batch_size, char_seq_len]; .dtype = int64
segment_ids: segment types for input_ids (0 - ASR-hypothesis, 1..10 - candidate); .shape = [batch_size, char_seq_len]; .dtype = int64
input_ids_for_subwords: token_ids for subwords; .shape = [batch_size, subword_seq_len]; .dtype = int64
input_mask_for_subwords: mask for input_ids_for_subwords(1 - real, 0 - padding); .shape = [batch_size, subword_seq_len]; .dtype = int64
segment_ids_for_subwords: segment types for input_ids_for_subwords (0 - ASR-hypothesis, 1..10 - candidate); .shape = [batch_size, subword_seq_len]; .dtype = int64
character_pos_to_subword_pos: tensor mapping character position in the input sequence to subword position; .shape = [batch_size, char_seq_len]; .dtype = int64
"""
# src_hiddens.shape = [batch_size, char_seq_len, bert_hidden_size]; .dtype=float32
src_hiddens = self.bert_model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
# src_hiddens_for_subwords.shape = [batch_size, subword_seq_len, bert_hidden_size]; .dtype=float32
src_hiddens_for_subwords = self.bert_model(
input_ids=input_ids_for_subwords,
token_type_ids=segment_ids_for_subwords,
attention_mask=input_mask_for_subwords,
)
# Next three commands concatenate subword embeddings to each character embedding of the corresponding subword
# index.shape = [batch_size, char_seq_len, bert_hidden_size]; .dtype=int64
index = character_pos_to_subword_pos.unsqueeze(-1).expand((-1, -1, src_hiddens_for_subwords.shape[2]))
# src_hiddens_2.shape = [batch_size, char_seq_len, bert_hidden_size]; .dtype=float32
src_hiddens_2 = torch.gather(src_hiddens_for_subwords, 1, index)
# src_hiddens.shape = [batch_size, char_seq_len, bert_hidden_size * 2]; .dtype=float32
src_hiddens = torch.cat((src_hiddens, src_hiddens_2), 2)
# logits.shape = [batch_size, char_seq_len, num_labels]; num_labels=11: ids from 0 to 10; .dtype=float32
logits = self.logits(hidden_states=src_hiddens)
return logits
# Training
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
(
input_ids,
input_mask,
segment_ids,
input_ids_for_subwords,
input_mask_for_subwords,
segment_ids_for_subwords,
character_pos_to_subword_pos,
labels_mask,
labels,
_,
) = batch
logits = self.forward(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
input_ids_for_subwords=input_ids_for_subwords,
input_mask_for_subwords=input_mask_for_subwords,
segment_ids_for_subwords=segment_ids_for_subwords,
character_pos_to_subword_pos=character_pos_to_subword_pos,
)
loss = self.loss_fn(logits=logits, labels=labels, loss_mask=labels_mask)
lr = self._optimizer.param_groups[0]['lr']
self.log('train_loss', loss)
self.log('lr', lr, prog_bar=True)
return {'loss': loss, 'lr': lr}
# Validation and Testing
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
(
input_ids,
input_mask,
segment_ids,
input_ids_for_subwords,
input_mask_for_subwords,
segment_ids_for_subwords,
character_pos_to_subword_pos,
labels_mask,
labels,
spans,
) = batch
logits = self.forward(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
input_ids_for_subwords=input_ids_for_subwords,
input_mask_for_subwords=input_mask_for_subwords,
segment_ids_for_subwords=segment_ids_for_subwords,
character_pos_to_subword_pos=character_pos_to_subword_pos,
)
tag_preds = torch.argmax(logits, dim=2)
# Update tag classification_report
for input_mask_seq, segment_seq, prediction_seq, label_seq, span_seq in zip(
input_mask.tolist(), segment_ids.tolist(), tag_preds.tolist(), labels.tolist(), spans.tolist()
):
# Here we want to track whether the predicted output matches ground truth labels for each whole span.
# We construct the special input for classification report, for example:
# span_labels = [PLAIN, PLAIN, PLAIN, PLAIN, CUSTOM, CUSTOM]
# span_predictions = [PLAIN, WRONG, PLAIN, PLAIN, WRONG, CUSTOM]
# Note that the number of PLAIN and CUSTOM occurrences in the report is not comparable,
# because PLAIN is for characters, and CUSTOM is for phrases.
span_labels = []
span_predictions = []
plain_cid = self.semiotic_classes["PLAIN"]
wrong_cid = self.tag_classification_report.num_classes - 1
# First we loop through all predictions for input characters with label=0, they are regarded as separate spans with PLAIN class.
# It either stays as PLAIN if the model prediction is 0, or turns to WRONG.
for i in range(len(segment_seq)):
if input_mask_seq[i] == 0:
continue
if segment_seq[i] > 0: # token does not belong to ASR-hypothesis => it's over
break
if label_seq[i] == 0:
span_labels.append(plain_cid)
if prediction_seq[i] == 0:
span_predictions.append(plain_cid)
else:
span_predictions.append(wrong_cid)
# if label_seq[i] != 0 then it belongs to CUSTOM span and will be handled later
# Second we loop through spans tensor which contains only spans for CUSTOM class.
# It stays as CUSTOM if all predictions for the whole span are equal to the labels, otherwise it turns to WRONG.
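# For instance (hypothetical indices): if span_seq contains (cid, start, end) = (custom_cid, 13, 24)
# and prediction_seq[13:24] == label_seq[13:24], the span is counted under its CUSTOM class id,
# otherwise it is counted as WRONG.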
for cid, start, end in span_seq:
if cid == -1:
break
span_labels.append(cid)
if prediction_seq[start:end] == label_seq[start:end]:
span_predictions.append(cid)
else:
span_predictions.append(wrong_cid)
if len(span_labels) != len(span_predictions):
raise ValueError(
"Length mismatch: len(span_labels)="
+ str(len(span_labels))
+ "; len(span_predictions)="
+ str(len(span_predictions))
)
self.tag_classification_report(
torch.tensor(span_predictions).to(self.device), torch.tensor(span_labels).to(self.device)
)
val_loss = self.loss_fn(logits=logits, labels=labels, loss_mask=labels_mask)
return {'val_loss': val_loss}
def validation_epoch_end(self, outputs):
"""
Called at the end of validation to aggregate outputs.
:param outputs: list of individual outputs of each validation step.
"""
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
# Calculate metrics and classification report
# Note that in our task recall = accuracy, and the recall column is the per class accuracy
_, tag_accuracy, _, tag_report = self.tag_classification_report.compute()
logging.info("Total tag accuracy: " + str(tag_accuracy))
logging.info(tag_report)
self.log('val_loss', avg_loss, prog_bar=True)
self.log('tag accuracy', tag_accuracy)
self.tag_classification_report.reset()
def test_step(self, batch, batch_idx):
"""
Lightning calls this inside the test loop with the data from the test dataloader
passed in as `batch`.
"""
return self.validation_step(batch, batch_idx)
def test_epoch_end(self, outputs):
"""
Called at the end of test to aggregate outputs.
:param outputs: list of individual outputs of each test step.
"""
return self.validation_epoch_end(outputs)
# Functions for inference
@torch.no_grad()
def infer(self, dataloader_cfg: DictConfig, input_name: str, output_name: str) -> None:
""" Main function for Inference
Args:
dataloader_cfg: config for dataloader
input_name: Input file with tab-separated text records. Each record consists of 2 items:
- ASR hypothesis
- candidate phrases separated by semicolon
output_name: Output file with tab-separated text records. Each record consists of 4 items:
- ASR hypothesis
- candidate phrases separated by semicolon
- list of possible replacements with probabilities (start, pos, candidate_id, prob), separated by semicolon
- list of labels, predicted for each letter (for debug purposes)
Returns: None
"""
mode = self.training
device = "cuda" if torch.cuda.is_available() else "cpu"
try:
# Switch model to evaluation mode
self.eval()
self.to(device)
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
infer_datalayer = self._setup_infer_dataloader(dataloader_cfg, input_name)
all_tag_preds = (
[]
) # list(size=number of sentences) of lists(size=number of letters) of tag predictions (best candidate_id for each letter)
all_possible_replacements = (
[]
) # list(size=number of sentences) of lists(size=number of potential replacements) of tuples(start, pos, candidate_id, prob)
for batch in iter(infer_datalayer):
(
input_ids,
input_mask,
segment_ids,
input_ids_for_subwords,
input_mask_for_subwords,
segment_ids_for_subwords,
character_pos_to_subword_pos,
fragment_indices,
) = batch
# tag_logits.shape = [batch_size, char_seq_len, num_labels]; num_labels=11: ids from 0 to 10; .dtype=float32
tag_logits = self.forward(
input_ids=input_ids.to(self.device),
input_mask=input_mask.to(self.device),
segment_ids=segment_ids.to(self.device),
input_ids_for_subwords=input_ids_for_subwords.to(self.device),
input_mask_for_subwords=input_mask_for_subwords.to(self.device),
segment_ids_for_subwords=segment_ids_for_subwords.to(self.device),
character_pos_to_subword_pos=character_pos_to_subword_pos.to(self.device),
)
# fragment_indices.shape=[batch_size, num_fragments, 3], where the last dimension is [start, end, label] and label is a candidate id from 1 to 10
# Next we want to convert predictions for separate letters to probabilities for each whole fragment from fragment_indices.
# To achieve this we first sum the letter logits in each fragment and divide by its length.
# (We use .cumsum and then difference between end and start to get sum per fragment).
# Then we convert logits to probs with softmax and for each fragment extract only the prob for given label.
# Finally we get a list of tuples (start, end, label, prob)
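# Toy example of the cumsum trick (hypothetical indices, not executed): for a fragment with
# start=3 and end=7, cumsum[7] - cumsum[3] over the zero-padded logits equals the sum of
# tag_logits[3:7], and dividing by d_index = 7 - 3 = 4 yields the mean logits for that fragment.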
indices_len = fragment_indices.shape[1]
# this padding adds a row of zeros (size=num_labels) as first element of sequence in second dimension. This is needed for cumsum operations.
padded_logits = torch.nn.functional.pad(tag_logits, pad=(0, 0, 1, 0))
(
batch_size,
seq_len,
num_labels,
) = padded_logits.shape # seq_len is +1 compared to that of tag_logits, because of padding
# cumsum.shape=[batch_size, seq_len, num_labels]
cumsum = padded_logits.cumsum(dim=1)
# the size -1 is inferred from other dimensions. We get rid of batch dimension.
cumsum_view = cumsum.view(-1, num_labels)
word_index = (
torch.ones((batch_size, indices_len), dtype=torch.long)
* torch.arange(batch_size).reshape((-1, 1))
* seq_len
).view(-1)
lower_index = (fragment_indices[..., 0]).view(-1) + word_index
higher_index = (fragment_indices[..., 1]).view(-1) + word_index
d_index = (higher_index - lower_index).reshape((-1, 1)).to(self.device) # word lengths
dlog = cumsum_view[higher_index, :] - cumsum_view[lower_index, :] # sum of logits
# word_logits.shape=[batch_size, indices_len, num_labels]
word_logits = (dlog / d_index.float()).view(batch_size, indices_len, num_labels)
# convert logits to probs, same shape
word_probs = torch.nn.functional.softmax(word_logits, dim=-1).to(self.device)
# candidate_index.shape=[batch_size, indices_len]
candidate_index = fragment_indices[:, :, 2].to(self.device)
# candidate_probs.shape=[batch_size, indices_len]
candidate_probs = torch.take_along_dim(word_probs, candidate_index.unsqueeze(2), dim=-1).squeeze(2)
for i in range(batch_size):
possible_replacements = []
for j in range(indices_len):
start, end, candidate_id = (
int(fragment_indices[i][j][0]),
int(fragment_indices[i][j][1]),
int(fragment_indices[i][j][2]),
)
if candidate_id == 0: # this is padding
continue
prob = round(float(candidate_probs[i][j]), 5)
if prob < 0.01:
continue
# -1 because in the output file we will not have a [CLS] token
possible_replacements.append(
str(start - 1) + " " + str(end - 1) + " " + str(candidate_id) + " " + str(prob)
)
all_possible_replacements.append(possible_replacements)
# torch.argmax(tag_logits, dim=-1) gives a tensor of best predicted labels with shape [batch_size, char_seq_len], .dtype = int64
# character_preds is list of lists of predicted labels
character_preds = tensor2list(torch.argmax(tag_logits, dim=-1))
all_tag_preds.extend(character_preds)
if len(all_possible_replacements) != len(all_tag_preds) or len(all_possible_replacements) != len(
infer_datalayer.dataset.examples
):
raise IndexError(
"number of sentences mismatch: len(all_possible_replacements)="
+ str(len(all_possible_replacements))
+ "; len(all_tag_preds)="
+ str(len(all_tag_preds))
+ "; len(infer_datalayer.dataset.examples)="
+ str(len(infer_datalayer.dataset.examples))
)
# save results to file
with open(output_name, "w", encoding="utf-8") as out:
for i in range(len(infer_datalayer.dataset.examples)):
hyp, ref = infer_datalayer.dataset.hyps_refs[i]
num_letters = hyp.count(" ") + 1
tag_pred_str = " ".join(list(map(str, all_tag_preds[i][1 : (num_letters + 1)])))
possible_replacements_str = ";".join(all_possible_replacements[i])
out.write(hyp + "\t" + ref + "\t" + possible_replacements_str + "\t" + tag_pred_str + "\n")
except Exception as e:
raise ValueError("Error processing file " + input_name)
finally:
# set mode back to its original value
self.train(mode=mode)
logging.set_verbosity(logging_level)
# Functions for processing data
def setup_training_data(self, train_data_config: Optional[DictConfig]):
if not train_data_config or not train_data_config.data_path:
logging.info(
f"Dataloader config or file_path for the train is missing, so no data loader for train is created!"
)
self._train_dl = None
return
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, data_split="train")
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if not val_data_config or not val_data_config.data_path:
logging.info(
f"Dataloader config or file_path for the validation is missing, so no data loader for validation is created!"
)
self._validation_dl = None
return
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config, data_split="val")
def setup_test_data(self, test_data_config: Optional[DictConfig]):
if not test_data_config or test_data_config.data_path is None:
logging.info(
f"Dataloader config or file_path for the test is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, data_split="test")
def _setup_dataloader_from_config(self, cfg: DictConfig, data_split: str):
start_time = perf_counter()
logging.info(f'Creating {data_split} dataset')
if cfg.get("use_tarred_dataset", False):
dataset = TarredSpellcheckingAsrCustomizationDataset(
cfg.data_path,
shuffle_n=cfg.get("tar_shuffle_n", 100),
global_rank=self.global_rank,
world_size=self.world_size,
pad_token_id=self.builder._pad_id,
)
else:
input_file = cfg.data_path
dataset = SpellcheckingAsrCustomizationDataset(input_file=input_file, example_builder=self.builder)
dl = torch.utils.data.DataLoader(
dataset=dataset, batch_size=cfg.batch_size, shuffle=cfg.shuffle, collate_fn=dataset.collate_fn
)
running_time = perf_counter() - start_time
logging.info(f'Took {running_time} seconds')
return dl
def _setup_infer_dataloader(self, cfg: DictConfig, input_name: str) -> 'torch.utils.data.DataLoader':
"""
Setup function for an inference data loader.
Args:
cfg: config dictionary containing data loader params like batch_size, num_workers and pin_memory
input_name: path to input file.
Returns:
A pytorch DataLoader.
"""
dataset = SpellcheckingAsrCustomizationTestDataset(input_name, example_builder=self.builder)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg["batch_size"],
shuffle=False,
num_workers=cfg.get("num_workers", 0),
pin_memory=cfg.get("pin_memory", False),
drop_last=False,
collate_fn=dataset.collate_fn,
)
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
return None
| NeMo-main | nemo/collections/nlp/models/spellchecking_asr_customization/spellchecking_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.spellchecking_asr_customization.spellchecking_model import (
SpellcheckingAsrCustomizationModel,
)
| NeMo-main | nemo/collections/nlp/models/spellchecking_asr_customization/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import itertools
import random
from typing import List, Optional
import numpy as np
import torch
from omegaconf.dictconfig import DictConfig
from omegaconf.listconfig import ListConfig
from pytorch_lightning.trainer.trainer import Trainer
from sacrebleu import corpus_bleu
from nemo.collections.nlp.data.common.sequence_to_sequence_dataset import (
BinarizedMemmapSequenceToSequenceDataset,
TextMemmapSequenceToSequenceDataset,
)
from nemo.collections.nlp.data.language_modeling.megatron.base_dataset_utils import (
get_datasets_weights_and_num_samples,
)
from nemo.collections.nlp.data.language_modeling.megatron.blendable_dataset import BlendableDataset
from nemo.collections.nlp.data.language_modeling.megatron.megatron_batch_samplers import (
MegatronPretrainingBatchSampler,
)
from nemo.collections.nlp.data.language_modeling.megatron.xlm_dataset import (
BinarizedMemmapCrossLingualMLMAndTranslationDataset,
TextMemmapCrossLingualMLMAndTranslationDataset,
)
from nemo.collections.nlp.models.language_modeling.megatron_lm_encoder_decoder_model import (
MegatronLMEncoderDecoderModel,
)
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_model import MTEncDecModel
from nemo.collections.nlp.modules.common.megatron.megatron_export import DecEmb, EncEmb, TokensHeadEmb
from nemo.collections.nlp.modules.common.megatron.utils import get_iterator_k_split
from nemo.collections.nlp.parts.nlp_overrides import GlobalBatchDataFetcher
from nemo.collections.nlp.parts.utils_funcs import get_last_rank
from nemo.core.classes import Exportable
from nemo.utils import AppState, logging, timers
try:
from apex.transformer.pipeline_parallel.utils import (
_reconfigure_microbatch_calculator,
get_micro_batch_size,
get_num_microbatches,
)
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import parallel_state
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
__all__ = ["MegatronNMTModel"]
class MultilingualModelType(enum.Enum):
one_to_many = 1
many_to_one = 2
many_to_many = 3
class MegatronNMTModel(MegatronLMEncoderDecoderModel, Exportable):
"""
Megatron NMT training
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
# All of the lines below need to be set when the parent class calls self._build_tokenizer()
self.encoder_tokenizer_library = cfg.encoder_tokenizer.get('library', 'yttm')
self.decoder_tokenizer_library = cfg.decoder_tokenizer.get('library', 'yttm')
self.multilingual_lang_tokens = {}
self.src_language = cfg.get("src_language", None)
self.tgt_language = cfg.get("tgt_language", None)
self.multilingual = cfg.get("multilingual", False)
self.multilingual_ids = []
self.validate_input_ids = cfg.get("validate_input_ids", True)
self.objective = cfg.train_ds.get("objective", "nmt")
if self.objective == 'nmt-xlm':
if not self.multilingual:
raise ValueError("nmt-xlm objective requires model.multilingual=True")
if self.multilingual:
self.multilingual_type = self._determine_multilingual_training_type()
self._setup_multilingual_special_tokens()
else:
self.multilingual_type = None
super().__init__(cfg, trainer=trainer)
def _determine_multilingual_training_type(self):
"""Determines whether we are doing one-many, many-one, or many-many training based on the config."""
if self.objective == 'nmt-xlm':
return MultilingualModelType.many_to_many
if isinstance(self.src_language, ListConfig) and isinstance(self.tgt_language, ListConfig):
return MultilingualModelType.many_to_many
elif isinstance(self.src_language, ListConfig):
return MultilingualModelType.many_to_one
elif isinstance(self.tgt_language, ListConfig):
return MultilingualModelType.one_to_many
else:
raise ValueError(
f"Invalid multilingual training config: {self.src_language}, {self.tgt_language}. Must have either src/tgt as a list of languages."
)
def _setup_multilingual_special_tokens(self):
if self.multilingual_type == MultilingualModelType.many_to_many:
if self.objective == 'nmt-xlm':
unique_langs = set(self.src_language + self.tgt_language)
else:
# The same language can appear multiple times in tgt_language, so take a set() to de-duplicate it.
unique_langs = set(self.tgt_language)
for lng in unique_langs:
self.multilingual_lang_tokens["<" + lng + ">"] = "<" + lng + ">"
elif self.multilingual_type == MultilingualModelType.many_to_one:
# Do nothing here since many -> one does not need special tokens for the target language.
pass
elif self.multilingual_type == MultilingualModelType.one_to_many:
for lng in self.tgt_language:
self.multilingual_lang_tokens["<" + lng + ">"] = "<" + lng + ">"
else:
raise ValueError(f"Invalid multilingual training type: {self.multilingual_type}")
def setup(self, stage=None):
# NOTE: super().__init__ will try to set up the train/val/test datasets, but we sidestep this with an `if self._train_ds is not None` condition
# We then set things up for real only once setup() of this class is called.
resume_checkpoint_path = self.trainer.ckpt_path
if resume_checkpoint_path:
init_consumed_samples = self._extract_consumed_samples_from_ckpt(resume_checkpoint_path)
else:
init_consumed_samples = 0
self.init_consumed_samples = init_consumed_samples
if stage == 'predict':
return
# If the user wants to manually override train and validation dataloaders before calling `.fit()`
if self._train_dl is not None and self._validation_dl is not None:
return
self.build_train_valid_test_datasets()
self.setup_training_data(self._cfg.train_ds)
self.setup_validation_data(self._cfg.validation_ds)
if hasattr(self._cfg, 'test_ds'):
self.setup_test_data(self._cfg.test_ds)
# when using pipeline model parallelism, the final stage needs to initialize word embeddings
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
assert (
self.cfg.share_token_embeddings
), "share_word_embedding must be True when using pipeline model parallel > 1"
assert (
self.cfg.share_decoder_tokens_head_embeddings
), "share_decoder_tokens_head_embeddings must be True when using pipeline model parallel > 1"
self.enc_dec_model.sync_initial_word_embeddings()
if (
self.cfg.encoder.get('position_embedding_type') != 'relative'
and self.cfg.decoder.get('position_embedding_type') != 'relative'
):
self.enc_dec_model.sync_initial_position_embeddings()
# Synchronize RPE embeddings across pipeline parallel ranks.
else:
if self.cfg.encoder.get('position_embedding_type', 'learned_absolute') == 'relative':
self.enc_dec_model.sync_initial_encoder_relative_position_embeddings()
if self.cfg.decoder.get('position_embedding_type', 'learned_absolute') == 'relative':
self.enc_dec_model.sync_initial_decoder_relative_position_embeddings()
if self.cfg.decoder.get(
'position_embedding_type', 'learned_absolute'
) == 'relative' and not self.cfg.decoder.get('relative_position_bias_self_attention_only', True):
self.enc_dec_model.sync_initial_decoder_cross_attention_relative_position_embeddings()
def _build_tokenizer(self):
# Instantiates tokenizers and register to be saved with NeMo Model archive
# After this call, there will be self.encoder_tokenizer and self.decoder_tokenizer
# Which can convert between tokens and token_ids for SRC and TGT languages correspondingly.
encoder_tokenizer_model = self.register_artifact(
"encoder_tokenizer.model", self._cfg.encoder_tokenizer.get('model')
)
decoder_tokenizer_model = self.register_artifact(
"decoder_tokenizer.model", self._cfg.decoder_tokenizer.get('model')
)
self.encoder_tokenizer, self.decoder_tokenizer = MTEncDecModel.setup_enc_dec_tokenizers(
encoder_tokenizer_library=self.encoder_tokenizer_library,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=self._cfg.encoder_tokenizer.get('bpe_dropout', 0.0)
if self._cfg.encoder_tokenizer.get('bpe_dropout', 0.0) is not None
else 0.0,
encoder_model_name=self._cfg.encoder_tokenizer.get('type', None),
encoder_r2l=self._cfg.encoder_tokenizer.get('r2l', False),
decoder_tokenizer_library=self.decoder_tokenizer_library,
encoder_tokenizer_vocab_file=self._cfg.encoder_tokenizer.get('vocab_file', None),
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=self._cfg.decoder_tokenizer.get('bpe_dropout', 0.0)
if self._cfg.decoder_tokenizer.get('bpe_dropout', 0.0) is not None
else 0.0,
decoder_model_name=self._cfg.encoder_tokenizer.get('type', None),
decoder_r2l=self._cfg.decoder_tokenizer.get('r2l', False),
encoder_sentencepiece_legacy=self._cfg.encoder_tokenizer.get('sentencepiece_legacy', False),
decoder_sentencepiece_legacy=self._cfg.decoder_tokenizer.get('sentencepiece_legacy', False),
)
def _build_vocab(self):
if hasattr(self.cfg, "data") and self.cfg.train_ds.get('objective', 'nmt') != 'nmt-xlm':
if hasattr(self.cfg.data, "dataset_type"):
# This happens only when restoring a pre-trained model. We need to add all of the special tokens that were added while pre-training to avoid a checkpoint shape mismatch while restoring.
MegatronT5Model.add_special_tokens_to_tokenizer(
tokenizer=self.encoder_tokenizer,
tokenizer_cfg=self.cfg.encoder_tokenizer,
dataset_type=self.cfg.data.dataset_type,
)
MegatronT5Model.add_special_tokens_to_tokenizer(
tokenizer=self.decoder_tokenizer,
tokenizer_cfg=self.cfg.decoder_tokenizer,
dataset_type=self.cfg.data.dataset_type,
)
if self.cfg.train_ds.get('objective', 'nmt') == 'nmt-xlm':
if self.cfg.encoder_tokenizer.library != 'sentencepiece':
raise ValueError(
f"NMT-XLM objective requires sentencepiece tokenizer, but got encoder tokenizer library : {self.cfg.encoder_tokenizer.library}"
)
if self.cfg.decoder_tokenizer.library != 'sentencepiece':
raise ValueError(
f"NMT-XLM objective requires sentencepiece tokenizer, but got decoder tokenizer library : {self.cfg.decoder_tokenizer.library}"
)
MegatronT5Model.add_special_tokens_to_tokenizer(
tokenizer=self.encoder_tokenizer, tokenizer_cfg=self.cfg.encoder_tokenizer, dataset_type='ul2',
)
MegatronT5Model.add_special_tokens_to_tokenizer(
tokenizer=self.decoder_tokenizer, tokenizer_cfg=self.cfg.decoder_tokenizer, dataset_type='ul2',
)
# Set up pre and post processors as well.
# NOTE: multilingual language tokens are set up after other special tokens such as eos, pad, sentinel tokens etc are added.
if self.multilingual:
(
self.source_processor_list,
self.target_processor_list,
self.multilingual_lang_to_id,
) = MTEncDecModel.setup_multilingual_ids_and_processors(
src_language=self.src_language,
tgt_language=self.tgt_language,
encoder_tokenizer=self.encoder_tokenizer, # Multilingual training requires shared tokenizers.
decoder_tokenizer=self.decoder_tokenizer,
encoder_tokenizer_library=self.encoder_tokenizer_library,
decoder_tokenizer_library=self.decoder_tokenizer_library,
)
self.multilingual_ids = list(self.multilingual_lang_to_id.values())
else:
# After this call, the model will have self.source_processor and self.target_processor objects
self.source_processor, self.target_processor = MTEncDecModel.setup_pre_and_post_processing_utils(
self.src_language, self.tgt_language, self.encoder_tokenizer_library, self.decoder_tokenizer_library,
)
self.multilingual_ids = [None]
self.padded_vocab_size = self._vocab_size_with_padding(
orig_vocab_size=self.encoder_tokenizer.vocab_size,
make_vocab_size_divisible_by=self._cfg.get('make_vocab_size_divisible_by', 128),
tensor_model_parallel_size=self._cfg.get('tensor_model_parallel_size', 1),
)
def fwd_bwd_step(self, dataloader_iter, batch_idx, forward_only):
"""
Dataloader produces a global batch which is turned into a list of microbatches.
The list of microbatches is then piped through the pipeline using Apex fwd/bwd functions.
"""
batch = next(dataloader_iter)
if isinstance(batch, dict):
# convert to list if not already converted.
batch = self._process_batch(batch)
# Get seq length of batch
encoder_seq_length = batch[0].size(1)
decoder_seq_length = batch[1].size(1)
tensor_shape = [encoder_seq_length, get_micro_batch_size(), self.cfg.encoder.hidden_size]
data_iter = get_iterator_k_split(batch, get_num_microbatches())
return self._execute_fwd_bwd_function(
data_iterator=data_iter,
forward_only=forward_only,
tensor_shape=tensor_shape,
decoder_seq_length=decoder_seq_length,
)
def eval_step(self, dataloader_iter, batch_idx, dataloader_idx=0):
# Check if iterator is exhausted
dataloader_iter, done = self._val_iterator_done(dataloader_iter)
if done:
return
# Need to squeeze dim 0 for old NMT datasets since things are pre-batched and we ask the dataloader for batch size 1.
batch = next(dataloader_iter)
batch = [x.squeeze(dim=0) if x.ndim == 3 else x for x in batch]
batch = self.process_global_batch_for_text_translation_datasets(batch)
# Eval step requires text datasets so we need to reconfigure MBS on each batch.
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=batch['text_enc'].size(0) * parallel_state.get_data_parallel_world_size(),
micro_batch_size=batch['text_enc'].size(0),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
# This returns the averaged loss across data-parallel groups.
reduced_loss = self.fwd_bwd_step(itertools.chain([batch]), batch_idx, True)
tokens_enc, labels, enc_mask = batch['text_enc'], batch['labels'], batch['enc_mask']
predicted_tokens_ids, _ = self.decode(
tokens_enc,
enc_mask,
tokens_enc.size(1)
+ self._cfg.max_generation_delta, # Generate up to src-length + max generation delta. TODO: Implement better stopping when everything hits <EOS>.
tokenizer=self.decoder_tokenizer,
)
if self.multilingual:
source_processor = self.source_processor_list[dataloader_idx]
target_processor = self.target_processor_list[dataloader_idx]
else:
source_processor = self.source_processor
target_processor = self.target_processor
# Post-process the translations and inputs to log.
preds = self.postprocess_outputs(
outputs=predicted_tokens_ids, tokenizer=self.decoder_tokenizer, processor=target_processor,
)
labels = self.postprocess_outputs(
outputs=labels, tokenizer=self.decoder_tokenizer, processor=target_processor,
)
encoder_inputs = self.postprocess_outputs(
outputs=tokens_enc, tokenizer=self.encoder_tokenizer, processor=source_processor,
)
loss_dict = {
'inputs': encoder_inputs,
'translations': preds,
'ground_truths': labels,
}
if isinstance(reduced_loss, dict):
loss_dict.update(reduced_loss)
else:
loss_dict['loss'] = reduced_loss
if type(self.trainer.val_dataloaders) == list and len(self.trainer.val_dataloaders) > 1:
self.validation_step_outputs[dataloader_idx].append(loss_dict)
else:
self.validation_step_outputs.append(loss_dict)
return loss_dict
def postprocess_outputs(self, outputs, tokenizer, processor):
# Convert ids to lists.
outputs = outputs.cpu().numpy().tolist()
# Filter out the special tokens and de-tokenize.
results = []
for item in outputs:
if tokenizer.eos_id in item:
idx = item.index(tokenizer.eos_id)
item = item[:idx]
# Legacy sentencepiece detokenization still preserves special tokens which messes up exact string match.
if hasattr(tokenizer, 'special_token_to_id'):
item = [id for id in item if id not in tokenizer.special_token_to_id.values()]
item = tokenizer.ids_to_text(item)
results.append(item)
if processor is not None:
results = [processor.detokenize(item.split(' ')) for item in results]
return results
def validation_step(self, dataloader_iter, batch_idx, dataloader_idx=0):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
return self.eval_step(dataloader_iter, batch_idx, dataloader_idx)
def _setup_eval_dataloader_from_config(self, cfg: DictConfig, dataset):
rank = parallel_state.get_data_parallel_rank()
world_size = parallel_state.get_data_parallel_world_size()
dataloaders = []
for _dataset in dataset:
sampler = torch.utils.data.distributed.DistributedSampler(
_dataset, num_replicas=world_size, rank=rank, shuffle=False
)
dataloaders.append(
torch.utils.data.DataLoader(
dataset=_dataset,
batch_size=1,
sampler=sampler,
num_workers=cfg.get("num_workers", 0),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
shuffle=False,
persistent_workers=True if cfg.get("num_workers", 0) > 0 else False,
)
)
return dataloaders
def on_validation_epoch_end(self):
return self.eval_epoch_end(self.validation_step_outputs, 'val')
def on_test_epoch_end(self):
return self.eval_epoch_end(self.test_step_outputs, 'test')
def eval_epoch_end(self, outputs, mode):
if not outputs:
return
if isinstance(outputs[0], dict):
outputs = [outputs]
loss_list = []
bleu_score_list = []
for dataloader_idx, output in enumerate(outputs):
if parallel_state.is_pipeline_last_stage():
# only the last pipeline parallel stages return loss
averaged_loss = torch.stack([x['loss'] for x in output]).mean()
else:
averaged_loss = torch.tensor(0.0).to(self.device)
# we can only log on one rank if it is rank zero so we broadcast from last rank
torch.distributed.broadcast(averaged_loss, get_last_rank())
# averaged_loss = average_losses_across_data_parallel_group([x['loss'] for x in output])
inputs = list(itertools.chain(*[x['inputs'] for x in output]))
translations = list(itertools.chain(*[x['translations'] for x in output]))
ground_truths = list(itertools.chain(*[x['ground_truths'] for x in output]))
assert len(translations) == len(inputs)
assert len(translations) == len(ground_truths)
# Gather translations and ground truths from all workers
tr_gt_inp = [None for _ in range(parallel_state.get_data_parallel_world_size())]
# we also need to drop pairs where ground truth is an empty string
torch.distributed.all_gather_object(
tr_gt_inp,
[(t, g, i) for (t, g, i) in zip(translations, ground_truths, inputs)],
group=parallel_state.get_data_parallel_group(),
)
# if parallel_state.get_data_parallel_rank() == 0:
if self.global_rank == 0:
_translations = []
_ground_truths = []
_inputs = []
# Deduplicate sentences that may have been distributed across multiple data parallel ranks.
gt_inp_set = set()
for rank in range(0, parallel_state.get_data_parallel_world_size()):
for t, g, i in tr_gt_inp[rank]:
if g + i not in gt_inp_set:
gt_inp_set.add(g + i)
_translations.append(t)
_ground_truths.append(g)
_inputs.append(i)
if self.tgt_language in ['ja']:
sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize="ja-mecab")
elif self.tgt_language in ['zh']:
sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize="zh")
else:
sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize="13a")
bleu_score = sacre_bleu.score
dataset_name = "Validation" if mode == 'val' else "Test"
logging.info(f"{dataset_name}, Dataloader index: {dataloader_idx}, Set size: {len(_translations)}")
logging.info(f"{dataset_name}, Dataloader index: {dataloader_idx}, SacreBLEU = {bleu_score}")
logging.info(f"{dataset_name}, Dataloader index: {dataloader_idx}, Translation Examples:")
logging.info('============================================================')
for example_idx in range(0, 3):
random_index = random.randint(0, len(_translations) - 1)
logging.info(" " + '\u0332'.join(f"Example {example_idx}:"))
logging.info(f" Input: {_inputs[random_index]}")
logging.info(f" Prediction: {_translations[random_index]}")
logging.info(f" Ground Truth: {_ground_truths[random_index]}")
logging.info('============================================================')
else:
bleu_score = 0.0
bleu_score = torch.FloatTensor([bleu_score]).to(self.device)
# The BLEU score is computed on global rank 0 only; other ranks contribute 0.0, so the all_reduce sum effectively broadcasts it.
torch.distributed.all_reduce(bleu_score, op=torch.distributed.ReduceOp.SUM)
bleu_score = bleu_score.cpu().item()
loss_list.append(averaged_loss.cpu().numpy())
bleu_score_list.append(bleu_score)
if dataloader_idx == 0:
if self.multilingual:
self._log_multilingual_bleu_and_loss(dataloader_idx, bleu_score, averaged_loss, mode)
else:
self.log(f'{mode}_sacreBLEU', bleu_score, batch_size=1)
self.log(f'{mode}_loss', averaged_loss, prog_bar=True, batch_size=1)
else:
if self.multilingual:
self._log_multilingual_bleu_and_loss(dataloader_idx, bleu_score, averaged_loss, mode)
else:
self.log(f'{mode}_sacreBLEU_dl_index_{dataloader_idx}', bleu_score, batch_size=1)
self.log(f'{mode}_loss_dl_index_{dataloader_idx}', averaged_loss, prog_bar=False, batch_size=1)
outputs[dataloader_idx].clear() # free memory
if len(loss_list) > 1:
self.log(f"{mode}_loss_avg", np.mean(loss_list), sync_dist=True, batch_size=1)
self.log(f"{mode}_sacreBLEU_avg", np.mean(bleu_score_list), batch_size=1)
app_state = AppState()
if hasattr(self, "_train_ds"):
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=self._cfg.train_ds.global_batch_size,
micro_batch_size=self._cfg.train_ds.micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
def _log_multilingual_bleu_and_loss(self, dataloader_idx, bleu_score, loss, mode):
"""
Function to log multilingual BLEU scores with the right source-target language string instead of just the dataloader idx.
"""
# Check if one-many or many-one and log with lang ids instead of dataloader_idx
if isinstance(self.src_language, ListConfig):
translation_lang_string = f'{self.src_language[dataloader_idx]}-{self.tgt_language}'
else:
translation_lang_string = f'{self.src_language}-{self.tgt_language[dataloader_idx]}'
self.log(f'{mode}_sacreBLEU_{translation_lang_string}', bleu_score, sync_dist=True, batch_size=1)
self.log(f'{mode}_loss_{translation_lang_string}', loss, sync_dist=True, batch_size=1)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if hasattr(self, '_validation_ds'):
self._validation_dl = self._setup_eval_dataloader_from_config(
cfg=val_data_config, dataset=self._validation_ds
)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
if hasattr(self, '_test_ds'):
self._test_dl = self._setup_eval_dataloader_from_config(cfg=test_data_config, dataset=self._test_ds)
def setup_training_data(self, train_data_config: Optional[DictConfig]):
if hasattr(self, '_train_ds'):
consumed_samples = self.compute_consumed_samples(0)
self._train_dl = self._setup_megatron_dataloader_from_config(
cfg=train_data_config, dataset=self._train_ds, consumed_samples=consumed_samples
)
def _setup_megatron_dataloader_from_config(self, cfg, dataset, consumed_samples):
logging.info(f'Building dataloader with consumed samples: {consumed_samples}')
if isinstance(dataset, BlendableDataset):
collate_fn = dataset.datasets[0].collate_fn
else:
collate_fn = dataset.collate_fn
batch_sampler = MegatronPretrainingBatchSampler(
total_samples=len(dataset),
consumed_samples=consumed_samples,
micro_batch_size=cfg.micro_batch_size,
global_batch_size=cfg.global_batch_size,
data_parallel_rank=parallel_state.get_data_parallel_rank(),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
drop_last=True,
)
return torch.utils.data.DataLoader(
dataset,
batch_sampler=batch_sampler,
collate_fn=collate_fn,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
persistent_workers=True if cfg.num_workers > 0 else False,
)
def process_global_batch_for_text_translation_datasets(self, batch):
"""Override parent process_batch since TranslationDataset does not return dictionaries."""
# Convert each microbatch into a dictionary.
src_ids, src_mask, tgt_ids, tgt_mask, labels = batch
batch = {
'text_enc': src_ids,
'text_dec': tgt_ids,
'labels': labels,
'enc_mask': src_mask.long(), # super().process_batch() expects torch.int64
'dec_mask': tgt_mask.long(), # super().process_batch() expects torch.int64
'loss_mask': tgt_mask.long(), # super().process_batch() expects torch.int64
}
# Parent function will pad microbatches to the same length.
return self._process_global_batch_without_megatron_batch_sampler([batch], tokenizer=self.encoder_tokenizer)
def _build_eval_dataset(self, data_cfg):
# Set up prepend IDs for validation datasets even if not multilingual.
if self._cfg.train_ds.get('objective', 'nmt') == 'nmt-xlm' or (
self.multilingual and self.multilingual_type != MultilingualModelType.many_to_one
):
multilingual_ids = [self.multilingual_lang_to_id[lang] for lang in self.cfg.tgt_language]
dataset = MTEncDecModel._setup_eval_dataset_from_config(
cfg=data_cfg,
multilingual=True,
multilingual_ids=multilingual_ids,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
add_bos_eos_to_encoder=self._cfg.train_ds.get('objective', 'nmt')
!= 'nmt-xlm', # nmt-xlm does not add bos/eos to encoder while training so make sure this happens for validation as well.
)
else:
num_eval_datasets = len(data_cfg.src_file_name) if isinstance(data_cfg.src_file_name, ListConfig) else 1
multilingual_ids = [None] * num_eval_datasets
dataset = MTEncDecModel._setup_eval_dataset_from_config(
cfg=data_cfg,
multilingual=self.multilingual,
multilingual_ids=multilingual_ids,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
)
return dataset
def build_train_valid_test_datasets(self):
"""Builds the train, validation, and test datasets."""
self._train_ds = self.build_memmap_dataset_from_config(self._cfg.train_ds)
if self._cfg.validation_ds.get("dataset_type", "text") != "text":
raise ValueError(f"Validation dataset type must be 'text', found {self._cfg.validation_ds.dataset_type}")
self._validation_ds = self._build_eval_dataset(self._cfg.validation_ds)
if hasattr(self._cfg, 'test_ds'):
self._test_ds = self._build_eval_dataset(self._cfg.test_ds)
def _instantiate_memmap_dataset(
self, cfg, src_file, tgt_file, src_language, tgt_language, num_samples, prepend_id=None
):
if cfg.dataset_type == 'bin_memmap':
if cfg.get("objective", "nmt") == "nmt":
dataset = BinarizedMemmapSequenceToSequenceDataset(
src_dataset_prefix=src_file,
tgt_dataset_prefix=tgt_file,
src_tokenizer=self.encoder_tokenizer,
tgt_tokenizer=self.decoder_tokenizer,
max_src_seq_length=cfg.max_seq_length,
max_tgt_seq_length=cfg.max_seq_length,
max_num_samples=num_samples[0],
seed=self._cfg.seed,
prepend_id=prepend_id,
)
elif cfg.get("objective", "nmt") == "nmt-xlm":
# Pass sentinel tokens to the dataset after removing language tokens.
additional_special_ids = self.encoder_tokenizer.additional_special_tokens_ids
sentinel_tokens = [id for id in additional_special_ids if id not in self.multilingual_ids]
dataset = BinarizedMemmapCrossLingualMLMAndTranslationDataset(
src_dataset_prefix=src_file,
tgt_dataset_prefix=tgt_file,
src_tokenizer=self.encoder_tokenizer,
tgt_tokenizer=self.decoder_tokenizer,
src_language=src_language,
tgt_language=tgt_language,
max_src_seq_length=cfg.max_seq_length // 2,
max_tgt_seq_length=cfg.max_seq_length // 2,
max_seq_length_dec=cfg.max_seq_length,
max_num_samples=num_samples[0],
sampling_ratios=cfg.sampling_ratios,
sentinel_tokens=sentinel_tokens,
seed=self._cfg.seed,
)
elif cfg.dataset_type == 'text_memmap':
if cfg.get("objective", "nmt") == "nmt":
dataset = TextMemmapSequenceToSequenceDataset(
src_file_name=src_file,
tgt_file_name=tgt_file,
src_tokenizer=self.encoder_tokenizer,
tgt_tokenizer=self.decoder_tokenizer,
max_src_seq_length=cfg.max_seq_length,
max_tgt_seq_length=cfg.max_seq_length,
max_num_samples=num_samples[0],
seed=self._cfg.seed,
prepend_id=prepend_id,
)
elif cfg.get("objective", "nmt") == "nmt-xlm":
additional_special_ids = self.encoder_tokenizer.additional_special_tokens_ids
sentinel_tokens = [id for id in additional_special_ids if id not in self.multilingual_ids]
dataset = TextMemmapCrossLingualMLMAndTranslationDataset(
src_file_name=src_file,
tgt_file_name=tgt_file,
src_tokenizer=self.encoder_tokenizer,
tgt_tokenizer=self.decoder_tokenizer,
src_language=src_language,
tgt_language=tgt_language,
max_src_seq_length=cfg.max_seq_length // 2,
max_tgt_seq_length=cfg.max_seq_length // 2,
max_seq_length_dec=cfg.max_seq_length,
max_num_samples=num_samples[0],
sampling_ratios=cfg.sampling_ratios,
sentinel_tokens=sentinel_tokens,
seed=self._cfg.seed,
)
return dataset
def build_memmap_dataset_from_config(self, cfg: DictConfig):
"""Builds a memmap dataset from a existing binary based o nthe provided config."""
is_src_listconfig = isinstance(cfg.src_file_name, ListConfig)
is_tgt_listconfig = isinstance(cfg.tgt_file_name, ListConfig)
# If multilingual, make sure both source and target are list configs
if self.multilingual:
if not (is_src_listconfig and is_tgt_listconfig):
raise ValueError(
f"Multilingual datasets must be configured with a ListConfig for both src_file_name and tgt_file_name"
)
if (is_src_listconfig and not is_tgt_listconfig) or (is_tgt_listconfig and not is_src_listconfig):
raise ValueError(
f"Datasets must be configured with a ListConfig for both src_file_name and tgt_file_name or neither. Found only one of them as listconfig."
)
if is_src_listconfig and is_tgt_listconfig:
if len(cfg.src_file_name) != len(cfg.tgt_file_name):
raise ValueError(f"Datasets must have the same number of files in src_file_name and tgt_file_name")
if cfg.concat_sampling_probabilities is None or not isinstance(
cfg.concat_sampling_probabilities, ListConfig
):
raise ValueError(
f"concat_sampling_probabilities must be a ListConfig with the same number of files in src_file_name and tgt_file_name, found {cfg.concat_sampling_probabilities}"
)
if len(cfg.concat_sampling_probabilities) != len(cfg.src_file_name):
raise ValueError(
f"concat_sampling_probabilities must be of the same size as src_file_name and tgt_file_name. Provided size {len(cfg.concat_sampling_probabilities)}, number of datasets {len(cfg.src_file_name)}"
)
# Construct the data prefix list for `get_datasets_weights_and_num_samples()` that is of the format [weight1,file_name1,weight2,file_name2,...]
data_prefix = []
for weight, prefix in zip(cfg.concat_sampling_probabilities, cfg.src_file_name):
data_prefix.append(weight)
data_prefix.append(prefix)
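# For example (hypothetical values), concat_sampling_probabilities=[0.3, 0.7] with
# src_file_name=['a.src', 'b.src'] produces data_prefix=[0.3, 'a.src', 0.7, 'b.src'],
# the interleaved format expected by get_datasets_weights_and_num_samples().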
num_train_samples = [self.trainer.max_steps * self._cfg.global_batch_size]
_, _, num_train_samples_per_dataset = get_datasets_weights_and_num_samples(data_prefix, num_train_samples)
num_train_samples_after_blend = sum([x[0] for x in num_train_samples_per_dataset])
datasets = []
# For many -> one multilingual or bilingual models, we don't need to prepend a language token ID
if not self.multilingual or self.multilingual_type == MultilingualModelType.many_to_one:
multilingual_ids = [None] * len(cfg.src_file_name)
# For one -> many and many -> many multilingual models, we need to prepend a language token ID
else:
multilingual_ids = [self.multilingual_lang_to_id[lang] for lang in self.cfg.tgt_language]
for idx, (src_file, tgt_file, num_samples) in enumerate(
zip(cfg.src_file_name, cfg.tgt_file_name, num_train_samples_per_dataset)
):
dataset = self._instantiate_memmap_dataset(
cfg=cfg,
src_file=src_file,
tgt_file=tgt_file,
num_samples=num_samples,
prepend_id=multilingual_ids[idx],
src_language=self.src_language
if not isinstance(self.src_language, ListConfig)
else self.src_language[idx],
tgt_language=self.tgt_language
if not isinstance(self.tgt_language, ListConfig)
else self.tgt_language[idx],
)
datasets.append(dataset)
dataset = BlendableDataset(
datasets=datasets, weights=cfg.concat_sampling_probabilities, size=num_train_samples_after_blend
)
else:
dataset = self._instantiate_memmap_dataset(
cfg=cfg,
src_file=cfg.src_file_name,
tgt_file=cfg.tgt_file_name,
num_samples=[self.trainer.max_steps * self._cfg.global_batch_size],
src_language=self.src_language,
tgt_language=self.tgt_language,
)
return dataset
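    # A minimal sketch of a memmap dataset config consumed by build_memmap_dataset_from_config() in the
    # multilingual/blended case. File names and probabilities are hypothetical, and the exact set of keys
    # may differ between NeMo versions:
    #
    #   train_ds:
    #     dataset_type: text_memmap
    #     src_file_name: [train.en-de.en, train.en-fr.en]
    #     tgt_file_name: [train.en-de.de, train.en-fr.fr]
    #     concat_sampling_probabilities: [0.5, 0.5]
    #     max_seq_length: 512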
def list_available_models(self):
pass
def on_validation_epoch_start(self):
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=parallel_state.get_data_parallel_world_size(),
micro_batch_size=1,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
@torch.no_grad()
def translate(
self,
text: List[str],
source_lang: str = None,
target_lang: str = None,
return_beam_scores: bool = False,
log_timing: bool = False,
) -> List[str]:
"""
        Translates a list of sentences from the source language to the target language.
        The input should be regular text; this method performs its own tokenization/de-tokenization.
        Args:
            text: list of strings to translate
            source_lang: if not "ignore", the corresponding MosesTokenizer and MosesPunctNormalizer will be run
            target_lang: if not "ignore", the corresponding MosesDetokenizer will be run
return_beam_scores: if True, returns a list of translations and their corresponding beam scores.
log_timing: if True, prints timing information.
Returns:
list of translated strings
"""
# __TODO__: This will reset both source and target processors even if you want to reset just one.
# NOTE: This will also set up appropriate source and target processors for a given src/tgt language for multilingual models instead of creating a list of them.
if source_lang is not None or target_lang is not None:
self.source_processor, self.target_processor = MTEncDecModel.setup_pre_and_post_processing_utils(
source_lang, target_lang, self.encoder_tokenizer_library, self.decoder_tokenizer_library
)
mode = self.training
prepend_ids = []
if self.multilingual and self.multilingual_type != MultilingualModelType.many_to_one:
if target_lang is None:
raise ValueError("target_lang needs to be specified to run inference for multilingual model.")
tgt_symbol = self.encoder_tokenizer.token_to_id('<' + target_lang + '>')
if tgt_symbol in self.multilingual_ids:
prepend_ids = [tgt_symbol]
else:
print("WARNING: Target language ID not found in multilingual model. Prepending nothing.")
if log_timing:
timer = timers.NamedTimer()
else:
timer = None
cache = {
"timer": timer,
}
try:
self.eval()
self.training = False
src, src_mask = MTEncDecModel.prepare_inference_batch(
text=text,
prepend_ids=prepend_ids,
target=False,
source_processor=self.source_processor,
target_processor=self.target_processor,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
device=self.device,
)
with torch.inference_mode():
predicted_tokens_ids, _ = self.decode(
src,
src_mask,
src.size(1)
+ self._cfg.max_generation_delta, # Generate up to src-length + max generation delta. TODO: Implement better stopping when everything hits <EOS>.
tokenizer=self.decoder_tokenizer,
)
best_translations = self.postprocess_outputs(
outputs=predicted_tokens_ids, tokenizer=self.decoder_tokenizer, processor=self.target_processor
)
return_val = best_translations
finally:
self.train(mode=mode)
if log_timing:
timing = timer.export()
timing["mean_src_length"] = src_mask.sum().cpu().item() / src_mask.shape[0]
tgt, tgt_mask = self.prepare_inference_batch(
text=best_translations,
prepend_ids=prepend_ids,
target=True,
source_processor=self.source_processor,
target_processor=self.target_processor,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
device=self.device,
)
timing["mean_tgt_length"] = tgt_mask.sum().cpu().item() / tgt_mask.shape[0]
if type(return_val) is tuple:
return_val = return_val + (timing,)
else:
return_val = (return_val, timing)
return return_val
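    # Usage sketch for translate() (not executed here). It assumes `model` is a restored instance of this
    # class and that "en"/"de" are languages it was trained on:
    #
    #   translations = model.translate(
    #       ["Machine translation maps text between languages."],
    #       source_lang="en",
    #       target_lang="de",
    #   )
    #   print(translations[0])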
def itn_translate_tn(
self,
text: List[str],
source_lang: str = None,
target_lang: str = None,
return_beam_scores: bool = False,
log_timing: bool = False,
inverse_normalizer=None,
normalizer=None,
prepend_tgt_lang_id: bool = False,
) -> List[str]:
"""
Calls the translate() method with the option of running ITN (inverse text-normalization) on the input and TN (text-normalization) on the output.
Pipeline : ITN -> translate -> TN
NOTE: ITN and TN objects must be initialized with the right languages.
Args:
text: list of strings to translate
source_lang: if not "ignore", corresponding MosesTokenizer and MosesPunctNormalizer will be run
            target_lang: if not "ignore", corresponding MosesDetokenizer will be run
return_beam_scores: if True, returns a list of translations and their corresponding beam scores.
log_timing: if True, prints timing information.
inverse_normalizer: instance of nemo_text_processing.inverse_text_normalization.inverse_normalize.InverseNormalizer
normalizer: instance of nemo_text_processing.text_normalization.normalize.Normalizer
Returns:
list of translated strings
"""
if inverse_normalizer is not None:
text = [inverse_normalizer.normalize(example) for example in text]
        # NOTE: translate() above does not accept a prepend_tgt_lang_id argument, so it is not forwarded here;
        # target-language prepending for multilingual models is handled inside translate() itself.
        translations = self.translate(text, source_lang, target_lang, return_beam_scores, log_timing)
if normalizer is not None:
translations = [normalizer.normalize(example) for example in translations]
return translations
def on_test_start(self) -> None:
self.trainer.test_loop._data_fetcher = GlobalBatchDataFetcher()
@property
def encoder(self):
return EncEmb(
self.enc_dec_model.encoder_embedding,
self.enc_dec_model.enc_dec_model.encoder,
self.enc_dec_model.encoder_relative_position_embedding,
self.device,
)
@property
def decoder(self):
return DecEmb(
self.enc_dec_model.decoder_embedding,
self.enc_dec_model.enc_dec_model.decoder,
self.enc_dec_model.decoder_relative_position_embedding,
self.device,
)
@property
def log_softmax(self):
return TokensHeadEmb(self.enc_dec_model.decoder_embedding, self.enc_dec_model.tokens_head, self.device)
@property
def input_module(self):
return self.encoder
def list_export_subnets(self):
return ['encoder', 'log_softmax', 'decoder']
| NeMo-main | nemo/collections/nlp/models/machine_translation/megatron_nmt_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_bottleneck_model import MTBottleneckModel
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_model import MTEncDecModel
| NeMo-main | nemo/collections/nlp/models/machine_translation/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import os
import random
from collections import OrderedDict
from math import ceil
from pathlib import Path
from typing import Dict, List, Optional, Union
import numpy as np
import torch
import torch.distributed as dist
import torch.utils.data as pt_data
from omegaconf import DictConfig, ListConfig, OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.utilities import rank_zero_only
from sacrebleu import corpus_bleu
from nemo.collections.common.data import ConcatDataset
from nemo.collections.common.losses import NLLLoss, SmoothedCrossEntropyLoss
from nemo.collections.common.metrics import GlobalAverageLossMetric
from nemo.collections.common.parts import transformer_weights_init
from nemo.collections.common.tokenizers.bytelevel_tokenizers import ByteLevelProcessor
from nemo.collections.common.tokenizers.chinese_tokenizers import ChineseProcessor
from nemo.collections.common.tokenizers.en_ja_tokenizers import EnJaProcessor, JaMecabProcessor
from nemo.collections.common.tokenizers.indic_tokenizers import IndicProcessor
from nemo.collections.common.tokenizers.moses_tokenizers import MosesProcessor
from nemo.collections.common.tokenizers.tabular_tokenizer import TabularTokenizer
from nemo.collections.nlp.data import TarredTranslationDataset, TranslationDataset
from nemo.collections.nlp.models.enc_dec_nlp_model import EncDecNLPModel
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_config import MTEncDecModelConfig
from nemo.collections.nlp.modules.common import TokenClassifier
from nemo.collections.nlp.modules.common.lm_utils import get_transformer
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.collections.nlp.modules.common.transformer import BeamSearchSequenceGenerator, TopKSequenceGenerator
from nemo.core.classes import Exportable
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.utils import logging, model_utils, timers
__all__ = ['MTEncDecModel']
class MTEncDecModel(EncDecNLPModel, Exportable):
"""
Encoder-decoder machine translation model.
"""
def __init__(self, cfg: MTEncDecModelConfig, trainer: Trainer = None):
cfg = model_utils.convert_model_config_to_dict_config(cfg)
        # Get the global rank and total number of GPU workers for IterableDataset partitioning, if applicable.
        # global_rank and local_rank are set by LightningModule in Lightning 1.2.0.
self.world_size = 1
if trainer is not None:
self.world_size = trainer.num_nodes * trainer.num_devices
cfg = model_utils.maybe_update_config_version(cfg)
self.src_language = cfg.get("src_language", None)
self.tgt_language = cfg.get("tgt_language", None)
self.multilingual = cfg.get("multilingual", False)
self.multilingual_ids = []
self.special_tokens = {}
self.encoder_tokenizer_library = cfg.encoder_tokenizer.get('library', 'yttm')
self.decoder_tokenizer_library = cfg.decoder_tokenizer.get('library', 'yttm')
self.validate_input_ids = cfg.get("validate_input_ids", True)
if self.multilingual:
if isinstance(self.src_language, ListConfig) and isinstance(self.tgt_language, ListConfig):
raise ValueError(
"cfg.src_language and cfg.tgt_language cannot both be lists. We only support many-to-one or one-to-many multilingual models."
)
elif isinstance(self.src_language, ListConfig):
pass
elif isinstance(self.tgt_language, ListConfig):
for lng in self.tgt_language:
self.special_tokens["<" + lng + ">"] = "<" + lng + ">"
else:
raise ValueError(
"Expect either cfg.src_language or cfg.tgt_language to be a list when multilingual=True."
)
self.shared_embeddings = cfg.get("shared_embeddings", False)
        # Instantiate tokenizers and register them to be saved with the NeMo Model archive.
        # After this call, there will be self.encoder_tokenizer and self.decoder_tokenizer,
        # which can convert between tokens and token_ids for the SRC and TGT languages respectively.
encoder_tokenizer_model, decoder_tokenizer_model, encoder_vocab_file = None, None, None
if cfg.encoder_tokenizer.get('tokenizer_model') is not None:
encoder_tokenizer_model = self.register_artifact(
"encoder_tokenizer.tokenizer_model", cfg.encoder_tokenizer.get('tokenizer_model')
)
if cfg.decoder_tokenizer.get('tokenizer_model') is not None:
decoder_tokenizer_model = self.register_artifact(
"decoder_tokenizer.tokenizer_model", cfg.decoder_tokenizer.get('tokenizer_model')
)
if cfg.encoder_tokenizer.get('vocab_file') is not None:
encoder_vocab_file = (
self.register_artifact("encoder_tokenizer.vocab_file", cfg.encoder_tokenizer.get('vocab_file')),
)
encoder_tokenizer, decoder_tokenizer = MTEncDecModel.setup_enc_dec_tokenizers(
encoder_tokenizer_library=self.encoder_tokenizer_library,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=cfg.encoder_tokenizer.get('bpe_dropout', 0.0)
if cfg.encoder_tokenizer.get('bpe_dropout', 0.0) is not None
else 0.0,
encoder_model_name=cfg.encoder.get('model_name') if hasattr(cfg.encoder, 'model_name') else None,
encoder_r2l=cfg.encoder_tokenizer.get('r2l', False),
decoder_tokenizer_library=self.decoder_tokenizer_library,
encoder_tokenizer_vocab_file=encoder_vocab_file,
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=cfg.decoder_tokenizer.get('bpe_dropout', 0.0)
if cfg.decoder_tokenizer.get('bpe_dropout', 0.0) is not None
else 0.0,
decoder_model_name=cfg.decoder.get('model_name') if hasattr(cfg.decoder, 'model_name') else None,
decoder_r2l=cfg.decoder_tokenizer.get('r2l', False),
special_tokens=self.special_tokens,
encoder_sentencepiece_legacy=cfg.encoder_tokenizer.get('sentencepiece_legacy', False),
decoder_sentencepiece_legacy=cfg.decoder_tokenizer.get('sentencepiece_legacy', False),
)
self.encoder_tokenizer, self.decoder_tokenizer = encoder_tokenizer, decoder_tokenizer
if self.multilingual:
(
self.source_processor_list,
self.target_processor_list,
self.multilingual_lang_to_id,
) = MTEncDecModel.setup_multilingual_ids_and_processors(
self.src_language,
self.tgt_language,
self.encoder_tokenizer,
self.decoder_tokenizer,
self.encoder_tokenizer_library,
self.decoder_tokenizer_library,
)
self.multilingual_ids = list(self.multilingual_lang_to_id.values())
else:
# After this call, the model will have self.source_processor and self.target_processor objects
self.source_processor, self.target_processor = MTEncDecModel.setup_pre_and_post_processing_utils(
self.src_language, self.tgt_language, self.encoder_tokenizer_library, self.decoder_tokenizer_library
)
self.multilingual_ids = [None]
# TODO: Why is this base constructor call so late in the game?
super().__init__(cfg=cfg, trainer=trainer)
# encoder from NeMo, Megatron-LM, or HuggingFace
encoder_cfg_dict = OmegaConf.to_container(cfg.get('encoder'))
encoder_cfg_dict['vocab_size'] = self.encoder_vocab_size
library = encoder_cfg_dict.pop('library', 'nemo')
model_name = encoder_cfg_dict.pop('model_name', None)
pretrained = encoder_cfg_dict.pop('pretrained', False)
checkpoint_file = encoder_cfg_dict.pop('checkpoint_file', None)
if isinstance(self.encoder_tokenizer, TabularTokenizer):
# TabularTokenizer does not include a padding token, so this uses the prior default of 0.
encoder_padding_idx = 0
else:
encoder_padding_idx = self.encoder_tokenizer.pad_id
self.encoder = get_transformer(
library=library,
model_name=model_name,
pretrained=pretrained,
config_dict=encoder_cfg_dict,
encoder=True,
pre_ln_final_layer_norm=encoder_cfg_dict.get('pre_ln_final_layer_norm', False),
checkpoint_file=checkpoint_file,
padding_idx=encoder_padding_idx,
)
# decoder from NeMo, Megatron-LM, or HuggingFace
decoder_cfg_dict = OmegaConf.to_container(cfg.get('decoder'))
decoder_cfg_dict['vocab_size'] = self.decoder_vocab_size
library = decoder_cfg_dict.pop('library', 'nemo')
model_name = decoder_cfg_dict.pop('model_name', None)
pretrained = decoder_cfg_dict.pop('pretrained', False)
if isinstance(self.decoder_tokenizer, TabularTokenizer):
# TabularTokenizer does not include a padding token, so this uses the prior default of 0.
decoder_padding_idx = 0
else:
decoder_padding_idx = self.decoder_tokenizer.pad_id
self.decoder = get_transformer(
library=library,
model_name=model_name,
pretrained=pretrained,
config_dict=decoder_cfg_dict,
encoder=False,
pre_ln_final_layer_norm=decoder_cfg_dict.get('pre_ln_final_layer_norm', False),
padding_idx=decoder_padding_idx,
)
# validate hidden_size of encoder and decoder
self._validate_encoder_decoder_hidden_size()
self.log_softmax = TokenClassifier(
hidden_size=self.decoder.hidden_size,
num_classes=self.decoder_vocab_size,
activation=cfg.head.activation,
log_softmax=cfg.head.log_softmax,
dropout=cfg.head.dropout,
use_transformer_init=cfg.head.use_transformer_init,
)
self.beam_search = BeamSearchSequenceGenerator(
embedding=self.decoder.embedding,
decoder=self.decoder.decoder,
log_softmax=self.log_softmax,
max_sequence_length=self.decoder.max_sequence_length,
beam_size=cfg.beam_size,
bos=self.decoder_tokenizer.bos_id,
pad=self.decoder_tokenizer.pad_id,
eos=self.decoder_tokenizer.eos_id,
len_pen=cfg.len_pen,
max_delta_length=cfg.max_generation_delta,
)
# tie embedding weights
if self.shared_embeddings:
if not cfg.get("shared_tokenizer", True):
raise ValueError("shared_tokenizer cannot be False when shared_embeddings is True")
# validate vocabulary size and embedding dimension
if (
self.encoder.embedding.token_embedding.weight.shape
!= self.decoder.embedding.token_embedding.weight.shape
):
raise ValueError(
f"Cannot tie encoder and decoder embeddings due to mismatch in embedding sizes "
f"(num_embeddings, embedding_dim): {self.encoder.embedding.token_embedding.weight.shape} (encoder) "
f"{self.decoder.embedding.token_embedding.weight.shape} (decoder)"
)
self.encoder.embedding.token_embedding.weight = self.decoder.embedding.token_embedding.weight
# tie weights of embedding and softmax matrices
self.log_softmax.mlp.layer0.weight = self.decoder.embedding.token_embedding.weight
# TODO: encoder and decoder with different hidden size?
std_init_range = 1 / self.encoder.hidden_size ** 0.5
# initialize weights if not using pretrained encoder/decoder
if not self._cfg.encoder.get('pretrained', False):
self.encoder.apply(lambda module: transformer_weights_init(module, std_init_range))
if not self._cfg.decoder.get('pretrained', False):
self.decoder.apply(lambda module: transformer_weights_init(module, std_init_range))
self.log_softmax.apply(lambda module: transformer_weights_init(module, std_init_range))
self.loss_fn = SmoothedCrossEntropyLoss(
pad_id=self.decoder_tokenizer.pad_id, label_smoothing=cfg.label_smoothing
)
self.eval_loss_fn = NLLLoss(ignore_index=self.decoder_tokenizer.pad_id)
@classmethod
def setup_multilingual_ids_and_processors(
cls,
src_language,
tgt_language,
encoder_tokenizer,
decoder_tokenizer,
encoder_tokenizer_library,
decoder_tokenizer_library,
):
multilingual_ids = OrderedDict()
# Determine all of the language IDs that need to be added as special tokens.
if isinstance(src_language, ListConfig) and isinstance(tgt_language, ListConfig):
assert len(src_language) == len(tgt_language)
all_languages = list(set(tgt_language + src_language))
elif isinstance(tgt_language, ListConfig):
all_languages = tgt_language
elif not isinstance(src_language, ListConfig) and not isinstance(tgt_language, ListConfig):
all_languages = [src_language, tgt_language]
else:
all_languages = []
# If target is a list config, then add all language ID tokens to the tokenizer.
# When both src, tgt are lists, we concat and take a unique of all lang IDs.
# If only tgt lang is a list, then we only add those lang IDs to the tokenizer.
if all_languages != []:
for lng in all_languages:
if len(encoder_tokenizer.text_to_ids(f"<{lng}>")) != 1:
encoder_tokenizer.add_special_tokens({f"<{lng}>": f"<{lng}>"})
if len(decoder_tokenizer.text_to_ids(f"<{lng}>")) != 1:
decoder_tokenizer.add_special_tokens({f"<{lng}>": f"<{lng}>"})
# Make sure that we are adding the same language ID to both tokenizers. If this assert fails it means the tokenizers were different to begin with.
assert encoder_tokenizer.text_to_ids(f"<{lng}>")[0] == decoder_tokenizer.text_to_ids(f"<{lng}>")[0]
multilingual_ids[lng] = encoder_tokenizer.text_to_ids(f"<{lng}>")[0]
if isinstance(src_language, ListConfig) and not isinstance(tgt_language, ListConfig):
tgt_language = [tgt_language] * len(src_language)
elif isinstance(tgt_language, ListConfig) and not isinstance(src_language, ListConfig):
src_language = [src_language] * len(tgt_language)
else:
pass
source_processor_list = []
target_processor_list = []
for src_lng, tgt_lng in zip(src_language, tgt_language):
src_prcsr, tgt_prscr = MTEncDecModel.setup_pre_and_post_processing_utils(
src_lng, tgt_lng, encoder_tokenizer_library, decoder_tokenizer_library
)
source_processor_list.append(src_prcsr)
target_processor_list.append(tgt_prscr)
return source_processor_list, target_processor_list, multilingual_ids
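    # Illustrative example (token IDs are hypothetical): with src_language="en" and tgt_language=["de", "fr"],
    # only the target-language tokens "<de>" and "<fr>" are registered, processors are built for the pairs
    # (en, de) and (en, fr), and the returned mapping looks like
    #   multilingual_ids == OrderedDict([("de", 32001), ("fr", 32002)])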
def _validate_encoder_decoder_hidden_size(self):
"""
Validate encoder and decoder hidden sizes, and enforce same size.
        Can be overridden by child classes to support different encoder and decoder
        hidden sizes.
"""
if self.encoder.hidden_size != self.decoder.hidden_size:
raise ValueError(
f"Class does not support encoder.hidden_size ({self.encoder.hidden_size}) != decoder.hidden_size ({self.decoder.hidden_size}). Please use bottleneck architecture instead (i.e., model.encoder.arch = 'seq2seq' in conf/aayn_bottleneck.yaml)"
)
@classmethod
def filter_predicted_ids(cls, ids, decoder_tokenizer):
ids[ids >= decoder_tokenizer.vocab_size] = decoder_tokenizer.unk_id
return ids
def test_encoder_ids(self, ids, raise_error=False):
invalid_ids = torch.logical_or((ids >= self.encoder_tokenizer.vocab_size).any(), (ids < 0).any(),)
if raise_error and invalid_ids:
raise ValueError("Encoder ids are out of range (tip: check encoder tokenizer)")
return not invalid_ids
def test_decoder_ids(self, ids, raise_error=False):
invalid_ids = torch.logical_or((ids >= self.decoder_tokenizer.vocab_size).any(), (ids < 0).any(),)
if raise_error and invalid_ids:
raise ValueError("Decoder ids are out of range (tip: check decoder tokenizer)")
return not invalid_ids
@typecheck()
def forward(self, src, src_mask, tgt, tgt_mask):
if self.validate_input_ids:
            # test src/tgt for id range (i.e., help in catching a wrong tokenizer)
self.test_encoder_ids(src, raise_error=True)
self.test_decoder_ids(tgt, raise_error=True)
src_hiddens = self.encoder(input_ids=src, encoder_mask=src_mask)
tgt_hiddens = self.decoder(
input_ids=tgt, decoder_mask=tgt_mask, encoder_embeddings=src_hiddens, encoder_mask=src_mask
)
log_probs = self.log_softmax(hidden_states=tgt_hiddens)
return log_probs
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
# forward pass
for i in range(len(batch)):
if batch[i].ndim == 3:
# Dataset returns already batched data and the first dimension of size 1 added by DataLoader
# is excess.
batch[i] = batch[i].squeeze(dim=0)
src_ids, src_mask, tgt_ids, tgt_mask, labels = batch
log_probs = self(src_ids, src_mask, tgt_ids, tgt_mask)
train_loss = self.loss_fn(log_probs=log_probs, labels=labels)
tensorboard_logs = {
'train_loss': train_loss,
'lr': self._optimizer.param_groups[0]['lr'],
}
return {'loss': train_loss, 'log': tensorboard_logs}
def eval_step(self, batch, batch_idx, mode, dataloader_idx=0):
for i in range(len(batch)):
if batch[i].ndim == 3:
# Dataset returns already batched data and the first dimension of size 1 added by DataLoader
# is excess.
batch[i] = batch[i].squeeze(dim=0)
if self.multilingual:
self.source_processor = self.source_processor_list[dataloader_idx]
self.target_processor = self.target_processor_list[dataloader_idx]
src_ids, src_mask, tgt_ids, tgt_mask, labels = batch
log_probs = self(src_ids, src_mask, tgt_ids, tgt_mask)
eval_loss = self.eval_loss_fn(log_probs=log_probs, labels=labels)
# this will run encoder twice -- TODO: potentially fix
inputs, translations = self.batch_translate(src=src_ids, src_mask=src_mask)
if dataloader_idx == 0:
getattr(self, f'{mode}_loss')(loss=eval_loss, num_measurements=log_probs.shape[0] * log_probs.shape[1])
else:
getattr(self, f'{mode}_loss_{dataloader_idx}')(
loss=eval_loss, num_measurements=log_probs.shape[0] * log_probs.shape[1]
)
np_tgt = tgt_ids.detach().cpu().numpy()
ground_truths = [self.decoder_tokenizer.ids_to_text(tgt) for tgt in np_tgt]
ground_truths = [self.target_processor.detokenize(tgt.split(' ')) for tgt in ground_truths]
num_non_pad_tokens = np.not_equal(np_tgt, self.decoder_tokenizer.pad_id).sum().item()
return {
'inputs': inputs,
'translations': translations,
'ground_truths': ground_truths,
'num_non_pad_tokens': num_non_pad_tokens,
}
def test_step(self, batch, batch_idx, dataloader_idx=0):
loss = self.eval_step(batch, batch_idx, 'test', dataloader_idx)
if type(self.trainer.test_dataloaders) == list and len(self.trainer.test_dataloaders) > 1:
self.test_step_outputs[dataloader_idx].append(loss)
else:
self.test_step_outputs.append(loss)
@rank_zero_only
def log_param_stats(self):
for name, p in self.named_parameters():
if p.requires_grad:
self.trainer.logger.experiment.add_histogram(name + '_hist', p, global_step=self.global_step)
self.trainer.logger.experiment.add_scalars(
name,
{'mean': p.mean(), 'stddev': p.std(), 'max': p.max(), 'min': p.min()},
global_step=self.global_step,
)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
loss = self.eval_step(batch, batch_idx, 'val', dataloader_idx)
if type(self.trainer.val_dataloaders) == list and len(self.trainer.val_dataloaders) > 1:
self.validation_step_outputs[dataloader_idx].append(loss)
else:
self.validation_step_outputs.append(loss)
return loss
def eval_epoch_end(self, outputs, mode, global_rank):
        # if the user specifies a single validation dataloader, PTL gives a list of dictionaries instead of a list of lists of dictionaries
if isinstance(outputs[0], dict):
outputs = [outputs]
loss_list = []
sb_score_list = []
for dataloader_idx, output in enumerate(outputs):
if dataloader_idx == 0:
eval_loss = getattr(self, f'{mode}_loss').compute()
else:
eval_loss = getattr(self, f'{mode}_loss_{dataloader_idx}').compute()
inputs = list(itertools.chain(*[x['inputs'] for x in output]))
translations = list(itertools.chain(*[x['translations'] for x in output]))
ground_truths = list(itertools.chain(*[x['ground_truths'] for x in output]))
assert len(translations) == len(inputs)
assert len(translations) == len(ground_truths)
# Gather translations and ground truths from all workers
tr_and_gt = [None for _ in range(self.world_size)]
# we also need to drop pairs where ground truth is an empty string
dist.all_gather_object(
tr_and_gt, [(t, g) for (t, g) in zip(translations, ground_truths) if g.strip() != '']
)
if global_rank == 0:
_translations = []
_ground_truths = []
for rank in range(0, self.world_size):
_translations += [t for (t, g) in tr_and_gt[rank]]
_ground_truths += [g for (t, g) in tr_and_gt[rank]]
if self.multilingual and isinstance(self.tgt_language, ListConfig):
tgt_language = self.tgt_language[dataloader_idx]
else:
tgt_language = self.tgt_language
if tgt_language in ['ja', 'ja-mecab']:
sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize="ja-mecab")
elif tgt_language in ['zh']:
sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize="zh")
else:
sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize="13a")
                # because the reduction op later is average (over world_size)
sb_score = sacre_bleu.score * self.world_size
dataset_name = "Validation" if mode == 'val' else "Test"
logging.info(
f"Dataset name: {dataset_name}, Dataloader index: {dataloader_idx}, Set size: {len(translations)}"
)
logging.info(
f"Dataset name: {dataset_name}, Dataloader index: {dataloader_idx}, Val Loss = {eval_loss}"
)
logging.info(
f"Dataset name: {dataset_name}, Dataloader index: {dataloader_idx}, Sacre BLEU = {sb_score / self.world_size}"
)
logging.info(
f"Dataset name: {dataset_name}, Dataloader index: {dataloader_idx}, Translation Examples:"
)
for i in range(0, 3):
ind = random.randint(0, len(translations) - 1)
logging.info(" " + '\u0332'.join(f"Example {i}:"))
logging.info(f" Input: {inputs[ind]}")
logging.info(f" Prediction: {translations[ind]}")
logging.info(f" Ground Truth: {ground_truths[ind]}")
else:
sb_score = 0.0
loss_list.append(eval_loss.cpu().numpy())
sb_score_list.append(sb_score)
if dataloader_idx == 0:
self.log(f"{mode}_loss", eval_loss, sync_dist=True)
self.log(f"{mode}_sacreBLEU", sb_score, sync_dist=True)
getattr(self, f'{mode}_loss').reset()
else:
self.log(f"{mode}_loss_dl_index_{dataloader_idx}", eval_loss, sync_dist=True)
self.log(f"{mode}_sacreBLEU_dl_index_{dataloader_idx}", sb_score, sync_dist=True)
getattr(self, f'{mode}_loss_{dataloader_idx}').reset()
outputs[dataloader_idx].clear() # free memory
if len(loss_list) > 1:
self.log(f"{mode}_loss_avg", np.mean(loss_list), sync_dist=True)
self.log(f"{mode}_sacreBLEU_avg", np.mean(sb_score_list), sync_dist=True)
def on_validation_epoch_end(self):
"""
        Called at the end of validation to aggregate the outputs collected in
        self.validation_step_outputs by the individual validation steps.
"""
self.eval_epoch_end(self.validation_step_outputs, 'val', self.global_rank)
def on_test_epoch_end(self):
self.eval_epoch_end(self.test_step_outputs, 'test', self.global_rank)
@classmethod
def setup_enc_dec_tokenizers(
cls,
encoder_tokenizer_library=None,
encoder_tokenizer_model=None,
encoder_bpe_dropout=0.0,
encoder_model_name=None,
encoder_r2l=False,
encoder_tokenizer_vocab_file=None,
decoder_tokenizer_library=None,
decoder_tokenizer_model=None,
decoder_bpe_dropout=0.0,
decoder_model_name=None,
decoder_r2l=False,
encoder_sentencepiece_legacy=False,
decoder_sentencepiece_legacy=False,
special_tokens={},
):
supported_tokenizers = ['yttm', 'huggingface', 'sentencepiece', 'megatron', 'byte-level']
if (
encoder_tokenizer_library not in supported_tokenizers
or decoder_tokenizer_library not in supported_tokenizers
):
raise NotImplementedError(f"Currently we only support tokenizers in {supported_tokenizers}.")
encoder_tokenizer = get_nmt_tokenizer(
library=encoder_tokenizer_library,
tokenizer_model=encoder_tokenizer_model,
bpe_dropout=encoder_bpe_dropout,
model_name=encoder_model_name,
vocab_file=encoder_tokenizer_vocab_file,
special_tokens=special_tokens,
use_fast=False,
r2l=encoder_r2l,
legacy=encoder_sentencepiece_legacy,
)
decoder_tokenizer = get_nmt_tokenizer(
library=decoder_tokenizer_library,
tokenizer_model=decoder_tokenizer_model,
bpe_dropout=decoder_bpe_dropout,
model_name=decoder_model_name,
vocab_file=None,
special_tokens=special_tokens,
use_fast=False,
r2l=decoder_r2l,
legacy=decoder_sentencepiece_legacy,
)
# validate no token is negative for sentencepiece tokenizers
for tok_name, tok_library, tok_model, legacy in [
("encoder_tokenizer", encoder_tokenizer_library, encoder_tokenizer, encoder_sentencepiece_legacy),
("decoder_tokenizer", decoder_tokenizer_library, decoder_tokenizer, decoder_sentencepiece_legacy),
]:
if tok_library == 'sentencepiece':
negative_tokens = []
for n in ["eos_id", "bos_id", "unk_id", "pad_id"]:
v = getattr(tok_model.tokenizer, n)()
if v < 0:
negative_tokens.append(f"{n}={v}")
if negative_tokens and not legacy:
raise ValueError(
f"{tok_name}=sentencepiece has invalid negative special tokens = {negative_tokens}"
)
# If using the legacy sentencepiece tokenizer, we can add the missing tokens as "special" tokens.
else:
# If using sentencepiece legacy, eos, bos and pad need to be set/added differently.
if legacy:
                        # bos, eos, pad and unk may already be present in the provided spm .model file; if they are, use them.
if not hasattr(tok_model, 'pad_token'):
if hasattr(tok_model.tokenizer, 'pad_id') and tok_model.tokenizer.pad_id() > 0:
tok_model.pad_token = tok_model.tokenizer.id_to_piece(tok_model.tokenizer.pad_id())
else:
tok_model.add_special_tokens({'pad_token': '<pad>'})
else:
tok_model.add_special_tokens({'pad_token': '<pad>'})
if not hasattr(tok_model, 'bos_token'):
if hasattr(tok_model.tokenizer, 'bos_id') and tok_model.tokenizer.bos_id() > 0:
tok_model.bos_token = tok_model.tokenizer.id_to_piece(tok_model.tokenizer.bos_id())
else:
tok_model.add_special_tokens({'bos_token': '<bos>'})
else:
tok_model.add_special_tokens({'bos_token': '<s>'})
if not hasattr(tok_model, 'eos_token'):
if hasattr(tok_model.tokenizer, 'eos_id') and tok_model.tokenizer.eos_id() > 0:
tok_model.eos_token = tok_model.tokenizer.id_to_piece(tok_model.tokenizer.eos_id())
else:
tok_model.add_special_tokens({'eos_token': '<eos>'})
else:
tok_model.add_special_tokens({'eos_token': '</s>'})
return encoder_tokenizer, decoder_tokenizer
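    # Minimal sketch of calling this tokenizer factory directly (the .model paths are hypothetical); in normal
    # use __init__ drives it from the model config instead:
    #
    #   enc_tok, dec_tok = MTEncDecModel.setup_enc_dec_tokenizers(
    #       encoder_tokenizer_library="sentencepiece",
    #       encoder_tokenizer_model="/path/to/encoder_spm.model",
    #       decoder_tokenizer_library="sentencepiece",
    #       decoder_tokenizer_model="/path/to/decoder_spm.model",
    #   )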
def setup_training_data(self, train_data_config: Optional[DictConfig]):
self._train_ds = MTEncDecModel._setup_dataset_from_config(
cfg=train_data_config,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
global_rank=self.global_rank,
world_size=self.world_size,
multilingual=self.multilingual,
multilingual_ids=self.multilingual_ids,
)
self._train_dl = MTEncDecModel._setup_dataloader_from_config(cfg=train_data_config, dataset=self._train_ds,)
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if 'use_tarred_dataset' in train_data_config and train_data_config['use_tarred_dataset']:
# We also need to check if limit_train_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # training batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if self._trainer is not None and isinstance(self._trainer.limit_train_batches, float):
self._trainer.limit_train_batches = int(
self._trainer.limit_train_batches * ceil(len(self._train_dl.dataset) / self.world_size)
)
elif self._trainer is None:
logging.warning(
"Model Trainer was not set before constructing the dataset, incorrect number of "
"training batches will be used. Please set the trainer and rebuild the dataset."
)
def setup_multiple_validation_data(self, val_data_config: Union[DictConfig, Dict]):
self.setup_validation_data(val_data_config)
def setup_multiple_test_data(self, test_data_config: Union[DictConfig, Dict]):
self.setup_test_data(test_data_config)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
self._validation_ds = MTEncDecModel._setup_eval_dataset_from_config(
cfg=val_data_config,
multilingual=self.multilingual,
multilingual_ids=self.multilingual_ids,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
)
self._validation_dl = MTEncDecModel._setup_eval_dataloader_from_config(
cfg=val_data_config, datasets=self._validation_ds
)
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if 'use_tarred_dataset' in val_data_config and val_data_config['use_tarred_dataset']:
# We also need to check if limit_val_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # validation batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if self._trainer is not None and isinstance(self._trainer.limit_val_batches, float):
self._trainer.limit_val_batches = int(
self._trainer.limit_val_batches * ceil(len(self._validation_dl.dataset) / self.world_size)
)
elif self._trainer is None:
logging.warning(
"Model Trainer was not set before constructing the dataset, incorrect number of "
"validation batches will be used. Please set the trainer and rebuild the dataset."
)
# instantiate Torchmetric for each val dataloader
if self._validation_dl is not None:
for dataloader_idx in range(len(self._validation_dl)):
if dataloader_idx == 0:
setattr(
self, f'val_loss', GlobalAverageLossMetric(dist_sync_on_step=False, take_avg_loss=True),
)
else:
setattr(
self,
f'val_loss_{dataloader_idx}',
GlobalAverageLossMetric(dist_sync_on_step=False, take_avg_loss=True),
)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
self._test_ds = MTEncDecModel._setup_eval_dataset_from_config(
cfg=test_data_config,
multilingual=self.multilingual,
multilingual_ids=self.multilingual_ids,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
)
self._test_dl = MTEncDecModel._setup_eval_dataloader_from_config(cfg=test_data_config, datasets=self._test_ds)
# instantiate Torchmetric for each test dataloader
if self._test_dl is not None:
for dataloader_idx in range(len(self._test_dl)):
if dataloader_idx == 0:
setattr(
self, f'test_loss', GlobalAverageLossMetric(dist_sync_on_step=False, take_avg_loss=True),
)
else:
setattr(
self,
f'test_loss_{dataloader_idx}',
GlobalAverageLossMetric(dist_sync_on_step=False, take_avg_loss=True),
)
@classmethod
def _setup_dataset_from_config(
cls,
cfg: DictConfig,
encoder_tokenizer,
decoder_tokenizer,
global_rank,
world_size,
multilingual,
multilingual_ids,
):
if cfg.get("use_tarred_dataset", False) or cfg.get("dataset_type", "") == "tarred":
if cfg.get("metadata_file") is None:
raise FileNotFoundError("Trying to use tarred data set but could not find metadata path in config.")
metadata_file_list = cfg.get('metadata_file')
tar_files_list = cfg.get('tar_files', None)
if isinstance(metadata_file_list, str):
metadata_file_list = [metadata_file_list]
if tar_files_list is not None and isinstance(tar_files_list, str):
tar_files_list = [tar_files_list]
if tar_files_list is not None and len(tar_files_list) != len(metadata_file_list):
raise ValueError('The config must have the same number of tarfile paths and metadata file paths.')
datasets = []
for idx, metadata_file in enumerate(metadata_file_list):
with open(metadata_file) as metadata_reader:
metadata = json.load(metadata_reader)
if tar_files_list is None:
tar_files = metadata.get('tar_files')
if tar_files is not None:
# update absolute path of tar files based on metadata_file path
valid_tar_files = []
metadata_basedir = os.path.abspath(os.path.dirname(metadata_file))
updated_fn = 0
for fn in tar_files:
# if a file does not exist, look in metadata file directory
if os.path.exists(fn):
valid_fn = fn
else:
updated_fn += 1
valid_fn = os.path.join(metadata_basedir, os.path.basename(fn))
if not os.path.exists(valid_fn):
raise RuntimeError(
f"File in tarred dataset is missing from absolute and relative paths {fn}"
)
valid_tar_files.append(valid_fn)
tar_files = valid_tar_files
logging.info(f'Updated the path of {updated_fn} tarred files')
logging.info(f'Loading from tarred dataset {tar_files}')
else:
tar_files = tar_files_list[idx]
if metadata.get('tar_files') is not None:
logging.info(
                            f'Tar file paths found in both cfg and metadata; using the ones from cfg by default - {tar_files}'
)
dataset = TarredTranslationDataset(
text_tar_filepaths=tar_files,
metadata_path=metadata_file,
encoder_tokenizer=encoder_tokenizer,
decoder_tokenizer=decoder_tokenizer,
shuffle_n=cfg.get("tar_shuffle_n", 100),
shard_strategy=cfg.get("shard_strategy", "scatter"),
global_rank=global_rank,
world_size=world_size,
reverse_lang_direction=cfg.get("reverse_lang_direction", False),
prepend_id=multilingual_ids[idx] if multilingual else None,
)
datasets.append(dataset)
if len(datasets) > 1:
dataset = ConcatDataset(
datasets=datasets,
sampling_technique=cfg.get('concat_sampling_technique'),
sampling_temperature=cfg.get('concat_sampling_temperature'),
sampling_probabilities=cfg.get('concat_sampling_probabilities'),
global_rank=global_rank,
world_size=world_size,
)
else:
dataset = datasets[0]
else:
src_file_list = cfg.src_file_name
tgt_file_list = cfg.tgt_file_name
if isinstance(src_file_list, str):
src_file_list = [src_file_list]
if isinstance(tgt_file_list, str):
tgt_file_list = [tgt_file_list]
if len(src_file_list) != len(tgt_file_list):
raise ValueError('The same number of filepaths must be passed in for source and target.')
datasets = []
for idx, src_file in enumerate(src_file_list):
dataset = TranslationDataset(
dataset_src=str(Path(src_file).expanduser()),
dataset_tgt=str(Path(tgt_file_list[idx]).expanduser()),
tokens_in_batch=cfg.tokens_in_batch,
clean=cfg.get("clean", False),
max_seq_length=cfg.get("max_seq_length", 512),
min_seq_length=cfg.get("min_seq_length", 1),
max_seq_length_diff=cfg.get("max_seq_length_diff", 512),
max_seq_length_ratio=cfg.get("max_seq_length_ratio", 512),
cache_ids=cfg.get("cache_ids", False),
cache_data_per_node=cfg.get("cache_data_per_node", False),
use_cache=cfg.get("use_cache", False),
reverse_lang_direction=cfg.get("reverse_lang_direction", False),
prepend_id=multilingual_ids[idx] if multilingual else None,
)
dataset.batchify(encoder_tokenizer, decoder_tokenizer)
datasets.append(dataset)
if len(datasets) > 1:
dataset = ConcatDataset(
datasets=datasets,
shuffle=cfg.get('shuffle'),
sampling_technique=cfg.get('concat_sampling_technique'),
sampling_temperature=cfg.get('concat_sampling_temperature'),
sampling_probabilities=cfg.get('concat_sampling_probabilities'),
global_rank=global_rank,
world_size=world_size,
)
else:
dataset = datasets[0]
return dataset
@classmethod
def _setup_dataloader_from_config(cls, cfg, dataset):
if cfg.shuffle:
sampler = pt_data.RandomSampler(dataset)
else:
sampler = pt_data.SequentialSampler(dataset)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=1,
sampler=None
if (
cfg.get("use_tarred_dataset", False)
or cfg.get("dataset_type", "") == "tarred"
or isinstance(dataset, ConcatDataset)
)
else sampler,
num_workers=cfg.get("num_workers", 2),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
def replace_beam_with_sampling(self, topk=500):
self.beam_search = TopKSequenceGenerator(
embedding=self.decoder.embedding,
decoder=self.decoder.decoder,
log_softmax=self.log_softmax,
max_sequence_length=self.beam_search.max_seq_length,
beam_size=topk,
bos=self.decoder_tokenizer.bos_id,
pad=self.decoder_tokenizer.pad_id,
eos=self.decoder_tokenizer.eos_id,
)
@classmethod
def _setup_eval_dataset_from_config(
cls,
cfg: DictConfig,
multilingual: bool,
multilingual_ids,
encoder_tokenizer,
decoder_tokenizer,
add_bos_eos_to_encoder=True,
):
src_file_name = cfg.get('src_file_name')
tgt_file_name = cfg.get('tgt_file_name')
if src_file_name is None or tgt_file_name is None:
raise ValueError(
'Validation dataloader needs both cfg.src_file_name and cfg.tgt_file_name to not be None.'
)
else:
# convert src_file_name and tgt_file_name to list of strings
if isinstance(src_file_name, str):
src_file_list = [src_file_name]
elif isinstance(src_file_name, ListConfig):
src_file_list = src_file_name
else:
raise ValueError("cfg.src_file_name must be string or list of strings")
if isinstance(tgt_file_name, str):
tgt_file_list = [tgt_file_name]
elif isinstance(tgt_file_name, ListConfig):
tgt_file_list = tgt_file_name
else:
raise ValueError("cfg.tgt_file_name must be string or list of strings")
if len(src_file_list) != len(tgt_file_list):
raise ValueError('The same number of filepaths must be passed in for source and target validation.')
datasets = []
prepend_idx = 0
for idx, src_file in enumerate(src_file_list):
if multilingual:
prepend_idx = idx
dataset = TranslationDataset(
dataset_src=str(Path(src_file).expanduser()),
dataset_tgt=str(Path(tgt_file_list[idx]).expanduser()),
tokens_in_batch=cfg.tokens_in_batch,
clean=cfg.get("clean", False),
max_seq_length=cfg.get("max_seq_length", 512),
min_seq_length=cfg.get("min_seq_length", 1),
max_seq_length_diff=cfg.get("max_seq_length_diff", 512),
max_seq_length_ratio=cfg.get("max_seq_length_ratio", 512),
cache_ids=cfg.get("cache_ids", False),
cache_data_per_node=cfg.get("cache_data_per_node", False),
use_cache=cfg.get("use_cache", False),
reverse_lang_direction=cfg.get("reverse_lang_direction", False),
prepend_id=multilingual_ids[prepend_idx] if multilingual else None,
add_bos_eos_to_encoder=add_bos_eos_to_encoder,
)
dataset.batchify(encoder_tokenizer, decoder_tokenizer)
datasets.append(dataset)
return datasets
@classmethod
def _setup_eval_dataloader_from_config(cls, cfg, datasets):
dataloaders = []
for dataset in datasets:
if cfg.shuffle:
sampler = pt_data.RandomSampler(dataset)
else:
sampler = pt_data.SequentialSampler(dataset)
dataloaders.append(
torch.utils.data.DataLoader(
dataset=dataset,
batch_size=1,
sampler=None
if (cfg.get("use_tarred_dataset", False) or isinstance(datasets[0], ConcatDataset))
else sampler,
num_workers=cfg.get("num_workers", 2),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
)
return dataloaders
@classmethod
def setup_pre_and_post_processing_utils(
cls, source_lang, target_lang, encoder_tokenizer_library, decoder_tokenizer_library
):
"""
Creates source and target processor objects for input and output pre/post-processing.
"""
source_processor, target_processor = None, None
if encoder_tokenizer_library == 'byte-level':
source_processor = ByteLevelProcessor()
elif (source_lang == 'en' and target_lang == 'ja') or (source_lang == 'ja' and target_lang == 'en'):
source_processor = EnJaProcessor(source_lang)
elif source_lang == 'ja-mecab':
source_processor = JaMecabProcessor()
elif source_lang == 'zh':
source_processor = ChineseProcessor()
elif source_lang == 'hi':
source_processor = IndicProcessor(source_lang)
elif source_lang == 'ignore':
source_processor = None
elif source_lang is not None and source_lang not in ['ja', 'zh', 'hi']:
source_processor = MosesProcessor(source_lang)
if decoder_tokenizer_library == 'byte-level':
target_processor = ByteLevelProcessor()
elif (source_lang == 'en' and target_lang == 'ja') or (source_lang == 'ja' and target_lang == 'en'):
target_processor = EnJaProcessor(target_lang)
elif target_lang == 'ja-mecab':
target_processor = JaMecabProcessor()
elif target_lang == 'zh':
target_processor = ChineseProcessor()
elif target_lang == 'hi':
target_processor = IndicProcessor(target_lang)
elif target_lang == 'ignore':
target_processor = None
elif target_lang is not None and target_lang not in ['ja', 'zh', 'hi']:
target_processor = MosesProcessor(target_lang)
return source_processor, target_processor
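    # Examples of the selection above: for ("en", "de") both sides get a MosesProcessor; for ("en", "zh") the
    # source gets a MosesProcessor and the target a ChineseProcessor; passing "ignore" for a language disables
    # pre/post-processing on that side.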
@classmethod
def ids_to_postprocessed_text(cls, beam_ids, tokenizer, processor, filter_beam_ids=True):
if filter_beam_ids:
beam_ids = MTEncDecModel.filter_predicted_ids(beam_ids, decoder_tokenizer=tokenizer)
translations = [tokenizer.ids_to_text(tr) for tr in beam_ids.cpu().numpy()]
if processor is not None:
translations = [processor.detokenize(translation.split(' ')) for translation in translations]
return translations
@torch.no_grad()
def batch_translate(
self, src: torch.LongTensor, src_mask: torch.LongTensor, return_beam_scores: bool = False, cache={}
):
"""
Translates a minibatch of inputs from source language to target language.
Args:
src: minibatch of inputs in the src language (batch x seq_len)
src_mask: mask tensor indicating elements to be ignored (batch x seq_len)
Returns:
            translations: a list of strings containing detokenized translations
            inputs: a list of strings containing detokenized inputs
"""
mode = self.training
timer = cache.get("timer", None)
try:
self.eval()
if timer is not None:
timer.start("encoder")
src_hiddens = self.encoder(input_ids=src, encoder_mask=src_mask)
if timer is not None:
timer.stop("encoder")
timer.start("sampler")
best_translations = self.beam_search(
encoder_hidden_states=src_hiddens, encoder_input_mask=src_mask, return_beam_scores=return_beam_scores
)
if timer is not None:
timer.stop("sampler")
if return_beam_scores:
all_translations, scores, best_translations = best_translations
scores = scores.view(-1)
all_translations = MTEncDecModel.ids_to_postprocessed_text(
all_translations, self.decoder_tokenizer, self.target_processor, filter_beam_ids=True
)
best_translations = MTEncDecModel.ids_to_postprocessed_text(
best_translations, self.decoder_tokenizer, self.target_processor, filter_beam_ids=True
)
inputs = MTEncDecModel.ids_to_postprocessed_text(
src, self.encoder_tokenizer, self.source_processor, filter_beam_ids=False
)
finally:
self.train(mode=mode)
if return_beam_scores:
return inputs, all_translations, scores.data.cpu().numpy().tolist(), best_translations
return inputs, best_translations
@classmethod
def prepare_inference_batch(
cls,
text,
prepend_ids=[],
target=False,
source_processor=None,
target_processor=None,
encoder_tokenizer=None,
decoder_tokenizer=None,
device=None,
):
inputs = []
processor = source_processor if not target else target_processor
tokenizer = encoder_tokenizer if not target else decoder_tokenizer
for txt in text:
txt = txt.rstrip("\n")
if processor is not None:
txt = processor.normalize(txt)
txt = processor.tokenize(txt)
ids = tokenizer.text_to_ids(txt)
ids = prepend_ids + [tokenizer.bos_id] + ids + [tokenizer.eos_id]
inputs.append(ids)
max_len = max(len(txt) for txt in inputs)
src_ids_ = np.ones((len(inputs), max_len)) * tokenizer.pad_id
for i, txt in enumerate(inputs):
src_ids_[i][: len(txt)] = txt
src_mask = torch.FloatTensor((src_ids_ != tokenizer.pad_id)).to(device)
src = torch.LongTensor(src_ids_).to(device)
return src, src_mask
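    # For example, two inputs that tokenize to lengths 3 and 5 (after adding BOS/EOS and any prepended
    # language IDs) produce `src` of shape (2, 5) padded with pad_id, and a float `src_mask` of the same
    # shape with 1.0 over real tokens and 0.0 over padding.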
@torch.no_grad()
def translate(
self,
text: List[str],
source_lang: str = None,
target_lang: str = None,
return_beam_scores: bool = False,
log_timing: bool = False,
) -> List[str]:
"""
        Translates a list of sentences from the source language to the target language.
        The input should be regular text; this method performs its own tokenization/de-tokenization.
        Args:
            text: list of strings to translate
            source_lang: if not "ignore", the corresponding MosesTokenizer and MosesPunctNormalizer will be run
            target_lang: if not "ignore", the corresponding MosesDetokenizer will be run
return_beam_scores: if True, returns a list of translations and their corresponding beam scores.
log_timing: if True, prints timing information.
Returns:
list of translated strings
"""
# __TODO__: This will reset both source and target processors even if you want to reset just one.
if source_lang is not None or target_lang is not None:
self.source_processor, self.target_processor = MTEncDecModel.setup_pre_and_post_processing_utils(
source_lang, target_lang, self.encoder_tokenizer_library, self.decoder_tokenizer_library
)
mode = self.training
prepend_ids = []
if self.multilingual:
if source_lang is None or target_lang is None:
raise ValueError("Expect source_lang and target_lang to infer for multilingual model.")
src_symbol = self.encoder_tokenizer.token_to_id('<' + source_lang + '>')
tgt_symbol = self.encoder_tokenizer.token_to_id('<' + target_lang + '>')
if src_symbol in self.multilingual_ids:
prepend_ids = [src_symbol]
elif tgt_symbol in self.multilingual_ids:
prepend_ids = [tgt_symbol]
if log_timing:
timer = timers.NamedTimer()
else:
timer = None
cache = {
"timer": timer,
}
try:
self.eval()
src, src_mask = MTEncDecModel.prepare_inference_batch(
text=text,
prepend_ids=prepend_ids,
target=False,
source_processor=self.source_processor,
target_processor=self.target_processor,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
device=self.device,
)
if return_beam_scores:
_, all_translations, scores, best_translations = self.batch_translate(
src, src_mask, return_beam_scores=True, cache=cache,
)
return_val = all_translations, scores, best_translations
else:
_, best_translations = self.batch_translate(src, src_mask, return_beam_scores=False, cache=cache)
return_val = best_translations
finally:
self.train(mode=mode)
if log_timing:
timing = timer.export()
timing["mean_src_length"] = src_mask.sum().cpu().item() / src_mask.shape[0]
tgt, tgt_mask = self.prepare_inference_batch(
text=best_translations,
prepend_ids=prepend_ids,
target=True,
source_processor=self.source_processor,
target_processor=self.target_processor,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
device=self.device,
)
timing["mean_tgt_length"] = tgt_mask.sum().cpu().item() / tgt_mask.shape[0]
if type(return_val) is tuple:
return_val = return_val + (timing,)
else:
return_val = (return_val, timing)
return return_val
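    # Usage sketch (the checkpoint name is taken from list_available_models() below; requires NGC access and
    # is not executed as part of this module):
    #
    #   model = MTEncDecModel.from_pretrained("nmt_en_de_transformer12x2")
    #   print(model.translate(["Hello, world!"], source_lang="en", target_lang="de"))
    #
    # With return_beam_scores=True the call returns (all_translations, scores, best_translations), and
    # log_timing=True additionally appends a timing dict to the returned value.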
def itn_translate_tn(
self,
text: List[str],
source_lang: str = None,
target_lang: str = None,
return_beam_scores: bool = False,
log_timing: bool = False,
inverse_normalizer=None,
normalizer=None,
) -> List[str]:
"""
        Calls the translate() method with the option of running ITN (inverse text-normalization) on the input and TN (text-normalization) on the output.
Pipeline : ITN -> translate -> TN
NOTE: ITN and TN objects must be initialized with the right languages.
Args:
text: list of strings to translate
source_lang: if not "ignore", corresponding MosesTokenizer and MosesPunctNormalizer will be run
            target_lang: if not "ignore", corresponding MosesDetokenizer will be run
return_beam_scores: if True, returns a list of translations and their corresponding beam scores.
log_timing: if True, prints timing information.
inverse_normalizer: instance of nemo_text_processing.inverse_text_normalization.inverse_normalize.InverseNormalizer
normalizer: instance of nemo_text_processing.text_normalization.normalize.Normalizer
Returns:
list of translated strings
"""
if inverse_normalizer is not None:
text = [inverse_normalizer.normalize(example) for example in text]
translations = self.translate(text, source_lang, target_lang, return_beam_scores, log_timing)
if normalizer is not None:
translations = [normalizer.normalize(example) for example in translations]
return translations
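    # Sketch of the ITN -> translate -> TN pipeline (import paths come from the docstring above; constructor
    # arguments may differ between nemo_text_processing versions):
    #
    #   from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
    #   from nemo_text_processing.text_normalization.normalize import Normalizer
    #
    #   itn = InverseNormalizer(lang="en")
    #   tn = Normalizer(input_case="cased", lang="de")
    #   outputs = model.itn_translate_tn(
    #       ["twenty five dollars"], source_lang="en", target_lang="de",
    #       inverse_normalizer=itn, normalizer=tn,
    #   )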
    # MTEncDecModel is exported in 2 parts: encoder and decoder
def list_export_subnets(self):
return ['encoder', 'decoder']
@classmethod
def list_available_models(cls) -> Optional[Dict[str, str]]:
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_de_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_de_transformer12x2/versions/1.0.0rc1/files/nmt_en_de_transformer12x2.nemo",
description="En->De translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_de_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_de_en_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_de_en_transformer12x2/versions/1.0.0rc1/files/nmt_de_en_transformer12x2.nemo",
description="De->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_de_en_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_es_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_es_transformer12x2/versions/1.0.0rc1/files/nmt_en_es_transformer12x2.nemo",
description="En->Es translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_es_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_es_en_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_es_en_transformer12x2/versions/1.0.0rc1/files/nmt_es_en_transformer12x2.nemo",
description="Es->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_es_en_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_fr_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_fr_transformer12x2/versions/1.0.0rc1/files/nmt_en_fr_transformer12x2.nemo",
description="En->Fr translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_fr_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_fr_en_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_fr_en_transformer12x2/versions/1.0.0rc1/files/nmt_fr_en_transformer12x2.nemo",
description="Fr->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_fr_en_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_ru_transformer6x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_ru_transformer6x6/versions/1.0.0rc1/files/nmt_en_ru_transformer6x6.nemo",
description="En->Ru translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_ru_transformer6x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_ru_en_transformer6x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_ru_en_transformer6x6/versions/1.0.0rc1/files/nmt_ru_en_transformer6x6.nemo",
description="Ru->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_ru_en_transformer6x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_zh_en_transformer6x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_zh_en_transformer6x6/versions/1.0.0rc1/files/nmt_zh_en_transformer6x6.nemo",
description="Zh->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_zh_en_transformer6x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_zh_transformer6x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_zh_transformer6x6/versions/1.0.0rc1/files/nmt_en_zh_transformer6x6.nemo",
description="En->Zh translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_zh_transformer6x6",
)
result.append(model)
# English <-> Hindi models
model = PretrainedModelInfo(
pretrained_model_name="nmt_hi_en_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_hi_en_transformer12x2/versions/v1.0.0/files/nmt_hi_en_transformer12x2.nemo",
description="Hi->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_hi_en_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_hi_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_hi_transformer12x2/versions/v1.0.0/files/nmt_en_hi_transformer12x2.nemo",
description="En->Hi translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_hi_transformer12x2",
)
result.append(model)
# De/Fr/Es -> English models
model = PretrainedModelInfo(
pretrained_model_name="mnmt_deesfr_en_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mnmt_deesfr_en_transformer12x2/versions/1.2.0/files/mnmt_deesfr_en_transformer12x2.nemo",
description="De/Es/Fr->En multilingual many-one translation model. The model has 12 encoder and 2 decoder layers with hidden dim 1,024. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:mnmt_deesfr_en_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="mnmt_deesfr_en_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mnmt_deesfr_en_transformer24x6/versions/1.2.0/files/mnmt_deesfr_en_transformer24x6.nemo",
description="De/Es/Fr->En multilingual many-one translation model. The model has 24 encoder and 6 decoder layers with hidden dim 1,024. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:mnmt_deesfr_en_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="mnmt_deesfr_en_transformer6x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mnmt_deesfr_en_transformer6x6/versions/1.2.0/files/mnmt_deesfr_en_transformer6x6.nemo",
description="De/Es/Fr->En multilingual many-one translation model. The model has 6 encoder and 6 decoder layers with hidden dim 1,024. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:mnmt_deesfr_en_transformer6x6",
)
result.append(model)
# English -> De/Fr/Es models
model = PretrainedModelInfo(
pretrained_model_name="mnmt_en_deesfr_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mnmt_en_deesfr_transformer12x2/versions/1.2.0/files/mnmt_en_deesfr_transformer12x2.nemo",
description="En->De/Es/Fr multilingual one-many translation model. The model has 12 encoder and 2 decoder layers with hidden dim 1,024. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:mnmt_en_deesfr_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="mnmt_en_deesfr_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mnmt_en_deesfr_transformer24x6/versions/1.2.0/files/mnmt_en_deesfr_transformer24x6.nemo",
description="En->De/Es/Fr multilingual one-many translation model. The model has 24 encoder and 6 decoder layers with hidden dim 1,024. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:mnmt_en_deesfr_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="mnmt_en_deesfr_transformer6x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mnmt_en_deesfr_transformer6x6/versions/1.2.0/files/mnmt_en_deesfr_transformer6x6.nemo",
description="En->De/Es/Fr multilingual one-many translation model. The model has 6 encoder and 6 decoder layers with hidden dim 1,024. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:mnmt_en_deesfr_transformer6x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="mnmt_en_deesfr_transformerbase",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mnmt_en_deesfr_transformerbase/versions/1.2.0/files/mnmt_en_deesfr_transformerbase.nemo",
description="En->De/Es/Fr multilingual one-many translation model. The model has 6 encoder and 6 decoder layers with hidden dim 512. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:mnmt_en_deesfr_transformerbase",
)
result.append(model)
# 24x6 models
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_de_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_de_transformer24x6/versions/1.5/files/en_de_24x6.nemo",
description="En->De translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_de_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_de_en_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_de_en_transformer24x6/versions/1.5/files/de_en_24x6.nemo",
description="De->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_de_en_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_es_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_es_transformer24x6/versions/1.5/files/en_es_24x6.nemo",
description="En->Es translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_es_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_es_en_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_es_en_transformer24x6/versions/1.5/files/es_en_24x6.nemo",
description="Es->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_es_en_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_fr_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_fr_transformer24x6/versions/1.5/files/en_fr_24x6.nemo",
description="En->Fr translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_fr_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_fr_en_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_fr_en_transformer24x6/versions/1.5/files/fr_en_24x6.nemo",
description="Fr->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_fr_en_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_ru_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_ru_transformer24x6/versions/1.5/files/en_ru_24x6.nemo",
description="En->Ru translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_ru_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_ru_en_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_ru_en_transformer24x6/versions/1.5/files/ru_en_24x6.nemo",
description="Ru->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_ru_en_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_zh_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_zh_transformer24x6/versions/1.5/files/en_zh_24x6.nemo",
description="En->Zh translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_zh_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_zh_en_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_zh_en_transformer24x6/versions/1.5/files/zh_en_24x6.nemo",
description="Zh->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_zh_en_transformer24x6",
)
result.append(model)
return result
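# Usage sketch for the pretrained checkpoints listed above (requires network access to NGC; the
# model name is taken from the list, everything else follows the standard NeMo ModelPT API):
#   model = MTEncDecModel.from_pretrained("nmt_en_de_transformer24x6")
#   print(model.translate(["NeMo is a toolkit for conversational AI."],
#                         source_lang="en", target_lang="de"))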
| NeMo-main | nemo/collections/nlp/models/machine_translation/mt_enc_dec_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
import numpy as np
import torch
from pytorch_lightning import Trainer
from nemo.collections.common.losses import NLLLoss
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_config import MTBottleneckModelConfig
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_model import MTEncDecModel
from nemo.core.classes.common import typecheck
from nemo.utils import timers
__all__ = ['MTBottleneckModel']
def build_linear_or_identity(input_dim, output_dim):
"""
Return a fully-connected (Linear) layer when input_dim != output_dim,
otherwise return an Identity module.
"""
if input_dim != output_dim:
model = torch.nn.Linear(input_dim, output_dim)
else:
model = torch.nn.Identity()
return model
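# Usage sketch:
#   build_linear_or_identity(512, 256)  # -> torch.nn.Linear(512, 256)
#   build_linear_or_identity(512, 512)  # -> torch.nn.Identity()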
class MTBottleneckModel(MTEncDecModel):
"""
Machine translation model which supports bottleneck architecture,
NLL, VAE, and MIM loss.
Supported losses:
1) nll - Conditional cross entropy (the usual NMT loss)
2) mim - MIM learning framework. A latent variable model with good
reconstruction and compressed latent representation.
https://arxiv.org/pdf/2003.02645.pdf
3) vae - VAE learning framework. A latent variable model which learns
good probability estimation over observations and
a regularized latent representation.
https://arxiv.org/pdf/1312.6114.pdf
"""
def __init__(self, cfg: MTBottleneckModelConfig, trainer: Trainer = None):
super().__init__(cfg=cfg, trainer=trainer)
self.model_type: str = cfg.get("model_type", "nll")
self.min_logv: float = cfg.get("min_logv", -6)
self.latent_size: int = cfg.get("latent_size", -1)
self.non_recon_warmup_batches: int = cfg.get("non_recon_warmup_batches", 200000)
self.recon_per_token: bool = cfg.get("recon_per_token", True)
self.log_timing: bool = cfg.get("log_timing", True)
# if True, translation uses the mean of latent for VAE and MIM
self.deterministic_translate = True
# latent_size -1 will take value of encoder.hidden_size
if self.latent_size < 0:
self.latent_size = self.encoder.hidden_size
if not self.recon_per_token:
# disable reduction for train and eval loss
self.eval_loss_fn = NLLLoss(ignore_index=self.decoder_tokenizer.pad_id, reduction='none')
self.loss_fn._per_token_reduction = False
if self.model_type not in ["nll", "mim", "vae"]:
raise ValueError(f"Unknown model_type = {self.model_type}")
# project bridge dimension back to decoder hidden dimensions
self.latent2hidden = build_linear_or_identity(self.latent_size, self.decoder.hidden_size)
if self.model_type == "nll":
# project dimension of encoder hidden to latent dimension
self.hidden2latent_mean = build_linear_or_identity(self.encoder.hidden_size, self.latent_size)
else:
# MIM or VAE requires two independent projections for mean/variance
# project dimension of encoder hidden to latent dimension
self.hidden2latent_mean = torch.nn.Linear(self.encoder.hidden_size, self.latent_size)
# for probabilistic latent variable models we also need variance
self.hidden2latent_logv = torch.nn.Linear(self.encoder.hidden_size, self.latent_size)
def _validate_encoder_decoder_hidden_size(self):
"""
Validate encoder and decoder hidden sizes, and enforce same size.
This model supports encoders and decoders with different hidden sizes, so no check is performed.
"""
pass
def eval_epoch_end(self, outputs, mode, global_rank):
# call parent for logging
super().eval_epoch_end(outputs, mode, global_rank)
# if the user specifies a single validation dataloader, PTL passes a list of dictionaries instead of a list of lists of dictionaries
if isinstance(outputs[0], dict):
outputs = [outputs]
for dataloader_idx, output in enumerate(outputs):
# add logs if available in outputs
log_dict = {}
for x in output:
if "log" in x:
for k, v in x["log"].items():
log_dict[k] = log_dict.get(k, []) + [v]
for k, v in log_dict.items():
if dataloader_idx == 0:
self.log(f"{mode}_{k}", np.mean(v), sync_dist=True)
else:
self.log(f"{mode}_{k}_dl_index_{dataloader_idx}", np.mean(v), sync_dist=True)
@classmethod
def list_available_models(cls) -> Optional[Dict[str, str]]:
"""
This method returns a list of pre-trained models that can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
return result
def encode_latent(self, hidden):
"""
Sample latent code z with reparameterization from bridge for
probabilistic latent variable models (e.g., mim, vae),
or return value for non-probabilistic models (nll)
"""
# all models have mean
z_mean = self.hidden2latent_mean(hidden)
if self.model_type == "nll":
# reconstruction only
z = z_mean
z_logv = torch.zeros_like(z)
else:
# mim or vae
# sample posterior q(z|x) for MIM and VAE
z_logv = self.hidden2latent_logv(hidden)
# avoid numerical instability for MIM
z_logv = z_logv.clamp_min(self.min_logv)
# sample z with reparameterization
e = torch.randn_like(z_mean)
z = e * torch.exp(0.5 * z_logv) + z_mean
return z, z_mean, z_logv
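# Standalone sketch of the reparameterization trick used in encode_latent (shapes are illustrative):
#   z_mean = torch.zeros(2, 8, 512)            # posterior mean
#   z_logv = torch.full((2, 8, 512), -2.0)     # posterior log-variance
#   e = torch.randn_like(z_mean)
#   z = e * torch.exp(0.5 * z_logv) + z_mean   # a sample from N(z_mean, exp(z_logv))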
def loss(
self, z, z_mean, z_logv, z_mask, tgt_log_probs, tgt, tgt_mask, tgt_labels, train=False, return_info=False
):
"""
Compute the loss from latent (z) and target (x).
train - if True, enables loss annealing and label smoothing
"""
recon_loss_fn = self.loss_fn if train else self.eval_loss_fn
info_dict = {}
if self.recon_per_token:
log_p_x_given_z_per_token = -recon_loss_fn(log_probs=tgt_log_probs, labels=tgt_labels)
log_p_x_given_z = log_p_x_given_z_per_token
log_p_x_given_z_per_token = log_p_x_given_z_per_token.detach()
else:
# averaging of log_p_x_given_z per sample
output_mask = (tgt_labels != self.decoder_tokenizer.pad_id).type_as(tgt_log_probs)
log_p_x_given_z_per_token = (
-recon_loss_fn(log_probs=tgt_log_probs, labels=tgt_labels,).view(tgt_log_probs.shape[:2]) * output_mask
)
# probability per sample
log_p_x_given_z = log_p_x_given_z_per_token.sum(-1).mean()
tokens = output_mask.sum()
log_p_x_given_z_per_token = log_p_x_given_z_per_token.sum().detach() / tokens
info_dict["log_p_x_given_z"] = log_p_x_given_z.detach().cpu()
info_dict["log_p_x_given_z_per_token"] = log_p_x_given_z_per_token.detach().cpu()
# loss warmup during training only
if train:
trainer = self.trainer
# if we do not have a trainer ignore annealing
if trainer is None:
# ignore warmup and auxiliary loss
warmup_coef = 1.0
else:
global_step = self.trainer.global_step
warmup_coef = min(global_step / self.non_recon_warmup_batches, 1)
else:
# ignore warmup and auxiliary loss
warmup_coef = 1.0
info_dict["warmup_coef_recon"] = warmup_coef
if self.model_type in ["mim", "vae"]:
# tokens = tgt_mask.sum()
q_z_given_x = torch.distributions.Normal(loc=z_mean, scale=torch.exp(0.5 * z_logv),)
# average latent distribution to match averaging of observations
if self.recon_per_token:
# average latent per dimension - to heuristically match per-token reconstruction
log_q_z_given_x = q_z_given_x.log_prob(z).mean(-1).mean(-1).mean()
else:
log_q_z_given_x = q_z_given_x.log_prob(z).sum(-1).sum(-1).mean()
# build prior distribution
p_z = torch.distributions.Normal(loc=torch.zeros_like(z), scale=torch.ones_like(z),)
if self.recon_per_token:
# average latent distribution similar to averaging of observations
log_p_z = p_z.log_prob(z).mean(-1).mean(-1).mean()
else:
log_p_z = p_z.log_prob(z).sum(-1).sum(-1).mean()
if self.model_type == "mim":
loss_terms = 0.5 * (log_q_z_given_x + log_p_z)
elif self.model_type == "vae":
# KL divergence -Dkl( q(z|x) || p(z) )
loss_terms = log_p_z - log_q_z_given_x
# show loss value for reconstruction but train with MIM/VAE loss
loss = -(log_p_x_given_z + warmup_coef * loss_terms)
info_dict["log_q_z_given_x"] = log_q_z_given_x.detach().cpu()
info_dict["log_var_q_z_given_x"] = z_logv.detach().mean().cpu()
info_dict["log_p_z"] = log_p_z.detach().cpu()
info_dict["kl_div_q_p"] = (log_q_z_given_x - log_p_z).detach().cpu()
elif self.model_type == "nll":
loss = -log_p_x_given_z
if return_info:
return loss, info_dict
else:
return loss
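# Worked example of the annealing above: with the default non_recon_warmup_batches=200000, the
# auxiliary MIM/VAE term is scaled by warmup_coef = min(global_step / 200000, 1), i.e. 0.25 at
# step 50000 and 1.0 from step 200000 onwards; the reconstruction term itself is never scaled.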
@typecheck()
def forward(self, src, src_mask, tgt, tgt_mask, timer=None):
"""
Encodes the source, builds the latent representation, and decodes the target;
returns (z, z_mean, z_logv, z_mask, tgt_log_probs) for use in the loss.
"""
if self.validate_input_ids:
# test src/tgt for id range (i.e., helps catch a wrong tokenizer)
self.test_encoder_ids(src, raise_error=True)
self.test_decoder_ids(tgt, raise_error=True)
if timer is not None:
timer.start("encoder")
enc_hiddens, enc_mask = self.encoder(input_ids=src, encoder_mask=src_mask, return_mask=True,)
# build posterior distribution q(z|x)
z, z_mean, z_logv = self.encode_latent(hidden=enc_hiddens)
z_mask = enc_mask
if timer is not None:
timer.stop("encoder")
if timer is not None:
timer.start("decoder")
# decoding cross attention context
context_hiddens = self.latent2hidden(z)
tgt_hiddens = self.decoder(
input_ids=tgt, decoder_mask=tgt_mask, encoder_embeddings=context_hiddens, encoder_mask=enc_mask,
)
# build decoding distribution
tgt_log_probs = self.log_softmax(hidden_states=tgt_hiddens)
if timer is not None:
timer.stop("decoder")
return z, z_mean, z_logv, z_mask, tgt_log_probs
@torch.no_grad()
def batch_translate(
self, src: torch.LongTensor, src_mask: torch.LongTensor, return_beam_scores: bool = False, cache={}
):
"""
Translates a minibatch of inputs from source language to target language.
Args:
src: minibatch of inputs in the src language (batch x seq_len)
src_mask: mask tensor indicating elements to be ignored (batch x seq_len)
Returns:
translations: a list of strings containing detokenized translations
inputs: a list of strings containing detokenized inputs
"""
mode = self.training
timer = cache.get("timer", None)
try:
self.eval()
# build posterior distribution q(z|x)
if ("z" not in cache) or ("z_mean" not in cache) or ("z_mask" not in cache):
if timer is not None:
timer.start("encoder")
enc_hiddens, enc_mask = self.encoder(input_ids=src, encoder_mask=src_mask, return_mask=True)
z, z_mean, _ = self.encode_latent(hidden=enc_hiddens)
if timer is not None:
timer.stop("encoder")
else:
enc_mask = cache["z_mask"]
z = cache["z"]
z_mean = cache["z_mean"]
if getattr(self, "deterministic_translate", True):
z = z_mean
if timer is not None:
timer.start("sampler")
# decoding cross attention context
context_hiddens = self.latent2hidden(z)
best_translations = self.beam_search(
encoder_hidden_states=context_hiddens,
encoder_input_mask=enc_mask,
return_beam_scores=return_beam_scores,
)
if timer is not None:
timer.stop("sampler")
if return_beam_scores:
all_translations, scores, best_translations = best_translations
scores = scores.view(-1)
all_translations = self.ids_to_postprocessed_text(
all_translations, self.decoder_tokenizer, self.target_processor, filter_beam_ids=True
)
best_translations = self.ids_to_postprocessed_text(
best_translations, self.decoder_tokenizer, self.target_processor, filter_beam_ids=True
)
inputs = self.ids_to_postprocessed_text(
src, self.encoder_tokenizer, self.source_processor, filter_beam_ids=False
)
finally:
self.train(mode=mode)
if return_beam_scores:
return inputs, all_translations, scores.data.cpu().numpy().tolist(), best_translations
return inputs, best_translations
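# Usage sketch (tensors come from the data loader; the cache dict is optional and lets callers
# such as eval_step reuse a precomputed latent instead of re-running the encoder):
#   inputs, translations = model.batch_translate(src=src_ids, src_mask=src_mask)
#   inputs, translations = model.batch_translate(
#       src=src_ids, src_mask=src_mask, cache=dict(z=z, z_mean=z_mean, z_mask=z_mask),
#   )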
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
# forward pass
for i in range(len(batch)):
if batch[i].ndim == 3:
# Dataset returns already batched data and the first dimension of size 1 added by DataLoader
# is excess.
batch[i] = batch[i].squeeze(dim=0)
src_ids, src_mask, tgt_ids, tgt_mask, labels = batch
z, z_mean, z_logv, z_mask, tgt_log_probs = self(src_ids, src_mask, tgt_ids, tgt_mask)
train_loss, info_dict = self.loss(
z=z,
z_mean=z_mean,
z_logv=z_logv,
z_mask=z_mask,
tgt_log_probs=tgt_log_probs,
tgt=tgt_ids,
tgt_mask=tgt_mask,
tgt_labels=labels,
train=True,
return_info=True,
)
tensorboard_logs = {
'train_loss': train_loss,
'lr': self._optimizer.param_groups[0]['lr'],
}
tensorboard_logs.update(info_dict)
return {'loss': train_loss, 'log': tensorboard_logs}
def eval_step(self, batch, batch_idx, mode, dataloader_idx=0):
if self.log_timing:
timer = timers.NamedTimer()
else:
timer = None
for i in range(len(batch)):
if batch[i].ndim == 3:
# Dataset returns already batched data and the first dimension of size 1 added by DataLoader
# is excess.
batch[i] = batch[i].squeeze(dim=0)
if self.multilingual:
self.source_processor = self.source_processor_list[dataloader_idx]
self.target_processor = self.target_processor_list[dataloader_idx]
src_ids, src_mask, tgt_ids, tgt_mask, labels = batch
z, z_mean, z_logv, z_mask, tgt_log_probs = self(src_ids, src_mask, tgt_ids, tgt_mask, timer=timer)
eval_loss, info_dict = self.loss(
z=z,
z_mean=z_mean,
z_logv=z_logv,
z_mask=z_mask,
tgt_log_probs=tgt_log_probs,
tgt=tgt_ids,
tgt_mask=tgt_mask,
tgt_labels=labels,
train=False,
return_info=True,
)
# pass cache to sampler in order to reuse encoder's output
cache = dict(z=z, z_mean=z_mean, z_mask=z_mask, timer=timer,)
inputs, translations = self.batch_translate(src=src_ids, src_mask=src_mask, cache=cache)
num_measurements = labels.shape[0] * labels.shape[1]
if dataloader_idx == 0:
getattr(self, f'{mode}_loss')(
loss=eval_loss, num_measurements=num_measurements,
)
else:
getattr(self, f'{mode}_loss_{dataloader_idx}')(
loss=eval_loss, num_measurements=num_measurements,
)
np_tgt = tgt_ids.detach().cpu().numpy()
ground_truths = [self.decoder_tokenizer.ids_to_text(tgt) for tgt in np_tgt]
ground_truths = [self.target_processor.detokenize(tgt.split(' ')) for tgt in ground_truths]
num_non_pad_tokens = np.not_equal(np_tgt, self.decoder_tokenizer.pad_id).sum().item()
# collect logs
log_dict = {k: v.detach().cpu().numpy() if torch.is_tensor(v) else v for k, v in info_dict.items()}
# add timing if required
if timer is not None:
for k, v in timer.export().items():
log_dict[f"{k}_timing"] = v
return {
'inputs': inputs,
'translations': translations,
'ground_truths': ground_truths,
'num_non_pad_tokens': num_non_pad_tokens,
'log': log_dict,
}
| NeMo-main | nemo/collections/nlp/models/machine_translation/mt_enc_dec_bottleneck_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Optional, Tuple
from omegaconf.omegaconf import MISSING
from nemo.collections.nlp.data.machine_translation.machine_translation_dataset import TranslationDataConfig
from nemo.collections.nlp.models.enc_dec_nlp_model import EncDecNLPModelConfig
from nemo.collections.nlp.modules.common.token_classifier import TokenClassifierConfig
from nemo.collections.nlp.modules.common.tokenizer_utils import TokenizerConfig
from nemo.collections.nlp.modules.common.transformer.transformer import (
NeMoTransformerConfig,
NeMoTransformerEncoderConfig,
)
from nemo.collections.nlp.modules.common.transformer.transformer_bottleneck import (
NeMoTransformerBottleneckDecoderConfig,
NeMoTransformerBottleneckEncoderConfig,
)
from nemo.core.config.modelPT import OptimConfig, SchedConfig
@dataclass
class MTSchedConfig(SchedConfig):
name: str = 'InverseSquareRootAnnealing'
warmup_ratio: Optional[float] = None
last_epoch: int = -1
# TODO: Refactor this dataclass to support more optimizers (it pins the optimizer to Adam-like optimizers).
@dataclass
class MTOptimConfig(OptimConfig):
name: str = 'adam'
lr: float = 1e-3
betas: Tuple[float, float] = (0.9, 0.98)
weight_decay: float = 0.0
sched: Optional[MTSchedConfig] = MTSchedConfig()
@dataclass
class MTEncDecModelConfig(EncDecNLPModelConfig):
# machine translation configurations
num_val_examples: int = 3
num_test_examples: int = 3
max_generation_delta: int = 10
label_smoothing: Optional[float] = 0.0
beam_size: int = 4
len_pen: float = 0.0
src_language: Any = 'en' # Any = str or List[str]
tgt_language: Any = 'en' # Any = str or List[str]
find_unused_parameters: Optional[bool] = True
shared_tokenizer: Optional[bool] = True
multilingual: Optional[bool] = False
preproc_out_dir: Optional[str] = None
validate_input_ids: Optional[bool] = True
shared_embeddings: bool = False
# network architecture configuration
encoder_tokenizer: Any = MISSING
encoder: Any = MISSING
decoder_tokenizer: Any = MISSING
decoder: Any = MISSING
head: TokenClassifierConfig = TokenClassifierConfig(log_softmax=True)
# dataset configurations
train_ds: Optional[TranslationDataConfig] = TranslationDataConfig(
src_file_name=MISSING,
tgt_file_name=MISSING,
tokens_in_batch=512,
clean=True,
shuffle=True,
cache_ids=False,
use_cache=False,
)
validation_ds: Optional[TranslationDataConfig] = TranslationDataConfig(
src_file_name=MISSING,
tgt_file_name=MISSING,
tokens_in_batch=512,
clean=False,
shuffle=False,
cache_ids=False,
use_cache=False,
)
test_ds: Optional[TranslationDataConfig] = TranslationDataConfig(
src_file_name=MISSING,
tgt_file_name=MISSING,
tokens_in_batch=512,
clean=False,
shuffle=False,
cache_ids=False,
use_cache=False,
)
optim: Optional[OptimConfig] = MTOptimConfig()
@dataclass
class AAYNBaseConfig(MTEncDecModelConfig):
# Attention is All You Need Base Configuration
encoder_tokenizer: TokenizerConfig = TokenizerConfig(library='yttm')
decoder_tokenizer: TokenizerConfig = TokenizerConfig(library='yttm')
encoder: NeMoTransformerEncoderConfig = NeMoTransformerEncoderConfig(
library='nemo',
model_name=None,
pretrained=False,
hidden_size=512,
inner_size=2048,
num_layers=6,
num_attention_heads=8,
ffn_dropout=0.1,
attn_score_dropout=0.1,
attn_layer_dropout=0.1,
)
decoder: NeMoTransformerConfig = NeMoTransformerConfig(
library='nemo',
model_name=None,
pretrained=False,
hidden_size=512,
inner_size=2048,
num_layers=6,
num_attention_heads=8,
ffn_dropout=0.1,
attn_score_dropout=0.1,
attn_layer_dropout=0.1,
)
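# Configuration sketch (field values are illustrative): the dataclasses above are OmegaConf
# structured configs, so they can be instantiated and overridden like any other config.
#   from omegaconf import OmegaConf
#   cfg = OmegaConf.structured(AAYNBaseConfig())
#   cfg.beam_size = 6
#   cfg.encoder.num_layers = 12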
@dataclass
class MTBottleneckModelConfig(AAYNBaseConfig):
model_type: str = 'nll'
min_logv: float = -6
latent_size: int = -1 # -1 will take value of encoder hidden
non_recon_warmup_batches: int = 200000
recon_per_token: bool = True
log_timing: bool = True
encoder: NeMoTransformerBottleneckEncoderConfig = NeMoTransformerBottleneckEncoderConfig(
library='nemo',
model_name=None,
pretrained=False,
hidden_size=512,
inner_size=2048,
num_layers=6,
num_attention_heads=8,
ffn_dropout=0.1,
attn_score_dropout=0.1,
attn_layer_dropout=0.1,
arch='seq2seq',
hidden_steps=32,
hidden_blocks=1,
hidden_init_method='params',
)
decoder: NeMoTransformerBottleneckDecoderConfig = NeMoTransformerBottleneckDecoderConfig(
library='nemo',
model_name=None,
pretrained=False,
inner_size=2048,
num_layers=6,
num_attention_heads=8,
ffn_dropout=0.1,
attn_score_dropout=0.1,
attn_layer_dropout=0.1,
arch='seq2seq',
)
| NeMo-main | nemo/collections/nlp/models/machine_translation/mt_enc_dec_config.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Dict, Optional
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from nemo.collections.nlp.data import BertInformationRetrievalDataset
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common.lm_utils import get_lm_model
from nemo.core.classes.common import typecheck
__all__ = ['BaseIRModel']
class BaseIRModel(NLPModel):
"""
Base class for information retrieval models.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self.setup_tokenizer(cfg.tokenizer)
super().__init__(cfg=cfg, trainer=trainer)
@typecheck()
def forward(self, *args):
pass
def compute_scores_and_loss(self, inputs):
pass
@staticmethod
def get_lm_model_with_padded_embedding(cfg: DictConfig):
"""
Function which ensures that the vocabulary size is divisible by 8
for faster mixed precision training.
"""
model = get_lm_model(
config_file=cfg.language_model.config_file,
config_dict=OmegaConf.to_container(cfg.language_model.config) if cfg.language_model.config else None,
vocab_file=cfg.tokenizer.vocab_file,
trainer=None,  # no trainer is available inside this static method
cfg=cfg,
)
vocab_size, hidden_size = model.config.vocab_size, model.config.hidden_size
tokens_to_add = 8 * math.ceil(vocab_size / 8) - vocab_size
zeros = torch.zeros((tokens_to_add, hidden_size))
model.embeddings.word_embeddings.weight.data = torch.cat((model.embeddings.word_embeddings.weight.data, zeros))
return model
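# Padding arithmetic example: for a standard BERT vocabulary of 30522 tokens,
# tokens_to_add = 8 * ceil(30522 / 8) - 30522 = 6, so the padded embedding matrix
# has 30528 rows, which is divisible by 8 as required for efficient mixed precision.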
@staticmethod
def calculate_mean_reciprocal_rank(query2passages, query2rel):
"""
Helper function which calculates mean reciprocal rank.
Args:
query2passages: dict which contains passage ids and corresponding
scores for each query
query2rel: dict which contains ids of relevant passages for each query
"""
reciprocal_ranks = []
for query in query2passages:
indices = np.argsort(query2passages[query]["scores"])[::-1]
sorted_psgs = query2passages[query]["psg_ids"][indices]
reciprocal_ranks.append(0)
for i, psg_id in enumerate(sorted_psgs):
if psg_id in query2rel[query]:
reciprocal_ranks[-1] = 1 / (i + 1)
break
return np.mean(reciprocal_ranks)
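# Worked example (hypothetical ids): the relevant passage of query 0 is ranked third by score,
# so its reciprocal rank is 1/3 and, with a single query, the MRR is also 1/3.
#   query2passages = {0: {"psg_ids": np.array([7, 8, 9]), "scores": np.array([0.1, 0.5, 0.9])}}
#   query2rel = {0: [7]}
#   BaseIRModel.calculate_mean_reciprocal_rank(query2passages, query2rel)  # -> 0.333...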
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
scores, train_loss = self.compute_scores_and_loss(batch[:-2])
tensorboard_logs = {"train_loss": train_loss, "lr": self._optimizer.param_groups[0]["lr"]}
return {"loss": train_loss, "log": tensorboard_logs}
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
scores, val_loss = self.compute_scores_and_loss(batch[:-2])
query_ids, passage_ids = batch[-2:]
data_for_val = {
"val_loss": val_loss,
"scores": scores,
"query_ids": query_ids,
"passage_ids": passage_ids,
}
self.validation_step_outputs.append(data_for_val)
return data_for_val
def on_validation_epoch_end(self):
"""
Called at the end of validation to aggregate the outputs collected in self.validation_step_outputs.
"""
query_ids = torch.cat([x["query_ids"] for x in self.validation_step_outputs])
passage_ids = torch.cat([x["passage_ids"] for x in self.validation_step_outputs])
scores = torch.cat([x["scores"] for x in self.validation_step_outputs])
all_query_ids, all_passage_ids, all_scores = [], [], []
if torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
for ind in range(world_size):
all_query_ids.append(torch.empty_like(query_ids))
all_passage_ids.append(torch.empty_like(passage_ids))
all_scores.append(torch.empty_like(scores))
torch.distributed.all_gather(all_query_ids, query_ids)
torch.distributed.all_gather(all_passage_ids, passage_ids)
torch.distributed.all_gather(all_scores, scores)
else:
all_query_ids.append(query_ids)
all_passage_ids.append(passage_ids)
all_scores.append(scores)
val_mrr = 0
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
query2passages, query2rels = {}, {}
processed_queries = set()
for i in range(len(all_query_ids)):
query_ids = all_query_ids[i].detach().cpu().numpy()
passage_ids = all_passage_ids[i].detach().cpu().numpy()
scores = all_scores[i].detach().cpu().numpy()
for j, query_id in enumerate(query_ids):
if query_id not in processed_queries:
processed_queries.add(query_id)
query2passages[query_id] = {
"psg_ids": passage_ids[j],
"scores": scores[j],
}
query2rels[query_id] = [passage_ids[j][0]]
else:
query2passages[query_id]["psg_ids"] = np.concatenate(
(query2passages[query_id]["psg_ids"], passage_ids[j][1:])
)
query2passages[query_id]["scores"] = np.concatenate(
(query2passages[query_id]["scores"], scores[j][1:])
)
val_mrr = self.calculate_mean_reciprocal_rank(query2passages, query2rels)
val_loss = torch.stack([x["val_loss"] for x in self.validation_step_outputs]).mean()
self.validation_step_outputs.clear() # free memory
tensorboard_logs = {
"val_mrr": val_mrr,
"val_loss": val_loss,
}
return {"log": tensorboard_logs}
def setup_training_data(self, train_data_config: Optional[DictConfig]):
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config)
def _setup_dataloader_from_config(self, cfg: DictConfig):
dataset = BertInformationRetrievalDataset(
tokenizer=self.tokenizer,
passages=cfg.passages,
queries=cfg.queries,
query_to_passages=cfg.query_to_passages,
num_negatives=cfg.num_negatives,
psg_cache_format=cfg.get("psg_cache_format", "pkl"),
max_query_length=cfg.get("max_query_length", 31),
max_passage_length=cfg.get("max_passage_length", 190),
)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.get("num_workers", 2),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
@classmethod
def list_available_models(cls) -> Optional[Dict[str, str]]:
pass
| NeMo-main | nemo/collections/nlp/models/information_retrieval/base_ir_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.information_retrieval.bert_dpr_model import BertDPRModel
from nemo.collections.nlp.models.information_retrieval.bert_joint_ir_model import BertJointIRModel
| NeMo-main | nemo/collections/nlp/models/information_retrieval/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.common.losses import SmoothedCrossEntropyLoss
from nemo.collections.nlp.data import BertInformationRetrievalDataset
from nemo.collections.nlp.models.information_retrieval.base_ir_model import BaseIRModel
from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer
from nemo.core.classes.common import typecheck
from nemo.core.neural_types import ChannelType, LogitsType, MaskType, NeuralType
__all__ = ["BertDPRModel"]
class BertDPRModel(BaseIRModel):
"""
Information retrieval model which encodes query and passage separately
with two different BERT encoders and computes their similarity score
as a dot-product between corresponding [CLS] token representations.
"""
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"q_input_ids": NeuralType(("B", "T"), ChannelType()),
"q_attention_mask": NeuralType(("B", "T"), MaskType()),
"q_token_type_ids": NeuralType(("B", "T"), ChannelType()),
"p_input_ids": NeuralType(("B", "T"), ChannelType()),
"p_attention_mask": NeuralType(("B", "T"), MaskType()),
"p_token_type_ids": NeuralType(("B", "T"), ChannelType()),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"logits": NeuralType(("B", "D"), LogitsType())}
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
model_name = cfg.language_model.pretrained_model_name
self.tokenizer = get_tokenizer(tokenizer_name=model_name)
super().__init__(cfg=cfg, trainer=trainer)
self.q_encoder = self.get_lm_model_with_padded_embedding(cfg)
self.p_encoder = self.get_lm_model_with_padded_embedding(cfg)
self.loss = SmoothedCrossEntropyLoss(pad_id=self.tokenizer.pad_id)
@typecheck()
def forward(
self, q_input_ids, q_token_type_ids, q_attention_mask, p_input_ids, p_token_type_ids, p_attention_mask,
):
q_vectors = self.q_encoder(
input_ids=q_input_ids, token_type_ids=q_token_type_ids, attention_mask=q_attention_mask,
)
q_vectors = q_vectors[:, 0]
batch_size, hidden_size = q_vectors.size()
p_vectors = self.p_encoder(
input_ids=p_input_ids, token_type_ids=p_token_type_ids, attention_mask=p_attention_mask,
)
num_passages = p_vectors.shape[0] // batch_size
p_vectors = p_vectors[:, 0].view(-1, num_passages, hidden_size)
p_positives, p_negatives = p_vectors[:, 0], p_vectors[:, 1:]
scores = torch.cat(
(torch.matmul(q_vectors, p_positives.T), torch.einsum("ij,ipj->ip", q_vectors, p_negatives),), dim=1,
)
return scores
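# Shape sketch for the scores above, with batch size B, hidden size H and P passages per query:
#   q_vectors: (B, H); p_positives: (B, H); p_negatives: (B, P - 1, H)
#   scores = concat[ q_vectors @ p_positives.T -> (B, B),  own negatives -> (B, P - 1) ] -> (B, B + P - 1)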
def compute_scores_and_loss(self, inputs):
(q_input_ids, q_input_mask, q_input_type_ids, p_input_ids, p_input_mask, p_input_type_ids,) = inputs
batch_size, num_passages, p_seq_length = p_input_ids.size()
q_seq_length = q_input_ids.size()[-1]
scores = self(
q_input_ids=q_input_ids.view(-1, q_seq_length),
q_token_type_ids=q_input_type_ids.view(-1, q_seq_length),
q_attention_mask=q_input_mask.view(-1, q_seq_length),
p_input_ids=p_input_ids.view(-1, p_seq_length),
p_token_type_ids=p_input_type_ids.view(-1, p_seq_length),
p_attention_mask=p_input_mask.view(-1, p_seq_length),
).view(batch_size, 1, batch_size + num_passages - 1)
normalized_scores = torch.log_softmax(scores, dim=-1)
labels = torch.arange(batch_size)[:, None].long().to(normalized_scores.device)
loss = self.loss(log_probs=normalized_scores, labels=labels, output_mask=torch.ones_like(labels),)
scores = scores[:, 0]
scores = torch.cat((torch.diag(scores)[:, None], scores[:, batch_size:]), dim=1,)
return scores, loss
def _setup_dataloader_from_config(self, cfg: DictConfig):
dataset = BertInformationRetrievalDataset(
tokenizer=self.tokenizer,
passages=cfg.passages,
queries=cfg.queries,
query_to_passages=cfg.query_to_passages,
num_negatives=cfg.num_negatives,
psg_cache_format=cfg.get("psg_cache_format", "pkl"),
max_query_length=cfg.get("max_query_length", 31),
max_passage_length=cfg.get("max_passage_length", 190),
preprocess_fn="preprocess_dpr",
)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.get("num_workers", 2),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
| NeMo-main | nemo/collections/nlp/models/information_retrieval/bert_dpr_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.common.losses import SmoothedCrossEntropyLoss
from nemo.collections.nlp.models.information_retrieval.base_ir_model import BaseIRModel
from nemo.collections.nlp.modules.common import SequenceRegression
from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer
from nemo.core.classes.common import typecheck
from nemo.core.neural_types import NeuralType
__all__ = ["BertJointIRModel"]
class BertJointIRModel(BaseIRModel):
"""
Information retrieval model which jointly encodes both query and passage
and passes them to a BERT encoder followed by a fully-connected layer for
similarity score prediction.
"""
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return self.bert_model.input_types
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return self.sim_score_regressor.output_types
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
model_name = cfg.language_model.pretrained_model_name
self.tokenizer = get_tokenizer(tokenizer_name=model_name)
super().__init__(cfg=cfg, trainer=trainer)
self.bert_model = self.get_lm_model_with_padded_embedding(cfg)
hidden_size = self.bert_model.config.hidden_size
self.sim_score_regressor = SequenceRegression(
hidden_size=hidden_size, num_layers=1, dropout=cfg.language_model.sim_score_dropout,
)
self.loss = SmoothedCrossEntropyLoss(pad_id=self.tokenizer.pad_id)
@typecheck()
def forward(self, input_ids, attention_mask, token_type_ids):
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask,
)
if isinstance(hidden_states, tuple):
hidden_states = hidden_states[0]
scores = self.sim_score_regressor(hidden_states=hidden_states)
return scores
def compute_scores_and_loss(self, inputs):
input_ids, input_mask, input_type_ids = inputs
batch_size, num_passages, seq_length = input_ids.size()
unnormalized_scores = self(
input_ids=input_ids.view(-1, seq_length),
attention_mask=input_mask.view(-1, seq_length),
token_type_ids=input_type_ids.view(-1, seq_length),
).view(batch_size, 1, num_passages)
scores = torch.log_softmax(unnormalized_scores, dim=-1)
labels = torch.zeros_like(input_ids[:, :1, 0])
loss = self.loss(log_probs=scores, labels=labels, output_mask=torch.ones_like(labels))
return unnormalized_scores[:, 0], loss
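# Shape sketch: input_ids is (batch, num_passages, seq_len); every (query, passage) pair is scored
# jointly, so the scores tensor is (batch, 1, num_passages) and the gold passage is always index 0,
# which is why the labels tensor is all zeros.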
| NeMo-main | nemo/collections/nlp/models/information_retrieval/bert_joint_ir_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, Optional, Union
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from transformers import AutoModelForSeq2SeqLM
from nemo.collections.nlp.data.dialogue import DialogueSGDDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.mellon_qa_data_processor import DialogueMellonQADataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.ms_marco_data_processor import DialogueMSMarcoDataProcessor
from nemo.collections.nlp.data.dialogue.dataset.dialogue_s2s_generation_dataset import DialogueS2SGenerationDataset
from nemo.collections.nlp.metrics.dialogue_metrics import DialogueGenerationMetrics
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging
try:
from apex.transformer.pipeline_parallel.utils import _reconfigure_microbatch_calculator
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
__all__ = ['DialogueS2SGenerationModel']
class DialogueS2SGenerationModel(NLPModel):
def __init__(
self, cfg: DictConfig, trainer: Trainer = None,
):
self.cfg = cfg
self.data_prepared = False
self.epoch_number = 0
if self.cfg.library == "huggingface":
self.setup_tokenizer(cfg.tokenizer)
elif self.cfg.library == "megatron":
# support MegatronT5Model checkpoints in fp16 precision
t5_cfg = MegatronT5Model.restore_from(
restore_path=cfg.language_model.lm_checkpoint, trainer=trainer, return_config=True
)
# Override selected fields of the restored T5 configuration.
OmegaConf.set_struct(t5_cfg, True)
with open_dict(t5_cfg):
t5_cfg.masked_softmax_fusion = False
t5_cfg.precision = 16
t5_cfg.encoder_arch = 'transformer'
t5_cfg.decoder_arch = 'transformer'
language_model = MegatronT5Model.restore_from(
restore_path=cfg.language_model.lm_checkpoint, trainer=trainer, override_config_path=t5_cfg
)
self.tokenizer = language_model.tokenizer
super().__init__(cfg=cfg, trainer=trainer, no_lm_init=True)
if self.cfg.library == "huggingface":
self.language_model = AutoModelForSeq2SeqLM.from_pretrained(cfg.language_model.pretrained_model_name)
self.language_model.resize_token_embeddings(len(self.tokenizer.tokenizer))
if self.cfg.language_model.lm_checkpoint:
self.language_model.load_state_dict(torch.load(self.cfg.language_model.lm_checkpoint))
elif self.cfg.library == "megatron":
self.language_model = language_model
def training_step(self, batch, batch_idx):
input_ids, attn_masks, labels = batch
loss = self(input_ids, attn_masks, labels)
self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return {'loss': loss}
def validation_step(self, batch, batch_idx):
loss = self.eval_step_helper(batch=batch)
self.validation_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self):
self.eval_epoch_end(self.validation_step_outputs, mode='val')
self.validation_step_outputs.clear() # free memory
def on_test_epoch_end(self):
self.eval_epoch_end(self.test_step_outputs, mode='test')
self.test_step_outputs.clear() # free memory
def eval_epoch_end(self, outputs, mode='val'):
generated_field = []
ground_truth_field = []
inputs = []
loss = []
for output in outputs:
generated_field += output["generated_field"]
ground_truth_field += output["ground_truth_field"]
inputs += output["input"]
loss.append(output["loss"].item())
os.makedirs(self.cfg.dataset.dialogues_example_dir, exist_ok=True)
filename = os.path.join(
self.cfg.dataset.dialogues_example_dir, f"{mode}_predictions_epoch{self.epoch_number}.jsonl"
)
DialogueGenerationMetrics.save_predictions(
filename, generated_field, ground_truth_field, inputs,
)
label_acc = np.mean([int(generated_field[i] == ground_truth_field[i]) for i in range(len(generated_field))])
precision, recall, f1 = DialogueGenerationMetrics.get_f1(generated_field, ground_truth_field)
bleu = DialogueGenerationMetrics.get_bleu(generated_field, ground_truth_field)
avg_loss = np.mean(loss)
ppl = np.exp(avg_loss)
self.log('{}_accuracy'.format(mode), label_acc * 100)
self.log('precision', precision)
self.log('recall', recall)
self.log('f1', f1)
self.log('bleu', bleu)
self.log('{}_loss'.format(mode), avg_loss)
self.log('{}_ppl'.format(mode), ppl)
if mode == 'val':
self.epoch_number += 1
if self.cfg.save_model:
filename = '{}/val_loss-{}-epoch-{}-answer-extender.bin'.format(
self.cfg.dataset.dialogues_example_dir, avg_loss, self.epoch_number
)
torch.save(self.language_model.state_dict(), filename)
def test_step(self, batch, batch_idx):
loss = self.eval_step_helper(batch=batch, mode='test')
self.test_step_outputs.append(loss)
return loss
# for inference only
def predict_step(self, batch, batch_idx, dataloader_idx=None):
# return self(batch)
raise NotImplementedError()
def forward(self, input_ids, attention_masks, labels):
if self.cfg.library == "huggingface":
output = self.language_model(input_ids=input_ids, attention_mask=attention_masks, labels=labels)
loss = output['loss']
elif self.cfg.library == "megatron":
labels = torch.where(labels != -100, labels, torch.zeros_like(labels))
decoder_attn_masks = torch.where(labels > 0, torch.ones_like(labels), torch.zeros_like(labels))
unmasked_unreduced_loss = self.language_model(
input_ids, labels[:, :-1], attention_masks, decoder_attn_masks[:, :-1], lm_labels=labels[:, 1:]
)
loss = self.language_model.loss_func(decoder_attn_masks[:, 1:].contiguous(), unmasked_unreduced_loss)
return loss
def prepare_megatron_generation(self, labels, input_ids, template_length):
"""
# adapted from MegatronGPTModel._bucketize_gpt_inference
"""
batch_size = labels.size(0)
prompt_tags = [self.prompt_tags[0]] * batch_size if self.prompt_tags else None
batch_tokens = input_ids.tolist()
# unpad tokens
lens = template_length
indxs = [index for index in range(batch_size)]
for lenn, index in zip(lens, indxs):
batch_tokens[index] = batch_tokens[index][:lenn]
# chunk tokens by same length
pre_buckets, lens = [], list(set(lens.tolist()))
for lenn in lens:
pre_buckets.append([(tokens, index) for index, tokens in enumerate(batch_tokens) if len(tokens) == lenn])
buckets, positions, bucket_prompt_tags = [], [], []
# get buckets and prompts initial positions
for bucket in pre_buckets:
buckets.append(torch.tensor([item[0] for item in bucket]).to(device=labels.device))
positions.append([item[1] for item in bucket])
# bucket prompt tags identically to their corresponding examples
if prompt_tags:
bucket_prompt_tags.append([prompt_tags[item[1]] for item in bucket])
# Flatten position list
positions = [item for sublist in positions for item in sublist]
# Flatten buckets and bucket_prompt_tags (temporary fix for a Megatron "complete" issue; note this is slower than bucketized inference)
buckets = [item.unsqueeze(0) for sublist in buckets for item in sublist]
bucket_prompt_tags = [[item] for sublist in bucket_prompt_tags for item in sublist]
request = {"tokens": buckets, "prompt_tags": bucket_prompt_tags}
return positions, request
def post_process_megatron_generation(self, outputs):
text_outputs = [output[0] for output in outputs]
generated_tokens = self.tokenizer.tokenizer(text_outputs, padding=True, return_tensors="pt").data["input_ids"]
return generated_tokens
def generate_candidates(self, input_ids, attn_masks, labels):
tokens_to_generate = self.cfg.tokens_to_generate
if self.cfg.library == "huggingface":
param_dict = {
"input_ids": input_ids,
"attention_mask": attn_masks,
"max_length": tokens_to_generate,
}
generated_tokens = self.language_model.generate(**param_dict)
elif self.cfg.library == 'megatron':
_reconfigure_microbatch_calculator(
rank=0, # This doesn't matter since it is only used for logging
rampup_batch_size=None,
global_batch_size=1,
micro_batch_size=1, # Make sure that there is no "grad acc" while decoding.
data_parallel_size=1, # We check above to make sure that dataparallel size is always 1 at inference.
)
generated_tokens, _ = self.language_model.decode(input_ids, attn_masks, tokens_to_generate)
generated_field = self.process_into_structured_fields(generated_tokens)
ground_truth_field = self.process_into_structured_fields(labels)
return generated_field, ground_truth_field
def process_into_structured_fields(self, full_seq_ids, template_length=None):
structured_field = []
for i in range(full_seq_ids.size(0)):
start_point = 0 if template_length is None else template_length[i].item()
stop_point = full_seq_ids.size(1)
for j in range(start_point, stop_point):
if full_seq_ids.data[i, j] in [self.tokenizer.tokenizer.pad_token_id, -100] and j != 0:
stop_point = j
break
token_ids = full_seq_ids[i, start_point:stop_point]
one_generated_field = self.tokenizer.tokenizer.decode(token_ids, skip_special_tokens=True).strip()
structured_field.append(one_generated_field)
return structured_field
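# Example (hypothetical token ids; pad_token_id assumed to be 50256, as for GPT-2):
#   full_seq_ids = torch.tensor([[tok_a, tok_b, 50256, 50256]])
#   model.process_into_structured_fields(full_seq_ids)
#   # -> [decoded text of (tok_a, tok_b)], i.e. each row is decoded up to the first pad/-100 token
#   # and whitespace-stripped.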
def eval_step_helper(self, batch, mode='val'):
input_ids, attn_masks, labels = batch
loss = self(input_ids, attn_masks, labels)
self.log("{}_loss".format(mode), loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
generated_field, ground_truth_field = self.generate_candidates(input_ids, attn_masks, labels)
return {
'loss': loss,
'input': self.tokenizer.tokenizer.batch_decode(input_ids, skip_special_tokens=True),
'generated_field': generated_field,
'ground_truth_field': ground_truth_field,
}
def prepare_data(self):
"""
Preprocesses schemas and dialogues and caches the result
"""
if self.data_prepared:
return
if self._cfg.dataset.task == "ms_marco":
self.dialogues_processor = DialogueMSMarcoDataProcessor(
data_dir=self._cfg.dataset.data_dir, tokenizer=self.tokenizer, cfg=self._cfg.dataset
)
elif self._cfg.dataset.task == "sgd_generation":
self.dialogues_processor = DialogueSGDDataProcessor(
data_dir=self._cfg.dataset.data_dir,
dialogues_example_dir=self._cfg.dataset.dialogues_example_dir,
tokenizer=self.tokenizer,
cfg=self._cfg.dataset,
)
elif self._cfg.dataset.task == "mellon_qa":
self.dialogues_processor = DialogueMellonQADataProcessor(
data_dir=self._cfg.dataset.data_dir, tokenizer=self.tokenizer, cfg=self._cfg.dataset
)
else:
raise ValueError("Only ms_marco, sgd_generation and mellon_qa supported for Dialogue GPT Generation Model")
self.data_prepared = True
def update_data_dirs(self, data_dir: str, dialogues_example_dir: str):
"""
Update data directories
Args:
data_dir: path to data directory
dialogues_example_dir: path to the preprocessed dialogue examples directory; if it does not exist, it will be created.
"""
if not os.path.exists(data_dir):
raise ValueError(f"{data_dir} is not found")
self._cfg.dataset.data_dir = data_dir
self._cfg.dataset.dialogues_example_dir = dialogues_example_dir
logging.info(f'Setting model.dataset.data_dir to {data_dir}.')
logging.info(f'Setting model.dataset.dialogues_example_dir to {dialogues_example_dir}.')
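# Usage sketch (paths are hypothetical):
#   model.update_data_dirs(
#       data_dir="/data/dialogue/ms_marco",
#       dialogues_example_dir="/data/dialogue/ms_marco/examples",
#   )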
def setup_training_data(self, train_data_config: Optional[DictConfig] = None):
self.prepare_data()
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, split=train_data_config.ds_item)
def setup_multiple_validation_data(self, val_data_config: Optional[DictConfig] = None):
return self.setup_validation_data(val_data_config)
def setup_validation_data(self, val_data_config: Optional[DictConfig] = None):
self.prepare_data()
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config, split=val_data_config.ds_item)
def setup_multiple_test_data(self, test_data_config: Union[DictConfig, Dict]):
self.setup_test_data(test_data_config)
def setup_test_data(self, test_data_config: Optional[DictConfig] = None):
self.prepare_data()
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, split=test_data_config.ds_item)
def _setup_dataloader_from_config(self, cfg: DictConfig, split: str) -> DataLoader:
dataset_cfg = self._cfg.dataset
data_dir = dataset_cfg.data_dir
if not os.path.exists(data_dir):
raise FileNotFoundError(f"Data directory is not found at: {data_dir}.")
dataset = DialogueS2SGenerationDataset(
dataset_split=split,
dialogues_processor=self.dialogues_processor,
tokenizer=self.dialogues_processor._tokenizer,
cfg=dataset_cfg,
)
dl = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
collate_fn=dataset.collate_fn,
drop_last=cfg.drop_last,
shuffle=cfg.shuffle,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
)
return dl
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
return result
| NeMo-main | nemo/collections/nlp/models/dialogue/dialogue_s2s_generation_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from typing import Dict, Optional, Union
import numpy as np
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from transformers import AutoModelWithLMHead
from nemo.collections.nlp.data.dialogue.data_processor.mellon_qa_data_processor import DialogueMellonQADataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.ms_marco_data_processor import DialogueMSMarcoDataProcessor
from nemo.collections.nlp.data.dialogue.dataset.dialogue_gpt_generation_dataset import DialogueGPTGenerationDataset
from nemo.collections.nlp.metrics.dialogue_metrics import DialogueGenerationMetrics
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import (
MegatronGPTPromptLearningModel,
)
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging
__all__ = ['DialogueGPTGenerationModel']
NUM_TASKS = 1 # focusing on intent only for now; the full multi-head setup would use 6 tasks
class DialogueGPTGenerationModel(NLPModel):
def __init__(
self, cfg: DictConfig, trainer: Trainer = None,
):
self.cfg = cfg
self.data_prepared = False
self.setup_tokenizer(cfg.tokenizer)
self.tokenizer.tokenizer.pad_token = self.tokenizer.tokenizer.eos_token
self.epoch_number = 0
self.prompt_learning = self.cfg.prompt_learning
super().__init__(cfg=cfg, trainer=trainer, no_lm_init=True)
if self.cfg.library == "huggingface":
self.language_model = AutoModelWithLMHead.from_pretrained(cfg.language_model.pretrained_model_name)
self.language_model.resize_token_embeddings(len(self.tokenizer.tokenizer))
if self.cfg.language_model.lm_checkpoint:
self.language_model.load_state_dict(torch.load(self.cfg.language_model.lm_checkpoint))
elif self.cfg.library == "megatron":
if self.prompt_learning:
# remove the tokenizer cfg, since it triggers tokenizer construction, which is unnecessary here because we already have a separate tokenizer
new_cfg = copy.copy(cfg)
del new_cfg.tokenizer
self.language_model = MegatronGPTPromptLearningModel(new_cfg, trainer)
else:
self.language_model = MegatronGPTModel.restore_from(cfg.language_model.lm_checkpoint, trainer=trainer)
def training_step(self, batch, batch_idx):
input_ids, attn_masks, labels, _, _ = batch
loss = self(input_ids, attn_masks, labels, inference=False)
self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return {'loss': loss}
def validation_step(self, batch, batch_idx):
loss = self.eval_step_helper(batch=batch)
self.validation_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self):
self.eval_epoch_end(self.validation_step_outputs, mode='val')
self.validation_step_outputs.clear() # free memory
def on_test_epoch_end(self):
self.eval_epoch_end(self.test_step_outputs, mode='test')
self.test_step_outputs.clear() # free memory
def eval_epoch_end(self, outputs, mode='val'):
generated_field = []
ground_truth_field = []
inputs = []
loss = []
for output in outputs:
generated_field += output["generated_field"]
ground_truth_field += output["ground_truth_field"]
inputs += output["input"]
loss.append(output["loss"].item())
os.makedirs(self.cfg.dataset.dialogues_example_dir, exist_ok=True)
filename = os.path.join(
self.cfg.dataset.dialogues_example_dir, f"{mode}_predictions_epoch{self.epoch_number}.jsonl"
)
DialogueGenerationMetrics.save_predictions(
filename, generated_field, ground_truth_field, inputs,
)
label_acc = np.mean([int(generated_field[i] == ground_truth_field[i]) for i in range(len(generated_field))])
precision, recall, f1 = DialogueGenerationMetrics.get_f1(generated_field, ground_truth_field)
bleu = DialogueGenerationMetrics.get_bleu(generated_field, ground_truth_field)
avg_loss = np.mean(loss)
ppl = np.exp(avg_loss)
self.log('{}_accuracy'.format(mode), label_acc * 100)
self.log('precision', precision)
self.log('recall', recall)
self.log('f1', f1)
self.log('bleu', bleu)
self.log('{}_loss'.format(mode), avg_loss)
self.log('{}_ppl'.format(mode), ppl)
if mode == 'val':
self.epoch_number += 1
if self.cfg.save_model:
filename = '{}/val_loss-{}-epoch-{}-answer-extender.bin'.format(
self.cfg.dataset.dialogues_example_dir, avg_loss, self.epoch_number
)
torch.save(self.language_model.state_dict(), filename)
def test_step(self, batch, batch_idx):
loss = self.eval_step_helper(batch=batch, mode='test')
self.test_step_outputs.append(loss)
return loss
# for inference only
def predict_step(self, batch, batch_idx, dataloader_idx=None):
# return self(batch)
raise NotImplementedError()
def forward(self, input_ids, attention_mask, labels, inference=True):
if self.cfg.library == "huggingface":
output = self.language_model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
loss = output['loss']
elif self.cfg.library == "megatron":
num_prompt_tokens = (
len(self.language_model.pseudo_token_ids) if hasattr(self.language_model, 'pseudo_token_ids') else 0
)
position_ids = torch.arange(
start=0, end=num_prompt_tokens + input_ids.size(1), dtype=torch.long, device=input_ids.device,
)
position_ids = position_ids.unsqueeze(0).repeat(input_ids.size(0), 1)
prompt_ids = torch.tensor([0] * input_ids.size(0)) if self.prompt_learning else None
attn_mask_add_on = torch.ones((attention_mask.size(0), num_prompt_tokens), device=attention_mask.device)
full_attention_mask = torch.cat([attn_mask_add_on, attention_mask], axis=-1)
full_attention_mask_expand = torch.tril(
full_attention_mask.unsqueeze(2).tile(full_attention_mask.size(1))
).unsqueeze(1)
attn_mask = full_attention_mask_expand > 0
prompt_token_labels = torch.full(
size=(input_ids.size(0), num_prompt_tokens),
fill_value=self.tokenizer.tokenizer.pad_token_id,
dtype=torch.long,
)
if self.prompt_learning:
prompt_token_labels.data = torch.LongTensor(
np.tile(np.array(self.language_model.pseudo_token_ids), (input_ids.size(0), 1))
)
prompt_token_labels = prompt_token_labels.to(input_ids.device)
input_ids_new = torch.cat([torch.zeros_like(prompt_token_labels), input_ids], axis=1)
make_up_last_column_input_ids = (
torch.ones_like(input_ids_new[:, -1:]) * self.tokenizer.tokenizer.pad_token_id
)
left_shifted_input_ids = torch.cat([input_ids_new[:, 1:], make_up_last_column_input_ids], axis=-1)
if self.prompt_learning:
unmasked_unreduced_loss = self.language_model(
input_ids_new,
position_ids,
attn_mask,
labels=left_shifted_input_ids,
taskname_ids=prompt_ids,
inference=inference,
)
else:
unmasked_unreduced_loss = self.language_model(
input_ids, position_ids, attn_mask, labels=left_shifted_input_ids
)
if isinstance(unmasked_unreduced_loss, tuple):
unmasked_unreduced_loss = unmasked_unreduced_loss[0]
labels = torch.cat([prompt_token_labels, labels], axis=1)
make_up_last_column_labels = torch.ones_like(labels[:, -1:]) * self.tokenizer.tokenizer.pad_token_id
new_labels = torch.cat([labels[:, 1:], make_up_last_column_labels], axis=-1)
filler = torch.zeros_like(new_labels)
labels_mask_0 = torch.where(new_labels != -100, new_labels, filler)
labels_mask = labels_mask_0 > 0
loss = self.mask_and_reduce_loss(labels_mask, unmasked_unreduced_loss)
return loss
def mask_and_reduce_loss(self, loss_mask, output_tensor):
losses = output_tensor.float()
loss_mask = loss_mask.view(-1).float()
loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
return loss
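# A minimal numeric sketch of the reduction above (illustrative, not from the original file):
# for per-token losses [[1., 2., 3.], [4., 5., 6.]] and loss_mask [[1, 1, 0], [1, 1, 0]],
# the result is (1 + 2 + 4 + 5) / 4 = 3.0, i.e. the mean over unmasked positions only.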
def setup(self, stage=None):
super().setup(stage)
if self.cfg.library == "megatron" and self.prompt_learning:
self.language_model.init_new_prompts()
def prepare_megatron_generation(self, labels, input_ids, template_length):
"""
Adapted from MegatronGPTModel._bucketize_gpt_inference.
"""
batch_size = labels.size(0)
prompt_tags = [self.prompt_tags[0]] * batch_size if self.prompt_learning else None
batch_tokens = input_ids.tolist()
# unpad tokens
lens = template_length
indxs = [index for index in range(batch_size)]
for lenn, index in zip(lens, indxs):
batch_tokens[index] = batch_tokens[index][:lenn]
# chunk tokens by same length
pre_buckets, lens = [], list(set(lens.tolist()))
for lenn in lens:
pre_buckets.append([(tokens, index) for index, tokens in enumerate(batch_tokens) if len(tokens) == lenn])
buckets, positions, bucket_prompt_tags = [], [], []
# get buckets and prompts initial positions
for bucket in pre_buckets:
buckets.append(torch.tensor([item[0] for item in bucket]).to(device=labels.device))
positions.append([item[1] for item in bucket])
# bucket prompt tags identically to their corresponding examples
if prompt_tags:
bucket_prompt_tags.append([prompt_tags[item[1]] for item in bucket])
# Flatten position list
positions = [item for sublist in positions for item in sublist]
# Flatten buckets and bucket_prompt_tags. Temporary fix for the megatron complete issue; note this is slower than bucketized inference.
buckets = [item.unsqueeze(0) for sublist in buckets for item in sublist]
bucket_prompt_tags = [[item] for sublist in bucket_prompt_tags for item in sublist]
request = {"tokens": buckets, "prompt_tags": bucket_prompt_tags}
return positions, request
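# Illustrative sketch of the bucketing above (assumed lengths, not from the original file):
# with template_length == [3, 5, 3], each example is first truncated to its template length,
# then grouped into a length-3 bucket (examples 0 and 2) and a length-5 bucket (example 1).
# `positions` keeps the original example indices so outputs can be re-ordered afterwards,
# and the flattened buckets are fed one example at a time to the megatron `complete` call.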
def post_process_megatron_generation(self, outputs):
text_outputs = [output[0] for output in outputs]
generated_tokens = self.tokenizer.tokenizer(text_outputs, padding=True, return_tensors="pt").data["input_ids"]
return generated_tokens
def generate_candidates(self, labels, template_length, input_ids, attn_masks):
tokens_to_generate = self.cfg.tokens_to_generate
if self.cfg.library == "huggingface":
generated_tokens = []
max_length = 0
for i in range(input_ids.size(0)):
param_dict = {
"input_ids": input_ids[i : i + 1, : template_length[i]],
"max_length": template_length[i] + tokens_to_generate,
"pad_token_id": self.tokenizer.tokenizer.pad_token_id,
}
generated_tokens.append(self.language_model.generate(**param_dict))
max_length = max(max_length, generated_tokens[-1].size(1))
# pad each generated sequence so that all have the same length in dim 1 and can be concatenated
generated_tokens = [
torch.cat(
[i, torch.ones((1, max_length - i.size(1))).to(i.device) * self.tokenizer.tokenizer.pad_token_id],
axis=-1,
)
for i in generated_tokens
]
generated_tokens = torch.cat(generated_tokens, axis=0)
elif self.cfg.library == "megatron":
positions, request = self.prepare_megatron_generation(labels, input_ids, template_length)
outputs = self.language_model.complete(request, positions, tokens_to_generate)
generated_tokens = self.post_process_megatron_generation(outputs)
generated_field = self.process_into_structured_fields(generated_tokens, template_length=template_length)
ground_truth_field = self.process_into_structured_fields(labels, template_length=template_length)
return generated_field, ground_truth_field
def process_into_structured_fields(self, full_seq_ids, template_length=None):
structured_field = []
for i in range(full_seq_ids.size(0)):
start_point = 0 if template_length is None else template_length[i].item()
stop_point = full_seq_ids.size(1)
for j in range(start_point, stop_point):
if full_seq_ids.data[i, j] == self.tokenizer.tokenizer.pad_token_id:
stop_point = j
break
one_generated_field = self.tokenizer.tokenizer.decode(full_seq_ids[i, start_point:stop_point]).strip()
structured_field.append(one_generated_field)
return structured_field
def eval_step_helper(self, batch, mode='val'):
input_ids, attn_masks, labels, template_length, utterance_length = batch
loss = self(input_ids, attn_masks, labels)
self.log("{}_loss".format(mode), loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
# autoregressively generate candidates (possibly with constraint)
generated_field, ground_truth_field = self.generate_candidates(labels, template_length, input_ids, attn_masks)
return {
'loss': loss,
'input': self.tokenizer.tokenizer.batch_decode(input_ids, skip_special_tokens=True),
'generated_field': generated_field,
'ground_truth_field': ground_truth_field,
}
def prepare_data(self):
"""
Preprocesses the schemas and dialogues and caches the result
"""
if self.data_prepared:
return
if self._cfg.dataset.task == "ms_marco":
self.dialogues_processor = DialogueMSMarcoDataProcessor(
data_dir=self._cfg.dataset.data_dir, tokenizer=self.tokenizer, cfg=self._cfg.dataset
)
elif self._cfg.dataset.task == "mellon_qa":
self.dialogues_processor = DialogueMellonQADataProcessor(
data_dir=self._cfg.dataset.data_dir, tokenizer=self.tokenizer, cfg=self._cfg.dataset
)
else:
raise ValueError("Only ms_marco and mellon_qa supported for Dialogue GPT Generation Model")
self.data_prepared = True
def update_data_dirs(self, data_dir: str, dialogues_example_dir: str):
"""
Update data directories
Args:
data_dir: path to data directory
dialogues_example_dir: path to the preprocessed dialogue examples directory; it will be created if it does not exist.
"""
if not os.path.exists(data_dir):
raise ValueError(f"{data_dir} is not found")
self._cfg.dataset.data_dir = data_dir
self._cfg.dataset.dialogues_example_dir = dialogues_example_dir
logging.info(f'Setting model.dataset.data_dir to {data_dir}.')
logging.info(f'Setting model.dataset.dialogues_example_dir to {dialogues_example_dir}.')
def setup_training_data(self, train_data_config: Optional[DictConfig] = None):
self.prepare_data()
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, split=train_data_config.ds_item)
def setup_multiple_validation_data(self, val_data_config: Optional[DictConfig] = None):
return self.setup_validation_data(val_data_config)
def setup_validation_data(self, val_data_config: Optional[DictConfig] = None):
self.prepare_data()
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config, split=val_data_config.ds_item)
def setup_multiple_test_data(self, test_data_config: Union[DictConfig, Dict]):
self.setup_test_data(test_data_config)
def setup_test_data(self, test_data_config: Optional[DictConfig] = None):
self.prepare_data()
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, split=test_data_config.ds_item)
def _setup_dataloader_from_config(self, cfg: DictConfig, split: str) -> DataLoader:
dataset_cfg = self._cfg.dataset
data_dir = dataset_cfg.data_dir
if not os.path.exists(data_dir):
raise FileNotFoundError(f"Data directory is not found at: {data_dir}.")
dataset = DialogueGPTGenerationDataset(
dataset_split=split,
dialogues_processor=self.dialogues_processor,
tokenizer=self.dialogues_processor._tokenizer,
cfg=dataset_cfg,
)
dl = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
collate_fn=dataset.collate_fn,
drop_last=cfg.drop_last,
shuffle=cfg.shuffle,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
)
return dl
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
return result
| NeMo-main | nemo/collections/nlp/models/dialogue/dialogue_gpt_generation_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, List, Optional
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from nemo.collections.common.losses import AggregatorLoss, CrossEntropyLoss
from nemo.collections.nlp.data.dialogue.data_processor.assistant_data_processor import DialogueAssistantDataProcessor
from nemo.collections.nlp.data.dialogue.dataset.dialogue_bert_dataset import (
DialogueBERTDataset,
DialogueIntentSlotInferenceDataset,
)
from nemo.collections.nlp.data.intent_slot_classification import IntentSlotDataDesc
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.metrics.dialogue_metrics import DialogueClassificationMetrics
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common import SequenceTokenClassifier
from nemo.collections.nlp.parts.utils_funcs import tensor2list
from nemo.core.classes import typecheck
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging
class IntentSlotClassificationModel(NLPModel):
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
""" Initializes BERT Joint Intent and Slot model.
"""
self.max_seq_length = cfg.dataset.max_seq_length
self.cfg = cfg
# Check the presence of data_dir.
if not cfg.dataset.data_dir or not os.path.exists(cfg.dataset.data_dir):
# Set default values of data_desc.
self._set_defaults_data_desc(cfg)
else:
self.data_dir = cfg.dataset.data_dir
# Update configuration of data_desc.
self._set_data_desc_to_cfg(cfg, cfg.dataset.data_dir, cfg.train_ds, cfg.validation_ds)
# init superclass
super().__init__(cfg=cfg, trainer=trainer)
# Initialize Classifier.
self._reconfigure_classifier()
def _set_defaults_data_desc(self, cfg):
"""
Method makes sure that cfg.data_desc params are set.
If not, sets them to "dummy" defaults.
"""
if not hasattr(cfg, "data_desc"):
OmegaConf.set_struct(cfg, False)
cfg.data_desc = {}
# Intents.
cfg.data_desc.intent_labels = " "
cfg.data_desc.intent_label_ids = {" ": 0}
cfg.data_desc.intent_weights = [1]
# Slots.
cfg.data_desc.slot_labels = " "
cfg.data_desc.slot_label_ids = {" ": 0}
cfg.data_desc.slot_weights = [1]
cfg.data_desc.pad_label = "O"
OmegaConf.set_struct(cfg, True)
def _set_data_desc_to_cfg(self, cfg, data_dir, train_ds, validation_ds):
""" Method creates IntentSlotDataDesc and copies generated values to cfg.data_desc. """
# Save data from data desc to config - so it can be reused later, e.g. in inference.
data_desc = IntentSlotDataDesc(data_dir=data_dir, modes=[train_ds.prefix, validation_ds.prefix])
OmegaConf.set_struct(cfg, False)
if not hasattr(cfg, "data_desc") or cfg.data_desc is None:
cfg.data_desc = {}
# Intents.
cfg.data_desc.intent_labels = list(data_desc.intents_label_ids.keys())
cfg.data_desc.intent_label_ids = data_desc.intents_label_ids
cfg.data_desc.intent_weights = data_desc.intent_weights
# Slots.
cfg.data_desc.slot_labels = list(data_desc.slots_label_ids.keys())
cfg.data_desc.slot_label_ids = data_desc.slots_label_ids
cfg.data_desc.slot_weights = data_desc.slot_weights
cfg.data_desc.pad_label = data_desc.pad_label
# for compatibility with older (pre-1.0.0.b3) configs
if not hasattr(cfg, "class_labels") or cfg.class_labels is None:
cfg.class_labels = {}
cfg.class_labels = OmegaConf.create(
{'intent_labels_file': 'intent_labels.csv', 'slot_labels_file': 'slot_labels.csv'}
)
slot_labels_file = os.path.join(data_dir, cfg.class_labels.slot_labels_file)
intent_labels_file = os.path.join(data_dir, cfg.class_labels.intent_labels_file)
self._save_label_ids(data_desc.slots_label_ids, slot_labels_file)
self._save_label_ids(data_desc.intents_label_ids, intent_labels_file)
self.register_artifact('class_labels.intent_labels_file', intent_labels_file)
self.register_artifact('class_labels.slot_labels_file', slot_labels_file)
OmegaConf.set_struct(cfg, True)
def _save_label_ids(self, label_ids: Dict[str, int], filename: str) -> None:
""" Saves label ids map to a file """
with open(filename, 'w') as out:
labels, _ = zip(*sorted(label_ids.items(), key=lambda x: x[1]))
out.write('\n'.join(labels))
logging.info(f'Labels: {label_ids}')
logging.info(f'Labels mapping saved to : {out.name}')
def _reconfigure_classifier(self):
""" Method reconfigures the classifier depending on the settings of model cfg.data_desc """
self.classifier = SequenceTokenClassifier(
hidden_size=self.hidden_size,
num_intents=len(self.cfg.data_desc.intent_labels),
num_slots=len(self.cfg.data_desc.slot_labels),
dropout=self.cfg.classifier_head.fc_dropout,
num_layers=self.cfg.classifier_head.num_output_layers,
log_softmax=False,
)
# define losses
if self.cfg.class_balancing == 'weighted_loss':
# You may need to increase the number of epochs for convergence when using weighted_loss
self.intent_loss = CrossEntropyLoss(logits_ndim=2, weight=self.cfg.data_desc.intent_weights)
self.slot_loss = CrossEntropyLoss(logits_ndim=3, weight=self.cfg.data_desc.slot_weights)
else:
self.intent_loss = CrossEntropyLoss(logits_ndim=2)
self.slot_loss = CrossEntropyLoss(logits_ndim=3)
self.total_loss = AggregatorLoss(
num_inputs=2, weights=[self.cfg.intent_loss_weight, 1.0 - self.cfg.intent_loss_weight]
)
# setup to track metrics
self.intent_classification_report = ClassificationReport(
num_classes=len(self.cfg.data_desc.intent_labels),
label_ids=self.cfg.data_desc.intent_label_ids,
dist_sync_on_step=True,
mode='micro',
)
self.slot_classification_report = ClassificationReport(
num_classes=len(self.cfg.data_desc.slot_labels),
label_ids=self.cfg.data_desc.slot_label_ids,
dist_sync_on_step=True,
mode='micro',
)
def update_data_dir_for_training(self, data_dir: str, train_ds, validation_ds) -> None:
"""
Update data directory and get data stats with Data Descriptor.
Also reconfigures the classifier to cope with data that has, e.g., a different number of slots.
Args:
data_dir: path to data directory
"""
logging.info(f'Setting data_dir to {data_dir}.')
self.data_dir = data_dir
# Update configuration with new data.
self._set_data_desc_to_cfg(self.cfg, data_dir, train_ds, validation_ds)
# Reconfigure the classifier for different settings (number of intents, slots etc.).
self._reconfigure_classifier()
def update_data_dir_for_testing(self, data_dir) -> None:
"""
Update data directory.
Args:
data_dir: path to data directory
"""
logging.info(f'Setting data_dir to {data_dir}.')
self.data_dir = data_dir
@typecheck()
def forward(self, input_ids, attention_mask, token_type_ids):
"""
No special modification required for Lightning, define it as you normally would
in the `nn.Module` in vanilla PyTorch.
"""
if self._cfg.tokenizer.get('library', '') == 'megatron':
hidden_states, _ = self.bert_model(input_ids, attention_mask, tokentype_ids=token_type_ids, lm_labels=None)
else:
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)
intent_logits, slot_logits = self.classifier(hidden_states=hidden_states)
return intent_logits, slot_logits
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
# forward pass
input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask, intent_labels, slot_labels = batch
intent_logits, slot_logits = self(
input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask
)
# calculate combined loss for intents and slots
intent_loss = self.intent_loss(logits=intent_logits, labels=intent_labels)
slot_loss = self.slot_loss(logits=slot_logits, labels=slot_labels, loss_mask=loss_mask)
train_loss = self.total_loss(loss_1=intent_loss, loss_2=slot_loss)
lr = self._optimizer.param_groups[0]['lr']
self.log('train_loss', train_loss)
self.log('lr', lr, prog_bar=True)
return {
'loss': train_loss,
'lr': lr,
}
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask, intent_labels, slot_labels = batch
intent_logits, slot_logits = self(
input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask
)
# calculate combined loss for intents and slots
intent_loss = self.intent_loss(logits=intent_logits, labels=intent_labels)
slot_loss = self.slot_loss(logits=slot_logits, labels=slot_labels, loss_mask=loss_mask)
val_loss = self.total_loss(loss_1=intent_loss, loss_2=slot_loss)
# calculate accuracy metrics for intents and slot reporting
# intents
intent_preds = torch.argmax(intent_logits, axis=-1)
self.intent_classification_report.update(intent_preds, intent_labels)
# slots
subtokens_mask = subtokens_mask > 0.5
slot_preds = torch.argmax(slot_logits, axis=-1)
self.slot_classification_report.update(slot_preds[subtokens_mask], slot_labels[subtokens_mask])
loss = {
'val_loss': val_loss,
'intent_tp': self.intent_classification_report.tp,
'intent_fn': self.intent_classification_report.fn,
'intent_fp': self.intent_classification_report.fp,
'slot_tp': self.slot_classification_report.tp,
'slot_fn': self.slot_classification_report.fn,
'slot_fp': self.slot_classification_report.fp,
'intent_preds': intent_preds,
'intent_labels': intent_labels,
'slot_preds': slot_preds,
'slot_labels': slot_labels,
'input': input_ids,
'subtokens_mask': subtokens_mask,
}
self.validation_step_outputs.append(loss)
return loss
@staticmethod
def get_continuous_slots(slot_ids, utterance_tokens):
"""
Extract continuous spans of slot_ids
Args:
slot_ids: list of str representing the slot of each word token.
For instance, ['O', 'email_address', 'email_address', 'email_address', 'O', 'O', 'O', 'O']
corresponds to ['enter', 'atdfd@yahoo', 'dot', 'com', 'into', 'my', 'contact', 'list']
Returns:
list of str where each element is a slot name-value pair
e.g. ['email_address(atdfd@yahoo dot com)']
"""
slot_id_stack = []
position_stack = []
for i, slot_id in enumerate(slot_ids):
if not slot_id_stack or slot_id != slot_id_stack[-1]:
slot_id_stack.append(slot_id)
position_stack.append([])
position_stack[-1].append(i)
slot_id_to_start_and_exclusive_end = {
slot_id_stack[i]: [position_stack[i][0], position_stack[i][-1] + 1]
for i in range(len(position_stack))
if slot_id_stack[i] != 'O'
}
slot_to_words = {
slot: ' '.join(utterance_tokens[position[0] : position[1]])
for slot, position in slot_id_to_start_and_exclusive_end.items()
}
slot_name_and_values = ["{}({})".format(slot, value) for slot, value in slot_to_words.items()]
return slot_name_and_values
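# Worked example, mirroring the docstring above (illustrative only):
#   slot_ids = ['O', 'email_address', 'email_address', 'email_address', 'O', 'O', 'O', 'O']
#   utterance_tokens = ['enter', 'atdfd@yahoo', 'dot', 'com', 'into', 'my', 'contact', 'list']
#   IntentSlotClassificationModel.get_continuous_slots(slot_ids, utterance_tokens)
#   # -> ['email_address(atdfd@yahoo dot com)']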
def get_utterance_tokens(self, token_ids, token_masks):
"""
Get utterance tokens based on initial utterance tokenization using token_masks,
which shows the starting subtoken of each utterance token.
Args:
token_ids: IntTensor of size (max_seq_len, )
token_masks: BoolTensor of size (max_seq_len, )
Returns:
token_list: List of Str (list of tokens with len <= max_seq_len)
"""
tokens_stack = []
tokens = self.tokenizer.tokenizer.convert_ids_to_tokens(token_ids)
for token_idx, token in enumerate(tokens):
if token_masks[token_idx].item():
tokens_stack.append([token])
elif tokens_stack:
clean_token = (
token.replace("##", '')
.replace(self.tokenizer.tokenizer.sep_token, '')
.replace(self.tokenizer.tokenizer.pad_token, '')
)
tokens_stack[-1].append(clean_token)
token_list = [''.join(token) for token in tokens_stack]
return token_list
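# Illustrative sketch (assumes WordPiece subtokens; not from the original file): for
# tokens ['en', '##ter', 'contact'] with token_masks [True, False, True], the '##' prefix
# is stripped from the continuation piece and the method returns ['enter', 'contact'].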
def get_unified_metrics(self, outputs):
slot_preds = []
slot_labels = []
subtokens_mask = []
inputs = []
intent_preds = []
intent_labels = []
for output in outputs:
slot_preds += output['slot_preds']
slot_labels += output["slot_labels"]
subtokens_mask += output["subtokens_mask"]
inputs += output["input"]
intent_preds += output["intent_preds"]
intent_labels += output["intent_labels"]
ground_truth_labels = self.convert_intent_ids_to_intent_names(intent_labels)
generated_labels = self.convert_intent_ids_to_intent_names(intent_preds)
predicted_slots = self.mask_unused_subword_slots(slot_preds, subtokens_mask)
ground_truth_slots = self.mask_unused_subword_slots(slot_labels, subtokens_mask)
all_generated_slots = []
all_ground_truth_slots = []
all_utterances = []
for i in range(len(predicted_slots)):
utterance_tokens = self.get_utterance_tokens(inputs[i], subtokens_mask[i])
ground_truth_slot_names = ground_truth_slots[i].split()
predicted_slot_names = predicted_slots[i].split()
processed_ground_truth_slots = IntentSlotClassificationModel.get_continuous_slots(
ground_truth_slot_names, utterance_tokens
)
processed_predicted_slots = IntentSlotClassificationModel.get_continuous_slots(
predicted_slot_names, utterance_tokens
)
all_generated_slots.append(processed_predicted_slots)
all_ground_truth_slots.append(processed_ground_truth_slots)
all_utterances.append(' '.join(utterance_tokens))
os.makedirs(self.cfg.dataset.dialogues_example_dir, exist_ok=True)
filename = os.path.join(self.cfg.dataset.dialogues_example_dir, "predictions.jsonl")
DialogueClassificationMetrics.save_predictions(
filename,
generated_labels,
all_generated_slots,
ground_truth_labels,
all_ground_truth_slots,
['' for i in range(len(generated_labels))],
['' for i in range(len(generated_labels))],
all_utterances,
)
(
slot_precision,
slot_recall,
slot_f1,
slot_joint_goal_accuracy,
) = DialogueClassificationMetrics.get_slot_filling_metrics(all_generated_slots, all_ground_truth_slots)
return slot_precision, slot_recall, slot_f1, slot_joint_goal_accuracy
def on_validation_epoch_end(self):
"""
Called at the end of validation to aggregate the step outputs collected in
self.validation_step_outputs (or self.test_step_outputs when testing).
"""
prefix = "test" if self.trainer.testing else "val"
if prefix == "val":
outputs = self.validation_step_outputs
else:
outputs = self.test_step_outputs
(
unified_slot_precision,
unified_slot_recall,
unified_slot_f1,
unified_slot_joint_goal_accuracy,
) = self.get_unified_metrics(outputs)
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
# calculate metrics and log classification report (separately for intents and slots)
intent_precision, intent_recall, intent_f1, intent_report = self.intent_classification_report.compute()
logging.info(f'Intent report: {intent_report}')
slot_precision, slot_recall, slot_f1, slot_report = self.slot_classification_report.compute()
logging.info(f'Slot report: {slot_report}')
self.log(f'{prefix}_loss', avg_loss)
self.log('intent_precision', intent_precision)
self.log('intent_recall', intent_recall)
self.log('intent_f1', intent_f1)
self.log('slot_precision', slot_precision)
self.log('slot_recall', slot_recall)
self.log('slot_f1', slot_f1)
self.log('unified_slot_precision', unified_slot_precision)
self.log('unified_slot_recall', unified_slot_recall)
self.log('unified_slot_f1', unified_slot_f1)
self.log('unified_slot_joint_goal_accuracy', unified_slot_joint_goal_accuracy)
self.intent_classification_report.reset()
self.slot_classification_report.reset()
self.validation_step_outputs.clear() if prefix == 'val' else self.test_step_outputs.clear()
return {
'val_loss': avg_loss,
'intent_precision': intent_precision,
'intent_recall': intent_recall,
'intent_f1': intent_f1,
'slot_precision': slot_precision,
'slot_recall': slot_recall,
'slot_f1': slot_f1,
'unified_slot_precision': unified_slot_precision,
'unified_slot_recall': unified_slot_recall,
'unified_slot_f1': unified_slot_f1,
'unified_slot_joint_goal_accuracy': unified_slot_joint_goal_accuracy,
}
def test_step(self, batch, batch_idx):
"""
Lightning calls this inside the test loop with the data from the test dataloader
passed in as `batch`.
"""
loss = self.validation_step(batch, batch_idx)
self.test_step_outputs.append(loss)
return loss
def on_test_epoch_end(self):
"""
Called at the end of testing to aggregate the step outputs collected in
self.test_step_outputs. Delegates to on_validation_epoch_end.
"""
return self.on_validation_epoch_end()
def setup_training_data(self, train_data_config: Optional[DictConfig]):
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, dataset_split='train')
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config, dataset_split='dev')
def setup_test_data(self, test_data_config: Optional[DictConfig]):
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, dataset_split='test')
def _setup_dataloader_from_config(self, cfg: DictConfig, dataset_split: str):
data_processor = DialogueAssistantDataProcessor(self.data_dir, self.tokenizer, cfg=self.cfg.dataset)
dataset = DialogueBERTDataset(
dataset_split,
data_processor,
self.tokenizer,
self.cfg.dataset, # this is the model.dataset cfg, which is diff from train_ds cfg etc
)
return DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
drop_last=cfg.drop_last,
collate_fn=dataset.collate_fn,
)
def _setup_infer_dataloader(self, queries: List[str], test_ds) -> 'torch.utils.data.DataLoader':
"""
Setup function for an inference data loader.
Args:
queries: text queries to run inference on
test_ds: dataset config section that provides the batch size and data loader settings
Returns:
A pytorch DataLoader.
"""
dataset = DialogueIntentSlotInferenceDataset(
tokenizer=self.tokenizer, queries=queries, max_seq_length=-1, do_lower_case=False
)
return torch.utils.data.DataLoader(
dataset=dataset,
collate_fn=dataset.collate_fn,
batch_size=test_ds.batch_size,
shuffle=test_ds.shuffle,
num_workers=test_ds.num_workers,
pin_memory=test_ds.pin_memory,
drop_last=test_ds.drop_last,
)
def update_data_dirs(self, data_dir: str, dialogues_example_dir: str):
"""
Update data directories
Args:
data_dir: path to data directory
dialogues_example_dir: path to the preprocessed dialogue examples directory; it will be created if it does not exist.
"""
if not os.path.exists(data_dir):
raise ValueError(f"{data_dir} is not found")
self.cfg.dataset.data_dir = data_dir
self.cfg.dataset.dialogues_example_dir = dialogues_example_dir
logging.info(f'Setting model.dataset.data_dir to {data_dir}.')
logging.info(f'Setting model.dataset.dialogues_example_dir to {dialogues_example_dir}.')
def predict_from_examples(self, queries: List[str], test_ds) -> List[List[str]]:
"""
Get prediction for the queries (intent and slots)
Args:
queries: text sequences
test_ds: Dataset configuration section.
Returns:
predicted_intents, predicted_slots: model intent and slot predictions
"""
predicted_intents = []
predicted_slots = []
mode = self.training
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Switch model to evaluation mode
self.eval()
self.to(device)
# Dataset.
infer_datalayer = self._setup_infer_dataloader(queries, test_ds)
for batch in infer_datalayer:
input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask = batch
intent_logits, slot_logits = self.forward(
input_ids=input_ids.to(device),
token_type_ids=input_type_ids.to(device),
attention_mask=input_mask.to(device),
)
# predict intents
intent_preds = tensor2list(torch.argmax(intent_logits, axis=-1))
predicted_intents += self.convert_intent_ids_to_intent_names(intent_preds)
# predict slots
slot_preds = torch.argmax(slot_logits, axis=-1)
predicted_slots += self.mask_unused_subword_slots(slot_preds, subtokens_mask)
# set mode back to its original value
self.train(mode=mode)
return predicted_intents, predicted_slots
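# A minimal usage sketch (illustrative; the `cfg.model.test_ds` path below is an assumption,
# it simply needs to be the dataset config section passed as `test_ds`):
#   queries = ['add alarm for 7 am', 'what is the weather in Paris']
#   intents, slots = model.predict_from_examples(queries, cfg.model.test_ds)
#   # intents: one predicted intent name per query
#   # slots:   one space-separated string of per-token slot names per query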
def convert_intent_ids_to_intent_names(self, intent_preds):
# Retrieve intent and slot vocabularies from configuration.
intent_labels = self.cfg.data_desc.intent_labels
predicted_intents = []
# convert numerical outputs to Intent and Slot labels from the dictionaries
for intent_num in intent_preds:
# if intent_num < len(intent_labels):
predicted_intents.append(intent_labels[int(intent_num)])
# else:
# # should not happen
# predicted_intents.append("Unknown Intent")
return predicted_intents
def mask_unused_subword_slots(self, slot_preds, subtokens_mask):
# Retrieve intent and slot vocabularies from configuration.
slot_labels = self.cfg.data_desc.slot_labels
predicted_slots = []
for slot_preds_query, mask_query in zip(slot_preds, subtokens_mask):
query_slots = ''
for slot, mask in zip(slot_preds_query, mask_query):
if mask == 1:
# if slot < len(slot_labels):
query_slots += slot_labels[int(slot)] + ' '
# else:
# query_slots += 'Unknown_slot '
predicted_slots.append(query_slots.strip())
return predicted_slots
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
model = PretrainedModelInfo(
pretrained_model_name="Joint_Intent_Slot_Assistant",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemonlpmodels/versions/1.0.0a5/files/Joint_Intent_Slot_Assistant.nemo",
description="This model is trained on the https://github.com/xliuhw/NLU-Evaluation-Data dataset, which includes 64 intents and 55 slots. Final intent accuracy is about 87%, slot accuracy is about 89%.",
)
result.append(model)
return result
| NeMo-main | nemo/collections/nlp/models/dialogue/intent_slot_classification_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.dialogue.dialogue_gpt_classification_model import DialogueGPTClassificationModel
from nemo.collections.nlp.models.dialogue.dialogue_zero_shot_intent_model import DialogueZeroShotIntentModel
from nemo.collections.nlp.models.dialogue.intent_slot_classification_model import IntentSlotClassificationModel
from nemo.collections.nlp.models.dialogue.sgdqa_model import SGDQAModel
| NeMo-main | nemo/collections/nlp/models/dialogue/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/train_and_predict.py
'''
import os
from typing import List, Optional
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from nemo.collections.nlp.data.dialogue import DialogueSGDBERTDataset, DialogueSGDDataProcessor
from nemo.collections.nlp.data.dialogue.sgd.evaluate import evaluate, get_in_domain_services
from nemo.collections.nlp.data.dialogue.sgd.prediction_utils import write_predictions_to_file
from nemo.collections.nlp.losses import SGDDialogueStateLoss
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules import SGDDecoder, SGDEncoder
from nemo.collections.nlp.parts.utils_funcs import tensor2list
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.utils import logging
__all__ = ['SGDQAModel']
class SGDQAModel(NLPModel):
"""
Dialogue State Tracking Model SGD-QA (https://arxiv.org/abs/2105.08049)
The SGD-QA model is a fast multi-pass schema-guided state-tracking model trained on the Google schema-guided state tracking dataset (https://arxiv.org/abs/1909.05855).
The model takes dialogue as input and outputs the dialogue state, which includes slot-value pairs.
The model consists of two components: a neural natural language understanding model (NLU), and a rule-based state tracker.
The NLU takes in a dialogue turn and different schema (entity) information options and outputs their match score. The state tracker takes the highest rated entities and composes
the dialogue state across turns.
"""
@property
def output_module(self):
return self.decoder
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self.data_prepared = False
super().__init__(cfg=cfg, trainer=trainer)
self.encoder = SGDEncoder(hidden_size=self.bert_model.config.hidden_size, dropout=self._cfg.encoder.dropout)
self.decoder = SGDDecoder(embedding_dim=self.bert_model.config.hidden_size)
self.loss = SGDDialogueStateLoss(reduction="mean")
@typecheck()
def forward(self, input_ids, attention_mask, token_type_ids):
token_embeddings = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)
if isinstance(token_embeddings, tuple):
token_embeddings = token_embeddings[0]
encoded_utterance, token_embeddings = self.encoder(hidden_states=token_embeddings)
(
logit_intent_status,
logit_req_slot_status,
logit_cat_slot_status,
logit_cat_slot_value_status,
logit_noncat_slot_status,
logit_spans,
) = self.decoder(
encoded_utterance=encoded_utterance, token_embeddings=token_embeddings, utterance_mask=attention_mask
)
return (
logit_intent_status,
logit_req_slot_status,
logit_cat_slot_status,
logit_cat_slot_value_status,
logit_noncat_slot_status,
logit_spans,
)
def training_step(self, batch, batch_idx):
(
example_id_num,
service_id,
utterance_ids,
token_type_ids,
attention_mask,
intent_status,
requested_slot_status,
categorical_slot_status,
categorical_slot_value_status,
noncategorical_slot_status,
noncategorical_slot_value_start,
noncategorical_slot_value_end,
start_char_idx,
end_char_idx,
task_mask,
) = batch
(
logit_intent_status,
logit_req_slot_status,
logit_cat_slot_status,
logit_cat_slot_value_status,
logit_noncat_slot_status,
logit_spans,
) = self(input_ids=utterance_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
loss = self.loss(
logit_intent_status=logit_intent_status,
intent_status=intent_status,
logit_req_slot_status=logit_req_slot_status,
requested_slot_status=requested_slot_status,
logit_cat_slot_status=logit_cat_slot_status,
categorical_slot_status=categorical_slot_status,
logit_cat_slot_value_status=logit_cat_slot_value_status,
categorical_slot_value_status=categorical_slot_value_status,
logit_noncat_slot_status=logit_noncat_slot_status,
noncategorical_slot_status=noncategorical_slot_status,
logit_spans=logit_spans,
noncategorical_slot_value_start=noncategorical_slot_value_start,
noncategorical_slot_value_end=noncategorical_slot_value_end,
task_mask=task_mask,
)
lr = self._optimizer.param_groups[0]['lr']
self.log('train_loss', loss)
self.log('lr', lr, prog_bar=True)
return {
'loss': loss,
'lr': lr,
}
def validation_step(self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int = 0) -> dict:
"""
Called at every validation step to aggregate and postprocess outputs on each GPU
Args:
batch: input batch at validation step
batch_idx: batch index
dataloader_idx: dataloader index
"""
loss, tensors = self.eval_step_helper(batch=batch)
self.log(f'val_loss', loss)
if type(self.trainer.val_dataloaders) == list and len(self.trainer.val_dataloaders) > 1:
self.validation_step_outputs[dataloader_idx].append({f'val_loss': loss, f'tensors': tensors})
else:
self.validation_step_outputs.append({f'val_loss': loss, f'tensors': tensors})
return {f'val_loss': loss, f'tensors': tensors}
def test_step(self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int = 0) -> dict:
"""
Called at every test step to aggregate and postprocess outputs on each GPU
Args:
batch: input batch at test step
batch_idx: batch index
dataloader_idx: dataloader index
"""
loss, tensors = self.eval_step_helper(batch=batch)
if type(self.trainer.test_dataloaders) == list and len(self.trainer.test_dataloaders) > 1:
self.test_step_outputs[dataloader_idx].append({f'test_loss': loss, f'tensors': tensors})
else:
self.test_step_outputs.append({f'test_loss': loss, f'tensors': tensors})
return {f'test_loss': loss, f'tensors': tensors}
def eval_step_helper(self, batch: List[torch.Tensor]):
"""
Helper called at every validation/test step to aggregate and postprocess outputs on each GPU
Args:
batch: input batch at step
Returns:
loss: averaged batch loss
tensors: collection of aggregated output tensors across all GPU workers
"""
(
example_id_num,
service_id,
utterance_ids,
token_type_ids,
attention_mask,
intent_status,
requested_slot_status,
categorical_slot_status,
categorical_slot_value_status,
noncategorical_slot_status,
noncategorical_slot_value_start,
noncategorical_slot_value_end,
start_char_idx,
end_char_idx,
task_mask,
) = batch
(
logit_intent_status,
logit_req_slot_status,
logit_cat_slot_status,
logit_cat_slot_value_status,
logit_noncat_slot_status,
logit_spans,
) = self(input_ids=utterance_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
loss = self.loss(
logit_intent_status=logit_intent_status,
intent_status=intent_status,
logit_req_slot_status=logit_req_slot_status,
requested_slot_status=requested_slot_status,
logit_cat_slot_status=logit_cat_slot_status,
categorical_slot_status=categorical_slot_status,
logit_cat_slot_value_status=logit_cat_slot_value_status,
categorical_slot_value_status=categorical_slot_value_status,
logit_noncat_slot_status=logit_noncat_slot_status,
noncategorical_slot_status=noncategorical_slot_status,
logit_spans=logit_spans,
noncategorical_slot_value_start=noncategorical_slot_value_start,
noncategorical_slot_value_end=noncategorical_slot_value_end,
task_mask=task_mask,
)
all_example_id_num = []
all_service_id = []
all_logit_intent_status = []
all_logit_req_slot_status = []
all_logit_cat_slot_status = []
all_logit_cat_slot_value_status = []
all_logit_noncat_slot_status = []
all_logit_spans = []
all_start_char_idx = []
all_end_char_idx = []
if self.trainer.num_devices and self.trainer.world_size > 1:
world_size = self.trainer.world_size
for ind in range(world_size):
all_example_id_num.append(torch.empty_like(example_id_num))
all_service_id.append(torch.empty_like(service_id))
all_logit_intent_status.append(torch.empty_like(logit_intent_status))
all_logit_req_slot_status.append(torch.empty_like(logit_req_slot_status))
all_logit_cat_slot_status.append(torch.empty_like(logit_cat_slot_status))
all_logit_cat_slot_value_status.append(torch.empty_like(logit_cat_slot_value_status))
all_logit_noncat_slot_status.append(torch.empty_like(logit_noncat_slot_status))
all_logit_spans.append(torch.empty_like(logit_spans))
all_start_char_idx.append(torch.empty_like(start_char_idx))
all_end_char_idx.append(torch.empty_like(end_char_idx))
torch.distributed.all_gather(all_example_id_num, example_id_num)
torch.distributed.all_gather(all_service_id, service_id)
torch.distributed.all_gather(all_logit_intent_status, logit_intent_status)
torch.distributed.all_gather(all_logit_req_slot_status, logit_req_slot_status)
torch.distributed.all_gather(all_logit_cat_slot_status, logit_cat_slot_status)
torch.distributed.all_gather(all_logit_cat_slot_value_status, logit_cat_slot_value_status)
torch.distributed.all_gather(all_logit_noncat_slot_status, logit_noncat_slot_status)
torch.distributed.all_gather(all_logit_spans, logit_spans)
torch.distributed.all_gather(all_start_char_idx, start_char_idx)
torch.distributed.all_gather(all_end_char_idx, end_char_idx)
else:
all_example_id_num.append(example_id_num)
all_service_id.append(service_id)
all_logit_intent_status.append(logit_intent_status)
all_logit_req_slot_status.append(logit_req_slot_status)
all_logit_cat_slot_status.append(logit_cat_slot_status)
all_logit_cat_slot_value_status.append(logit_cat_slot_value_status)
all_logit_noncat_slot_status.append(logit_noncat_slot_status)
all_logit_spans.append(logit_spans)
all_start_char_idx.append(start_char_idx)
all_end_char_idx.append(end_char_idx)
# after this: all_x is list of tensors, of length world_size
example_id_num = torch.cat(all_example_id_num)
service_id = torch.cat(all_service_id)
logit_intent_status = torch.cat(all_logit_intent_status)
logit_req_slot_status = torch.cat(all_logit_req_slot_status)
logit_cat_slot_status = torch.cat(all_logit_cat_slot_status)
logit_cat_slot_value_status = torch.cat(all_logit_cat_slot_value_status)
logit_noncat_slot_status = torch.cat(all_logit_noncat_slot_status)
logit_spans = torch.cat(all_logit_spans)
start_char_idx = torch.cat(all_start_char_idx)
end_char_idx = torch.cat(all_end_char_idx)
intent_status = torch.nn.Sigmoid()(logit_intent_status)
# Scores are output for each requested slot.
req_slot_status = torch.nn.Sigmoid()(logit_req_slot_status)
# For categorical slots, the status of each slot and the predicted value are output.
cat_slot_status_dist = torch.nn.Softmax(dim=-1)(logit_cat_slot_status)
cat_slot_status = torch.argmax(logit_cat_slot_status, axis=-1)
cat_slot_status_p = torch.max(cat_slot_status_dist, axis=-1)[0]
cat_slot_value_status = torch.nn.Sigmoid()(logit_cat_slot_value_status)
# For non-categorical slots, the status of each slot and the indices for spans are output.
noncat_slot_status_dist = torch.nn.Softmax(dim=-1)(logit_noncat_slot_status)
noncat_slot_status = torch.argmax(logit_noncat_slot_status, axis=-1)
noncat_slot_status_p = torch.max(noncat_slot_status_dist, axis=-1)[0]
softmax = torch.nn.Softmax(dim=1)
scores = softmax(logit_spans)
start_scores, end_scores = torch.unbind(scores, dim=-1)
batch_size, max_num_tokens = end_scores.size()
# Find the span with the maximum sum of scores for start and end indices.
total_scores = torch.unsqueeze(start_scores, axis=2) + torch.unsqueeze(end_scores, axis=1)
start_idx = torch.arange(max_num_tokens, device=total_scores.get_device()).view(1, -1, 1)
end_idx = torch.arange(max_num_tokens, device=total_scores.get_device()).view(1, 1, -1)
invalid_index_mask = (start_idx > end_idx).repeat(batch_size, 1, 1)
total_scores = torch.where(
invalid_index_mask,
torch.zeros(total_scores.size(), device=total_scores.get_device(), dtype=total_scores.dtype),
total_scores,
)
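# total_scores is (batch, T, T) with entry [b, s, e] = start_scores[b, s] + end_scores[b, e],
# and spans with s > e zeroed out above. Flattening to (batch, T*T) lets a single argmax pick
# the best span, which is decoded back to (start, end) = (idx // T, idx % T) below.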
max_span_index = torch.argmax(total_scores.view(-1, max_num_tokens ** 2), axis=-1)
max_span_p = torch.max(total_scores.view(-1, max_num_tokens ** 2), axis=-1)[0]
span_start_index = torch.floor_divide(max_span_index, max_num_tokens)
span_end_index = torch.fmod(max_span_index, max_num_tokens)
tensors = {
'example_id_num': example_id_num,
'service_id': service_id,
'intent_status': intent_status,
'req_slot_status': req_slot_status,
'cat_slot_status': cat_slot_status,
'cat_slot_status_p': cat_slot_status_p,
'cat_slot_value_status': cat_slot_value_status,
'noncat_slot_status': noncat_slot_status,
'noncat_slot_status_p': noncat_slot_status_p,
'noncat_slot_p': max_span_p,
'noncat_slot_start': span_start_index,
'noncat_slot_end': span_end_index,
'noncat_alignment_start': start_char_idx,
'noncat_alignment_end': end_char_idx,
}
return loss, tensors
def multi_validation_epoch_end(self, outputs: List[dict], dataloader_idx: int = 0):
"""
Called at the end of validation to post process outputs into human readable format
Args:
outputs: list of individual outputs of each validation step
dataloader_idx: dataloader index
"""
avg_loss = torch.stack([x[f'val_loss'] for x in outputs]).mean()
split = self._validation_names[dataloader_idx][:-1]
dataloader = self._validation_dl[dataloader_idx]
metrics = self.multi_eval_epoch_end_helper(outputs=outputs, split=split, dataloader=dataloader)
for k, v in metrics.items():
self.log(f'{split}_{k}', v, rank_zero_only=True)
self.log(f'val_loss', avg_loss, prog_bar=True, rank_zero_only=True)
def multi_test_epoch_end(self, outputs: List[dict], dataloader_idx: int = 0):
"""
Called at the end of test to post process outputs into human readable format
Args:
outputs: list of individual outputs of each test step
dataloader_idx: dataloader index
"""
avg_loss = torch.stack([x[f'test_loss'] for x in outputs]).mean()
split = self._test_names[dataloader_idx][:-1]
dataloader = self._test_dl[dataloader_idx]
metrics = self.multi_eval_epoch_end_helper(outputs=outputs, split=split, dataloader=dataloader)
for k, v in metrics.items():
self.log(f'{split}_{k}', v, rank_zero_only=True)
self.log(f'test_loss', avg_loss, prog_bar=True, rank_zero_only=True)
def multi_eval_epoch_end_helper(
self, outputs: List[dict], split: str, dataloader: torch.utils.data.DataLoader
) -> dict:
"""
Helper called at the end of evaluation to post process outputs into human readable format
Args:
outputs: list of individual outputs of each step
split: data split
dataloader: dataloader
Returns:
metrics: metrics collection
"""
def get_str_example_id(split: str, ids_to_service_names_dict: dict, example_id_num: torch.Tensor) -> str:
"""
Constructs string representation of example ID
Args:
split: evaluation data split
ids_to_service_names_dict: id to service name mapping
example_id_num: tensor example id
"""
def format_turn_id(ex_id_num):
dialog_id_1, dialog_id_2, turn_id, service_id, model_task_id, slot_intent_id, value_id = ex_id_num
return "{}-{}_{:05d}-{:02d}-{}-{}-{}-{}".format(
split,
dialog_id_1,
dialog_id_2,
turn_id,
ids_to_service_names_dict[service_id],
model_task_id,
slot_intent_id,
value_id,
)
return list(map(format_turn_id, tensor2list(example_id_num)))
def combine_predictions_in_example(predictions: dict, batch_size: int):
'''
Combines predicted values into one dict per example.
Args:
predictions: predictions ordered by keys then batch
batch_size: batch size
Returns:
examples_preds: predictions ordered by batch then key
'''
examples_preds = [{} for _ in range(batch_size)]
for k, v in predictions.items():
if k != 'example_id':
v = torch.chunk(v, batch_size)
for i in range(batch_size):
if k == 'example_id':
examples_preds[i][k] = v[i]
else:
examples_preds[i][k] = v[i].view(-1)
return examples_preds
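# Illustrative sketch (assumed shapes, not from the original file): with batch_size == 2 and
# predictions == {'example_id': [id0, id1], 'intent_status': tensor of shape (2, K)}, this
# helper returns two dicts, one per example, each holding its own id and a flattened (K,)
# slice of 'intent_status'.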
example_id_num = torch.cat([x[f'tensors']['example_id_num'] for x in outputs])
service_id = torch.cat([x[f'tensors']['service_id'] for x in outputs])
intent_status = torch.cat([x[f'tensors']['intent_status'] for x in outputs])
req_slot_status = torch.cat([x[f'tensors']['req_slot_status'] for x in outputs])
cat_slot_status = torch.cat([x[f'tensors']['cat_slot_status'] for x in outputs])
cat_slot_status_p = torch.cat([x[f'tensors']['cat_slot_status_p'] for x in outputs])
cat_slot_value_status = torch.cat([x[f'tensors']['cat_slot_value_status'] for x in outputs])
noncat_slot_status = torch.cat([x[f'tensors']['noncat_slot_status'] for x in outputs])
noncat_slot_status_p = torch.cat([x[f'tensors']['noncat_slot_status_p'] for x in outputs])
noncat_slot_p = torch.cat([x[f'tensors']['noncat_slot_p'] for x in outputs])
noncat_slot_start = torch.cat([x[f'tensors']['noncat_slot_start'] for x in outputs])
noncat_slot_end = torch.cat([x[f'tensors']['noncat_slot_end'] for x in outputs])
noncat_alignment_start = torch.cat([x[f'tensors']['noncat_alignment_start'] for x in outputs])
noncat_alignment_end = torch.cat([x[f'tensors']['noncat_alignment_end'] for x in outputs])
ids_to_service_names_dict = self.dialogues_processor.schemas._services_id_to_vocab
example_id = get_str_example_id(dataloader.dataset, ids_to_service_names_dict, example_id_num)
metrics = {}
try:
prediction_dir = self.trainer.log_dir if self.trainer.log_dir is not None else ""
except:
prediction_dir = ""
if self.trainer.global_rank == 0:
prediction_dir = os.path.join(
prediction_dir, 'predictions', 'pred_res_{}_{}'.format(split, self._cfg.dataset.task_name)
)
os.makedirs(prediction_dir, exist_ok=True)
input_json_files = DialogueSGDDataProcessor.get_dialogue_files(
self._cfg.dataset.data_dir, split, self._cfg.dataset.task_name
)
predictions = {}
predictions['example_id'] = example_id
predictions['service_id'] = service_id
predictions['intent_status'] = intent_status
predictions['req_slot_status'] = req_slot_status
predictions['cat_slot_status'] = cat_slot_status
predictions['cat_slot_status_p'] = cat_slot_status_p
predictions['cat_slot_value_status'] = cat_slot_value_status
predictions['noncat_slot_status'] = noncat_slot_status
predictions['noncat_slot_status_p'] = noncat_slot_status_p
predictions['noncat_slot_p'] = noncat_slot_p
predictions['noncat_slot_start'] = noncat_slot_start
predictions['noncat_slot_end'] = noncat_slot_end
predictions['noncat_alignment_start'] = noncat_alignment_start
predictions['noncat_alignment_end'] = noncat_alignment_end
in_domain_services = get_in_domain_services(
os.path.join(self._cfg.dataset.data_dir, split, "schema.json"),
self.dialogues_processor.get_seen_services("train"),
)
predictions = combine_predictions_in_example(predictions, service_id.shape[0])
# write predictions to file in Dstc8/SGD format
write_predictions_to_file(
predictions,
input_json_files,
output_dir=prediction_dir,
schemas=self.dialogues_processor.schemas,
state_tracker=self._cfg.dataset.state_tracker,
eval_debug=False,
in_domain_services=in_domain_services,
)
metrics = evaluate(
prediction_dir,
self._cfg.dataset.data_dir,
split,
in_domain_services,
joint_acc_across_turn=self._cfg.dataset.joint_acc_across_turn,
use_fuzzy_match=self._cfg.dataset.use_fuzzy_match,
)
return metrics
def prepare_data(self):
"""
        Preprocesses the schema and dialogues and caches the result.
"""
if self.data_prepared:
return
self.dialogues_processor = DialogueSGDDataProcessor(
data_dir=self._cfg.dataset.data_dir,
dialogues_example_dir=self._cfg.dataset.dialogues_example_dir,
tokenizer=self.tokenizer,
cfg=self._cfg.dataset,
)
self.data_prepared = True
def update_data_dirs(self, data_dir: str, dialogues_example_dir: str):
"""
Update data directories
Args:
data_dir: path to data directory
dialogues_example_dir: path to preprocessed dialogues example directory, if not exists will be created.
"""
if not os.path.exists(data_dir):
raise ValueError(f"{data_dir} is not found")
self._cfg.dataset.data_dir = data_dir
self._cfg.dataset.dialogues_example_dir = dialogues_example_dir
logging.info(f'Setting model.dataset.data_dir to {data_dir}.')
logging.info(f'Setting model.dataset.dialogues_example_dir to {dialogues_example_dir}.')
def setup_training_data(self, train_data_config: Optional[DictConfig] = None):
self.prepare_data()
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, split=train_data_config.ds_item)
def setup_validation_data(self, val_data_config: Optional[DictConfig] = None):
self.prepare_data()
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config, split=val_data_config.ds_item)
def setup_test_data(self, test_data_config: Optional[DictConfig] = None):
self.prepare_data()
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, split=test_data_config.ds_item)
def _setup_dataloader_from_config(self, cfg: DictConfig, split: str) -> DataLoader:
dataset_cfg = self._cfg.dataset
data_dir = dataset_cfg.data_dir
if not os.path.exists(data_dir):
raise FileNotFoundError(f"Data directory is not found at: {data_dir}.")
# dataset = SGDDataset(dataset_split=split, dialogues_processor=self.dialogues_processor)
dataset = DialogueSGDBERTDataset(
dataset_split=split,
dialogues_processor=self.dialogues_processor,
tokenizer=self.dialogues_processor._tokenizer,
schemas=self.dialogues_processor.schemas,
schema_config=self.dialogues_processor.schema_config,
cfg=dataset_cfg,
)
dl = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
collate_fn=dataset.collate_fn,
drop_last=cfg.drop_last,
shuffle=cfg.shuffle,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
)
return dl
@classmethod
    def list_available_models(cls) -> List[PretrainedModelInfo]:
        """
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
result.append(
PretrainedModelInfo(
pretrained_model_name="sgdqa_bertbasecased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/sgdqa_bertbasecased/versions/1.0.0/files/sgdqa_bertbasecased.nemo",
description="Dialogue State Tracking model finetuned from NeMo BERT Base Cased on Google SGD dataset which has a joint goal accuracy of 59.72% on dev set and 45.85% on test set.",
)
)
return result
| NeMo-main | nemo/collections/nlp/models/dialogue/sgdqa_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
import os
import random
from typing import Dict, List, Optional, Union
import numpy as np
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from transformers import AutoModelWithLMHead
from nemo.collections.nlp.data.dialogue import DialogueGPTClassificationDataset, DialogueSGDDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.assistant_data_processor import DialogueAssistantDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.design_data_processor import DialogueDesignDataProcessor
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.metrics.dialogue_metrics import DialogueClassificationMetrics
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import (
MegatronGPTPromptLearningModel,
)
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common import VirtualPromptSource, VirtualPromptStyle
from nemo.collections.nlp.modules.common.text_generation_utils import (
get_default_sampling_params,
megatron_gpt_generate,
)
from nemo.collections.nlp.modules.common.transformer.text_generation import LengthParam
from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging
__all__ = ['DialogueGPTClassificationModel']
class DialogueGPTClassificationModel(NLPModel):
def __init__(
self, cfg: DictConfig, trainer: Trainer = None,
):
self.cfg = cfg
self.eval_mode = cfg.dataset.eval_mode
self.data_prepared = False
self.epoch_number = 0
self.prompt_learning = self.cfg.prompt_learning
super().__init__(cfg=cfg, trainer=trainer, no_lm_init=True)
if self.cfg.library == "huggingface":
self.language_model = AutoModelWithLMHead.from_pretrained(cfg.language_model.pretrained_model_name)
self.language_model.resize_token_embeddings(len(self.tokenizer.tokenizer))
self.unreduced_loss_fct = torch.nn.CrossEntropyLoss(reduction='none')
elif self.cfg.library == "megatron":
if self.prompt_learning:
if os.path.exists(cfg.prompt_learning_nemo_path):
self.language_model = MegatronGPTPromptLearningModel.restore_from(
cfg.prompt_learning_nemo_path,
trainer=trainer,
save_restore_connector=NLPSaveRestoreConnector(),
)
else:
# removing tokenizer cfg as this triggers tokenizer construction which is not helpful here as we have a separate tokenizer
new_cfg = copy.copy(cfg)
del new_cfg.tokenizer
new_cfg.nemo_path = cfg.prompt_learning_nemo_path
self.language_model = MegatronGPTPromptLearningModel(new_cfg, trainer)
else:
self.language_model = MegatronGPTModel.restore_from(cfg.language_model.lm_checkpoint, trainer=trainer)
all_labels = list(
self._train_dl.dataset.all_possible_labels.union(
self._validation_dl.dataset.all_possible_labels, self._test_dl.dataset.all_possible_labels
)
)
self.label_to_ids = collections.defaultdict(int)
for i in range(len(all_labels)):
self.label_to_ids[all_labels[i]] = i
self.all_existing_labels = set(self.label_to_ids.keys())
self.token_to_words = {}
self.classification_report = ClassificationReport(
num_classes=len(self.label_to_ids) + 1, mode='micro', label_ids=self.label_to_ids, dist_sync_on_step=True
)
def setup_optimizer_param_groups(self):
"""
ModelPT override for prompt learning.
Optimizer will get self._optimizer_param_groups.
Makes two optimizer param groups, one for the frozen model params
and one for the prompt-table/prompt-encoder params. The learning
rate for the frozen model's params will always be zero effectively
freezing the model's params but still allowing for the needed gradients
to be passed around in pipeline parallel models. The prompt-encoder
and/or prompt table will use the learning rate set by the user.
"""
if not self.prompt_learning:
super().setup_optimizer_param_groups()
return
# Freeze frozen model
for param in self.language_model.frozen_model.parameters():
param.requires_grad = False
virtual_prompt_params = {'params': []}
if self.language_model.frozen_model.model.pre_process:
virtual_prompt_params['params'].extend([param for param in self.language_model.prompt_table.parameters()])
if self.language_model.virtual_prompt_source == VirtualPromptSource.PROMPT_ENCODER:
virtual_prompt_params['params'].extend(
[param for param in self.language_model.prompt_encoder.parameters()]
)
self._optimizer_param_groups = (virtual_prompt_params,)
def training_step(self, batch, batch_idx):
(
input_ids,
attn_masks,
labels,
candidate_input_ids,
candidate_attn_masks,
template_length,
utterance_length,
correct_candidate,
) = batch
# construct training samples as generating " Answer: yes/no" after "<utterance> <label_type>: <candidate_label>"
if self.eval_mode == "binary_score":
new_input_ids = []
new_attn_masks = []
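            # candidate_input_ids is padded by repeating the first candidate, so a repeat of candidate 0
            # (checked below) marks the end of the real candidates for this example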
for i in range(candidate_input_ids.size(0)):
# in some datasets like assistant, there might be 60+ possible intents with 1 correct intent
# therefore we might not want to use all possible intents as negative samples
# instead use {binary_score_subsample_ratio} negative samples for every positive sample
if self.cfg.dataset.binary_score_subsample:
new_input_ids.append(candidate_input_ids[i, 2 * correct_candidate[i].item(), :])
new_attn_masks.append(candidate_attn_masks[i, 2 * correct_candidate[i].item(), :])
possible_negatives = []
for j in range(0, candidate_input_ids.size(1), 2):
if j > 0 and torch.equal(candidate_input_ids[i, j, :], candidate_input_ids[i, 0, :]):
break
if j != 2 * correct_candidate[i].item():
possible_negatives.append(j)
negative_samples = random.choices(
possible_negatives, k=int(self.cfg.dataset.binary_score_subsample_ratio)
)
for negative_sample in negative_samples:
new_input_ids.append(candidate_input_ids[i, negative_sample, :])
new_attn_masks.append(candidate_attn_masks[i, negative_sample, :])
else:
for j in range(0, candidate_input_ids.size(1), 2):
if j > 0 and torch.equal(candidate_input_ids[i, j, :], candidate_input_ids[i, 0, :]):
break
new_input_ids.append(candidate_input_ids[i, j, :])
new_attn_masks.append(candidate_attn_masks[i, j, :])
input_ids = torch.stack(new_input_ids)
attn_masks = torch.stack(new_attn_masks)
labels = self.get_binary_score_labels(input_ids)
loss, _ = self(input_ids, attn_masks, labels, inference=False)
self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return {'loss': loss}
def validation_step(self, batch, batch_idx):
loss = self.eval_step_helper(batch=batch)
self.validation_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self):
self.eval_epoch_end(self.validation_step_outputs, mode='val')
self.validation_step_outputs.clear() # free memory
def on_test_epoch_end(self):
self.eval_epoch_end(self.test_step_outputs, mode='test')
self.test_step_outputs.clear() # free memory
def eval_epoch_end(self, outputs, mode='val'):
generated_field = []
ground_truth_field = []
inputs = []
for output in outputs:
generated_field += output["generated_field"]
ground_truth_field += output["ground_truth_field"]
inputs += output["input"]
with_slots = self.cfg.dataset.target_template == "with_slots"
generated_labels, generated_slots = DialogueClassificationMetrics.split_label_and_slots(
generated_field, with_slots=with_slots
)
ground_truth_labels, ground_truth_slots = DialogueClassificationMetrics.split_label_and_slots(
ground_truth_field, with_slots=with_slots
)
os.makedirs(self.cfg.dataset.dialogues_example_dir, exist_ok=True)
filename = os.path.join(
self.cfg.dataset.dialogues_example_dir, f"{mode}_predictions_epoch{self.epoch_number}.jsonl"
)
DialogueClassificationMetrics.save_predictions(
filename,
generated_labels,
generated_slots,
ground_truth_labels,
ground_truth_slots,
generated_field,
ground_truth_field,
inputs,
)
label_acc = np.mean([int(generated_labels[i] == ground_truth_labels[i]) for i in range(len(generated_labels))])
generated_field_ids = torch.tensor([self.label_to_ids[label] for label in generated_labels], dtype=int).to(
self.classification_report.device
)
ground_truth_field_ids = torch.tensor(
[self.label_to_ids[label] for label in ground_truth_labels], dtype=int
).to(self.classification_report.device)
tp, fn, fp, _ = self.classification_report(generated_field_ids, ground_truth_field_ids)
precision, recall, f1, report = self.classification_report.compute()
self.classification_report.reset()
(
slot_precision,
slot_recall,
slot_f1,
slot_joint_goal_accuracy,
) = DialogueClassificationMetrics.get_slot_filling_metrics(generated_slots, ground_truth_slots)
logging.info(report)
self.log('{}_precision'.format(self.cfg.dataset.field), precision)
self.log('{}_f1'.format(self.cfg.dataset.field), f1)
self.log('{}_recall'.format(self.cfg.dataset.field), recall)
self.log('{}_{}_accuracy'.format(mode, self.cfg.dataset.field), label_acc * 100)
self.log('slot_precision', slot_precision)
self.log('slot_recall', slot_recall)
self.log('slot_f1', slot_f1)
self.log('slot_joint_goal_accuracy', slot_joint_goal_accuracy)
if mode == 'val':
self.epoch_number += 1
if self.cfg.save_model:
filename = '{}/epoch-{}-model.bin'.format(self.cfg.dataset.dialogues_example_dir, self.epoch_number)
torch.save(self.language_model.state_dict(), filename)
def test_step(self, batch, batch_idx):
loss = self.eval_step_helper(batch=batch, mode='test')
self.test_step_outputs.append(loss)
return loss
# for inference only
def predict_step(self, batch, batch_idx, dataloader_idx=None):
# return self(batch)
raise NotImplementedError()
def on_train_end(self):
if self.prompt_learning:
self.language_model.on_train_end()
def get_prompt_token_labels_for_megatron_gpt(self, input_ids, num_prompt_tokens):
prompt_token_labels = torch.full(
size=(input_ids.size(0), num_prompt_tokens),
fill_value=self.tokenizer.tokenizer.pad_token_id,
dtype=torch.long,
)
if self.prompt_learning:
prompt_token_labels.data = torch.LongTensor(
np.tile(np.array(self.language_model.pseudo_token_ids), (input_ids.size(0), 1))
)
prompt_token_labels = prompt_token_labels.to(input_ids.device)
return prompt_token_labels
def get_virtual_prompt_ids_for_megatron_gpt(self, input_ids):
if (
self.cfg.virtual_prompt_style == VirtualPromptStyle.P_TUNING
or not self.prompt_learning
or self.trainer.testing
):
prompt_ids = torch.tensor([0] * input_ids.size(0)).to(input_ids.device) if self.prompt_learning else None
else:
total_virtual_tokens = self.cfg.task_templates[0].total_virtual_tokens
init_text = self.cfg.task_templates[0].taskname
init_text_ids = self.tokenizer.text_to_ids(init_text)
init_text_ids = torch.tensor(init_text_ids).to(input_ids.device)
prompt_ids = init_text_ids.repeat(input_ids.size(0), 1)[:, :total_virtual_tokens]
return prompt_ids
def forward(self, input_ids, attention_mask, labels, inference=True):
if self.cfg.library == "huggingface":
output = self.language_model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
loss = output['loss']
# calculate loss per sample
b_logits = output['logits']
shift_logits = b_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
unreduced_loss = self.unreduced_loss_fct(
shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
)
loss_per_sample = torch.mean(unreduced_loss.view(shift_labels.size()), dim=-1)
elif self.cfg.library == "megatron":
num_prompt_tokens = (
len(self.language_model.pseudo_token_ids) if hasattr(self.language_model, 'pseudo_token_ids') else 0
)
position_ids = torch.arange(
start=0, end=num_prompt_tokens + input_ids.size(1), dtype=torch.long, device=input_ids.device,
)
prompt_ids = self.get_virtual_prompt_ids_for_megatron_gpt(input_ids)
attn_mask_add_on = torch.ones((attention_mask.size(0), num_prompt_tokens), device=attention_mask.device)
full_attention_mask = torch.cat([attn_mask_add_on, attention_mask], axis=-1)
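            # Expand the padded attention mask into a causal (lower-triangular) mask over
            # [virtual prompt tokens + input tokens]; entries that end up <= 0 are blocked from attention.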
full_attention_mask_expand = torch.tril(
full_attention_mask.unsqueeze(2).tile(full_attention_mask.size(1))
).unsqueeze(1)
attn_mask = full_attention_mask_expand <= 0
prompt_token_labels = self.get_prompt_token_labels_for_megatron_gpt(input_ids, num_prompt_tokens)
input_ids_new = torch.cat([prompt_token_labels, input_ids], axis=1)
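            # Causal-LM teacher forcing: the labels are the inputs shifted left by one position,
            # with a pad token appended as the target for the final position.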
make_up_last_column_input_ids = (
torch.ones_like(input_ids_new[:, -1:]) * self.tokenizer.tokenizer.pad_token_id
)
left_shifted_input_ids = torch.cat([input_ids_new[:, 1:], make_up_last_column_input_ids], axis=-1)
if self.prompt_learning:
unmasked_unreduced_loss = self.language_model(
input_ids_new,
position_ids,
attn_mask,
labels=left_shifted_input_ids,
taskname_ids=prompt_ids,
inference=inference,
)
else:
unmasked_unreduced_loss = self.language_model(
input_ids, position_ids, attn_mask, labels=left_shifted_input_ids
)
if isinstance(unmasked_unreduced_loss, tuple):
unmasked_unreduced_loss = unmasked_unreduced_loss[0]
labels = torch.cat([prompt_token_labels, labels], axis=1)
make_up_last_column_labels = torch.ones_like(labels[:, -1:]) * self.tokenizer.tokenizer.pad_token_id
new_labels = torch.cat([labels[:, 1:], make_up_last_column_labels], axis=-1)
filler = torch.zeros_like(new_labels)
labels_mask_0 = torch.where(new_labels != -100, new_labels, filler)
labels_mask = labels_mask_0 > 0
loss = self.mask_and_reduce_loss(labels_mask, unmasked_unreduced_loss)
loss_per_sample = self.mask_and_reduce_loss_per_sample(labels_mask, unmasked_unreduced_loss)
return loss, loss_per_sample
def mask_and_reduce_loss_per_sample(self, loss_mask, unmasked_unreduced_loss):
"""
Mask and reduce loss based on each sample in batch
Useful for ranking candidates with the same prompt in batch based on loss
"""
losses = unmasked_unreduced_loss.float()
loss_mask = loss_mask.view(-1).float()
masked_loss = losses.view(-1) * loss_mask
loss_per_sample = torch.mean(masked_loss.view(unmasked_unreduced_loss.size()), dim=-1)
return loss_per_sample
def mask_and_reduce_loss(self, loss_mask, output_tensor):
losses = output_tensor.float()
loss_mask = loss_mask.view(-1).float()
loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
return loss
def decode(self, tokens):
if tokens not in self.token_to_words:
self.token_to_words[tokens] = self.tokenizer.tokenizer.decode(tokens)
return self.token_to_words[tokens]
def binary_score_candidates(
self,
candidate_input_ids,
candidate_attn_masks,
utterance_length,
labels,
template_length,
correct_candidate,
minus_negative=True,
inference=False,
):
best_candidate_input_ids = []
for i in range(candidate_input_ids.size(0)):
best_j = 0
lowest_loss = float("inf")
for j in range(0, candidate_input_ids.size(1), 2):
if j > 0 and torch.equal(candidate_input_ids[i, j, :], candidate_input_ids[i, 0, :]):
break
start_yes = j if j // 2 == correct_candidate[i].item() else j + 1
                cand_loss, _ = self(
candidate_input_ids[i, start_yes : start_yes + 1, :],
candidate_attn_masks[i, start_yes : start_yes + 1, :],
self.get_binary_score_labels(candidate_input_ids[i, start_yes : start_yes + 1, :]),
inference=inference,
)
considered_loss = cand_loss.item()
if minus_negative:
start_no = j + 1 if j // 2 == correct_candidate[i].item() else j
                    negative_cand_loss, _ = self(
candidate_input_ids[i, start_no : start_no + 1, :],
candidate_attn_masks[i, start_no : start_no + 1, :],
self.get_binary_score_labels(candidate_input_ids[i, start_no : start_no + 1, :]),
inference=inference,
)
considered_loss -= negative_cand_loss.item()
if considered_loss < lowest_loss:
best_j = start_yes
lowest_loss = considered_loss
best_candidate_input_ids.append(candidate_input_ids[i, best_j, :])
candidate_tokens = torch.stack(best_candidate_input_ids)
generated_field, ground_truth_field = self.process_into_structured_fields(
candidate_tokens, labels, template_length=template_length
)
return generated_field, ground_truth_field
def get_binary_score_labels(self, input_ids):
# mask out every token except the last token for yes/no/true/false
labels = torch.zeros_like(input_ids)
for i in range(input_ids.size(0)):
            # default to the full sequence length in case no pad token is found in this row
            stop_point = input_ids.size(1)
            for j in range(input_ids.size(1)):
                if input_ids.data[i, j] == self.tokenizer.tokenizer.pad_token_id:
                    stop_point = j
                    break
            last_point = stop_point - 1
labels.data[i, last_point] = input_ids[i, last_point]
return labels
def rank_candidates(
self,
candidate_input_ids,
candidate_attn_masks,
utterance_length,
labels,
template_length,
minus_prior=True,
inference=False,
):
best_candidate_input_ids = []
for i in range(candidate_input_ids.size(0)):
# candidates are padded with first candidate to ensure equal number of candidates in batch
# run for loop to strip redundant candidates
last_j = candidate_input_ids.size(1)
for j in range(1, candidate_input_ids.size(1)):
if torch.equal(candidate_input_ids[i, j, :], candidate_input_ids[i, 0, :]):
last_j = j
break
utterance_end = utterance_length[i].item()
            # this might cause GPU memory pressure if there are many candidates
# if OOM, re-write to do this in a for loop with as many as train_ds.batch_size
_, loss_per_sample = self(
candidate_input_ids[i, :last_j, :],
candidate_attn_masks[i, :last_j, :],
candidate_input_ids[i, :last_j, :],
inference=inference,
)
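            # minus_prior: subtracting the loss of the candidate alone (computed without the utterance)
            # acts as a prior correction, so intrinsically likely candidate strings are not unfairly favoured.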
if minus_prior:
_, utterance_free_cand_loss_per_sample = self(
candidate_input_ids[i, :last_j, utterance_end:],
candidate_attn_masks[i, :last_j, utterance_end:],
candidate_input_ids[i, :last_j, utterance_end:],
inference=inference,
)
considered_loss = loss_per_sample - utterance_free_cand_loss_per_sample
else:
considered_loss = loss_per_sample
best_j = torch.argmin(considered_loss)
best_candidate_input_ids.append(candidate_input_ids[i, best_j, :])
candidate_tokens = torch.stack(best_candidate_input_ids)
generated_field, ground_truth_field = self.process_into_structured_fields(
candidate_tokens, labels, template_length=template_length
)
return generated_field, ground_truth_field
def generate_candidates(self, labels, template_length, input_ids, attn_masks):
tokens_to_generate = self.cfg.tokens_to_generate
if self.cfg.library == "huggingface":
generated_tokens = []
max_length = 0
for i in range(input_ids.size(0)):
param_dict = {
"input_ids": input_ids[i : i + 1, : template_length[i]],
"max_length": template_length[i] + tokens_to_generate,
"pad_token_id": self.tokenizer.tokenizer.pad_token_id,
}
generated_tokens.append(self.language_model.generate(**param_dict))
max_length = max(max_length, generated_tokens[-1].size(1))
# pad each generated to ensure they are of same length in dim 1, therefore stack-able
generated_tokens = [
torch.cat(
[i, torch.ones((1, max_length - i.size(1))).to(i.device) * self.tokenizer.tokenizer.pad_token_id],
axis=-1,
)
for i in generated_tokens
]
generated_tokens = torch.cat(generated_tokens, axis=0)
num_prompt_tokens = 0
elif self.cfg.library == "megatron":
prompt_ids = self.get_virtual_prompt_ids_for_megatron_gpt(input_ids)
num_prompt_tokens = (
len(self.language_model.pseudo_token_ids) if hasattr(self.language_model, 'pseudo_token_ids') else 0
)
prompt_token_labels = self.get_prompt_token_labels_for_megatron_gpt(input_ids, num_prompt_tokens)
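            # Keep only the prompt template for each example (dropping the gold answer tokens) and append
            # room for tokens_to_generate new tokens, so the model generates the answer continuation itself.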
input_ids_without_answers = [
torch.cat(
[
input_ids[i, : template_length[i]],
torch.ones((input_ids.size(1) - template_length[i].item(),)).to(input_ids.device)
* self.tokenizer.tokenizer.pad_token_id,
],
axis=-1,
).type(input_ids.dtype)
for i in range(input_ids.size(0))
]
input_ids_without_answers = torch.stack(input_ids_without_answers)
input_ids_new = torch.cat(
[
prompt_token_labels,
input_ids_without_answers,
torch.ones((input_ids.size(0), tokens_to_generate)).to(input_ids.device)
* self.tokenizer.tokenizer.pad_token_id,
],
axis=1,
).type(input_ids.dtype)
tokens_for_generation = (input_ids_new, template_length + num_prompt_tokens)
length_param: LengthParam = {"min_length": 0, "max_length": tokens_to_generate}
generated_dict = megatron_gpt_generate(
self.language_model,
tokens_for_generation,
self.tokenizer,
length_param,
get_default_sampling_params(),
task_ids=prompt_ids,
)
generated_tokens = torch.LongTensor(generated_dict['token_ids'])
generated_field, ground_truth_field = self.process_into_structured_fields(
generated_tokens, labels, template_length=template_length + num_prompt_tokens
)
return generated_field, ground_truth_field
def eval_step_helper(self, batch, mode='val'):
(
input_ids,
attn_masks,
labels,
candidate_input_ids,
candidate_attn_masks,
template_length,
utterance_length,
correct_candidate,
) = batch
inference = mode == 'test'
loss, _ = self(input_ids, attn_masks, labels, inference=inference)
self.log("{}_loss".format(mode), loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
# ranking using perplexity of candidates following the "<utterance> <label_type>:"
if self.eval_mode == "ranking":
generated_field, ground_truth_field = self.rank_candidates(
candidate_input_ids,
candidate_attn_masks,
utterance_length,
labels,
template_length,
inference=inference,
)
# autoregressively generate candidates (possibly with constraint)
elif self.eval_mode == "generation":
generated_field, ground_truth_field = self.generate_candidates(
labels, template_length, input_ids, attn_masks
)
# comparing likelihood based on the perplexity of generating " Answer: yes" after "<utterance> <label_type>: <candidate_label>"
# (optionally, the difference of that with " Answer: no" using the flag minus_negative=True)
elif self.eval_mode == "binary_score":
generated_field, ground_truth_field = self.binary_score_candidates(
candidate_input_ids,
candidate_attn_masks,
utterance_length,
labels,
template_length,
correct_candidate,
inference=inference,
)
else:
raise ValueError(
"{} is not among supported options (ranking, generation, binary_score)".format(self.eval_mode)
)
return {
'loss': loss,
'input': self.tokenizer.tokenizer.batch_decode(input_ids, skip_special_tokens=True),
'generated_field': generated_field,
'ground_truth_field': ground_truth_field,
}
def process_into_structured_fields(self, generated_tokens, labels, template_length=None):
generated_field = []
for i in range(generated_tokens.size(0)):
start_point = 0 if template_length is None else template_length[i].item()
stop_point = generated_tokens.size(1)
for j in range(start_point, stop_point):
if generated_tokens.data[i, j] == self.tokenizer.tokenizer.pad_token_id:
stop_point = j
break
# this is to account for the tokens ' Answer: ' + 'yes'/'no'/'true'/'false'
if self.eval_mode == "binary_score":
stop_point -= 3
one_generated_field = self.decode(generated_tokens[i, start_point:stop_point]).strip()
generated_field.append(one_generated_field)
ground_truth_field = self.process_ground_truth_field(labels)
return generated_field, ground_truth_field
def process_ground_truth_field(self, labels):
ground_truth_field = []
for i in range(labels.size(0)):
correct_label = tuple(
[j for j in labels.data[i] if j != self.tokenizer.tokenizer.pad_token_id and j != -100]
)
ground_truth_field.append(self.decode(correct_label).strip())
return ground_truth_field
def prepare_data(self):
"""
        Preprocesses the schema and dialogues and caches the result.
"""
if self.data_prepared:
return
if self._cfg.dataset.task == 'sgd':
self.dialogues_processor = DialogueSGDDataProcessor(
data_dir=self._cfg.dataset.data_dir,
dialogues_example_dir=self._cfg.dataset.dialogues_example_dir,
tokenizer=self.tokenizer,
cfg=self._cfg.dataset,
)
elif self._cfg.dataset.task in ['assistant', "zero_shot"]:
self.dialogues_processor = DialogueAssistantDataProcessor(
data_dir=self._cfg.dataset.data_dir, tokenizer=self.tokenizer, cfg=self._cfg.dataset
)
elif self._cfg.dataset.task == 'design':
self.dialogues_processor = DialogueDesignDataProcessor(
data_dir=self._cfg.dataset.data_dir, tokenizer=self.tokenizer, cfg=self._cfg.dataset,
)
else:
raise ValueError("Only sgd, assistant, zero_shot, design supported for Dialogue GPT Classification Model")
self.data_prepared = True
def setup(self, stage=None):
super().setup(stage)
if self.cfg.library == "megatron" and self.prompt_learning and stage == "fit":
if self.cfg.virtual_prompt_style == VirtualPromptStyle.P_TUNING:
self.language_model.init_prompt_encoder()
else:
raise ValueError(
"Use model.virtual_prompt_style='p-tuning' with model.p_tuning.encoder_type='embedding' to enable prompt-tuning."
)
def update_data_dirs(self, data_dir: str, dialogues_example_dir: str):
"""
Update data directories
Args:
data_dir: path to data directory
dialogues_example_dir: path to preprocessed dialogues example directory, if not exists will be created.
"""
if not os.path.exists(data_dir):
raise ValueError(f"{data_dir} is not found")
self._cfg.dataset.data_dir = data_dir
self._cfg.dataset.dialogues_example_dir = dialogues_example_dir
logging.info(f'Setting model.dataset.data_dir to {data_dir}.')
logging.info(f'Setting model.dataset.dialogues_example_dir to {dialogues_example_dir}.')
def setup_training_data(self, train_data_config: Optional[DictConfig] = None):
self.prepare_data()
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, split=train_data_config.ds_item)
def setup_multiple_validation_data(self, val_data_config: Optional[DictConfig] = None):
return self.setup_validation_data(val_data_config)
def setup_validation_data(self, val_data_config: Optional[DictConfig] = None):
self.prepare_data()
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config, split=val_data_config.ds_item)
def setup_multiple_test_data(self, test_data_config: Union[DictConfig, Dict]):
self.setup_test_data(test_data_config)
def setup_test_data(self, test_data_config: Optional[DictConfig] = None):
self.prepare_data()
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, split=test_data_config.ds_item)
def _setup_dataloader_from_config(self, cfg: DictConfig, split: str) -> DataLoader:
dataset_cfg = self._cfg.dataset
data_dir = dataset_cfg.data_dir
if not os.path.exists(data_dir):
raise FileNotFoundError(f"Data directory is not found at: {data_dir}.")
dataset = DialogueGPTClassificationDataset(
dataset_split=split,
dialogues_processor=self.dialogues_processor,
tokenizer=self.dialogues_processor._tokenizer,
cfg=dataset_cfg,
)
dl = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
collate_fn=dataset.collate_fn,
drop_last=cfg.drop_last,
shuffle=cfg.shuffle,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
)
return dl
@classmethod
    def list_available_models(cls) -> List[PretrainedModelInfo]:
        """
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
return result
| NeMo-main | nemo/collections/nlp/models/dialogue/dialogue_gpt_classification_model.py |
# Copyright 2022 The HuggingFace Inc. team.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from transformers import AutoModel
from nemo.collections.nlp.data.dialogue import DialogueSGDDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.assistant_data_processor import DialogueAssistantDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.design_data_processor import DialogueDesignDataProcessor
from nemo.collections.nlp.data.dialogue.dataset.dialogue_nearest_neighbour_dataset import (
DialogueNearestNeighbourDataset,
)
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.metrics.dialogue_metrics import DialogueGenerationMetrics
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging
__all__ = ['DialogueNearestNeighbourModel']
class DialogueNearestNeighbourModel(NLPModel):
"""Dialogue Nearest Neighbour Model identifies the intent of an utterance using the cosine similarity between sentence embeddings of the utterance and various label descriptions """
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self.cfg = cfg
super().__init__(cfg=cfg, trainer=trainer)
if self.cfg.library == "huggingface":
self.language_model = AutoModel.from_pretrained(self.cfg.language_model.pretrained_model_name)
def _setup_dataloader_from_config(self, cfg: DictConfig, dataset_split) -> 'torch.utils.data.DataLoader':
if self._cfg.dataset.task == "zero_shot":
self.data_processor = DialogueAssistantDataProcessor(
self.cfg.data_dir, self.tokenizer, cfg=self.cfg.dataset
)
elif self._cfg.dataset.task == "design":
self.data_processor = DialogueDesignDataProcessor(
data_dir=self._cfg.dataset.data_dir, tokenizer=self.tokenizer, cfg=self._cfg.dataset
)
elif self._cfg.dataset.task == 'sgd':
self.data_processor = DialogueSGDDataProcessor(
data_dir=self._cfg.dataset.data_dir,
dialogues_example_dir=self._cfg.dataset.dialogues_example_dir,
tokenizer=self.tokenizer,
cfg=self._cfg.dataset,
)
else:
raise ValueError("Only zero_shot, design and sgd supported for Zero Shot Intent Model")
dataset = DialogueNearestNeighbourDataset(
dataset_split,
self.data_processor,
self.tokenizer,
self.cfg.dataset, # this is the model.dataset cfg, which is diff from train_ds cfg etc
)
return torch.utils.data.DataLoader(
dataset=dataset,
collate_fn=dataset.collate_fn,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.get("num_workers", 0),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
def forward(self, input_ids, attention_mask):
if self.cfg.library == 'huggingface':
output = self.language_model(input_ids=input_ids, attention_mask=attention_mask)
return output
def training_step(self, batch, batch_idx):
raise NotImplementedError
def test_step(self, batch, batch_idx):
loss = self.validation_step(batch, batch_idx, mode='test')
self.test_step_outputs.append(loss)
return loss
@staticmethod
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] # First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
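        # Masked mean over the token dimension; the clamp guards against division by zero for all-zero masks.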
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
def validation_step(self, batch, batch_idx, mode='val'):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
input_ids, input_mask, labels = batch
preds = []
gts = []
inputs = []
for i in range(input_ids.size(0)):
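            # Row 0 holds the query utterance and rows 1..N hold the candidate label descriptions;
            # the prediction is the candidate whose embedding is most cosine-similar to the utterance embedding.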
output = self.forward(input_ids=input_ids[i], attention_mask=input_mask[i])
sentence_embeddings = DialogueNearestNeighbourModel.mean_pooling(output, input_mask[i])
sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1)
cos_sim = F.cosine_similarity(sentence_embeddings[:1, :], sentence_embeddings[1:, :])
pred = torch.argmax(cos_sim).item() + 1
gt = torch.argmax(labels[i][1:]).item() + 1
preds.append(input_ids[i, pred])
gts.append(input_ids[i, gt])
inputs.append(input_ids[i, 0])
loss = {'preds': torch.stack(preds), 'labels': torch.stack(gts), 'inputs': torch.stack(inputs)}
self.validation_step_outputs.append(loss)
return loss
def multi_test_epoch_end(self, outputs, dataloader_idx):
return self.on_validation_epoch_end()
def on_validation_epoch_end(self):
"""
Get metrics based on the candidate label with the highest predicted likelihood and the ground truth label for intent
"""
prefix = "test" if self.trainer.testing else "val"
if prefix == "val":
outputs = self.validation_step_outputs
else:
outputs = self.test_step_outputs
output_preds = torch.cat([output['preds'] for output in outputs], dim=0)
output_labels = torch.cat([output['labels'] for output in outputs], dim=0)
inputs = torch.cat([output['inputs'] for output in outputs], dim=0)
decoded_preds = self.tokenizer.tokenizer.batch_decode(output_preds, skip_special_tokens=True)
decoded_labels = self.tokenizer.tokenizer.batch_decode(output_labels, skip_special_tokens=True)
decoded_inputs = self.tokenizer.tokenizer.batch_decode(inputs, skip_special_tokens=True)
prompt_len = len(self.cfg.dataset.prompt_template.strip())
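        # Decoded sequences start with the prompt template, so slice it off by character length
        # to recover the bare label text.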
predicted_labels = [i[prompt_len:].strip() for i in decoded_preds]
ground_truth_labels = [i[prompt_len:].strip() for i in decoded_labels]
os.makedirs(self.cfg.dataset.dialogues_example_dir, exist_ok=True)
filename = os.path.join(self.cfg.dataset.dialogues_example_dir, "test_predictions.jsonl")
DialogueGenerationMetrics.save_predictions(
filename, predicted_labels, ground_truth_labels, decoded_inputs,
)
label_to_ids = {label: idx for idx, label in enumerate(list(set(predicted_labels + ground_truth_labels)))}
self.classification_report = ClassificationReport(
num_classes=len(label_to_ids), mode='micro', label_ids=label_to_ids, dist_sync_on_step=True
).to(output_preds[0].device)
predicted_label_ids = torch.tensor([label_to_ids[label] for label in predicted_labels]).to(
output_preds[0].device
)
ground_truth_label_ids = torch.tensor([label_to_ids[label] for label in ground_truth_labels]).to(
output_preds[0].device
)
tp, fn, fp, _ = self.classification_report(predicted_label_ids, ground_truth_label_ids)
precision, recall, f1, report = self.classification_report.compute()
label_acc = np.mean([int(predicted_labels[i] == ground_truth_labels[i]) for i in range(len(predicted_labels))])
logging.info(report)
self.log('unified_precision', precision)
self.log('unified_f1', f1)
self.log('unified_recall', recall)
        self.log('unified_accuracy', label_acc * 100)
self.classification_report.reset()
self.validation_step_outputs.clear() if prefix == 'val' else self.test_step_outputs.clear()
def setup_training_data(self, train_data_config: Optional[DictConfig]):
if not train_data_config:
logging.info(
f"Dataloader config or file_name for the training set is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._train_dl = self._setup_dataloader_from_config(train_data_config, "train")
# self.create_loss_module()
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if not val_data_config:
logging.info(
f"Dataloader config or file_path for the validation data set is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._validation_dl = self._setup_dataloader_from_config(val_data_config, "dev")
def setup_multiple_test_data(self, test_data_config: Optional[DictConfig]):
self.setup_test_data(test_data_config)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
if not test_data_config:
logging.info(
f"Dataloader config or file_path for the test data set is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._test_dl = self._setup_dataloader_from_config(test_data_config, "test")
@classmethod
    def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
return result
| NeMo-main | nemo/collections/nlp/models/dialogue/dialogue_nearest_neighbour_model.py |
# Copyright 2018 The HuggingFace Inc. team.
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import defaultdict
from typing import Dict, List, Optional, Union
import numpy as np
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from nemo.collections.nlp.data.dialogue import DialogueSGDDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.assistant_data_processor import DialogueAssistantDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.design_data_processor import DialogueDesignDataProcessor
from nemo.collections.nlp.data.dialogue.dataset.dialogue_zero_shot_intent_dataset import DialogueZeroShotIntentDataset
from nemo.collections.nlp.data.zero_shot_intent_recognition.zero_shot_intent_dataset import (
ZeroShotIntentInferenceDataset,
calc_class_weights_from_dataloader,
)
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.metrics.dialogue_metrics import DialogueGenerationMetrics
from nemo.collections.nlp.models import TextClassificationModel
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging
__all__ = ['DialogueZeroShotIntentModel']
class DialogueZeroShotIntentModel(TextClassificationModel):
"""TextClassificationModel to be trained on two- or three-class textual entailment data, to be used for zero shot intent recognition."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self.cfg = cfg
super().__init__(cfg=cfg, trainer=trainer)
if self.cfg.library == 'megatron':
# zero shot intent classification loading
# cannot directly load as .nemo uses the pre-refactor model
# therefore transfer its attributes over
if self.cfg.original_nemo_checkpoint is not None:
original_model = DialogueZeroShotIntentModel.restore_from(self.cfg.original_nemo_checkpoint)
self.classifier = original_model.classifier
self.bert_model = original_model.bert_model
self.loss = original_model.loss
self.classification_report = original_model.classification_report
elif self.cfg.library == "huggingface":
self.nli_model = AutoModelForSequenceClassification.from_pretrained('facebook/bart-large-mnli')
self.bert_model = self.nli_model.model
self.classifier = self.nli_model.classification_head
original_model = DialogueZeroShotIntentModel.restore_from(self.cfg.original_nemo_checkpoint)
self.loss = original_model.loss
self.classification_report = original_model.classification_report
self.tokenizer = AutoTokenizer.from_pretrained('facebook/bart-large-mnli')
self.tokenizer.max_seq_length = self.cfg.dataset.max_seq_length
def _setup_dataloader_from_config(self, cfg: DictConfig, dataset_split) -> 'torch.utils.data.DataLoader':
if self._cfg.dataset.task == "zero_shot":
self.data_processor = DialogueAssistantDataProcessor(
self.cfg.data_dir, self.tokenizer, cfg=self.cfg.dataset
)
elif self._cfg.dataset.task == "design":
self.data_processor = DialogueDesignDataProcessor(
data_dir=self._cfg.dataset.data_dir, tokenizer=self.tokenizer, cfg=self._cfg.dataset
)
elif self._cfg.dataset.task == 'sgd':
self.data_processor = DialogueSGDDataProcessor(
data_dir=self._cfg.dataset.data_dir,
dialogues_example_dir=self._cfg.dataset.dialogues_example_dir,
tokenizer=self.tokenizer,
cfg=self._cfg.dataset,
)
else:
raise ValueError("Only zero_shot, design and sgd supported for Zero Shot Intent Model")
dataset = DialogueZeroShotIntentDataset(
dataset_split,
self.data_processor,
self.tokenizer,
self.cfg.dataset, # this is the model.dataset cfg, which is diff from train_ds cfg etc
)
return torch.utils.data.DataLoader(
dataset=dataset,
collate_fn=dataset.collate_fn,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.get("num_workers", 0),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
def forward(self, input_ids, attention_mask, token_type_ids):
if self.cfg.library == 'megatron':
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)
if isinstance(hidden_states, tuple):
hidden_states = hidden_states[0]
logits = self.classifier(hidden_states=hidden_states)
elif self.cfg.library == 'huggingface':
output = self.nli_model(input_ids=input_ids, attention_mask=attention_mask)
logits = output['logits']
return logits
def setup_training_data(self, train_data_config: Optional[DictConfig]):
if not train_data_config:
logging.info(
f"Dataloader config or file_name for the training set is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._train_dl = self._setup_dataloader_from_config(train_data_config, "train")
# calculate the class weights to be used in the loss function
if self.cfg.dataset.class_balancing == 'weighted_loss':
self.class_weights = calc_class_weights_from_dataloader(
self._train_dl, self.cfg.dataset.num_classes, self.cfg.dataset.data_dir
)
else:
self.class_weights = None
# we need to create/update the loss module by using the weights calculated from the training data
self.create_loss_module()
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if not val_data_config:
logging.info(
f"Dataloader config or file_path for the validation data set is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._validation_dl = self._setup_dataloader_from_config(val_data_config, "dev")
def setup_test_data(self, test_data_config: Optional[DictConfig]):
if not test_data_config:
logging.info(
f"Dataloader config or file_path for the test data set is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._test_dl = self._setup_dataloader_from_config(test_data_config, "test")
def _setup_infer_dataloader(
self,
queries: List[str],
candidate_labels: List[str],
        hypothesis_template: str,
batch_size=1,
max_seq_length: int = -1,
) -> 'torch.utils.data.DataLoader':
"""
Setup method for inference data loader. Here the premise-hypothesis pairs are made from queries and candidate labels.
Args:
queries: the queries to classify
candidate_labels: strings to be used as labels
hypothesis_template: the template used to turn each label into an NLI-style hypothesis. Must include a {}
or similar syntax for the candidate label to be inserted.
batch_size: batch size to use during inference
max_seq_length: maximum length of queries, default is -1 for no limit
Returns:
A pytorch DataLoader.
"""
dataset = ZeroShotIntentInferenceDataset(
queries=queries,
candidate_labels=candidate_labels,
tokenizer=self.tokenizer,
max_seq_length=max_seq_length,
hypothesis_template=hypothesis_template,
)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=2,
pin_memory=False,
drop_last=False,
collate_fn=dataset.collate_fn,
)
def validation_step(self, batch, batch_idx, split='val'):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
input_ids, input_type_ids, input_mask, labels = batch
logits = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
val_loss = self.loss(logits=logits, labels=labels)
preds = torch.argmax(logits, axis=-1)
tp, fn, fp, _ = self.classification_report(preds, labels)
loss = {
'val_loss': val_loss,
'tp': tp,
'fn': fn,
'fp': fp,
'logits': logits,
'input_ids': input_ids,
'labels': labels,
}
self.validation_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self, split="val"):
"""
Get metrics based on the candidate label with the highest predicted likelihood and the ground truth label for intent
"""
output_logits = torch.cat([output['logits'] for output in self.validation_step_outputs], dim=0)
output_input_ids = torch.cat([output['input_ids'] for output in self.validation_step_outputs], dim=0)
output_labels = torch.cat([output['labels'] for output in self.validation_step_outputs], dim=0)
if self.cfg.library == 'huggingface':
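            # facebook/bart-large-mnli orders its labels as (contradiction, neutral, entailment),
            # so index 2 is the entailment logit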
entail_logits = output_logits[..., 2]
decoded_input_ids = [self.tokenizer.decode(output_input_ids[i]) for i in range(len(output_input_ids))]
utterance_candidate_pairs = [i.split(self.tokenizer.sep_token) for i in decoded_input_ids]
utterances = [
i[0].replace(self.tokenizer.bos_token, '').replace(self.tokenizer.eos_token, '')
for i in utterance_candidate_pairs
]
elif self.cfg.library == 'megatron':
entail_logits = output_logits[..., 1]
decoded_input_ids = [
self.tokenizer.tokenizer.decode(output_input_ids[i]) for i in range(len(output_input_ids))
]
utterance_candidate_pairs = [i.split(self.tokenizer.tokenizer.sep_token) for i in decoded_input_ids]
utterances = [
i[0].replace(self.tokenizer.tokenizer.bos_token, '').replace(self.tokenizer.tokenizer.eos_token, '')
for i in utterance_candidate_pairs
]
# account for uncased tokenization
candidates = [
i[1]
.replace(self.cfg.dataset.prompt_template.lower(), '')
.replace(self.cfg.dataset.prompt_template, '')
.strip()
for i in utterance_candidate_pairs
]
utterance_to_idx = defaultdict(list)
for idx, utterance in enumerate(utterances):
utterance_to_idx[utterance].append(idx)
predicted_labels = []
ground_truth_labels = []
utterances = []
for utterance, idxs in utterance_to_idx.items():
utterance_candidates = [candidates[idx] for idx in idxs]
logits = [entail_logits[idx].item() for idx in idxs]
labels = [output_labels[idx].item() for idx in idxs]
correct_candidate = utterance_candidates[np.argmax(labels)]
predicted_candidate = utterance_candidates[np.argmax(logits)]
predicted_labels.append(predicted_candidate)
ground_truth_labels.append(correct_candidate)
utterances.append(utterance)
os.makedirs(self.cfg.dataset.dialogues_example_dir, exist_ok=True)
filename = os.path.join(self.cfg.dataset.dialogues_example_dir, "test_predictions.jsonl")
DialogueGenerationMetrics.save_predictions(
filename, predicted_labels, ground_truth_labels, utterances,
)
label_to_ids = {label: idx for idx, label in enumerate(list(set(predicted_labels + ground_truth_labels)))}
self.classification_report = ClassificationReport(
num_classes=len(label_to_ids), mode='micro', label_ids=label_to_ids, dist_sync_on_step=True
).to(output_logits[0].device)
predicted_label_ids = torch.tensor([label_to_ids[label] for label in predicted_labels]).to(
output_logits[0].device
)
ground_truth_label_ids = torch.tensor([label_to_ids[label] for label in ground_truth_labels]).to(
output_logits[0].device
)
tp, fn, fp, _ = self.classification_report(predicted_label_ids, ground_truth_label_ids)
precision, recall, f1, report = self.classification_report.compute()
label_acc = np.mean([int(predicted_labels[i] == ground_truth_labels[i]) for i in range(len(predicted_labels))])
avg_loss = torch.stack([x[f'val_loss'] for x in self.validation_step_outputs]).mean()
logging.info(report)
self.log('unified_precision', precision)
self.log('unified_f1', f1)
self.log('unified_recall', recall)
        self.log('unified_accuracy', label_acc * 100)
self.log('val_loss', avg_loss, prog_bar=True)
self.validation_step_outputs.clear() # free memory
self.classification_report.reset()
def predict(
self,
queries: Union[str, List[str]],
candidate_labels: Union[str, List[str]],
hypothesis_template='This example is {}.',
batch_size=1,
multi_label=True,
entailment_idx=1,
contradiction_idx=0,
) -> List[Dict]:
"""
Given a list of queries and a list of candidate labels, return a ranked list of labels and scores for each query.
Example usage:
queries = ["I'd like a veggie burger, fries, and a coke", "Turn off the lights in the living room",]
candidate_labels = ["Food order", "Change lighting"]
model.predict(queries, candidate_labels)
Example output:
[{'sentence': "I'd like a veggie burger, fries, and a coke",
'labels': ['Food order', 'Change lighting'],
'scores': [0.8557153344154358, 0.12036784738302231]},
{'sentence': 'Turn off the lights in the living room',
'labels': ['Change lighting', 'Food order'],
'scores': [0.8506497144699097, 0.06594637036323547]}]
Args:
queries: the query or list of queries to classify
candidate_labels: string or list of strings to be used as labels
hypothesis_template: the template used to turn each label into an NLI-style hypothesis. Must include a {}
or similar syntax for the candidate label to be inserted.
batch_size: the batch size to use for inference.
multi_label: whether or not multiple candidate labels can be true. If False, the scores are normalized
such that all class probabilities sum to 1. If True, the labels are
considered independent and probabilities are normalized for each candidate by doing a softmax of
the entailment score vs. the contradiction score.
entailment_idx: the index of the "entailment" class in the trained model; models trained on MNLI
using NeMo's glue_benchmark.py or zero_shot_intent_model.py use an index of 1 by default.
contradiction_idx: the index of the "contradiction" class in the trained model; models trained on MNLI
using NeMo's glue_benchmark.py or zero_shot_intent_model.py use an index of 0 by default.
Returns:
list of dictionaries; one dict per input query. Each dict has keys "sentence", "labels", "scores".
labels and scores are parallel lists (with each score corresponding to the label at the same index),
sorted from highest to lowest score.
"""
if not queries:
raise ValueError("No queries were passed for classification!")
if not candidate_labels:
raise ValueError("No candidate labels were provided!")
queries = [queries] if isinstance(queries, str) else queries
candidate_labels = [candidate_labels] if isinstance(candidate_labels, str) else candidate_labels
if len(candidate_labels) == 1:
multi_label = True
mode = self.training
try:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Switch model to evaluation mode
self.eval()
self.to(device)
infer_datalayer = self._setup_infer_dataloader(
queries,
candidate_labels,
hypothesis_template=hypothesis_template,
batch_size=batch_size,
max_seq_length=self._cfg.dataset.max_seq_length,
)
all_batch_logits = []
for batch in infer_datalayer:
input_ids, input_type_ids, input_mask, _ = batch
logits = self.forward(
input_ids=input_ids.to(device),
token_type_ids=input_type_ids.to(device),
attention_mask=input_mask.to(device),
)
all_batch_logits.append(logits.detach().cpu().numpy())
all_logits = np.concatenate(all_batch_logits)
outputs = all_logits.reshape((len(queries), len(candidate_labels), -1))
if not multi_label:
# softmax the "entailment" logits over all candidate labels
entail_logits = outputs[..., entailment_idx]
scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
else:
# softmax over the entailment vs. contradiction dim for each label independently
entail_contr_logits = outputs[..., [contradiction_idx, entailment_idx]]
scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
scores = scores[..., 1]
result = []
for i in range(len(queries)):
sorted_idxs = list(reversed(scores[i].argsort()))
result.append(
{
"sentence": queries[i],
"labels": [candidate_labels[j] for j in sorted_idxs],
"scores": scores[i][sorted_idxs].tolist(),
}
)
finally:
# set mode back to its original value
self.train(mode=mode)
return result
@classmethod
    def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
result.append(
PretrainedModelInfo(
pretrained_model_name="zeroshotintent_en_bert_base_uncased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/zeroshotintent_en_bert_base_uncased/versions/1.4.1/files/zeroshotintent_en_bert_base_uncased.nemo",
description="DialogueZeroShotIntentModel trained by fine tuning BERT-base-uncased on the MNLI (Multi-Genre Natural Language Inference) dataset, which achieves an accuracy of 84.9% and 84.8% on the matched and mismatched dev sets, respectively.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="zeroshotintent_en_megatron_uncased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/zeroshotintent_en_megatron_uncased/versions/1.4.1/files/zeroshotintent_en_megatron_uncased.nemo",
description="DialogueZeroShotIntentModel trained by fine tuning Megatron-BERT-345m=M-uncased on the MNLI (Multi-Genre Natural Language Inference) dataset, which achieves an accuracy of 90.0% and 89.9% on the matched and mismatched dev sets, respectively",
)
)
return result
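# --- Usage sketch (editor's addition; a minimal example, not part of the NeMo source) ---
# Shows how the zero-shot prediction API documented above might be called. It assumes the
# surrounding class is DialogueZeroShotIntentModel, that the method is exposed as `predict`,
# and that the "zeroshotintent_en_bert_base_uncased" checkpoint listed in
# list_available_models() can be downloaded in your environment.
if __name__ == "__main__":
    model = DialogueZeroShotIntentModel.from_pretrained("zeroshotintent_en_bert_base_uncased")
    results = model.predict(
        queries=["Turn off the lights in the living room"],
        candidate_labels=["Change lighting", "Food order"],
        hypothesis_template="This example is {}.",  # the {} is replaced by each candidate label
        batch_size=1,
        multi_label=True,
    )
    for r in results:
        # "labels" and "scores" are parallel lists sorted from highest to lowest score
        print(r["sentence"], list(zip(r["labels"], r["scores"])))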
| NeMo-main | nemo/collections/nlp/models/dialogue/dialogue_zero_shot_intent_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.text_normalization_as_tagging.thutmose_tagger import ThutmoseTaggerModel
| NeMo-main | nemo/collections/nlp/models/text_normalization_as_tagging/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import perf_counter
from typing import Dict, List, Optional
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.common.losses import CrossEntropyLoss
from nemo.collections.nlp.data.text_normalization_as_tagging import (
ThutmoseTaggerDataset,
ThutmoseTaggerTestDataset,
bert_example,
tagging,
)
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import read_label_map, read_semiotic_classes
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common.token_classifier import TokenClassifier
from nemo.collections.nlp.parts.utils_funcs import tensor2list
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types import LogitsType, NeuralType
from nemo.utils import logging
from nemo.utils.decorators import experimental
__all__ = ["ThutmoseTaggerModel"]
@experimental
class ThutmoseTaggerModel(NLPModel):
"""
BERT-based tagging model for ITN, inspired by LaserTagger approach.
It maps spoken-domain input words to tags:
KEEP, DELETE, or any of predefined replacement tags which correspond to a written-domain fragment.
Example: one hundred thirty four -> _1 <DELETE> 3 4_
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"logits": NeuralType(('B', 'T', 'D'), LogitsType()),
"semiotic_logits": NeuralType(('B', 'T', 'D'), LogitsType()),
}
@property
def input_module(self):
return self
@property
def output_module(self):
return self
def __init__(self, cfg: DictConfig, trainer: Trainer = None) -> None:
super().__init__(cfg=cfg, trainer=trainer)
label_map_file = self.register_artifact("label_map", cfg.label_map, verify_src_exists=True)
semiotic_classes_file = self.register_artifact(
"semiotic_classes", cfg.semiotic_classes, verify_src_exists=True
)
self.label_map = read_label_map(label_map_file)
self.semiotic_classes = read_semiotic_classes(semiotic_classes_file)
self.num_labels = len(self.label_map)
self.num_semiotic_labels = len(self.semiotic_classes)
self.id_2_tag = {tag_id: tagging.Tag(tag) for tag, tag_id in self.label_map.items()}
self.id_2_semiotic = {semiotic_id: semiotic for semiotic, semiotic_id in self.semiotic_classes.items()}
self.max_sequence_len = cfg.get('max_sequence_len', self.tokenizer.tokenizer.model_max_length)
# setup to track metrics
# we will have (len(self.semiotic_classes) + 1) labels
# last one stands for WRONG (span in which the predicted tags don't match the labels)
# this is needed to feed the sequence of classes to classification_report during validation
label_ids = self.semiotic_classes.copy()
label_ids["WRONG"] = len(self.semiotic_classes)
self.tag_classification_report = ClassificationReport(
len(self.semiotic_classes) + 1, label_ids=label_ids, mode='micro', dist_sync_on_step=True
)
self.tag_multiword_classification_report = ClassificationReport(
len(self.semiotic_classes) + 1, label_ids=label_ids, mode='micro', dist_sync_on_step=True
)
self.semiotic_classification_report = ClassificationReport(
len(self.semiotic_classes) + 1, label_ids=label_ids, mode='micro', dist_sync_on_step=True
)
self.hidden_size = cfg.hidden_size
self.logits = TokenClassifier(
self.hidden_size, num_classes=self.num_labels, num_layers=1, log_softmax=False, dropout=0.1
)
self.semiotic_logits = TokenClassifier(
self.hidden_size, num_classes=self.num_semiotic_labels, num_layers=1, log_softmax=False, dropout=0.1
)
self.loss_fn = CrossEntropyLoss(logits_ndim=3)
self.builder = bert_example.BertExampleBuilder(
self.label_map, self.semiotic_classes, self.tokenizer.tokenizer, self.max_sequence_len
)
@typecheck()
def forward(self, input_ids, input_mask, segment_ids):
src_hiddens = self.bert_model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
tag_logits = self.logits(hidden_states=src_hiddens)
semiotic_logits = self.semiotic_logits(hidden_states=src_hiddens)
return tag_logits, semiotic_logits
# Training
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
input_ids, input_mask, segment_ids, labels_mask, labels, semiotic_labels, _ = batch
tag_logits, semiotic_logits = self.forward(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids)
loss_on_tags = self.loss_fn(logits=tag_logits, labels=labels, loss_mask=labels_mask)
loss_on_semiotic = self.loss_fn(logits=semiotic_logits, labels=semiotic_labels, loss_mask=labels_mask)
loss = loss_on_tags + loss_on_semiotic
lr = self._optimizer.param_groups[0]['lr']
self.log('train_loss', loss)
self.log('lr', lr, prog_bar=True)
return {'loss': loss, 'lr': lr}
# Validation and Testing
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
input_ids, input_mask, segment_ids, labels_mask, tag_labels, semiotic_labels, semiotic_spans = batch
tag_logits, semiotic_logits = self.forward(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids)
tag_preds = torch.argmax(tag_logits, dim=2)
semiotic_preds = torch.argmax(semiotic_logits, dim=2)
# Update tag classification_report
predictions, labels = tag_preds.tolist(), tag_labels.tolist()
for prediction, label, semiotic in zip(predictions, labels, semiotic_spans):
# Here we want to track whether the predicted output matches ground truth labels
# for each whole semiotic span
# so we construct the special input for classification report, for example:
# label = [PLAIN, PLAIN, DATE, PLAIN, LETTERS, PLAIN]
# pred = [PLAIN, PLAIN, WRONG, PLAIN, LETTERS, PLAIN]
span_labels = []
span_predictions = []
for cid, start, end in semiotic:
if cid == -1:
break
span_labels.append(cid)
if prediction[start:end] == label[start:end]:
span_predictions.append(cid)
else:
span_predictions.append(self.tag_classification_report.num_classes - 1) # this stands for WRONG
if len(span_labels) != len(span_predictions):
raise ValueError(
"Length mismatch: len(span_labels)="
+ str(len(span_labels))
+ "; len(span_predictions)="
+ str(len(span_predictions))
)
self.tag_classification_report(
torch.tensor(span_predictions).to(self.device), torch.tensor(span_labels).to(self.device)
)
# We collect a separate classification_report for multiword replacements, as they are harder for the model
multiword_span_labels = []
multiword_span_predictions = []
for cid, start, end in semiotic:
if cid == -1:
break
# this is a trick to determine if label consists of a single replacement
# - it will be repeated for each input subtoken
if len(set(label[start:end])) == 1:
continue
multiword_span_labels.append(cid)
if prediction[start:end] == label[start:end]:
multiword_span_predictions.append(cid)
else:
# this stands for WRONG
multiword_span_predictions.append(self.tag_classification_report.num_classes - 1)
if len(multiword_span_labels) != len(multiword_span_predictions):
raise ValueError(
"Length mismatch: len(multiword_span_labels)="
+ str(len(multiword_span_labels))
+ "; len(multiword_span_predictions)="
+ str(len(multiword_span_predictions))
)
self.tag_multiword_classification_report(
torch.tensor(multiword_span_predictions).to(self.device),
torch.tensor(multiword_span_labels).to(self.device),
)
# Update semiotic classification_report
predictions, labels = semiotic_preds.tolist(), semiotic_labels.tolist()
for prediction, label, semiotic in zip(predictions, labels, semiotic_spans):
# Here we want to track whether the predicted output matches ground truth labels for whole semiotic span
# so we construct the special input for classification report, for example:
# label = [PLAIN, PLAIN, DATE, PLAIN, LETTERS, PLAIN]
# pred = [PLAIN, PLAIN, WRONG, PLAIN, LETTERS, PLAIN]
span_labels = []
span_predictions = []
for cid, start, end in semiotic:
if cid == -1:
break
span_labels.append(cid)
if prediction[start:end] == label[start:end]:
span_predictions.append(cid)
else:
span_predictions.append(self.tag_classification_report.num_classes - 1) # this stands for WRONG
if len(span_labels) != len(span_predictions):
raise ValueError(
"Length mismatch: len(span_labels)="
+ str(len(span_labels))
+ "; len(span_predictions)="
+ str(len(span_predictions))
)
self.semiotic_classification_report(
torch.tensor(span_predictions).to(self.device), torch.tensor(span_labels).to(self.device)
)
val_loss_tag = self.loss_fn(logits=tag_logits, labels=tag_labels, loss_mask=labels_mask)
val_loss_semiotic = self.loss_fn(logits=semiotic_logits, labels=semiotic_labels, loss_mask=labels_mask)
val_loss = val_loss_tag + val_loss_semiotic
return {'val_loss': val_loss}
def on_validation_epoch_end(self, outputs):
"""
Called at the end of validation to aggregate outputs.
:param outputs: list of individual outputs of each validation step.
"""
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
# calculate metrics and classification report
# In our task recall = accuracy, and the recall column is the per-class accuracy
_, tag_accuracy, _, tag_report = self.tag_classification_report.compute()
_, tag_multiword_accuracy, _, tag_multiword_report = self.tag_multiword_classification_report.compute()
_, semiotic_accuracy, _, semiotic_report = self.semiotic_classification_report.compute()
logging.info("Total tag accuracy: " + str(tag_accuracy))
logging.info(tag_report)
logging.info("Only multiword tag accuracy: " + str(tag_multiword_accuracy))
logging.info(tag_multiword_report)
logging.info("Total semiotic accuracy: " + str(semiotic_accuracy))
logging.info(semiotic_report)
self.log('val_loss', avg_loss, prog_bar=True)
self.log('tag accuracy', tag_accuracy)
self.log('tag multiword accuracy', tag_multiword_accuracy)
self.log('semiotic accuracy', semiotic_accuracy)
self.tag_classification_report.reset()
self.tag_multiword_classification_report.reset()
self.semiotic_classification_report.reset()
def test_step(self, batch, batch_idx):
"""
Lightning calls this inside the test loop with the data from the test dataloader
passed in as `batch`.
"""
return self.validation_step(batch, batch_idx)
def on_test_epoch_end(self, outputs):
"""
Called at the end of test to aggregate outputs.
:param outputs: list of individual outputs of each test step.
"""
return self.on_validation_epoch_end(outputs)
# Functions for inference
@torch.no_grad()
def _infer(self, sents: List[str]) -> List[List[int]]:
""" Main function for Inference
Args:
sents: A list of input sentences (lowercase spoken-domain words separated by space).
Returns:
all_preds: A list of tab-separated text records, same size as input list. Each record consists of 5 items:
- final output text
- input words
- tags predicted for input words
- tags after swap preprocessing
- semiotic labels predicted for input words
"""
# all input sentences go into one batch
dataloader_cfg = {"batch_size": len(sents), "num_workers": 3, "pin_memory": False}
infer_datalayer = self._setup_infer_dataloader(dataloader_cfg, sents)
batch = next(iter(infer_datalayer))
input_ids, input_mask, segment_ids = batch
tag_logits, semiotic_logits = self.forward(
input_ids=input_ids.to(self.device),
input_mask=input_mask.to(self.device),
segment_ids=segment_ids.to(self.device),
)
all_preds = []
for i, sent in enumerate(sents):
example = self.builder.build_bert_example(source=sent, infer=True)
tag_preds = tensor2list(torch.argmax(tag_logits[i], dim=-1))
semiotic_preds = tensor2list(torch.argmax(semiotic_logits[i], dim=-1))
# this mask is required by get_token_labels
example.features["labels_mask"] = [0] + [1] * (len(semiotic_preds) - 2) + [0]
example.features["tag_labels"] = tag_preds
example.features["semiotic_labels"] = semiotic_preds
tags = [self.id_2_tag[label_id] for label_id in example.get_token_labels("tag_labels")]
semiotic_labels = [
self.id_2_semiotic[label_id] for label_id in example.get_token_labels("semiotic_labels")
]
prediction, inp_str, tag_str, tag_with_swap_str = example.editing_task.realize_output(
tags, semiotic_labels
)
all_preds.append(
prediction
+ "\t"
+ inp_str
+ "\t"
+ tag_str
+ "\t"
+ tag_with_swap_str
+ "\t"
+ " ".join(semiotic_labels)
)
return all_preds
# Functions for processing data
def setup_training_data(self, train_data_config: Optional[DictConfig]):
if not train_data_config or not train_data_config.data_path:
logging.info(
f"Dataloader config or file_path for the train is missing, so no data loader for train is created!"
)
self._train_dl = None
return
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, data_split="train")
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if not val_data_config or not val_data_config.data_path:
logging.info(
f"Dataloader config or file_path for the validation is missing, so no data loader for validation is created!"
)
self._validation_dl = None
return
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config, data_split="val")
def setup_test_data(self, test_data_config: Optional[DictConfig]):
if not test_data_config or test_data_config.data_path is None:
logging.info(
f"Dataloader config or file_path for the test is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, data_split="test")
def _setup_dataloader_from_config(self, cfg: DictConfig, data_split: str):
start_time = perf_counter()
logging.info(f'Creating {data_split} dataset')
input_file = cfg.data_path
dataset = ThutmoseTaggerDataset(input_file=input_file, example_builder=self.builder)
dl = torch.utils.data.DataLoader(
dataset=dataset, batch_size=cfg.batch_size, shuffle=cfg.shuffle, collate_fn=dataset.collate_fn
)
running_time = perf_counter() - start_time
logging.info(f'Took {running_time} seconds')
return dl
def _setup_infer_dataloader(self, cfg: DictConfig, queries: List[str]) -> 'torch.utils.data.DataLoader':
"""
Setup function for an inference data loader.
Args:
cfg: config dictionary containing data loader params like batch_size, num_workers and pin_memory
queries: text
Returns:
A pytorch DataLoader.
"""
dataset = ThutmoseTaggerTestDataset(sents=queries, example_builder=self.builder)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg["batch_size"],
shuffle=False,
num_workers=cfg.get("num_workers", 0),
pin_memory=cfg.get("pin_memory", False),
drop_last=False,
collate_fn=dataset.collate_fn,
)
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
result = [
PretrainedModelInfo(
pretrained_model_name="itn_en_thutmose_bert",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/itn_en_thutmose_bert/versions/1.9.0/files/itn_en_thutmose_bert.nemo",
description="A single-pass tagger-based English model for inverse text normalization based"
"on BERT, trained on 2 mln sentences from Google Text Normalization Dataset",
),
PretrainedModelInfo(
pretrained_model_name="itn_ru_thutmose_bert",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/itn_ru_thutmose_bert/versions/1.11.0/files/itn_ru_thutmose_bert.nemo",
description="A single-pass tagger-based Russian model for inverse text normalization based"
"on BERT, trained on 2 mln sentences from Google Text Normalization Dataset",
),
]
return result
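# --- Usage sketch (editor's addition; a minimal example, not part of the NeMo source) ---
# Illustrates running inference with a pretrained checkpoint and unpacking the
# tab-separated records produced by _infer() (five fields per record, see its docstring).
# It assumes the "itn_en_thutmose_bert" checkpoint listed above can be downloaded.
if __name__ == "__main__":
    model = ThutmoseTaggerModel.from_pretrained("itn_en_thutmose_bert")
    for record in model._infer(["on may fifth twenty twenty one"]):
        output_text, input_words, tags, tags_after_swap, semiotic_labels = record.split("\t")
        print(output_text)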
| NeMo-main | nemo/collections/nlp/models/text_normalization_as_tagging/thutmose_tagger.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.text2sparql.text2sparql_model import Text2SparqlModel
| NeMo-main | nemo/collections/nlp/models/text2sparql/__init__.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, MeetKai Inc. All rights reserved.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Tuple
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from transformers import AutoModel, BartForConditionalGeneration, EncoderDecoderModel
from nemo.collections.common.metrics import Perplexity
from nemo.collections.nlp.data.text2sparql import Text2SparqlDataset
from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer
from nemo.core.classes.common import typecheck
from nemo.core.classes.modelPT import ModelPT
from nemo.core.neural_types import ChannelType, MaskType, NeuralType
from nemo.utils import logging
__all__ = ["Text2SparqlModel"]
class Text2SparqlModel(ModelPT):
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"attention_mask": NeuralType(('B', 'T'), MaskType(), optional=True),
"decoder_input_ids": NeuralType(('B', 'T'), ChannelType(), optional=True),
"labels": NeuralType(('B', 'T'), ChannelType(), optional=True),
}
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# must assign tokenizers before init
if cfg.language_model.pretrained_model_name:
if cfg.language_model.pretrained_encoder_model_name or cfg.language_model.pretrained_decoder_model_name:
raise ValueError(
"Must have either pretrained_model_name or both pretrained_encoder_model name and "
"pretrained_decoder_model_name."
)
# setup tokenizer
self.encoder_tokenizer = self.setup_tokenizer(cfg.encoder_tokenizer)
self.encoder_add_special_tokens = cfg.encoder_tokenizer.add_special_tokens
# set decoder to encoder
self.decoder_tokenizer = self.encoder_tokenizer
self.decoder_add_special_tokens = self.encoder_add_special_tokens
else:
if not (
cfg.language_model.pretrained_encoder_model_name and cfg.language_model.pretrained_decoder_model_name
):
raise ValueError("Both encoder and decoder must be specified")
# setup tokenizers
self.encoder_tokenizer = self.setup_tokenizer(cfg.encoder_tokenizer)
self.encoder_add_special_tokens = cfg.encoder_tokenizer.add_special_tokens
self.decoder_tokenizer = self.setup_tokenizer(cfg.decoder_tokenizer)
self.decoder_add_special_tokens = cfg.decoder_tokenizer.add_special_tokens
if not self.encoder_tokenizer:
raise TypeError("encoder_tokenizer failed to initialize")
if not self.decoder_tokenizer:
raise TypeError("decoder_tokenizer failed to initialize")
# init superclass
super().__init__(cfg=cfg, trainer=trainer)
# must assign modules after init
if cfg.language_model.pretrained_model_name:
# Setup end-to-end model
if "bart" in cfg.language_model.pretrained_model_name:
self.model = BartForConditionalGeneration.from_pretrained(cfg.language_model.pretrained_model_name)
else:
self.model = AutoModel.from_pretrained(cfg.language_model.pretrained_model_name)
else:
if not (
cfg.language_model.pretrained_encoder_model_name and cfg.language_model.pretrained_decoder_model_name
):
raise ValueError("Both encoder and decoder must be specified")
# Setup encoder/decoder model
self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(
encoder=cfg.language_model.pretrained_encoder_model_name,
decoder=cfg.language_model.pretrained_decoder_model_name,
)
self.validation_perplexity = Perplexity()
self.setup_optimization(cfg.optim)
@typecheck()
def forward(
self,
input_ids: torch.Tensor,
attention_mask: torch.Tensor = None,
decoder_input_ids: torch.Tensor = None,
labels: torch.Tensor = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
No special modification required for Lightning, define it as you normally would
in the `nn.Module` in vanilla PyTorch.
"""
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
labels=labels,
return_dict=False,
)
return outputs
@typecheck.disable_checks()
def generate(self, input_ids: torch.Tensor) -> torch.Tensor:
"""Wraps huggingface EncoderDecoder.generate()."""
outputs = self.model.generate(
input_ids=input_ids,
pad_token_id=self.encoder_tokenizer.pad_id,
bos_token_id=self.encoder_tokenizer.bos_id,
eos_token_id=self.encoder_tokenizer.eos_id,
decoder_start_token_id=self.decoder_tokenizer.bos_id,
**self._cfg.generate,
)
return outputs
def training_step(self, batch: Tuple, batch_idx: int) -> Dict:
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`. Loss calculation from HuggingFace's BartForConditionalGeneration.
"""
input_ids, input_mask, decoder_input_ids, labels = batch
loss = self.forward(
input_ids=input_ids, attention_mask=input_mask, decoder_input_ids=decoder_input_ids, labels=labels,
)[0]
tensorboard_logs = {"train_loss": loss, "lr": self._optimizer.param_groups[0]["lr"]}
return {"loss": loss, "log": tensorboard_logs}
def validation_step(self, batch: Tuple, batch_idx: int) -> Dict:
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`. Loss calculation from HuggingFace's BartForConditionalGeneration.
"""
input_ids, input_mask, decoder_input_ids, labels = batch
loss, logits = self.forward(
input_ids=input_ids, attention_mask=input_mask, decoder_input_ids=decoder_input_ids, labels=labels,
)[:2]
self.validation_perplexity(logits=logits)
tensorboard_logs = {"val_loss": loss}
return {"val_loss": loss, "log": tensorboard_logs}
def on_validation_epoch_end(self, outputs: List[Dict]) -> Dict:
"""
Called at the end of validation to aggregate outputs.
:param outputs: list of individual outputs of each validation step.
"""
avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
perplexity = self.validation_perplexity.compute()
tensorboard_logs = {"val_loss": avg_loss, "perplexity": perplexity}
logging.info(f"evaluation perplexity {perplexity.item()}")
self.log("val_loss", avg_loss)
return {"val_loss": avg_loss, "log": tensorboard_logs}
@typecheck.disable_checks()
def test_step(self, batch: Tuple, batch_idx: int) -> torch.Tensor:
"""Lightning calls this inside the test loop with data from the test dataloader."""
input_ids, input_mask, decoder_input_ids, labels = batch
sequences = self.generate(input_ids=input_ids)
return sequences
@typecheck.disable_checks()
def on_test_epoch_end(self, outputs: List[torch.Tensor]) -> Dict[str, List[str]]:
"""Called at the end of test to aggregate outputs and decode them."""
texts = [self.encoder_tokenizer.ids_to_text(seq) for batch in outputs for seq in batch]
self.test_output = [{"texts": texts}]
return {"texts": texts}
def setup_tokenizer(self, cfg: DictConfig):
tokenizer = get_tokenizer(
tokenizer_name=cfg.tokenizer_name,
tokenizer_model=cfg.tokenizer_model,
special_tokens=OmegaConf.to_container(cfg.special_tokens) if cfg.special_tokens else None,
vocab_file=cfg.vocab_file,
)
return tokenizer
def setup_training_data(self, train_data_config: Optional[DictConfig]):
self._train_dl = self.setup_dataloader_from_config(cfg=train_data_config)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
self._validation_dl = self.setup_dataloader_from_config(cfg=val_data_config)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
self._test_dl = self.setup_dataloader_from_config(cfg=test_data_config)
def setup_dataloader_from_config(self, cfg: DictConfig):
dataset = Text2SparqlDataset(
filepath=cfg.filepath,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
encoder_add_special_tokens=self.encoder_add_special_tokens,
decoder_add_special_tokens=self.decoder_add_special_tokens,
max_seq_length=self._cfg.max_seq_length,
num_samples=cfg.get("num_samples", -1),
convert_labels=self._cfg.convert_labels,
)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self._cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.get("num_workers", 2),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
collate_fn=dataset.collate_fn,
)
@classmethod
def list_available_models(cls) -> Optional[Dict[str, str]]:
pass
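# --- Usage sketch (editor's addition; a minimal example, not part of the NeMo source) ---
# Rough illustration of generating a SPARQL query with a restored checkpoint. The checkpoint
# path is a placeholder, and generate() relies on a `generate` section being present in the
# model config (it is forwarded as **self._cfg.generate above).
if __name__ == "__main__":
    model = Text2SparqlModel.restore_from("text2sparql.nemo")  # hypothetical checkpoint path
    input_ids = torch.tensor([model.encoder_tokenizer.text_to_ids("list all states of the usa")])
    generated = model.generate(input_ids=input_ids)
    print(model.encoder_tokenizer.ids_to_text(generated[0].tolist()))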
| NeMo-main | nemo/collections/nlp/models/text2sparql/text2sparql_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.glue_benchmark.glue_benchmark_model import GLUEModel
| NeMo-main | nemo/collections/nlp/models/glue_benchmark/__init__.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
__all__ = ['compute_metrics']
def accuracy(preds: List[int], labels: List[int]):
return {"acc": (preds == labels).mean()}
def acc_and_f1(preds: List[int], labels: List[int]):
acc = (preds == labels).mean()
f1 = f1_score(y_true=labels, y_pred=preds)
return {"acc": acc, "f1": f1}
def mcc(preds: List[int], labels: List[int]):
return {"mcc": matthews_corrcoef(labels, preds)}
def pearson_and_spearman(preds: List[int], labels: List[int]):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {"pearson": pearson_corr, "spearmanr": spearman_corr, "pear+spear av": (pearson_corr + spearman_corr) / 2}
def compute_metrics(task_name: str, preds: List[int], labels: List[int]) -> Dict[str, float]:
"""
Computes metrics for GLUE tasks
Args:
task_name: GLUE task name
preds: model predictions
labels: golden labels
Returns:
metrics
"""
if len(preds) != len(labels):
raise ValueError("Predictions and labels must have the same length")
metric_fn = accuracy
if task_name == 'cola':
metric_fn = mcc
elif task_name in ['mrpc', 'qqp']:
metric_fn = acc_and_f1
elif task_name == 'sts-b':
metric_fn = pearson_and_spearman
return metric_fn(preds, labels)
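# --- Usage sketch (editor's addition; a minimal example, not part of the NeMo source) ---
# compute_metrics expects array-like predictions and labels; the GLUE benchmark model passes
# np.array objects, which is what the element-wise `==` in accuracy() relies on.
if __name__ == "__main__":
    import numpy as np

    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    print(compute_metrics("mrpc", preds, labels))  # approximately {'acc': 0.75, 'f1': 0.8}
    print(compute_metrics("cola", preds, labels))  # {'mcc': ...}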
| NeMo-main | nemo/collections/nlp/models/glue_benchmark/metrics_for_glue.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, Optional, Union
import numpy as np
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.common.losses import CrossEntropyLoss, MSELoss
from nemo.collections.nlp.data.glue_benchmark.glue_benchmark_dataset import GLUE_TASKS_NUM_LABELS, GLUEDataset
from nemo.collections.nlp.models.glue_benchmark.metrics_for_glue import compute_metrics
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common import SequenceClassifier, SequenceRegression
from nemo.collections.nlp.parts.utils_funcs import list2str, tensor2list
from nemo.core.classes import typecheck
from nemo.core.neural_types import NeuralType
from nemo.utils import logging
__all__ = ['GLUEModel']
'''
Some parts of this code were adapted from the HuggingFace library at
https://github.com/huggingface/transformers
Example of running a pretrained BERT model on the 9 GLUE tasks, read more
about GLUE benchmark here: https://gluebenchmark.com
Download the GLUE data by running the script:
https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e
Some of these tasks have a small dataset and training can lead to high variance
in the results between different runs. Below is the median on 5 runs
(with different seeds) for each of the metrics on the dev set of the benchmark
with an uncased BERT base model (the checkpoint bert-base-uncased)
(source https://github.com/huggingface/transformers/tree/master/examples#glue).
Task Metric Result
CoLA Matthew's corr 48.87
SST-2 Accuracy 91.74
MRPC F1/Accuracy 90.70/86.27
STS-B Pearson/Spearman corr. 91.39/91.04
QQP Accuracy/F1 90.79/87.66
MNLI Matched acc./Mismatched acc. 83.70/84.83
QNLI Accuracy 89.31
RTE Accuracy 71.43
WNLI Accuracy 43.66
'''
class GLUEModel(NLPModel):
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return self.bert_model.input_types
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return self.pooler.output_types
@property
def output_module(self):
return self.pooler
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
"""
Initializes model to use BERT model for GLUE tasks.
"""
if cfg.task_name not in cfg.supported_tasks:
raise ValueError(f'{cfg.task_name} not in supported task. Choose from {cfg.supported_tasks}')
self.task_name = cfg.task_name
# needed to setup validation on multiple datasets
# MNLI task has two separate dev sets: matched and mismatched
if not self._is_model_being_restored():
if self.task_name == "mnli":
cfg.validation_ds.ds_item = [
os.path.join(cfg.dataset.data_dir, 'dev_matched.tsv'),
os.path.join(cfg.dataset.data_dir, 'dev_mismatched.tsv'),
]
else:
cfg.validation_ds.ds_item = os.path.join(cfg.dataset.data_dir, cfg.validation_ds.ds_item)
cfg.train_ds.ds_item = os.path.join(cfg.dataset.data_dir, cfg.train_ds.ds_item)
logging.info(f'Using {cfg.validation_ds.ds_item} for model evaluation.')
super().__init__(cfg=cfg, trainer=trainer)
num_labels = GLUE_TASKS_NUM_LABELS[self.task_name]
# uses [CLS] token for classification (the first token)
if self.task_name == "sts-b":
self.pooler = SequenceRegression(hidden_size=self.bert_model.config.hidden_size)
self.loss = MSELoss()
else:
self.pooler = SequenceClassifier(
hidden_size=self.bert_model.config.hidden_size, num_classes=num_labels, log_softmax=False
)
self.loss = CrossEntropyLoss()
def update_data_dir(self, data_dir: str) -> None:
"""
Update data directory and get data stats with Data Descriptor
Weights are later used to setup loss
Args:
data_dir: path to data directory
"""
self._cfg.dataset.data_dir = data_dir
logging.info(f'Setting model.dataset.data_dir to {data_dir}.')
if self.task_name == "mnli":
self._cfg.validation_ds.ds_item = [
os.path.join(data_dir, 'dev_matched.tsv'),
os.path.join(data_dir, 'dev_mismatched.tsv'),
]
else:
self._cfg.validation_ds.ds_item = os.path.join(data_dir, 'dev.tsv')
self._cfg.train_ds.ds_item = os.path.join(data_dir, 'train.tsv')
logging.info(f'Using {self._cfg.validation_ds.ds_item} for model evaluation.')
@typecheck()
def forward(self, input_ids, token_type_ids, attention_mask):
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)
if isinstance(hidden_states, tuple):
hidden_states = hidden_states[0]
output = self.pooler(hidden_states=hidden_states)
return output
def training_step(self, batch, batch_idx):
input_ids, input_type_ids, input_mask, labels = batch
model_output = self(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
if self.task_name == "sts-b":
loss = self.loss(preds=model_output, labels=labels)
else:
loss = self.loss(logits=model_output, labels=labels)
lr = self._optimizer.param_groups[0]['lr']
self.log('train_loss', loss)
self.log('lr', lr, prog_bar=True)
return {
'loss': loss,
'lr': lr,
}
def validation_step(self, batch, batch_idx, dataloader_idx=0):
input_ids, input_type_ids, input_mask, labels = batch
model_output = self(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
if self.task_name == "sts-b":
val_loss = self.loss(preds=model_output, labels=labels)
else:
val_loss = self.loss(logits=model_output, labels=labels)
if self.task_name != 'sts-b':
model_output = torch.argmax(model_output, 1)
eval_tensors = {'preds': model_output, 'labels': labels}
return {'val_loss': val_loss, 'eval_tensors': eval_tensors}
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
"""
Called at the end of validation to aggregate outputs.
outputs: list of individual outputs of each validation step.
"""
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
preds = torch.cat([x['eval_tensors']['preds'] for x in outputs])
labels = torch.cat([x['eval_tensors']['labels'] for x in outputs])
all_preds = []
all_labels = []
if torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
for ind in range(world_size):
all_preds.append(torch.empty_like(preds))
all_labels.append(torch.empty_like(labels))
torch.distributed.all_gather(all_preds, preds)
torch.distributed.all_gather(all_labels, labels)
else:
all_preds.append(preds)
all_labels.append(labels)
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
preds = []
labels = []
for p in all_preds:
preds.extend(tensor2list(p))
for l in all_labels:
labels.extend(tensor2list(l))
results = compute_metrics(self.task_name, np.array(preds), np.array(labels))
val_name = self._validation_names[dataloader_idx].upper()
logging.info(f'{val_name} evaluation: {results}')
# writing labels and predictions to a file in output_dir is specified in the config
output_dir = self._cfg.output_dir
if output_dir:
os.makedirs(output_dir, exist_ok=True)
filename = os.path.join(output_dir, f'{self.task_name}_{val_name}.txt')
logging.info(f'Saving labels and predictions to {filename}')
with open(filename, 'w') as f:
f.write('labels\t' + list2str(labels) + '\n')
f.write('preds\t' + list2str(preds) + '\n')
self.log('val_loss', avg_loss)
if self.trainer.is_global_zero:
for k, v in results.items():
self.log(f'{val_name}_{k}', v, rank_zero_only=True)
def setup_training_data(self, train_data_config: Optional[DictConfig] = None):
if train_data_config is None:
train_data_config = self._cfg.train_ds
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config)
def setup_validation_data(self, val_data_config: Optional[DictConfig] = None):
if val_data_config is None:
val_data_config = self._cfg.validation_ds
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config)
def setup_multiple_validation_data(self, val_data_config: Union[DictConfig, Dict] = None):
if val_data_config is None:
val_data_config = self._cfg.validation_ds
return super().setup_multiple_validation_data(val_data_config)
def _setup_dataloader_from_config(self, cfg: DictConfig):
file_name = cfg.ds_item
if not os.path.exists(file_name):
raise FileNotFoundError(
"GLUE datasets not found. For more details on how to get the data, see: "
"https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e"
)
dataset = GLUEDataset(
file_name=file_name,
task_name=self.task_name,
tokenizer=self.tokenizer,
max_seq_length=self._cfg.dataset.max_seq_length,
use_cache=self._cfg.dataset.use_cache,
)
return torch.utils.data.DataLoader(
dataset=dataset,
collate_fn=dataset.collate_fn,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=self._cfg.dataset.num_workers,
pin_memory=self._cfg.dataset.pin_memory,
drop_last=self._cfg.dataset.drop_last,
)
@classmethod
def list_available_models(cls) -> Optional[Dict[str, str]]:
pass
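# --- Usage sketch (editor's addition; a minimal example, not part of the NeMo source) ---
# Shows how a restored GLUE model might be pointed at a freshly downloaded data directory
# via update_data_dir(). The checkpoint path and data directory are placeholders; MNLI expects
# dev_matched.tsv and dev_mismatched.tsv in data_dir, the other tasks a single dev.tsv.
if __name__ == "__main__":
    model = GLUEModel.restore_from("glue_mrpc.nemo")  # hypothetical checkpoint path
    model.update_data_dir("/data/glue/MRPC")
    model.setup_training_data()
    model.setup_multiple_validation_data()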
| NeMo-main | nemo/collections/nlp/models/glue_benchmark/glue_benchmark_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, List, Optional
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.common.losses import CrossEntropyLoss
from nemo.collections.nlp.data.text_classification import TextClassificationDataset, calc_class_weights
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common import SequenceClassifier
from nemo.collections.nlp.parts.utils_funcs import tensor2list
from nemo.core.classes.common import typecheck
from nemo.core.classes.exportable import Exportable
from nemo.utils import logging
__all__ = ['TextClassificationModel']
class TextClassificationModel(NLPModel, Exportable):
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
"""Initializes the BERTTextClassifier model."""
# shared params for dataset and data loaders
self.dataset_cfg = cfg.dataset
self.class_weights = None
super().__init__(cfg=cfg, trainer=trainer)
self.classifier = SequenceClassifier(
hidden_size=self.hidden_size,
num_classes=cfg.dataset.num_classes,
num_layers=cfg.classifier_head.num_output_layers,
activation='relu',
log_softmax=False,
dropout=cfg.classifier_head.fc_dropout,
use_transformer_init=True,
idx_conditioned_on=0,
)
self.create_loss_module()
# setup to track metrics
self.classification_report = ClassificationReport(
num_classes=cfg.dataset.num_classes, mode='micro', dist_sync_on_step=True
)
# register the file containing the labels into the artifacts to get stored in the '.nemo' file later
if 'class_labels' in cfg and 'class_labels_file' in cfg.class_labels and cfg.class_labels.class_labels_file:
self.register_artifact('class_labels.class_labels_file', cfg.class_labels.class_labels_file)
def create_loss_module(self):
# create the loss module if it is not yet created by the training data loader
if not hasattr(self, 'loss'):
if hasattr(self, 'class_weights') and self.class_weights:
# You may need to increase the number of epochs for convergence when using weighted_loss
self.loss = CrossEntropyLoss(weight=self.class_weights)
else:
self.loss = CrossEntropyLoss()
@typecheck()
def forward(self, input_ids, attention_mask, token_type_ids):
"""
No special modification required for Lightning, define it as you normally would
in the `nn.Module` in vanilla PyTorch.
"""
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)
if isinstance(hidden_states, tuple):
hidden_states = hidden_states[0]
logits = self.classifier(hidden_states=hidden_states)
return logits.float()
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
# forward pass
input_ids, input_type_ids, input_mask, labels = batch
logits = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
train_loss = self.loss(logits=logits, labels=labels)
lr = self._optimizer.param_groups[0]['lr']
self.log('train_loss', train_loss)
self.log('lr', lr, prog_bar=True)
return {
'loss': train_loss,
'lr': lr,
}
def validation_step(self, batch, batch_idx, split="val"):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
input_ids, input_type_ids, input_mask, labels = batch
logits = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
val_loss = self.loss(logits=logits, labels=labels)
preds = torch.argmax(logits, axis=-1)
tp, fn, fp, _ = self.classification_report(preds, labels)
loss = {f'{split}_loss': val_loss, 'tp': tp, 'fn': fn, 'fp': fp}
if split == 'val':
self.validation_step_outputs.append(loss)
elif split == 'test':
self.test_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self, split="val"):
"""
Called at the end of validation to aggregate outputs.
:param outputs: list of individual outputs of each validation step.
"""
avg_loss = torch.tensor(0)
if split == 'val':
avg_loss = torch.stack([x[f'val_loss'] for x in self.validation_step_outputs]).mean()
self.validation_step_outputs.clear() # free memory
elif split == 'test':
avg_loss = torch.stack([x[f'test_loss'] for x in self.test_step_outputs]).mean()
self.test_step_outputs.clear() # free memory
# calculate metrics and classification report
precision, recall, f1, report = self.classification_report.compute()
logging.info(f'{split}_report: {report}')
self.log(f'{split}_loss', avg_loss, prog_bar=True)
self.log(f'{split}_precision', precision)
self.log(f'{split}_f1', f1)
self.log(f'{split}_recall', recall)
self.classification_report.reset()
def test_step(self, batch, batch_idx):
"""
Lightning calls this inside the test loop with the data from the test dataloader
passed in as `batch`.
"""
return self.validation_step(batch, batch_idx, 'test')
def on_test_epoch_end(self):
"""
Called at the end of test to aggregate outputs.
:param outputs: list of individual outputs of each test step.
"""
return self.on_validation_epoch_end(split='test')
def setup_training_data(self, train_data_config: Optional[DictConfig]):
if not train_data_config or not train_data_config.file_path:
logging.info(
f"Dataloader config or file_path for the train is missing, so no data loader for test is created!"
)
self._train_dl = None
return
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config)
# calculate the class weights to be used in the loss function
if self.cfg.dataset.class_balancing == 'weighted_loss':
self.class_weights = calc_class_weights(train_data_config.file_path, self.cfg.dataset.num_classes)
else:
self.class_weights = None
# we need to create/update the loss module by using the weights calculated from the training data
self.create_loss_module()
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if not val_data_config or not val_data_config.file_path:
logging.info(
f"Dataloader config or file_path for the validation is missing, so no data loader for test is created!"
)
self._validation_dl = None
return
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
if not test_data_config or not test_data_config.file_path:
logging.info(
f"Dataloader config or file_path for the test is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config)
def _setup_dataloader_from_config(self, cfg: Dict) -> 'torch.utils.data.DataLoader':
input_file = cfg.file_path
if not os.path.exists(input_file):
raise FileNotFoundError(
f'{input_file} not found! The data should be stored in TAB-separated files \n\
"validation_ds.file_path" and "train_ds.file_path" for train and evaluation respectively. \n\
Each line of the files contains text sequences, where words are separated with spaces. \n\
The label of the example is separated with TAB at the end of each line. \n\
Each line of the files should follow the format: \n\
[WORD][SPACE][WORD][SPACE][WORD][...][TAB][LABEL]'
)
dataset = TextClassificationDataset(
tokenizer=self.tokenizer,
input_file=input_file,
max_seq_length=self.dataset_cfg.max_seq_length,
num_samples=cfg.get("num_samples", -1),
shuffle=cfg.shuffle,
use_cache=self.dataset_cfg.use_cache,
)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.get("num_workers", 0),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
collate_fn=dataset.collate_fn,
)
@torch.no_grad()
def classifytext(self, queries: List[str], batch_size: int = 1, max_seq_length: int = -1) -> List[int]:
"""
Get prediction for the queries
Args:
queries: text sequences
batch_size: batch size to use during inference
max_seq_length: sequences longer than max_seq_length will get truncated. default -1 disables truncation.
Returns:
all_preds: model predictions
"""
# store predictions for all queries in a single list
all_preds = []
mode = self.training
device = next(self.parameters()).device
try:
# Switch model to evaluation mode
self.eval()
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
dataloader_cfg = {"batch_size": batch_size, "num_workers": 3, "pin_memory": False}
infer_datalayer = self._setup_infer_dataloader(dataloader_cfg, queries, max_seq_length)
for i, batch in enumerate(infer_datalayer):
input_ids, input_type_ids, input_mask, subtokens_mask = batch
logits = self.forward(
input_ids=input_ids.to(device),
token_type_ids=input_type_ids.to(device),
attention_mask=input_mask.to(device),
)
preds = tensor2list(torch.argmax(logits, axis=-1))
all_preds.extend(preds)
finally:
# set mode back to its original value
self.train(mode=mode)
logging.set_verbosity(logging_level)
return all_preds
def _setup_infer_dataloader(
self, cfg: Dict, queries: List[str], max_seq_length: int = -1
) -> 'torch.utils.data.DataLoader':
"""
Setup function for an inference data loader.
Args:
cfg: config dictionary containing data loader params like batch_size, num_workers and pin_memory
queries: text
max_seq_length: maximum length of queries, default is -1 for no limit
Returns:
A pytorch DataLoader.
"""
dataset = TextClassificationDataset(tokenizer=self.tokenizer, queries=queries, max_seq_length=max_seq_length)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg["batch_size"],
shuffle=False,
num_workers=cfg.get("num_workers", 0),
pin_memory=cfg.get("pin_memory", False),
drop_last=False,
collate_fn=dataset.collate_fn,
)
@classmethod
def list_available_models(cls) -> Optional[Dict[str, str]]:
pass
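# --- Usage sketch (editor's addition; a minimal example, not part of the NeMo source) ---
# Minimal example of inference with classifytext() as documented above. The checkpoint path is
# a placeholder for a model trained on the TAB-separated format described in
# _setup_dataloader_from_config().
if __name__ == "__main__":
    model = TextClassificationModel.restore_from("text_classification.nemo")  # hypothetical path
    preds = model.classifytext(
        queries=["the service was slow", "great food and friendly staff"],
        batch_size=2,
        max_seq_length=128,
    )
    print(preds)  # predicted class indices, one per query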
| NeMo-main | nemo/collections/nlp/models/text_classification/text_classification_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.text_classification.text_classification_model import TextClassificationModel
| NeMo-main | nemo/collections/nlp/models/text_classification/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import perf_counter
from typing import Dict, List, Optional
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from torch import nn
from transformers import AutoModelForTokenClassification, AutoTokenizer, DataCollatorForTokenClassification
from transformers.tokenization_utils_base import BatchEncoding
from nemo.collections.nlp.data.text_normalization import TextNormalizationTaggerDataset, constants
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.models.duplex_text_normalization.utils import has_numbers
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types import ChannelType, LogitsType, MaskType, NeuralType
from nemo.utils import logging
__all__ = ['DuplexTaggerModel']
class DuplexTaggerModel(NLPModel):
"""
Transformer-based (duplex) tagger model for TN/ITN.
"""
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"attention_mask": NeuralType(('B', 'T'), MaskType(), optional=True),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"logits": NeuralType(('B', 'T', 'D'), LogitsType())}
@property
def input_module(self):
return self
@property
def output_module(self):
return self
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self.tokenizer = AutoTokenizer.from_pretrained(cfg.tokenizer, add_prefix_space=True)
super().__init__(cfg=cfg, trainer=trainer)
self.num_labels = len(constants.ALL_TAG_LABELS)
self.mode = cfg.get('mode', 'joint')
self.model = AutoModelForTokenClassification.from_pretrained(cfg.transformer, num_labels=self.num_labels)
self.transformer_name = cfg.transformer
self.max_sequence_len = cfg.get('max_sequence_len', self.tokenizer.model_max_length)
# Loss Functions
self.loss_fct = nn.CrossEntropyLoss(ignore_index=constants.LABEL_PAD_TOKEN_ID)
# setup to track metrics
self.classification_report = ClassificationReport(
self.num_labels, constants.LABEL_IDS, mode='micro', dist_sync_on_step=True
)
# Language
self.lang = cfg.get('lang', None)
@typecheck()
def forward(self, input_ids, attention_mask):
logits = self.model(input_ids=input_ids, attention_mask=attention_mask).logits
return logits
# Training
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
num_labels = self.num_labels
# Apply Transformer
tag_logits = self.forward(input_ids=batch['input_ids'], attention_mask=batch['attention_mask'])
# Loss
train_loss = self.loss_fct(tag_logits.view(-1, num_labels), batch['labels'].view(-1))
lr = self._optimizer.param_groups[0]['lr']
self.log('train_loss', train_loss)
self.log('lr', lr, prog_bar=True)
return {'loss': train_loss, 'lr': lr}
# Validation and Testing
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
# Apply Transformer
tag_logits = self.forward(input_ids=batch['input_ids'], attention_mask=batch['attention_mask'])
tag_preds = torch.argmax(tag_logits, dim=2)
# Update classification_report
predictions, labels = tag_preds.tolist(), batch['labels'].tolist()
for prediction, label in zip(predictions, labels):
cur_preds = [p for (p, l) in zip(prediction, label) if l != constants.LABEL_PAD_TOKEN_ID]
cur_labels = [l for (p, l) in zip(prediction, label) if l != constants.LABEL_PAD_TOKEN_ID]
self.classification_report(
torch.tensor(cur_preds).to(self.device), torch.tensor(cur_labels).to(self.device)
)
def on_validation_epoch_end(self):
"""
Called at the end of validation to aggregate outputs.
:param outputs: list of individual outputs of each validation step.
"""
# calculate metrics and classification report
precision, _, _, report = self.classification_report.compute()
logging.info(report)
self.log('val_token_precision', precision)
self.classification_report.reset()
def test_step(self, batch, batch_idx):
"""
Lightning calls this inside the test loop with the data from the test dataloader
passed in as `batch`.
"""
return self.validation_step(batch, batch_idx)
def on_test_epoch_end(self):
"""
Called at the end of test to aggregate outputs.
:param outputs: list of individual outputs of each test step.
"""
return self.on_validation_epoch_end()
# Functions for inference
@torch.no_grad()
def _infer(self, sents: List[List[str]], inst_directions: List[str]):
""" Main function for Inference
Args:
sents: A list of inputs tokenized by a basic tokenizer.
inst_directions: A list of str where each str indicates the direction of the corresponding instance
(i.e., INST_BACKWARD for ITN or INST_FORWARD for TN).
Returns:
all_tag_preds: A list of list where each list contains the raw tag predictions for the corresponding input words in sents.
nb_spans: A list of ints where each int indicates the number of semiotic spans in input words.
span_starts: A list of lists where each list contains the starting locations of semiotic spans in input words.
span_ends: A list of lists where each list contains the ending locations of semiotic spans in input words.
"""
self.eval()
# Append prefix
texts = []
for ix, sent in enumerate(sents):
if inst_directions[ix] == constants.INST_BACKWARD:
prefix = constants.ITN_PREFIX
elif inst_directions[ix] == constants.INST_FORWARD:
prefix = constants.TN_PREFIX
texts.append([prefix] + sent)
# Apply the model
encodings = self.tokenizer(texts, is_split_into_words=True, padding=True, truncation=True, return_tensors='pt')
inputs = encodings
encodings_reduced = None
        # check that the length of the 'input_ids' is at least the length of the original input
# if an input symbol is missing in the tokenizer's vocabulary (such as emoji or a Chinese character), it could be skipped
len_texts = [len(x) for x in texts]
len_ids = [
len(self.tokenizer.convert_ids_to_tokens(x, skip_special_tokens=True)) for x in encodings['input_ids']
]
idx_valid = [i for i, (t, enc) in enumerate(zip(len_texts, len_ids)) if enc >= t]
if len(idx_valid) != len(texts):
logging.warning(
'Some of the examples have symbols that were skipped during the tokenization. Such examples will be skipped.'
)
for i in range(len(texts)):
if i not in idx_valid:
logging.warning(f'Invalid input: {texts[i]}')
# skip these sentences and fall back to the input
# exclude invalid examples from the encodings
encodings_reduced = {k: tensor[idx_valid, :] for k, tensor in encodings.items()}
for k, tensor in encodings_reduced.items():
if tensor.ndim == 1:
encodings_reduced[k] = tensor.unsqueeze(dim=0)
inputs = BatchEncoding(data=encodings_reduced)
# skip the batch if no valid inputs are present
if encodings_reduced and encodings_reduced['input_ids'].numel() == 0:
# -1 to exclude tag for the prompt token
all_tag_preds = [[constants.SAME_TAG] * (len(x) - 1) for x in texts]
nb_spans = [0] * len(texts)
            span_starts = [[] for _ in texts]
            span_ends = [[] for _ in texts]
return all_tag_preds, nb_spans, span_starts, span_ends
logits = self.model(**inputs.to(self.device)).logits
pred_indexes = torch.argmax(logits, dim=-1).tolist()
# Extract all_tag_preds for words
all_tag_preds = []
batch_size, max_len = encodings['input_ids'].size()
pred_idx = 0
for ix in range(batch_size):
if ix in idx_valid:
# remove first special token and task prefix token
raw_tag_preds = [constants.ALL_TAG_LABELS[p] for p in pred_indexes[pred_idx][2:]]
tag_preds, previous_word_idx = [], None
word_ids = encodings.word_ids(batch_index=ix)[2:]
for jx, word_idx in enumerate(word_ids):
if word_idx is None:
continue
if word_idx != previous_word_idx:
tag_preds.append(raw_tag_preds[jx]) # without special token at index 0
previous_word_idx = word_idx
pred_idx += 1
else:
# for excluded examples, use SAME tags for all words
tag_preds = [constants.SAME_TAG] * (len(texts[ix]) - 1)
all_tag_preds.append(tag_preds)
        # Post-correction of simple tagger mistakes, e.g., an I- tag that is not preceded by a B- tag in a span
all_tag_preds = [
self._postprocess_tag_preds(words, inst_dir, ps)
for words, inst_dir, ps in zip(sents, inst_directions, all_tag_preds)
]
# Decoding
nb_spans, span_starts, span_ends = self.decode_tag_preds(all_tag_preds)
return all_tag_preds, nb_spans, span_starts, span_ends
def _postprocess_tag_preds(self, words: List[str], inst_dir: str, preds: List[str]):
""" Function for postprocessing the raw tag predictions of the model. It
        corrects obvious mistakes in the tag predictions, such as a TRANSFORM span
        starting with I_TRANSFORM_TAG (instead of B_TRANSFORM_TAG).
Args:
words: The words in the input sentence
inst_dir: The direction of the instance (i.e., constants.INST_BACKWARD or INST_FORWARD).
preds: The raw tag predictions
Returns: The processed raw tag predictions
"""
final_preds = []
for ix, p in enumerate(preds):
# a TRANSFORM span starts with I_TRANSFORM_TAG, change to B_TRANSFORM_TAG
if p == constants.I_PREFIX + constants.TRANSFORM_TAG:
                if ix == 0 or (constants.TRANSFORM_TAG not in final_preds[ix - 1]):
final_preds.append(constants.B_PREFIX + constants.TRANSFORM_TAG)
continue
# a span has numbers but does not have TRANSFORM tags (for TN)
if inst_dir == constants.INST_FORWARD:
                if has_numbers(words[ix]) and (constants.TRANSFORM_TAG not in p):
final_preds.append(constants.B_PREFIX + constants.TRANSFORM_TAG)
continue
# Convert B-TASK tag to B-SAME tag
if p == constants.B_PREFIX + constants.TASK_TAG:
final_preds.append(constants.B_PREFIX + constants.SAME_TAG)
continue
# Default
final_preds.append(p)
return final_preds
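    # Illustrative example (a sketch, assuming the usual constant values B_PREFIX == 'B-', I_PREFIX == 'I-',
    # TRANSFORM_TAG == 'TRANSFORM', SAME_TAG == 'SAME'): for a TN instance with
    #   words = ['on', 'may', '3']   and   preds = ['B-SAME', 'I-TRANSFORM', 'I-TRANSFORM']
    # the span starting with an I- tag is corrected, giving
    #   final_preds = ['B-SAME', 'B-TRANSFORM', 'I-TRANSFORM']
    # For the forward (TN) direction, a word containing digits that was not tagged as TRANSFORM would likewise
    # be forced to 'B-TRANSFORM', since digits always require verbalization.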
def decode_tag_preds(self, tag_preds: List[List[str]]):
""" Decoding the raw tag predictions to locate the semiotic spans in the
input texts.
Args:
            tag_preds: A list of lists where each inner list contains the raw tag predictions for the corresponding input words.
Returns:
nb_spans: A list of ints where each int indicates the number of semiotic spans in each input.
            span_starts: A list of lists where each list contains the starting locations of semiotic spans in the input words.
            span_ends: A list of lists where each list contains the inclusive ending locations of semiotic spans in the input words.
"""
nb_spans, span_starts, span_ends = [], [], []
for i, preds in enumerate(tag_preds):
cur_nb_spans, cur_span_start = 0, None
cur_span_starts, cur_span_ends = [], []
for ix, pred in enumerate(preds + ['EOS']):
if pred != constants.I_PREFIX + constants.TRANSFORM_TAG:
                    if cur_span_start is not None:
cur_nb_spans += 1
cur_span_starts.append(cur_span_start)
cur_span_ends.append(ix - 1)
cur_span_start = None
if pred == constants.B_PREFIX + constants.TRANSFORM_TAG:
cur_span_start = ix
nb_spans.append(cur_nb_spans)
span_starts.append(cur_span_starts)
span_ends.append(cur_span_ends)
return nb_spans, span_starts, span_ends
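    # Illustrative example (a sketch, assuming the tag strings 'B-TRANSFORM'/'I-TRANSFORM'): the predictions
    #   tag_preds = [['B-SAME', 'B-TRANSFORM', 'I-TRANSFORM', 'B-SAME']]
    # decode into a single semiotic span covering words 1..2 (inclusive):
    #   nb_spans == [1], span_starts == [[1]], span_ends == [[2]]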
# Functions for processing data
def setup_training_data(self, train_data_config: Optional[DictConfig]):
if not train_data_config or not train_data_config.data_path:
logging.info(
f"Dataloader config or file_path for the train is missing, so no data loader for train is created!"
)
self._train_dl = None
return
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, data_split="train")
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if not val_data_config or not val_data_config.data_path:
logging.info(
f"Dataloader config or file_path for the validation is missing, so no data loader for validation is created!"
)
self._validation_dl = None
return
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config, data_split="val")
def setup_test_data(self, test_data_config: Optional[DictConfig]):
if not test_data_config or test_data_config.data_path is None:
logging.info(
f"Dataloader config or file_path for the test is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, data_split="test")
def _setup_dataloader_from_config(self, cfg: DictConfig, data_split: str):
start_time = perf_counter()
logging.info(f'Creating {data_split} dataset')
input_file = cfg.data_path
tagger_data_augmentation = cfg.get('tagger_data_augmentation', False)
dataset = TextNormalizationTaggerDataset(
input_file=input_file,
tokenizer=self.tokenizer,
tokenizer_name=self.transformer_name,
mode=self.mode,
tagger_data_augmentation=tagger_data_augmentation,
lang=self.lang,
max_seq_length=self.max_sequence_len,
use_cache=cfg.get('use_cache', False),
max_insts=cfg.get('max_insts', -1),
)
data_collator = DataCollatorForTokenClassification(self.tokenizer)
dl = torch.utils.data.DataLoader(
dataset=dataset, batch_size=cfg.batch_size, shuffle=cfg.shuffle, collate_fn=data_collator
)
running_time = perf_counter() - start_time
logging.info(f'Took {running_time} seconds')
return dl
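    # A minimal sketch of the per-split data config consumed by `_setup_dataloader_from_config` above
    # (illustrative values; the key names follow the `cfg.*` accesses in this method):
    #   train_ds:
    #     data_path: train.tsv
    #     batch_size: 64
    #     shuffle: true
    #     tagger_data_augmentation: false
    #     use_cache: false
    #     max_insts: -1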
def input_example(self):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
sample = next(self.parameters())
input_ids = torch.randint(low=0, high=2048, size=(2, 16), device=sample.device)
        attention_mask = torch.randint(low=0, high=2, size=(2, 16), device=sample.device)  # high is exclusive, so this yields a 0/1 mask
return tuple([input_ids, attention_mask])
@classmethod
    def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
result.append(
PretrainedModelInfo(
pretrained_model_name="neural_text_normalization_t5",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/neural_text_normalization_t5/versions/1.5.0/files/neural_text_normalization_t5_tagger.nemo",
description="Text Normalization model's tagger model.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="itn_en_t5",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/itn_en_t5/versions/1.11.0/files/itn_en_t5_tagger.nemo",
description="English Inverse Text Normalization model's tagger model.",
)
)
return result
| NeMo-main | nemo/collections/nlp/models/duplex_text_normalization/duplex_tagger.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from collections import defaultdict
from math import ceil
from typing import Dict, List, Optional, Union
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq
import nemo.collections.nlp.data.text_normalization.constants as constants
from nemo.collections.common.tokenizers.moses_tokenizers import MosesProcessor
from nemo.collections.nlp.data.text_normalization import TextNormalizationTestDataset
from nemo.collections.nlp.data.text_normalization.decoder_dataset import (
TarredTextNormalizationDecoderDataset,
TextNormalizationDecoderDataset,
)
from nemo.collections.nlp.models.duplex_text_normalization.utils import get_formatted_string
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types import ChannelType, LabelsType, LossType, MaskType, NeuralType
from nemo.utils import logging
try:
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
PYNINI_AVAILABLE = False
__all__ = ['DuplexDecoderModel']
class DuplexDecoderModel(NLPModel):
"""
Transformer-based (duplex) decoder model for TN/ITN.
"""
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"decoder_input_ids": NeuralType(('B', 'T'), ChannelType()),
"attention_mask": NeuralType(('B', 'T'), MaskType(), optional=True),
"labels": NeuralType(('B', 'T'), LabelsType()),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"loss": NeuralType((), LossType())}
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# Get global rank and total number of GPU workers for IterableDataset partitioning, if applicable
        # global_rank and local_rank are set by LightningModule in Lightning 1.2.0
self.world_size = 1
if trainer is not None:
self.world_size = trainer.num_nodes * trainer.num_devices
self.tokenizer = AutoTokenizer.from_pretrained(cfg.tokenizer)
super().__init__(cfg=cfg, trainer=trainer, no_lm_init=True)
self.model = AutoModelForSeq2SeqLM.from_pretrained(cfg.transformer)
self.max_sequence_len = cfg.get('max_sequence_len', self.tokenizer.model_max_length)
self.mode = cfg.get('mode', 'joint')
self.transformer_name = cfg.transformer
# Language
self.lang = cfg.get('lang', None)
# Covering Grammars
self.cg_normalizer = None # Default
# We only support integrating with English TN covering grammars at the moment
self.use_cg = cfg.get('use_cg', False) and self.lang == constants.ENGLISH
if self.use_cg:
self.setup_cgs(cfg)
# setup processor for detokenization
self.processor = MosesProcessor(lang_id=self.lang)
# Setup covering grammars (if enabled)
def setup_cgs(self, cfg: DictConfig):
"""
Setup covering grammars (if enabled).
:param cfg: Configs of the decoder model.
"""
self.use_cg = True
self.neural_confidence_threshold = cfg.get('neural_confidence_threshold', 0.99)
self.n_tagged = cfg.get('n_tagged', 1)
input_case = 'cased' # input_case is cased by default
if hasattr(self.tokenizer, 'do_lower_case') and self.tokenizer.do_lower_case:
input_case = 'lower_cased'
if PYNINI_AVAILABLE:
self.cg_normalizer = NormalizerWithAudio(input_case=input_case, lang=self.lang)
else:
self.cg_normalizer = None
logging.warning(
"`nemo_text_processing` is not installed, see https://github.com/NVIDIA/NeMo-text-processing for details"
)
@typecheck()
def forward(self, input_ids, decoder_input_ids, attention_mask, labels):
outputs = self.model(
input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, labels=labels
)
return outputs.loss
# Training
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
# tarred dataset contains batches, and the first dimension of size 1 added by the DataLoader
# (batch_size is set to 1) is redundant
if batch['input_ids'].ndim == 3:
batch = {k: v.squeeze(dim=0) for k, v in batch.items()}
# Apply Transformer
train_loss = self.forward(
input_ids=batch['input_ids'],
decoder_input_ids=batch['decoder_input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['labels'],
)
lr = self._optimizer.param_groups[0]['lr']
self.log('train_loss', train_loss)
self.log('lr', lr, prog_bar=True)
return {'loss': train_loss, 'lr': lr}
# Validation and Testing
def validation_step(self, batch, batch_idx, dataloader_idx=0, split="val"):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
# Apply Transformer
val_loss = self.forward(
input_ids=batch['input_ids'],
decoder_input_ids=batch['decoder_input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['labels'],
)
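        # `labels` uses -100 (constants.LABEL_PAD_TOKEN_ID) at padded/ignored positions, which `batch_decode`
        # cannot handle. The expression below adds 100 exactly at those positions, turning every -100 into
        # token id 0 before decoding while leaving real label ids unchanged.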
labels_str = self.tokenizer.batch_decode(
torch.ones_like(batch['labels']) * ((batch['labels'] == -100) * 100) + batch['labels'],
skip_special_tokens=True,
)
generated_texts, _, _ = self._generate_predictions(
input_ids=batch['input_ids'], model_max_len=self.max_sequence_len
)
results = defaultdict(int)
for idx, class_id in enumerate(batch['semiotic_class_id']):
direction = constants.TASK_ID_TO_MODE[batch['direction'][idx][0].item()]
class_name = self._val_id_to_class[dataloader_idx][class_id[0].item()]
pred_result = TextNormalizationTestDataset.is_same(
generated_texts[idx], labels_str[idx], constants.DIRECTIONS_TO_MODE[direction]
)
results[f"correct_{class_name}_{direction}"] += torch.tensor(pred_result, dtype=torch.int).to(self.device)
results[f"total_{class_name}_{direction}"] += torch.tensor(1).to(self.device)
results[f"{split}_loss"] = val_loss
return dict(results)
def multi_validation_epoch_end(self, outputs: List, dataloader_idx=0, split="val"):
"""
Called at the end of validation to aggregate outputs.
Args:
outputs: list of individual outputs of each validation step.
"""
avg_loss = torch.stack([x[f'{split}_loss'] for x in outputs]).mean()
# create a dictionary to store all the results
results = {}
directions = [constants.TN_MODE, constants.ITN_MODE] if self.mode == constants.JOINT_MODE else [self.mode]
for class_name in self._val_class_to_id[dataloader_idx]:
for direction in directions:
results[f"correct_{class_name}_{direction}"] = 0
results[f"total_{class_name}_{direction}"] = 0
for key in results:
count = [x[key] for x in outputs if key in x]
count = torch.stack(count).sum() if len(count) > 0 else torch.tensor(0).to(self.device)
results[key] = count
all_results = defaultdict(list)
if torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
for ind in range(world_size):
for key, v in results.items():
all_results[key].append(torch.empty_like(v))
for key, v in results.items():
torch.distributed.all_gather(all_results[key], v)
else:
for key, v in results.items():
all_results[key].append(v)
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
if split == "test":
val_name = self._test_names[dataloader_idx].upper()
else:
val_name = self._validation_names[dataloader_idx].upper()
final_results = defaultdict(int)
for key, v in all_results.items():
for _v in v:
final_results[key] += _v.item()
accuracies = defaultdict(dict)
for key, value in final_results.items():
if "total_" in key:
_, class_name, mode = key.split('_')
correct = final_results[f"correct_{class_name}_{mode}"]
if value == 0:
accuracies[mode][class_name] = (0, correct, value)
else:
acc = round(correct / value * 100, 3)
accuracies[mode][class_name] = (acc, correct, value)
for mode, values in accuracies.items():
report = f"Accuracy {mode.upper()} task {val_name}:\n"
report += '\n'.join(
[
get_formatted_string((class_name, f'{v[0]}% ({v[1]}/{v[2]})'), str_max_len=24)
for class_name, v in values.items()
]
)
# calculate average across all classes
all_total = 0
all_correct = 0
for _, class_values in values.items():
_, correct, total = class_values
all_correct += correct
all_total += total
all_acc = round((all_correct / all_total) * 100, 3) if all_total > 0 else 0
report += '\n' + get_formatted_string(
('AVG', f'{all_acc}% ({all_correct}/{all_total})'), str_max_len=24
)
logging.info(report)
accuracies[mode]['AVG'] = [all_acc]
self.log(f'{split}_loss', avg_loss)
if self.trainer.is_global_zero:
for mode in accuracies:
for class_name, values in accuracies[mode].items():
self.log(f'{val_name}_{mode.upper()}_acc_{class_name.upper()}', values[0], rank_zero_only=True)
return {
f'{split}_loss': avg_loss,
}
def test_step(self, batch, batch_idx, dataloader_idx: int = 0):
"""
Lightning calls this inside the test loop with the data from the test dataloader
passed in as `batch`.
"""
return self.validation_step(batch, batch_idx, dataloader_idx, split="test")
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
"""
Called at the end of test to aggregate outputs.
outputs: list of individual outputs of each test step.
"""
return self.multi_validation_epoch_end(outputs, dataloader_idx, split="test")
@torch.no_grad()
def _generate_predictions(self, input_ids: torch.Tensor, model_max_len: int = 512):
"""
Generates predictions
"""
outputs = self.model.generate(
input_ids, output_scores=True, return_dict_in_generate=True, max_length=model_max_len
)
generated_ids, sequence_toks_scores = outputs['sequences'], outputs['scores']
generated_texts = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
return generated_texts, generated_ids, sequence_toks_scores
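    # Note on the values returned above: with `return_dict_in_generate=True` and `output_scores=True`,
    # HuggingFace `generate` returns `sequences` of shape [batch, generated_len] and `scores`, a tuple with
    # one [batch, vocab_size] score tensor per generation step; `generated_texts` are the decoded strings
    # with special tokens (padding, sentinels, </s>) removed.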
# Functions for inference
@torch.no_grad()
def _infer(
self,
sents: List[List[str]],
nb_spans: List[int],
span_starts: List[List[int]],
span_ends: List[List[int]],
inst_directions: List[str],
):
""" Main function for Inference
Args:
sents: A list of inputs tokenized by a basic tokenizer.
nb_spans: A list of ints where each int indicates the number of semiotic spans in each input.
span_starts: A list of lists where each list contains the starting locations of semiotic spans in an input.
span_ends: A list of lists where each list contains the ending locations of semiotic spans in an input.
inst_directions: A list of str where each str indicates the direction of the corresponding instance (i.e., INST_BACKWARD for ITN or INST_FORWARD for TN).
Returns: A list of lists where each list contains the decoded spans for the corresponding input.
"""
self.eval()
if sum(nb_spans) == 0:
return [[]] * len(sents)
model, tokenizer = self.model, self.tokenizer
ctx_size = constants.DECODE_CTX_SIZE
extra_id_0 = constants.EXTRA_ID_0
extra_id_1 = constants.EXTRA_ID_1
"""
Build all_inputs - extracted spans to be transformed by the decoder model
        Inputs for the TN direction have the "0" prefix, while inputs for the backward (ITN) direction have the "1" prefix
        "input_centers" - List[str] - the original words of each span (used as a fallback and as input to the covering grammars)
"""
input_centers, input_dirs, all_inputs = [], [], []
for ix, sent in enumerate(sents):
cur_inputs = []
for jx in range(nb_spans[ix]):
cur_start = span_starts[ix][jx]
cur_end = span_ends[ix][jx]
ctx_left = sent[max(0, cur_start - ctx_size) : cur_start]
ctx_right = sent[cur_end + 1 : cur_end + 1 + ctx_size]
span_words = sent[cur_start : cur_end + 1]
span_words_str = ' '.join(span_words)
input_centers.append(span_words_str)
input_dirs.append(inst_directions[ix])
# Build cur_inputs
if inst_directions[ix] == constants.INST_BACKWARD:
cur_inputs = [constants.ITN_PREFIX]
if inst_directions[ix] == constants.INST_FORWARD:
cur_inputs = [constants.TN_PREFIX]
cur_inputs += ctx_left
cur_inputs += [extra_id_0] + span_words_str.split(' ') + [extra_id_1]
cur_inputs += ctx_right
all_inputs.append(' '.join(cur_inputs))
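        # Illustrative example (a sketch, assuming constants.DECODE_CTX_SIZE == 3 and the usual T5 sentinels
        # '<extra_id_0>' / '<extra_id_1>'): for the TN sentence ['i', 'paid', '$5', 'today'] with a single span
        # covering '$5', the corresponding entry of `all_inputs` looks like
        #   '<TN prefix> i paid <extra_id_0> $5 <extra_id_1> today'
        # i.e. the span is wrapped in sentinel tokens with up to DECODE_CTX_SIZE context words on each side.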
# Apply the decoding model
batch = tokenizer(all_inputs, padding=True, return_tensors='pt')
input_ids = batch['input_ids'].to(self.device)
generated_texts, generated_ids, sequence_toks_scores = self._generate_predictions(
input_ids=input_ids, model_max_len=self.max_sequence_len
)
# Use covering grammars (if enabled)
if self.use_cg:
# Compute sequence probabilities
sequence_probs = torch.ones(len(all_inputs)).to(self.device)
for ix, cur_toks_scores in enumerate(sequence_toks_scores):
cur_generated_ids = generated_ids[:, ix + 1].tolist()
cur_toks_probs = torch.nn.functional.softmax(cur_toks_scores, dim=-1)
# Compute selected_toks_probs
selected_toks_probs = []
for jx, _id in enumerate(cur_generated_ids):
if _id != self.tokenizer.pad_token_id:
selected_toks_probs.append(cur_toks_probs[jx, _id])
else:
selected_toks_probs.append(1)
selected_toks_probs = torch.tensor(selected_toks_probs).to(self.device)
sequence_probs *= selected_toks_probs
# For TN cases where the neural model is not confident, use CGs
neural_confidence_threshold = self.neural_confidence_threshold
for ix, (_dir, _input, _prob) in enumerate(zip(input_dirs, input_centers, sequence_probs)):
if _dir == constants.INST_FORWARD and _prob < neural_confidence_threshold:
try:
cg_outputs = self.cg_normalizer.normalize(text=_input, verbose=False, n_tagged=self.n_tagged)
generated_texts[ix] = list(cg_outputs)[0]
                    except Exception:  # if there is any exception, fall back to the input
generated_texts[ix] = _input
# Prepare final_texts
final_texts, span_ctx = [], 0
for nb_span in nb_spans:
cur_texts = []
for i in range(nb_span):
cur_texts.append(generated_texts[span_ctx])
span_ctx += 1
final_texts.append(cur_texts)
return final_texts
# Functions for processing data
def setup_training_data(self, train_data_config: Optional[DictConfig]):
if not train_data_config or not train_data_config.data_path:
logging.info(
f"Dataloader config or file_path for the train is missing, so no data loader for train is created!"
)
self.train_dataset, self._train_dl = None, None
return
self.train_dataset, self._train_dl = self._setup_dataloader_from_config(
cfg=train_data_config, data_split="train"
)
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if 'use_tarred_dataset' in train_data_config and train_data_config['use_tarred_dataset']:
# We also need to check if limit_train_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # training batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if self._trainer is not None and isinstance(self._trainer.limit_train_batches, float):
self._trainer.limit_train_batches = int(
self._trainer.limit_train_batches * ceil(len(self._train_dl.dataset) / self.world_size)
)
elif self._trainer is None:
logging.warning(
"Model Trainer was not set before constructing the dataset, incorrect number of "
"training batches will be used. Please set the trainer and rebuild the dataset."
)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if not val_data_config or not val_data_config.data_path:
logging.info(
f"Dataloader config or file_path for the validation is missing, so no data loader for validation is created!"
)
self.validation_dataset, self._validation_dl = None, None
return
self.validation_dataset, self._validation_dl = self._setup_dataloader_from_config(
cfg=val_data_config, data_split="val"
)
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if 'use_tarred_dataset' in val_data_config and val_data_config['use_tarred_dataset']:
# We also need to check if limit_val_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # validation batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if self._trainer is not None and isinstance(self._trainer.limit_val_batches, float):
self._trainer.limit_val_batches = int(
self._trainer.limit_val_batches * ceil(len(self._validation_dl.dataset) / self.world_size)
)
elif self._trainer is None:
logging.warning(
"Model Trainer was not set before constructing the dataset, incorrect number of "
"validation batches will be used. Please set the trainer and rebuild the dataset."
)
def setup_multiple_validation_data(self, val_data_config: Union[DictConfig, Dict] = None):
if val_data_config is None:
val_data_config = self._cfg.validation_ds
return super().setup_multiple_validation_data(val_data_config)
def setup_multiple_test_data(self, test_data_config: Union[DictConfig, Dict] = None):
if test_data_config is None:
test_data_config = self._cfg.test_ds
return super().setup_multiple_test_data(test_data_config)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
if not test_data_config or test_data_config.data_path is None:
logging.info(
f"Dataloader config or file_path for the test is missing, so no data loader for test is created!"
)
self.test_dataset, self._test_dl = None, None
return
self.test_dataset, self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, data_split="test")
def _setup_dataloader_from_config(self, cfg: DictConfig, data_split: str):
logging.info(f"Creating {data_split} dataset")
shuffle = cfg["shuffle"]
if cfg.get("use_tarred_dataset", False):
logging.info('Tarred dataset')
metadata_file = cfg["tar_metadata_file"]
if metadata_file is None or not os.path.exists(metadata_file):
raise FileNotFoundError(f"Trying to use tarred dataset but could not find {metadata_file}.")
with open(metadata_file, "r") as f:
metadata = json.load(f)
num_batches = metadata["num_batches"]
tar_files = os.path.join(os.path.dirname(metadata_file), metadata["text_tar_filepaths"])
logging.info(f"Loading {tar_files}")
dataset = TarredTextNormalizationDecoderDataset(
text_tar_filepaths=tar_files,
num_batches=num_batches,
shuffle_n=cfg.get("tar_shuffle_n", 4 * cfg['batch_size']) if shuffle else 0,
shard_strategy=cfg.get("shard_strategy", "scatter"),
global_rank=self.global_rank,
world_size=self.world_size,
)
dl = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=1,
sampler=None,
num_workers=cfg.get("num_workers", 2),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
else:
input_file = cfg.data_path
if not os.path.exists(input_file):
raise ValueError(f"{input_file} not found.")
dataset = TextNormalizationDecoderDataset(
input_file=input_file,
tokenizer=self.tokenizer,
tokenizer_name=self.transformer_name,
mode=self.mode,
max_len=self.max_sequence_len,
decoder_data_augmentation=cfg.get('decoder_data_augmentation', False)
if data_split == "train"
else False,
lang=self.lang,
use_cache=cfg.get('use_cache', False),
max_insts=cfg.get('max_insts', -1),
do_tokenize=True,
)
# create and save class names to class_ids mapping for validation
# (each validation set might have different classes)
if data_split in ['val', 'test']:
if not hasattr(self, "_val_class_to_id"):
self._val_class_to_id = []
self._val_id_to_class = []
self._val_class_to_id.append(dataset.label_ids_semiotic)
self._val_id_to_class.append({v: k for k, v in dataset.label_ids_semiotic.items()})
data_collator = DataCollatorForSeq2Seq(
self.tokenizer, model=self.model, label_pad_token_id=constants.LABEL_PAD_TOKEN_ID, padding=True
)
dl = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
shuffle=shuffle,
collate_fn=data_collator,
num_workers=cfg.get("num_workers", 3),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
return dataset, dl
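    # A minimal sketch of the tarred-dataset metadata file consumed above (illustrative values; the keys
    # `num_batches` and `text_tar_filepaths` are the ones read from `tar_metadata_file`):
    #   {
    #     "num_batches": 1250,
    #     "text_tar_filepaths": "decoder.train.tar_file_*.tar"
    #   }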
@classmethod
    def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
result.append(
PretrainedModelInfo(
pretrained_model_name="neural_text_normalization_t5",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/neural_text_normalization_t5/versions/1.5.0/files/neural_text_normalization_t5_decoder.nemo",
description="Text Normalization model's decoder model.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="itn_en_t5",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/itn_en_t5/versions/1.11.0/files/itn_en_t5_decoder.nemo",
description="English Inverse Text Normalization model's decoder model.",
)
)
return result
| NeMo-main | nemo/collections/nlp/models/duplex_text_normalization/duplex_decoder.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.duplex_text_normalization.duplex_decoder import DuplexDecoderModel
from nemo.collections.nlp.models.duplex_text_normalization.duplex_tagger import DuplexTaggerModel
from nemo.collections.nlp.models.duplex_text_normalization.duplex_tn import DuplexTextNormalizationModel
| NeMo-main | nemo/collections/nlp/models/duplex_text_normalization/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import ceil
from time import perf_counter
from typing import List
import numpy as np
import torch.nn as nn
from tqdm import tqdm
from nemo.collections.nlp.data.text_normalization import TextNormalizationTestDataset, constants
from nemo.collections.nlp.data.text_normalization.utils import input_preprocessing
from nemo.collections.nlp.models.duplex_text_normalization.utils import get_formatted_string
from nemo.utils import logging
try:
from nemo_text_processing.text_normalization.data_loader_utils import post_process_punct
PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
PYNINI_AVAILABLE = False
__all__ = ['DuplexTextNormalizationModel']
class DuplexTextNormalizationModel(nn.Module):
"""
DuplexTextNormalizationModel is a wrapper class that can be used to
encapsulate a trained tagger and a trained decoder. The class is intended
to be used for inference only (e.g., for evaluation).
"""
def __init__(self, tagger, decoder, lang):
super(DuplexTextNormalizationModel, self).__init__()
self.tagger = tagger
self.decoder = decoder
self.lang = lang
def evaluate(
self, dataset: TextNormalizationTestDataset, batch_size: int, errors_log_fp: str, verbose: bool = True
):
""" Function for evaluating the performance of the model on a dataset
Args:
dataset: The dataset to be used for evaluation.
batch_size: Batch size to use during inference. You can set it to be 1
(no batching) if you want to measure the running time of the model
per individual example (assuming requests are coming to the model one-by-one).
errors_log_fp: Path to the file for logging the errors
verbose: if true prints and logs various evaluation results
Returns:
results: A Dict containing the evaluation results (e.g., accuracy, running time)
"""
results = {}
error_f = open(errors_log_fp, 'w+')
# Apply the model on the dataset
(
all_run_times,
all_dirs,
all_inputs,
all_targets,
all_classes,
all_nb_spans,
all_span_starts,
all_span_ends,
all_output_spans,
) = ([], [], [], [], [], [], [], [], [])
all_tag_preds, all_final_preds = [], []
nb_iters = int(ceil(len(dataset) / batch_size))
for i in tqdm(range(nb_iters)):
start_idx = i * batch_size
end_idx = (i + 1) * batch_size
batch_insts = dataset[start_idx:end_idx]
(
batch_dirs,
batch_inputs,
batch_targets,
batch_classes,
batch_nb_spans,
batch_span_starts,
batch_span_ends,
) = zip(*batch_insts)
# Inference and Running Time Measurement
batch_start_time = perf_counter()
batch_tag_preds, batch_output_spans, batch_final_preds = self._infer(
batch_inputs, batch_dirs, processed=True
)
batch_run_time = (perf_counter() - batch_start_time) * 1000 # milliseconds
all_run_times.append(batch_run_time)
# Update all_dirs, all_inputs, all_tag_preds, all_final_preds and all_targets
all_dirs.extend(batch_dirs)
all_inputs.extend(batch_inputs)
all_tag_preds.extend(batch_tag_preds)
all_final_preds.extend(batch_final_preds)
all_targets.extend(batch_targets)
all_classes.extend(batch_classes)
all_nb_spans.extend(batch_nb_spans)
all_span_starts.extend(batch_span_starts)
all_span_ends.extend(batch_span_ends)
all_output_spans.extend(batch_output_spans)
# Metrics
tn_error_ctx, itn_error_ctx = 0, 0
for direction in constants.INST_DIRECTIONS:
(
cur_dirs,
cur_inputs,
cur_tag_preds,
cur_final_preds,
cur_targets,
cur_classes,
cur_nb_spans,
cur_span_starts,
cur_span_ends,
cur_output_spans,
) = ([], [], [], [], [], [], [], [], [], [])
for dir, _input, tag_pred, final_pred, target, cls, nb_spans, span_starts, span_ends, output_spans in zip(
all_dirs,
all_inputs,
all_tag_preds,
all_final_preds,
all_targets,
all_classes,
all_nb_spans,
all_span_starts,
all_span_ends,
all_output_spans,
):
if dir == direction:
cur_dirs.append(dir)
cur_inputs.append(_input)
cur_tag_preds.append(tag_pred)
cur_final_preds.append(final_pred)
cur_targets.append(target)
cur_classes.append(cls)
cur_nb_spans.append(nb_spans)
cur_span_starts.append(span_starts)
cur_span_ends.append(span_ends)
cur_output_spans.append(output_spans)
nb_instances = len(cur_final_preds)
cur_targets_sent = [" ".join(x) for x in cur_targets]
sent_accuracy = TextNormalizationTestDataset.compute_sent_accuracy(
cur_final_preds, cur_targets_sent, cur_dirs
)
class_accuracy = TextNormalizationTestDataset.compute_class_accuracy(
[x.split() for x in cur_inputs],
cur_targets,
cur_tag_preds,
cur_dirs,
cur_output_spans,
cur_classes,
cur_nb_spans,
cur_span_ends,
)
if verbose:
logging.info(f'\n============ Direction {direction} ============')
logging.info(f'Sentence Accuracy: {sent_accuracy}')
logging.info(f'nb_instances: {nb_instances}')
if not isinstance(class_accuracy, str):
log_class_accuracies = ""
for key, value in class_accuracy.items():
log_class_accuracies += f"\n\t{key}:\t{value[0]}\t{value[1]}/{value[2]}"
else:
log_class_accuracies = class_accuracy
logging.info(f'class accuracies: {log_class_accuracies}')
# Update results
results[direction] = {
'sent_accuracy': sent_accuracy,
'nb_instances': nb_instances,
"class_accuracy": log_class_accuracies,
}
# Write errors to log file
for _input, tag_pred, final_pred, target, classes in zip(
cur_inputs, cur_tag_preds, cur_final_preds, cur_targets_sent, cur_classes
):
if not TextNormalizationTestDataset.is_same(final_pred, target, direction):
if direction == constants.INST_BACKWARD:
error_f.write('Backward Problem (ITN)\n')
itn_error_ctx += 1
elif direction == constants.INST_FORWARD:
error_f.write('Forward Problem (TN)\n')
tn_error_ctx += 1
formatted_input_str = get_formatted_string(self.decoder.processor.tokenize(_input).split())
formatted_tag_pred_str = get_formatted_string(tag_pred)
class_str = " ".join(classes)
error_f.write(f'Original Input : {_input}\n')
error_f.write(f'Input : {formatted_input_str}\n')
error_f.write(f'Predicted Tags : {formatted_tag_pred_str}\n')
error_f.write(f'Ground Classes : {class_str}\n')
error_f.write(f'Predicted Str : {final_pred}\n')
error_f.write(f'Ground-Truth : {target}\n')
error_f.write('\n')
results['itn_error_ctx'] = itn_error_ctx
results['tn_error_ctx'] = tn_error_ctx
# Running Time
avg_running_time = np.average(all_run_times) / batch_size # in ms
if verbose:
logging.info(f'Average running time (normalized by batch size): {avg_running_time} ms')
results['running_time'] = avg_running_time
# Close log file
error_f.close()
logging.info(f'Errors are saved at {errors_log_fp}.')
return results
# Functions for inference
def _infer(self, sents: List[str], inst_directions: List[str], processed=False):
"""
Main function for Inference
If the 'joint' mode is used, "sents" will include both spoken and written forms on each input sentence,
and "inst_directions" will include both constants.INST_BACKWARD and constants.INST_FORWARD
Args:
sents: A list of input texts.
inst_directions: A list of str where each str indicates the direction of the corresponding instance \
(i.e., constants.INST_BACKWARD for ITN or constants.INST_FORWARD for TN).
            processed: Set to True when used with TextNormalizationTestDataset, whose data is already tokenized with Moses;
                applying Moses tokenization again could lead to a mismatch between the number of tokens and the class spans
Returns:
tag_preds: A list of lists where the inner list contains the tag predictions from the tagger for each word in the input text.
output_spans: A list of lists where each list contains the decoded semiotic spans from the decoder for an input text.
final_outputs: A list of str where each str is the final output text for an input text.
"""
original_sents = [s for s in sents]
# Separate into words
if not processed:
sents = [input_preprocessing(x, lang=self.lang) for x in sents]
sents = [self.decoder.processor.tokenize(x).split() for x in sents]
else:
sents = [x.split() for x in sents]
# Tagging
# span_ends included, returns index wrt to words in input without auxiliary words
tag_preds, nb_spans, span_starts, span_ends = self.tagger._infer(sents, inst_directions)
output_spans = self.decoder._infer(sents, nb_spans, span_starts, span_ends, inst_directions)
# Prepare final outputs
final_outputs = []
for ix, (sent, tags) in enumerate(zip(sents, tag_preds)):
try:
cur_words, jx, span_idx = [], 0, 0
cur_spans = output_spans[ix]
while jx < len(sent):
tag, word = tags[jx], sent[jx]
if constants.SAME_TAG in tag:
cur_words.append(word)
jx += 1
else:
jx += 1
cur_words.append(cur_spans[span_idx])
span_idx += 1
while jx < len(sent) and tags[jx] == constants.I_PREFIX + constants.TRANSFORM_TAG:
jx += 1
if processed:
# for Class-based evaluation, don't apply Moses detokenization
cur_output_str = " ".join(cur_words)
else:
# detokenize the output with Moses and fix punctuation marks to match the input
# for interactive inference or inference from a file
cur_output_str = self.decoder.processor.detokenize(cur_words)
if PYNINI_AVAILABLE:
cur_output_str = post_process_punct(input=original_sents[ix], normalized_text=cur_output_str)
else:
logging.warning(
"`pynini` not installed, please install via nemo_text_processing/pynini_install.sh"
)
final_outputs.append(cur_output_str)
except IndexError:
logging.warning(f"Input sent is too long and will be skipped - {original_sents[ix]}")
final_outputs.append(original_sents[ix])
return tag_preds, output_spans, final_outputs
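    # Example usage (a sketch rather than a verbatim NeMo recipe): with a trained tagger and decoder,
    # TN inference for a single sentence roughly looks like
    #   tn_model = DuplexTextNormalizationModel(tagger, decoder, lang='en')
    #   _, _, outputs = tn_model._infer(['it costs $5'], [constants.INST_FORWARD], processed=False)
    #   # outputs -> e.g. ['it costs five dollars'] (the exact verbalization depends on the checkpoints)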
| NeMo-main | nemo/collections/nlp/models/duplex_text_normalization/duplex_tn.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
__all__ = ['has_numbers']
def has_numbers(input_str: str):
""" Check if a string has a number character """
return any(char.isdigit() for char in input_str)
def get_formatted_string(strs: Tuple[str], str_max_len: int = 10, space_len: int = 2):
""" Get a nicely formatted string from a list of strings"""
padded_strs = []
for cur_str in strs:
cur_str = cur_str + ' ' * (str_max_len - len(cur_str))
padded_strs.append(cur_str[:str_max_len])
spaces = ' ' * space_len
return spaces.join(padded_strs)
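# Illustrative examples (a sketch):
#   has_numbers('room 42') -> True
#   has_numbers('hello') -> False
#   get_formatted_string(('SAME', 'B-TRANSFORM'), str_max_len=6, space_len=2) -> 'SAME    B-TRAN'
#   (each column is padded/truncated to 6 characters and columns are joined by 2 spaces)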
| NeMo-main | nemo/collections/nlp/models/duplex_text_normalization/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from math import ceil
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from omegaconf import DictConfig, open_dict
from pytorch_lightning import Trainer
from torch.nn import Linear
from tqdm import tqdm
from nemo.collections.common.losses.cross_entropy import CrossEntropyLoss
from nemo.collections.nlp.models.token_classification.punctuation_capitalization_model import (
PunctuationCapitalizationModel,
)
from nemo.collections.nlp.modules.common.transformer import TransformerDecoder
from nemo.core.classes.common import PretrainedModelInfo
from nemo.core.classes.mixins import adapter_mixins
from nemo.utils import logging
try:
import nemo.collections.asr as nemo_asr
from nemo.collections.asr.parts.submodules.conformer_modules import ConformerLayer
ASR_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
ASR_AVAILABLE = False
__all__ = ['PunctuationCapitalizationLexicalAudioModel']
def update_model_config_to_support_adapter(model_cfg):
with open_dict(model_cfg):
adapter_metadata = adapter_mixins.get_registered_adapter(model_cfg.encoder._target_)
if adapter_metadata is not None:
model_cfg.encoder._target_ = adapter_metadata.adapter_class_path
return model_cfg
class PunctuationCapitalizationLexicalAudioModel(PunctuationCapitalizationModel):
"""
A model for restoring punctuation and capitalization in text using lexical and audio features.
    The model consists of a language model and two multilayer perceptrons (MLPs) on top of the fusion of LM and AM. The first
MLP serves for punctuation prediction and the second is for capitalization prediction. You can use only BERT-like
    HuggingFace language models (the model ``forward`` method accepts ``input_ids``, ``token_type_ids``,
``attention_mask`` arguments). See more about model config options :ref:`here<model-config-label>`.
    Any :class:`~nemo.collections.asr.models.EncDecCTCModel` with an encoder module can be used as the AM.
For training and testing use dataset
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset.BertPunctuationCapitalizationDataset` with parameter ``use_audio`` set to ``True``,
for training on huge amounts of data which cannot be loaded into memory simultaneously use
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset.BertPunctuationCapitalizationTarredDataset` with parameter ``use_audio`` set to ``True``.
Args:
cfg: a model configuration. It should follow dataclass
:class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_config.PunctuationCapitalizationLexicalAudioModelConfig`
See an example of full config in
`nemo/examples/nlp/token_classification/conf/punctuation_capitalization_lexical_audio_config.yaml
<https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/conf/punctuation_capitalization_lexical_audio_config.yaml>`_
trainer: an instance of a PyTorch Lightning trainer
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None) -> None:
super().__init__(cfg, trainer)
if not ASR_AVAILABLE:
raise ModuleNotFoundError(
                'NeMo ASR is not installed, see https://github.com/NVIDIA/NeMo#installation for installation instructions'
)
if os.path.exists(cfg.audio_encoder.pretrained_model):
audio_cfg = nemo_asr.models.ASRModel.restore_from(cfg.audio_encoder.pretrained_model, return_config=True)
else:
audio_cfg = nemo_asr.models.ASRModel.from_pretrained(
cfg.audio_encoder.pretrained_model, return_config=True
)
if cfg.audio_encoder.get('adapter', None):
if cfg.audio_encoder.adapter.enable:
audio_cfg = update_model_config_to_support_adapter(audio_cfg)
if os.path.exists(cfg.audio_encoder.pretrained_model):
self.audio_encoder = nemo_asr.models.ASRModel.restore_from(
cfg.audio_encoder.pretrained_model, override_config_path=audio_cfg
)
else:
self.audio_encoder = nemo_asr.models.ASRModel.from_pretrained(
cfg.audio_encoder.pretrained_model, override_config_path=audio_cfg
)
if cfg.audio_encoder.adapter.get('enable', False):
with open_dict(cfg):
cfg.audio_encoder.adapter.config.in_features = self.audio_encoder.cfg.decoder.feat_in
self.audio_encoder.add_adapter(name='audio_adapter', cfg=cfg.audio_encoder.adapter.config)
self.audio_encoder.set_enabled_adapters(enabled=True)
self.audio_encoder.freeze()
self.audio_encoder.unfreeze_enabled_adapters()
self.fusion = TransformerDecoder(
num_layers=cfg.audio_encoder.fusion.num_layers,
hidden_size=self.bert_model(**self.bert_model.input_example()[0]).size()[-1],
inner_size=cfg.audio_encoder.fusion.inner_size,
num_attention_heads=cfg.audio_encoder.fusion.num_attention_heads,
)
        # note: `hasattr` cannot resolve the dotted path 'decoder.feat_in', so check the nested config keys explicitly
        if self.audio_encoder.cfg.get('decoder', None) is not None and self.audio_encoder.cfg.decoder.get('feat_in', None) is not None:
self.audio_proj = Linear(
self.audio_encoder.cfg.decoder.feat_in,
self.bert_model(**self.bert_model.input_example()[0]).size()[-1],
)
else:
self.audio_proj = Linear(
self.audio_encoder.cfg.encoder.d_model,
self.bert_model(**self.bert_model.input_example()[0]).size()[-1],
)
if cfg.audio_encoder.freeze.get('is_enabled', False):
for param in self.audio_encoder.parameters():
param.requires_grad = False
for i in range(cfg.audio_encoder.freeze.get('num_layers')):
self.audio_encoder.add_module(
f'conf_encoder_{i}',
ConformerLayer(
d_model=cfg.audio_encoder.freeze.get('d_model'), d_ff=cfg.audio_encoder.freeze.get('d_ff')
),
)
if cfg.get('restore_lexical_encoder_from', None) and not self._is_model_being_restored():
if os.path.exists(cfg.get('restore_lexical_encoder_from')):
self.bert_model = (
PunctuationCapitalizationModel.restore_from(cfg.restore_lexical_encoder_from)
.to(self.device)
.bert_model
)
else:
                raise ValueError(f'Provided path {cfg.get("restore_lexical_encoder_from")} does not exist')
if hasattr(self.audio_encoder, 'decoder'):
del self.audio_encoder.decoder
if hasattr(self.audio_encoder, '_wer'):
del self.audio_encoder._wer
if hasattr(self.audio_encoder, 'loss'):
del self.audio_encoder.loss
if hasattr(self.audio_encoder, 'decoder_losses'):
del self.audio_encoder.decoder_losses
if cfg.get('use_weighted_loss', False):
punct_freq = torch.tensor(
list(self.train_dataloader().dataset.punct_label_frequencies.values()), dtype=torch.float
)
punct_weight = 1 - (punct_freq - punct_freq.min()) / punct_freq.max()
capit_freq = torch.tensor(
list(self.train_dataloader().dataset.capit_label_frequencies.values()), dtype=torch.float
)
capit_weight = 1 - (capit_freq - capit_freq.min()) / capit_freq.max()
self.loss_punct = CrossEntropyLoss(logits_ndim=3, weight=punct_weight)
self.loss_capit = CrossEntropyLoss(logits_ndim=3, weight=capit_weight)
else:
self.loss_punct = self.loss
self.loss_capit = self.loss
self.set_max_audio_length(1024)
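    # A minimal sketch of the `audio_encoder` config section consumed in `__init__` above (illustrative values;
    # the key names follow the `cfg.audio_encoder.*` accesses in this method):
    #   audio_encoder:
    #     pretrained_model: stt_en_conformer_ctc_large
    #     freeze: {is_enabled: false, d_model: 256, d_ff: 1024, num_layers: 4}
    #     adapter: {enable: false, config: ...}
    #     fusion: {num_layers: 4, inner_size: 2048, num_attention_heads: 4}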
def _make_step(self, batch: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
punct_logits, capit_logits = self(
input_ids=batch['input_ids'],
token_type_ids=batch['segment_ids'],
attention_mask=batch['input_mask'],
features=batch['features'],
features_length=batch['features_length'],
)
punct_loss = self.loss_punct(logits=punct_logits, labels=batch['punct_labels'], loss_mask=batch['loss_mask'])
capit_loss = self.loss_capit(logits=capit_logits, labels=batch['capit_labels'], loss_mask=batch['loss_mask'])
loss = self.agg_loss(loss_1=punct_loss, loss_2=capit_loss)
return loss, punct_logits, capit_logits
def forward(
self,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
token_type_ids: Optional[torch.Tensor] = None,
features: torch.Tensor = None,
features_length: torch.Tensor = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Executes a forward pass through the model. For more details see ``forward`` method of :class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_config.PunctuationCapitalizationLexicalAudioModelConfig`
        and the ``forward`` method of :class:`~nemo.collections.asr.models.EncDecCTCModel`.
Args:
input_ids (:obj:`torch.Tensor`): an integer torch tensor of shape ``[Batch, Time]``. Contains encoded
source tokens.
attention_mask (:obj:`torch.Tensor`): a boolean torch tensor of shape ``[Batch, Time]``. Contains an
attention mask for excluding paddings.
token_type_ids (:obj:`torch.Tensor`): an integer torch Tensor of shape ``[Batch, Time]``. Contains an index
of segment to which a token belongs. If ``token_type_ids`` is not ``None``, then it should be a zeros
tensor.
features (:obj:`torch.Tensor`): tensor that represents a batch of raw audio signals,
of shape [B, T]. T here represents timesteps, with 1 second of audio represented as
sample_rate number of floating point values.
features_length (:obj:`torch.Tensor`): Vector of length B, that contains the individual lengths of the audio
sequences.
Returns:
:obj:`Tuple[torch.Tensor, torch.Tensor]`: a tuple containing
- ``punct_logits`` (:obj:`torch.Tensor`): a float torch tensor of shape
``[Batch, Time, NumPunctuationLabels]`` containing punctuation logits
- ``capit_logits`` (:obj:`torch.Tensor`): a float torch tensor of shape
``[Batch, Time, NumCapitalizationLabels]`` containing capitalization logits
"""
self.update_max_seq_length(seq_length=features.size(1), device=features.device)
lexical_hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)
if isinstance(lexical_hidden_states, tuple):
lexical_hidden_states = lexical_hidden_states[0]
processed_signal, processed_signal_length = self.audio_encoder.preprocessor(
input_signal=features, length=features_length,
)
if self.audio_encoder.spec_augmentation is not None and self.training:
processed_signal = self.audio_encoder.spec_augmentation(
input_spec=processed_signal, length=processed_signal_length
)
audio_hidden_states, audio_hidden_states_length = self.audio_encoder.encoder(
audio_signal=processed_signal, length=processed_signal_length
)
audio_hidden_states = audio_hidden_states.permute(0, 2, 1)
audio_hidden_states = self.audio_proj(audio_hidden_states)
fused = self.fusion(
lexical_hidden_states,
attention_mask,
audio_hidden_states,
self.make_pad_mask(audio_hidden_states.size(1), audio_hidden_states_length),
)
punct_logits = self.punct_classifier(hidden_states=fused)
capit_logits = self.capit_classifier(hidden_states=fused)
return punct_logits, capit_logits
def make_pad_mask(self, max_audio_length, seq_lens):
"""Make masking for padding."""
mask = self.seq_range[:max_audio_length].expand(seq_lens.size(0), -1) < seq_lens.unsqueeze(-1)
return mask
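    # Illustrative example (a sketch): with seq_lens == tensor([2, 4]) and max_audio_length == 4,
    # `make_pad_mask` returns
    #   [[True, True, False, False],
    #    [True, True, True,  True]]
    # i.e. True marks valid audio frames and False marks padded positions.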
def update_max_seq_length(self, seq_length: int, device):
if torch.distributed.is_initialized():
global_max_len = torch.tensor([seq_length], dtype=torch.float32, device=device)
# Update across all ranks in the distributed system
torch.distributed.all_reduce(global_max_len, op=torch.distributed.ReduceOp.MAX)
seq_length = global_max_len.int().item()
if seq_length > self.max_audio_length:
self.set_max_audio_length(seq_length)
def set_max_audio_length(self, max_audio_length):
"""
Sets maximum input length.
Pre-calculates internal seq_range mask.
"""
self.max_audio_length = max_audio_length
device = next(self.parameters()).device
seq_range = torch.arange(0, self.max_audio_length, device=device)
if hasattr(self, 'seq_range'):
self.seq_range = seq_range
else:
self.register_buffer('seq_range', seq_range, persistent=False)
def add_punctuation_capitalization(
self,
queries: List[str],
batch_size: int = None,
max_seq_length: int = 64,
step: int = 8,
margin: int = 16,
return_labels: bool = False,
dataloader_kwargs: Dict[str, Any] = None,
audio_queries: Optional[Union[List[bytes], List[str]]] = None,
target_sr: Optional[int] = None,
) -> List[str]:
"""
Adds punctuation and capitalization to the queries. Use this method for inference.
Parameters ``max_seq_length``, ``step``, ``margin`` are for controlling the way queries are split into segments
        which are processed by the model. Parameter ``max_seq_length`` is the length of a segment after tokenization,
        including the special tokens [CLS] at the beginning and [SEP] at the end of a segment. Parameter ``step`` is the
        shift between consecutive segments. Parameter ``margin`` is used to exclude the negative effect of subtokens near
        segment borders, which have context on only one side.
        If segments overlap, probabilities of overlapping predictions are multiplied and then the label
        corresponding to the maximum probability is selected.
Args:
queries (:obj:`List[str]`): lower cased text without punctuation.
            batch_size (:obj:`int`, `optional`): batch size to use during inference. If ``batch_size`` parameter
is not provided, then it will be equal to length of ``queries`` list.
max_seq_length (:obj:`int`, `optional`, defaults to :obj:`64`): maximum sequence length of a segment after
tokenization including :code:`[CLS]` and :code:`[SEP]` tokens.
            step (:obj:`int`, `optional`, defaults to :obj:`8`): relative shift of consecutive segments into which long
queries are split. Long queries are split into segments which can overlap. Parameter ``step`` controls
such overlapping. Imagine that queries are tokenized into characters, ``max_seq_length=5``, and
``step=2``. In such case, query ``"hello"`` is tokenized into segments
``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]``.
            margin (:obj:`int`, `optional`, defaults to :obj:`16`): number of subtokens at the beginning and the end of
segments which are not used for prediction computation. The first segment does not have left margin and
the last segment does not have right margin. For example, if an input sequence is tokenized into
characters, ``max_seq_length=5``, ``step=1``, and ``margin=1``, then query ``"hello"`` will be
tokenized into segments ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'e', 'l', 'l', '[SEP]'],
['[CLS]', 'l', 'l', 'o', '[SEP]']]``. These segments are passed to the model. Before final predictions
computation, margins are removed. In the next list, subtokens which logits are not used for final
predictions computation are marked with asterisk: ``[['[CLS]'*, 'h', 'e', 'l'*, '[SEP]'*],
['[CLS]'*, 'e'*, 'l', 'l'*, '[SEP]'*], ['[CLS]'*, 'l'*, 'l', 'o', '[SEP]'*]]``.
return_labels (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to return labels in NeMo format
(see :ref:`nlp/punctuation_and_capitalization/NeMo Data Format`) instead of queries with restored
punctuation and capitalization.
dataloader_kwargs (:obj:`Dict[str, Any]`, `optional`): an optional dictionary with parameters of PyTorch
data loader. May include keys: ``'num_workers'``, ``'pin_memory'``, ``'worker_init_fn'``,
``'prefetch_factor'``, ``'persistent_workers'``.
            audio_queries (:obj:`Union[List[bytes], List[str]]`, `optional`): paths to audio files or audio bytes.
target_sr (:obj:`int`, `optional`): target sample rate for audios.
Returns:
:obj:`List[str]`: a list of queries with restored capitalization and punctuation if
``return_labels=False``, else a list of punctuation and capitalization labels strings for all queries
"""
if len(queries) == 0:
return []
if batch_size is None:
batch_size = len(queries)
logging.info(f'Using batch size {batch_size} for inference')
result: List[str] = []
mode = self.training
try:
self.eval()
infer_datalayer = self._setup_infer_dataloader(
queries, batch_size, max_seq_length, step, margin, dataloader_kwargs, audio_queries, target_sr
)
# Predicted labels for queries. List of labels for every query
all_punct_preds: List[List[int]] = [[] for _ in queries]
all_capit_preds: List[List[int]] = [[] for _ in queries]
# Accumulated probabilities (or product of probabilities acquired from different segments) of punctuation
# and capitalization. Probabilities for words in a query are extracted using `subtokens_mask`. Probabilities
# for newly processed words are appended to the accumulated probabilities. If probabilities for a word are
# already present in `acc_probs`, old probabilities are replaced with a product of old probabilities
# and probabilities acquired from new segment. Segments are processed in an order they appear in an
# input query. When all segments with a word are processed, a label with the highest probability
# (or product of probabilities) is chosen and appended to an appropriate list in `all_preds`. After adding
# prediction to `all_preds`, probabilities for a word are removed from `acc_probs`.
acc_punct_probs: List[Optional[np.ndarray]] = [None for _ in queries]
acc_capit_probs: List[Optional[np.ndarray]] = [None for _ in queries]
d = self.device
for batch_i, batch in tqdm(
enumerate(infer_datalayer), total=ceil(len(infer_datalayer.dataset) / batch_size), unit="batch"
):
(
inp_ids,
inp_type_ids,
inp_mask,
subtokens_mask,
start_word_ids,
query_ids,
is_first,
is_last,
features,
features_length,
) = batch
punct_logits, capit_logits = self.forward(
input_ids=inp_ids.to(d),
token_type_ids=inp_type_ids.to(d),
attention_mask=inp_mask.to(d),
features=features.to(d),
features_length=features_length.to(d),
)
_res = self._transform_logit_to_prob_and_remove_margins_and_extract_word_probs(
punct_logits, capit_logits, subtokens_mask, start_word_ids, margin, is_first, is_last
)
punct_probs, capit_probs, start_word_ids = _res
for i, (q_i, start_word_id, bpp_i, bcp_i) in enumerate(
zip(query_ids, start_word_ids, punct_probs, capit_probs)
):
for all_preds, acc_probs, b_probs_i in [
(all_punct_preds, acc_punct_probs, bpp_i),
(all_capit_preds, acc_capit_probs, bcp_i),
]:
if acc_probs[q_i] is None:
acc_probs[q_i] = b_probs_i
else:
all_preds[q_i], acc_probs[q_i] = self._move_acc_probs_to_token_preds(
all_preds[q_i], acc_probs[q_i], start_word_id - len(all_preds[q_i]),
)
acc_probs[q_i] = self._update_accumulated_probabilities(acc_probs[q_i], b_probs_i)
for all_preds, acc_probs in [(all_punct_preds, acc_punct_probs), (all_capit_preds, acc_capit_probs)]:
for q_i, (pred, prob) in enumerate(zip(all_preds, acc_probs)):
if prob is not None:
all_preds[q_i], acc_probs[q_i] = self._move_acc_probs_to_token_preds(pred, prob, len(prob))
for i, query in enumerate(queries):
result.append(
self._get_labels(all_punct_preds[i], all_capit_preds[i])
if return_labels
else self._apply_punct_capit_predictions(query, all_punct_preds[i], all_capit_preds[i])
)
finally:
# set mode back to its original value
self.train(mode=mode)
return result
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
return []
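# The snippet below is an illustrative, hedged usage sketch of `add_punctuation_capitalization` for the lexical-audio
# model; it only runs when this module is executed directly. The checkpoint path, audio paths, and printed output are
# placeholders rather than shipped artifacts.
if __name__ == "__main__":
    example_model = PunctuationCapitalizationLexicalAudioModel.restore_from(
        "path/to/punctuation_capitalization_lexical_audio.nemo"  # hypothetical checkpoint path
    )
    example_result = example_model.add_punctuation_capitalization(
        queries=["how are you", "great see you tomorrow"],
        audio_queries=["audio/query_0.wav", "audio/query_1.wav"],  # one (placeholder) audio file per text query
        target_sr=16000,
        batch_size=2,
    )
    # Depending on the restored model, the output may look like ['How are you?', 'Great, see you tomorrow.']
    print(example_result)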
| NeMo-main | nemo/collections/nlp/models/token_classification/punctuation_capitalization_lexical_audio_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.token_classification.punctuation_capitalization_config import (
PunctuationCapitalizationModelConfig,
)
from nemo.collections.nlp.models.token_classification.punctuation_capitalization_lexical_audio_model import (
PunctuationCapitalizationLexicalAudioModel,
)
from nemo.collections.nlp.models.token_classification.punctuation_capitalization_model import (
PunctuationCapitalizationModel,
)
from nemo.collections.nlp.models.token_classification.token_classification_model import TokenClassificationModel
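# These re-exports define the public interface of the token_classification models subpackage. A typical
# (illustrative) downstream import looks like:
#     from nemo.collections.nlp.models.token_classification import PunctuationCapitalizationModel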
| NeMo-main | nemo/collections/nlp/models/token_classification/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import warnings
from math import ceil
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from tqdm import tqdm
from nemo.collections.common.losses import AggregatorLoss, CrossEntropyLoss
from nemo.collections.common.metrics import GlobalAverageLossMetric
from nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset import (
BertPunctuationCapitalizationDataset,
PunctuationCapitalizationEvalDataConfig,
PunctuationCapitalizationTrainDataConfig,
load_label_ids,
raise_not_equal_labels_error,
)
from nemo.collections.nlp.data.token_classification.punctuation_capitalization_infer_dataset import (
BertPunctuationCapitalizationInferDataset,
)
from nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset import (
BertPunctuationCapitalizationTarredDataset,
)
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.models.token_classification.punctuation_capitalization_config import (
is_legacy_model_config,
legacy_model_config_to_new_model_config,
)
from nemo.collections.nlp.modules.common import TokenClassifier
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.classes.exportable import Exportable
from nemo.core.neural_types import LogitsType, NeuralType
from nemo.utils import logging
__all__ = ['PunctuationCapitalizationModel']
class PunctuationCapitalizationModel(NLPModel, Exportable):
"""
A model for restoring punctuation and capitalization in text. The model is usually used together with ASR model
because ASR models often return text without punctuation and capitalization.
    The model consists of a language model and two multilayer perceptrons (MLP) on top of the language model. The first
    MLP serves for punctuation prediction and the second is for capitalization prediction. You can use only BERT-like
    HuggingFace language models (models whose ``forward`` method accepts ``input_ids``, ``token_type_ids``,
``attention_mask`` arguments). See more about model config options :ref:`here<model-config-label>`.
Use method :meth:`~add_punctuation_capitalization` for model inference.
For training and testing use dataset
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset.BertPunctuationCapitalizationDataset`,
for training on huge amounts of data which cannot be loaded into memory simultaneously use
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset.BertPunctuationCapitalizationTarredDataset`.
Args:
cfg: a model configuration. It should follow dataclass
:class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_config.PunctuationCapitalizationModelConfig`
See an example of full config in
`nemo/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml
<https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml>`_
trainer: an instance of a PyTorch Lightning trainer
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Neural types of a :meth:`forward` method output."""
return {
"punct_logits": NeuralType(('B', 'T', 'C'), LogitsType()),
"capit_logits": NeuralType(('B', 'T', 'C'), LogitsType()),
}
def __init__(self, cfg: DictConfig, trainer: Trainer = None) -> None:
"""Initializes BERT Punctuation and Capitalization model."""
if is_legacy_model_config(cfg):
cfg = legacy_model_config_to_new_model_config(cfg)
# For structure of `self.metrics` attribute see `self._setup_metrics_dictionary` method.
self.metrics: Optional[torch.nn.ModuleDict] = None
self.label_ids_are_set: bool = False
self.punct_label_ids: Optional[Dict[str, int]] = None
self.capit_label_ids: Optional[Dict[str, int]] = None
super().__init__(cfg=cfg, trainer=trainer)
if not self.label_ids_are_set:
self._set_label_ids()
self.punct_classifier = TokenClassifier(
hidden_size=self.hidden_size,
num_classes=len(self.punct_label_ids),
activation=cfg.punct_head.activation,
log_softmax=False,
dropout=cfg.punct_head.fc_dropout,
num_layers=cfg.punct_head.num_fc_layers,
use_transformer_init=cfg.punct_head.use_transformer_init,
)
self.capit_classifier = TokenClassifier(
hidden_size=self.hidden_size,
num_classes=len(self.capit_label_ids),
activation=cfg.capit_head.activation,
log_softmax=False,
dropout=cfg.capit_head.fc_dropout,
num_layers=cfg.capit_head.num_fc_layers,
use_transformer_init=cfg.capit_head.use_transformer_init,
)
self.loss = CrossEntropyLoss(logits_ndim=3)
self.agg_loss = AggregatorLoss(num_inputs=2)
@typecheck()
def forward(
self, input_ids: torch.Tensor, attention_mask: torch.Tensor, token_type_ids: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Executes a forward pass through the model. For more details see ``forward`` method of HuggingFace BERT-like
(models which accept ``input_ids``, ``attention_mask``, ``token_type_ids`` arguments) models.
Args:
input_ids (:obj:`torch.Tensor`): an integer torch tensor of shape ``[Batch, Time]``. Contains encoded
source tokens.
attention_mask (:obj:`torch.Tensor`): a boolean torch tensor of shape ``[Batch, Time]``. Contains an
attention mask for excluding paddings.
token_type_ids (:obj:`torch.Tensor`): an integer torch Tensor of shape ``[Batch, Time]``. Contains an index
of segment to which a token belongs. If ``token_type_ids`` is not ``None``, then it should be a zeros
tensor.
Returns:
:obj:`Tuple[torch.Tensor, torch.Tensor]`: a tuple containing
- ``punct_logits`` (:obj:`torch.Tensor`): a float torch tensor of shape
``[Batch, Time, NumPunctuationLabels]`` containing punctuation logits
- ``capit_logits`` (:obj:`torch.Tensor`): a float torch tensor of shape
``[Batch, Time, NumCapitalizationLabels]`` containing capitalization logits
"""
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)
if isinstance(hidden_states, tuple):
hidden_states = hidden_states[0]
punct_logits = self.punct_classifier(hidden_states=hidden_states)
capit_logits = self.capit_classifier(hidden_states=hidden_states)
return punct_logits.float(), capit_logits.float()
def _make_step(self, batch: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
punct_logits, capit_logits = self(
input_ids=batch['input_ids'], token_type_ids=batch['segment_ids'], attention_mask=batch['input_mask']
)
punct_loss = self.loss(logits=punct_logits, labels=batch['punct_labels'], loss_mask=batch['loss_mask'])
capit_loss = self.loss(logits=capit_logits, labels=batch['capit_labels'], loss_mask=batch['loss_mask'])
loss = self.agg_loss(loss_1=punct_loss, loss_2=capit_loss)
return loss, punct_logits, capit_logits
def training_step(self, batch: Dict[str, torch.Tensor], batch_idx: int) -> Dict[str, Union[torch.Tensor, float]]:
"""
Lightning calls this inside the training loop with the data from the training dataloader passed in as
``batch``.
Args:
batch: a dictionary with following
items:
- ``'input_ids'`` (:obj:`torch.Tensor`): an integer torch tensor of shape ``[Batch, Time]`` containing
encoded source text
- ``'segment_ids'`` (:obj:`torch.Tensor`): a zeros integer torch tensor of shape ``[Batch, Time]``
- ``'input_mask'`` (:obj:`torch.Tensor`): a boolean torch tensor of shape ``[Batch, Time]``. Serves as
                  an attention mask; it should be ``False`` on padding tokens and ``True`` on other tokens.
- ``'loss_mask'`` (:obj:`torch.Tensor`): a boolean torch tensor of shape ``[Batch, Time]``. Which token
to compute loss on. See more details in description of parameters ``ignore_start_end`` and
``ignore_extra_tokens`` of a class
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset.BertPunctuationCapitalizationDataset`
- ``'punct_labels'`` (:obj:`torch.Tensor`): a ``long`` torch tensor of shape ``[Batch, Time]``.
Contains encoded punctuation labels
- ``'capit_labels'`` (:obj:`torch.Tensor`): a ``long`` torch tensor of shape ``[Batch, Time]``.
Contains encoded capitalization labels
- ``'subtokens_mask'`` (:obj:`torch.Tensor`): not required for training and can be omitted
batch_idx (:obj:`int`): an index of batch. Mandatory Lightning parameter
Returns:
:obj:`Dict[str, Union[torch.Tensor, float]]`: a dictionary with 2 items:
- ``'loss'`` (:obj:`torch.Tensor`): torch tensor containing mean aggregated punctuation and
capitalization loss
- ``'lr'`` (:obj:`float`): a float containing learning rate
"""
loss, _, _ = self._make_step(batch)
lr = self._optimizer.param_groups[0]['lr']
self.log('lr', lr, prog_bar=True)
self.log('train_loss', loss)
return {'loss': loss, 'lr': lr}
def eval_step(self, batch: Dict[str, torch.Tensor], mode: str, dataloader_idx: int) -> Dict[str, None]:
"""
A method called by :meth:`validation_step` and :meth:`test_step`. Performs forward pass and updates metrics.
Args:
batch (:obj:`Dict[str, torch.Tensor]`): a dictionary with following items:
- ``'input_ids'`` (:obj:`torch.Tensor`): an integer torch tensor of shape ``[Batch, Time]`` containing
encoded source text.
- ``'subtokens_mask'`` (:obj:`torch.Tensor`): a boolean torch tensor of shape ``[Batch, Time]``. An
element of this item is ``True`` if corresponding token from ``'input_ids'`` element is the first
token in some word.
- ``'segment_ids'`` (:obj:`torch.Tensor`): a zeros integer torch tensor of shape ``[Batch, Time]``.
- ``'input_mask'`` (:obj:`torch.Tensor`): a boolean torch tensor of shape ``[Batch, Time]``. Serves as
                  an attention mask; it should be ``False`` on padding tokens and ``True`` on other tokens.
- ``'loss_mask'`` (:obj:`torch.Tensor`): a boolean torch tensor of shape ``[Batch, Time]``. Which token
to compute loss on. See more details in description of parameters ``ignore_start_end`` and
``ignore_extra_tokens`` of class
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset.BertPunctuationCapitalizationDataset`.
- ``'punct_labels'`` (:obj:`torch.Tensor`): a long torch tensor of shape ``[Batch, Time]``. Contains
encoded punctuation labels.
- ``'capit_labels'`` (:obj:`torch.Tensor`): a long torch tensor of shape ``[Batch, Time]``. Contains
encoded capitalization labels.
mode: either ``'validation'`` or ``'test'`` depending on caller method.
dataloader_idx: NeMo parameter for multi dataset validation.
Returns:
:obj:`Dict[str, None]`: a dictionary containing items ``'loss'``, ``'punct_class_report'``,
``'capit_class_report'`` which values are ``None``. Values are ``None`` because metrics are computed using
``torchmetrics``.
"""
loss, punct_logits, capit_logits = self._make_step(batch)
subtokens_mask = batch['subtokens_mask']
punct_preds = torch.argmax(punct_logits, axis=-1)[subtokens_mask]
punct_labels = batch['punct_labels'][subtokens_mask]
capit_preds = torch.argmax(capit_logits, axis=-1)[subtokens_mask]
capit_labels = batch['capit_labels'][subtokens_mask]
self.metrics[mode]['loss'][dataloader_idx](
loss=loss, num_measurements=batch['loss_mask'].sum().to(loss.device)
)
self.metrics[mode]['punct_class_report'][dataloader_idx](punct_preds, punct_labels)
self.metrics[mode]['capit_class_report'][dataloader_idx](capit_preds, capit_labels)
# torchmetrics are used for metrics computation
return {'loss': None, 'punct_class_report': None, 'capit_class_report': None}
def validation_step(
self, batch: Dict[str, torch.Tensor], batch_idx: int, dataloader_idx: int = 0
) -> Dict[str, None]:
"""
Lightning calls this inside the validation loop with the data from the validation dataloader passed in as
``batch``. See more details in :meth:`eval_step`.
Args:
batch (:obj:`dict`): see :meth:`eval_step` for the ``batch`` parameter explanation
batch_idx (:obj:`int`): an index of a batch in a dataset. A mandatory Lightning parameter
dataloader_idx (:obj:`int`): a NeMo parameter for performing testing on multiple datasets
Returns:
:obj:`Dict[str, None]`: a dictionary containing items ``'loss'``, ``'punct_class_report'``,
``'capit_class_report'`` which values are ``None``. Values are ``None`` because metrics are computed using
``torchmetrics``.
"""
loss = self.eval_step(batch, 'val', dataloader_idx)
if type(self.trainer.val_dataloaders) == list and len(self.trainer.val_dataloaders) > 1:
self.validation_step_outputs[dataloader_idx].append(loss)
else:
self.validation_step_outputs.append(loss)
return loss
def test_step(self, batch: Dict[str, torch.Tensor], batch_idx: int, dataloader_idx: int = 0) -> Dict[str, None]:
"""
Lightning calls this inside the test loop with the data from the test dataloader passed in as ``batch``.
See more details in :meth:`eval_step`.
Args:
batch (:obj:`dict`): see :meth:`eval_step` for the ``batch`` parameter explanation
batch_idx (:obj:`int`): an index of a batch in a dataset. A mandatory Lightning parameter
dataloader_idx (:obj:`int`): a NeMo parameter for performing testing on multiple datasets
Returns:
:obj:`Dict[str, None]`: a dictionary containing items ``'loss'``, ``'punct_class_report'``,
``'capit_class_report'`` which values are ``None``. Values are ``None`` because metrics are computed using
``torchmetrics``.
"""
loss = self.eval_step(batch, 'test', dataloader_idx)
if type(self.trainer.test_dataloaders) == list and len(self.trainer.test_dataloaders) > 1:
self.test_step_outputs[dataloader_idx].append(loss)
else:
self.test_step_outputs.append(loss)
return loss
def on_train_epoch_end(self) -> None:
"""
Called at the end of training epoch. This method properly shuffles
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset.BertPunctuationCapitalizationDataset`.
Regular data loader shuffling only permutes batches.
Args:
outputs (:obj:`pytorch_lightning.utilities.types.EPOCH_OUTPUT`): an output of all training steps. It is a
mandatory PyTorch Lightning parameter, and it is not used in this method
"""
shuffle = self._cfg.train_ds.get('shuffle')
if shuffle is None: # Encountered legacy config
shuffle = not self.cfg.train_ds.get('use_tarred_dataset', False)
if shuffle:
if isinstance(self.train_dataloader().dataset, BertPunctuationCapitalizationDataset):
self.train_dataloader().dataset.repack_batches_with_shuffle()
def _multi_eval_epoch_end(self, mode: str, dataloader_idx: int) -> Dict[str, Dict[str, torch.Tensor]]:
loss = self.metrics[mode]['loss'][dataloader_idx].compute()
self.metrics[mode]['loss'][dataloader_idx].reset()
punct_res = self.metrics[mode]['punct_class_report'][dataloader_idx].compute()
punct_precision, punct_recall, punct_f1, punct_report = punct_res
self.metrics[mode]['punct_class_report'][dataloader_idx].reset()
capit_res = self.metrics[mode]['capit_class_report'][dataloader_idx].compute()
capit_precision, capit_recall, capit_f1, capit_report = capit_res
self.metrics[mode]['capit_class_report'][dataloader_idx].reset()
log_dict = {
'log': {
f'{mode}_loss': loss,
f'{mode}_punct_precision': punct_precision,
f'{mode}_punct_f1': punct_f1,
f'{mode}_punct_recall': punct_recall,
f'{mode}_capit_precision': capit_precision,
f'{mode}_capit_f1': capit_f1,
f'{mode}_capit_recall': capit_recall,
}
}
logging.info(f'Punctuation report: {punct_report}')
logging.info(f'Capitalization report: {capit_report}')
return log_dict
def multi_validation_epoch_end(self, outputs: Any, dataloader_idx: int = 0) -> Dict[str, Dict[str, torch.Tensor]]:
"""
Called at the end of validation to compute and log metrics.
"""
return self._multi_eval_epoch_end('val', dataloader_idx)
def multi_test_epoch_end(self, outputs: Any, dataloader_idx: int = 0) -> Dict[str, Dict[str, torch.Tensor]]:
"""
Called at the end of model testing to compute and log metrics.
"""
return self._multi_eval_epoch_end('test', dataloader_idx)
def update_config_after_restoring_from_checkpoint(self, **kwargs) -> None:
"""
Set new values for some sections of config. Useful after restoring from checkpoint for fine-tuning
and testing if config parameters of a restored checkpoint are not suitable.
For ``class_labels``, ``common_dataset_parameters``, ``train_ds``, ``validation_ds``, ``test_ds``, there is
        no need to provide values for all items in an updated config section. If an item is omitted from this method's
        parameters, then the corresponding item in the model config does not change.
        If the entire updated section is missing in the model config, then omitted items are set according to the
        default values listed :ref:`here <run-config-label>`.
.. warning::
Parameter ``optim`` is processed in a special way. ``optim`` contents are used not for updating of
model config, but for replacement of entire config section.
If one of parameters ``train_ds``, ``validation_ds``, ``test_ds``, is provided but its value is
``None``, then corresponding section is replaced with ``None``.
.. warning::
You may change values of parameters related to label ids:
- ``common_dataset_parameters.punct_label_ids``,
- ``common_dataset_parameters.capit_label_ids``,
- ``common_dataset_parameters.label_vocab_dir``,
- ``class_labels.punct_labels_file``,
- ``class_labels.capit_labels_file``,
yet label ids in these parameters must be equal to label ids loaded from checkpoint. Otherwise,
an error will be raised.
Keyword Args:
class_labels (:obj:`Union[DictConfig, Dict[str, str]]`): names of label id files used as label
id dictionaries. See more in :ref:`class labels' config<class-labels-config-label>`.
common_dataset_parameters (:obj:`Union[DictConfig, Dict[str, Any]]`, `optional`): see more in
:ref:`common dataset parameters config<common-dataset-parameters-config-label>`.
train_ds (:obj:`Union[DictConfig, Dict[str, Any]]`, `optional`): configuration of training dataset. See
possible options in :ref:`data config<data-config-label>`.
validation_ds (:obj:`Union[DictConfig, Dict[str, Any]]`, `optional`): configuration of validation
dataset. See possible options in :ref:`data config<data-config-label>`.
test_ds (:obj:`Union[DictConfig, Dict[str, Any]]`, `optional`): configuration of test dataset. See
possible options in :ref:`data config<data-config-label>`.
optim (:obj:`Union[DictConfig, Dict[str, Any]]`, `optional`): optimization configuration. See possible
options in :ref:`optimization<optimization-label>` and in `primer
<https://github.com/NVIDIA/NeMo/blob/main/tutorials/00_NeMo_Primer.ipynb>`_ tutorial.
"""
allowed_keys = {'class_labels', 'common_dataset_parameters', 'train_ds', 'validation_ds', 'test_ds', 'optim'}
unexpected_keys = set(kwargs) - allowed_keys
if unexpected_keys:
raise ValueError(
f"Found unexpected keyword arguments: {unexpected_keys}. You can use only {allowed_keys}."
)
if 'class_labels' in kwargs:
if kwargs['class_labels'] is None:
raise ValueError(
f"'class_labels' parameters is `None`, whereas you cannot remove section 'class_labels' from model "
f"config."
)
self._cfg.class_labels = OmegaConf.merge(self._cfg.class_labels, OmegaConf.create(kwargs['class_labels']))
if 'common_dataset_parameters' in kwargs:
if kwargs['common_dataset_parameters'] is None:
raise ValueError(
f"'common_dataset_parameters' item is `None`, whereas you cannot remove section"
f"'common_dataset_parameters' from model config."
)
self._cfg.common_dataset_parameters = OmegaConf.merge(
self._cfg.common_dataset_parameters, OmegaConf.create(kwargs['common_dataset_parameters'])
)
self._check_label_config_parameters()
if 'train_ds' in kwargs:
if kwargs['train_ds'] is None:
self._cfg.train_ds = None
else:
if 'train_ds' in self._cfg and self._cfg.train_ds is not None:
base = self._cfg.train_ds
else:
base = OmegaConf.structured(PunctuationCapitalizationTrainDataConfig)
self._cfg.train_ds = OmegaConf.merge(base, OmegaConf.create(kwargs['train_ds']))
if 'validation_ds' in kwargs:
if kwargs['validation_ds'] is None:
self._cfg.validation_ds = None
else:
if 'validation_ds' in self._cfg and self._cfg.validation_ds is not None:
base = self._cfg.validation_ds
else:
base = OmegaConf.structured(PunctuationCapitalizationEvalDataConfig)
self._cfg.validation_ds = OmegaConf.merge(base, OmegaConf.create(kwargs['validation_ds']))
if 'test_ds' in kwargs:
if kwargs['test_ds'] is None:
self._cfg.test_ds = None
else:
if 'test_ds' in self._cfg and self._cfg.test_ds is not None:
base = self._cfg.test_ds
else:
base = OmegaConf.structured(PunctuationCapitalizationEvalDataConfig)
self._cfg.test_ds = OmegaConf.merge(base, OmegaConf.create(kwargs['test_ds']))
if 'optim' in kwargs:
self._cfg.optim = kwargs['optim']
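    # Illustrative (comment-only) usage sketch for `update_config_after_restoring_from_checkpoint`; the checkpoint
    # and data paths below are hypothetical placeholders:
    #
    #     model = PunctuationCapitalizationModel.restore_from("punctuation_capitalization.nemo")
    #     model.update_config_after_restoring_from_checkpoint(
    #         test_ds={
    #             'ds_item': '/data/new_test_set',
    #             'text_file': 'text.txt',
    #             'labels_file': 'labels.txt',
    #             'tokens_in_batch': 1024,
    #         },
    #     )
    #     model.setup_test_data()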
def setup_training_data(self, train_data_config: Optional[Union[Dict[str, Any], DictConfig]] = None) -> None:
"""
Sets up training data: creates dataset and sets data loader. If parameter ``train_data_config`` is not
provided, then :ref:`config<model-config-label>` section ``train_ds`` will be used.
Args:
train_data_config (:obj:`Union[Dict[str, Any], DictConfig]`, `optional`): a dictionary that should contain
only fields present in :ref:`data config<data-config-label>`.
If some of the fields are missing, then they will be set according to
:ref:`data config<data-config-label>` defaults. If ``train_data_config`` parameter is not set, then
``train_ds`` item of model config is used. Here model config is a configuration used for model
instantiation.
"""
if train_data_config is not None:
train_data_config = OmegaConf.create(train_data_config)
train_data_config = OmegaConf.merge(
OmegaConf.structured(PunctuationCapitalizationTrainDataConfig), train_data_config
)
if train_data_config is None:
train_data_config = self._cfg.train_ds
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, train=True)
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if 'use_tarred_dataset' in train_data_config and train_data_config['use_tarred_dataset']:
# We also need to check if limit_train_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # training batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if self._trainer is not None and isinstance(self._trainer.limit_train_batches, float):
self._trainer.limit_train_batches = int(
self._trainer.limit_train_batches * ceil(len(self._train_dl.dataset) / self.world_size)
)
elif self._trainer is None:
logging.warning(
"Model Trainer was not set before constructing the dataset, incorrect number of "
"training batches will be used. Please set the trainer and rebuild the dataset."
)
self.punct_label_ids = self._train_dl.dataset.punct_label_ids.copy()
self.capit_label_ids = self._train_dl.dataset.capit_label_ids.copy()
self.label_ids_are_set = True
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
label_vocab_dir = self._cfg.common_dataset_parameters.label_vocab_dir
if label_vocab_dir is None:
punct_label_ids_file, capit_label_ids_file = self._train_dl.dataset.save_labels_and_get_file_paths(
self._cfg.class_labels.punct_labels_file, self._cfg.class_labels.capit_labels_file
)
else:
punct_label_ids_file = Path(label_vocab_dir).expanduser() / self._cfg.class_labels.punct_labels_file
capit_label_ids_file = Path(label_vocab_dir).expanduser() / self._cfg.class_labels.capit_labels_file
self.register_artifact('class_labels.punct_labels_file', str(punct_label_ids_file))
self.register_artifact('class_labels.capit_labels_file', str(capit_label_ids_file))
def _get_eval_metrics_kwargs(
self,
) -> Tuple[
Dict[str, bool],
Dict[str, Union[bool, str, int, Dict[str, int]]],
Dict[str, Union[bool, str, int, Dict[str, int]]],
]:
loss_kw = {'dist_sync_on_step': False, 'take_avg_loss': True}
punct_kw = {
'num_classes': len(self.punct_label_ids),
'label_ids': self.punct_label_ids,
'mode': 'macro',
'dist_sync_on_step': False,
}
capit_kw = {
'num_classes': len(self.capit_label_ids),
'label_ids': self.capit_label_ids,
'mode': 'macro',
'dist_sync_on_step': False,
}
return loss_kw, punct_kw, capit_kw
def _setup_metrics_dictionary(self) -> None:
eval_metrics = torch.nn.ModuleDict(
{
"loss": torch.nn.ModuleList([]),
"punct_class_report": torch.nn.ModuleList([]),
"capit_class_report": torch.nn.ModuleList([]),
}
)
self.metrics = torch.nn.ModuleDict({"val": eval_metrics, "test": copy.deepcopy(eval_metrics)})
def setup_validation_data(self, val_data_config: Optional[Union[Dict[str, Any], DictConfig]] = None) -> None:
"""
Sets up validation data: creates dataset and sets data loader. If parameter ``val_data_config`` is not
provided, then ``validation_ds`` :ref:`config <model-config-label>` section will be used. Here model config is
a configuration used for model instantiation.
Args:
val_data_config (:obj:`Union[Dict[str, Any], DictConfig]`, `optional`): a dictionary that should contain
only fields present in data config :ref:`description<data-config-label>`.
If some of the fields are missing, then they will be set according to data config
:ref:`description<data-config-label>` defaults. If ``val_data_config`` parameter is not set, then
``validation_ds`` item of model config is used. Here model config is a configuration used for model
instantiation.
"""
if val_data_config is not None:
val_data_config = OmegaConf.create(val_data_config)
val_data_config = OmegaConf.merge(
OmegaConf.structured(PunctuationCapitalizationEvalDataConfig), val_data_config
)
if self.metrics is None:
self._setup_metrics_dictionary()
if val_data_config is None:
val_data_config = self._cfg.validation_ds
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config, train=False)
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if 'use_tarred_dataset' in val_data_config and val_data_config['use_tarred_dataset']:
# We also need to check if limit_val_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # validation batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if self._trainer is not None and isinstance(self._trainer.limit_val_batches, float):
self._trainer.limit_val_batches = int(
self._trainer.limit_val_batches * ceil(len(self._validation_dl.dataset) / self.world_size)
)
elif self._trainer is None:
logging.warning(
"Model Trainer was not set before constructing the dataset, incorrect number of "
"validation batches will be used. Please set the trainer and rebuild the dataset."
)
loss_kw, punct_kw, capit_kw = self._get_eval_metrics_kwargs()
self.metrics['val']['loss'].append(GlobalAverageLossMetric(**loss_kw))
self.metrics['val']['punct_class_report'].append(ClassificationReport(**punct_kw))
self.metrics['val']['capit_class_report'].append(ClassificationReport(**capit_kw))
def setup_test_data(self, test_data_config: Optional[Union[Dict[str, Any], DictConfig]] = None) -> None:
"""
Sets up test data: creates dataset and sets data loader. If parameter ``test_data_config`` is not
        provided, then the ``test_ds`` config section will be used. See more in the data config
:ref:`description <data-config-label>` and model config :ref:`description<model-config-label>`.
Args:
test_data_config (:obj:`Union[Dict[str, Any], DictConfig]`, `optional`): a dictionary that should contain
only fields present in data config :ref:`description<data-config-label>`.
If some of the fields are missing, then they will be set according to data config
:ref:`description <data-config-label>` defaults. If ``test_data_config`` parameter is not set, then
``test_ds`` item of :ref:`model config <model-config-label>` is used. Here model config is a
configuration used for model instantiation.
"""
if test_data_config is not None:
test_data_config = OmegaConf.create(test_data_config)
test_data_config = OmegaConf.merge(
OmegaConf.structured(PunctuationCapitalizationEvalDataConfig), test_data_config
)
if self.metrics is None:
self._setup_metrics_dictionary()
if test_data_config is None:
test_data_config = self._cfg.test_ds
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, train=False)
# Check for multiple dataloaders here as it may not get called in ModelPT when models are being restored
if type(self._test_dl) == list and len(self._test_dl) > 1:
for _ in range(len(self._test_dl)):
self.test_step_outputs.append([])
loss_kw, punct_kw, capit_kw = self._get_eval_metrics_kwargs()
self.metrics['test']['loss'].append(GlobalAverageLossMetric(**loss_kw))
self.metrics['test']['punct_class_report'].append(ClassificationReport(**punct_kw))
self.metrics['test']['capit_class_report'].append(ClassificationReport(**capit_kw))
def _check_label_config_parameters(self) -> None:
"""
Checks that config items ``common_dataset_parameters.punct_label_ids`` and
``common_dataset_parameters.punct_label_vocab_file``,
``common_dataset_parameters.capit_label_ids`` and ``common_dataset_parameters.capit_label_vocab_file`` contain
identical label ids. Of course, if any of these parameters is ``None``, then check is not performed.
In addition, this method checks that ``common_dataset_parameters.pad_label`` has id ``0`` in punctuation and
capitalization label ids.
"""
pli = self._cfg.common_dataset_parameters.punct_label_ids
cli = self._cfg.common_dataset_parameters.capit_label_ids
pad_label = self._cfg.common_dataset_parameters.pad_label
plvf, clvf = self._extract_label_vocab_files_from_config()
for label_ids, label_vocab_file, already_set_label_ids, label_ids_name, label_vocab_name in [
(pli, plvf, self.punct_label_ids, 'punct_label_ids', 'punct_label_vocab_file'),
(cli, clvf, self.capit_label_ids, 'capit_label_ids', 'capit_label_vocab_file'),
]:
if label_vocab_file is not None:
file_label_ids = load_label_ids(label_vocab_file)
if label_ids is not None and label_vocab_file is not None:
if label_ids != file_label_ids:
raise_not_equal_labels_error(
first_labels=label_ids,
second_labels=file_label_ids,
first_labels_desc=f"Labels passed in config parameter "
f"`model.common_dataset_parameters.{label_ids_name}`",
                        second_labels_desc=f"Labels loaded from file {label_vocab_file} passed in config "
                        f"parameter `model.common_dataset_parameters.{label_vocab_name}`",
)
if already_set_label_ids is not None:
config_label_ids = label_ids if label_vocab_file is None else file_label_ids
if config_label_ids is not None:
if label_vocab_file is None:
config_label_ids_source = (
f"Labels passed in config parameter `model.common_dataset_parameters.{label_ids_name}`"
)
else:
config_label_ids_source = (
f"Labels loaded from file {plvf} passed in config parameter "
f"`model.common_dataset_parameters.{label_vocab_name}`"
)
if already_set_label_ids != config_label_ids:
raise_not_equal_labels_error(
first_labels=config_label_ids,
second_labels=already_set_label_ids,
first_labels_desc=config_label_ids_source,
second_labels_desc=f"Labels which are already set in an attribute "
f"`PunctuationCapitalizationModel.{label_ids_name}`",
)
if plvf is not None:
pli = load_label_ids(plvf)
if clvf is not None:
cli = load_label_ids(clvf)
for label_ids, parameter_name in [
(pli, 'punct_label_vocab_file' if pli is None else 'punct_label_ids'),
(cli, 'capit_label_vocab_file' if cli is None else 'capit_label_ids'),
]:
if label_ids is not None and label_ids[pad_label] != 0:
raise ValueError(
f"Pad label '{pad_label}' has non zero id {label_ids[pad_label]} in "
f"`model.common_dataset_parameters.{parameter_name}`."
)
def _extract_label_vocab_files_from_config(self) -> Tuple[Optional[Path], Optional[Path]]:
if self._is_model_being_restored():
punct_label_vocab_file = self._cfg.class_labels.punct_labels_file
capit_label_vocab_file = self._cfg.class_labels.capit_labels_file
else:
if self._cfg.common_dataset_parameters.label_vocab_dir is None:
punct_label_vocab_file, capit_label_vocab_file = None, None
else:
label_vocab_dir = Path(self._cfg.common_dataset_parameters.label_vocab_dir).expanduser()
punct_label_vocab_file = label_vocab_dir / self._cfg.class_labels.punct_labels_file
capit_label_vocab_file = label_vocab_dir / self._cfg.class_labels.capit_labels_file
return punct_label_vocab_file, capit_label_vocab_file
def _set_label_ids(self) -> None:
"""
Set model attributes ``punct_label_ids`` and ``capit_label_ids`` based on label ids passed in config
item ``common_dataset_parameters``.
This method also registers artifacts ``class_labels.punct_labels_file`` and ``class_labels.capit_labels_file``.
This method is called if you do not plan to infer label ids from training file with labels. If training file
with labels is going to be used, then calling :meth:`~setup_training_data` is enough to set
``punct_label_ids`` and ``capit_label_ids`` and register label artifacts.
"""
punct_label_vocab_file, capit_label_vocab_file = self._extract_label_vocab_files_from_config()
if punct_label_vocab_file is not None:
punct_labels_file = self.register_artifact('class_labels.punct_labels_file', str(punct_label_vocab_file))
if punct_labels_file is None:
logging.warning(
f"The artifact `class_labels.punct_labels_file` was not found in checkpoint. Will rely on "
f"`punct_label_ids` parameter"
)
self.punct_label_ids = OmegaConf.to_container(self._cfg.common_dataset_parameters.punct_label_ids)
else:
self.punct_label_ids = load_label_ids(
self.register_artifact('class_labels.punct_labels_file', str(punct_label_vocab_file))
)
elif self._cfg.common_dataset_parameters.punct_label_ids is not None:
self.punct_label_ids = OmegaConf.to_container(self._cfg.common_dataset_parameters.punct_label_ids)
else:
raise ValueError(
f"Could not set attribute `punct_label_ids`. Config parameters "
f"`model.common_dataset_parameters.punct_label_ids`, "
f"`model.common_dataset_parameters.punct_label_vocab_file` are not set. Another way to set "
f"`punct_label_ids` is calling method `setup_training_data`. That way punctuation label ids will be "
f"inferred from training set."
)
if capit_label_vocab_file is not None:
capit_labels_file = self.register_artifact('class_labels.capit_labels_file', str(capit_label_vocab_file))
if capit_labels_file is None:
logging.warning(
f"The artifact `class_labels.capit_labels_file` was not found in checkpoint. Will rely on "
f"`capit_label_ids` parameter"
)
self.capit_label_ids = OmegaConf.to_container(self._cfg.common_dataset_parameters.capit_label_ids)
else:
self.capit_label_ids = load_label_ids(
self.register_artifact('class_labels.capit_labels_file', str(capit_label_vocab_file))
)
elif self._cfg.common_dataset_parameters.capit_label_ids is not None:
self.capit_label_ids = OmegaConf.to_container(self._cfg.common_dataset_parameters.capit_label_ids)
else:
raise ValueError(
f"Could not set attribute `capit_label_ids`. Config parameters "
f"`model.common_dataset_parameters.capit_label_ids`, "
f"`model.common_dataset_parameters.capit_label_vocab_file` are not set. Another way to set "
f"`capit_label_ids` is calling method `setup_training_data`. That way capitalization label ids will "
f"be inferred from training set."
)
self.label_ids_are_set = True
def _setup_dataloader_from_config(self, cfg: DictConfig, train: bool) -> torch.utils.data.DataLoader:
"""
Creates dataset and data loader according to config ``cfg``. If ``train=False`` and attributes
``punct_label_ids`` and ``capit_label_ids`` are not set, then this method sets the attributes and registers
label artifacts.
Args:
cfg (:obj:`DictConfig`): a config which follows dataclass
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset.PunctuationCapitalizationEvalDataConfig`
Note that list ``ds_item`` is not supported because list ``ds_item`` is unpacked by NeMo core
instruments
train (:obj:`bool`): whether train data is set. If ``True``, then label ids are not set in this function
"""
self._check_label_config_parameters()
if not self.label_ids_are_set and not train:
self._set_label_ids()
if cfg.use_tarred_dataset:
if cfg.tar_metadata_file is None:
raise ValueError(
f"If parameter `use_tarred_dataset` is `True`, then a field `tar_metadata_file` has to be a path "
f"to tarred dataset metadata file, whereas `None` is given."
)
tar_metadata_file = Path(cfg.ds_item) / cfg.tar_metadata_file
dataset = BertPunctuationCapitalizationTarredDataset(
metadata_file=tar_metadata_file,
tokenizer=self.tokenizer,
pad_label=self._cfg.common_dataset_parameters.pad_label,
ignore_extra_tokens=self._cfg.common_dataset_parameters.ignore_extra_tokens,
ignore_start_end=self._cfg.common_dataset_parameters.ignore_start_end,
world_size=self.world_size,
global_rank=self.global_rank,
shuffle_n=cfg.tar_shuffle_n,
shard_strategy=cfg.shard_strategy,
label_info_save_dir=cfg.label_info_save_dir,
use_audio=cfg.use_audio,
)
dataset.check_for_label_consistency_with_model_config(
self.punct_label_ids,
self.capit_label_ids,
self._cfg.class_labels,
self._cfg.common_dataset_parameters,
)
else:
if cfg.text_file is None or cfg.labels_file is None:
raise ValueError(
f"If parameter `use_tarred_dataset` is `False`, then fields `text_file` and `labels_file` in "
f"dataset config must not be `None`. Whereas `text_file={cfg.text_file}` and "
f"`label_file={cfg.labels_file}`."
)
if cfg.tokens_in_batch is None and cfg.use_bucketing:
raise ValueError(
f"If `use_tarred_dataset` is `False`, then you need to provide `tokens_in_batch` parameter."
)
            text_file, labels_file = Path(cfg.ds_item) / cfg.text_file, Path(cfg.ds_item) / cfg.labels_file
if cfg.audio_file:
audio_file = Path(cfg.ds_item) / cfg.audio_file
if self.label_ids_are_set:
label_kwargs = {'punct_label_ids': self.punct_label_ids, 'capit_label_ids': self.capit_label_ids}
else:
punct_label_vocab_file, capit_label_vocab_file = self._extract_label_vocab_files_from_config()
label_kwargs = {
'punct_label_ids': self._cfg.common_dataset_parameters.punct_label_ids,
'capit_label_ids': self._cfg.common_dataset_parameters.capit_label_ids,
'punct_label_vocab_file': punct_label_vocab_file,
'capit_label_vocab_file': capit_label_vocab_file,
}
if train:
number_of_batches_is_multiple_of = 1
if self._trainer is None:
warnings.warn(
                        'A model attribute `trainer` is not set before training dataset setup. If training is '
                        'resumed from a checkpoint, then data loading in the current epoch can be distorted: some '
                        'batches may be processed several times and some may not be processed at all. '
                        '`trainer.current_epoch` is used as the random seed for shuffling batches, so 0 will be '
                        'used now. If the checkpoint was not created during the initial epoch, the shuffling of the '
                        'dataset will be different. You may try to use the `exp_manager()` function and the '
                        '`PunctuationCapitalizationModel.set_trainer()` method before calling '
                        '`PunctuationCapitalizationModel.setup_training_data()`.'
)
batch_shuffling_random_seed = 0
else:
batch_shuffling_random_seed = self._trainer.current_epoch
else:
batch_shuffling_random_seed = 0
if self._trainer is None:
warnings.warn(
                        'A model attribute `trainer` is not set before test or validation dataset setup. If more '
                        'than 1 GPU is used for testing, then some examples may be tested several times because '
                        'the number of batches may not be evenly divisible by the number of processes. This leads '
                        'to distortion of metrics. See more in the description of the '
                        '`number_of_batches_is_multiple_of` parameter of the `BertPunctuationCapitalizationDataset` '
                        'initializer and https://pytorch.org/docs/stable/data.html#multi-process-data-loading. You '
                        'may try to use the `PunctuationCapitalizationModel.set_trainer()` method before calling '
                        '`PunctuationCapitalizationModel.setup_validation_data()` and '
                        '`PunctuationCapitalizationModel.setup_test_data()`.'
)
number_of_batches_is_multiple_of = 1
else:
number_of_batches_is_multiple_of = self._trainer.num_nodes * self._trainer.num_devices
if cfg.cache_dir is None:
cache_dir = cfg.cache_dir
else:
                # If pickled features are saved in a `cache_dir` which is not the directory containing the original
                # data files, then the full path to the data directory has to be appended to `cache_dir`. This is
                # done to avoid collisions when caches for different datasets are saved to the same `cache_dir`.
cache_dir = Path(cfg.cache_dir).joinpath('fsroot', *text_file.expanduser().resolve().parts[1:-1])
dataset = BertPunctuationCapitalizationDataset(
tokenizer=self.tokenizer,
text_file=text_file,
labels_file=labels_file,
pad_label=self._cfg.common_dataset_parameters.pad_label,
**label_kwargs,
max_seq_length=cfg.max_seq_length,
ignore_extra_tokens=self._cfg.common_dataset_parameters.ignore_extra_tokens,
ignore_start_end=self._cfg.common_dataset_parameters.ignore_start_end,
use_cache=cfg.use_cache,
num_samples=cfg.num_samples,
tokens_in_batch=cfg.tokens_in_batch,
n_jobs=cfg.n_jobs,
number_of_batches_is_multiple_of=number_of_batches_is_multiple_of,
batch_shuffling_random_seed=batch_shuffling_random_seed,
verbose=cfg.verbose,
get_label_frequencies=cfg.get_label_frequences,
cache_dir=cache_dir,
label_info_save_dir=cfg.label_info_save_dir,
audio_file=audio_file if cfg.audio_file else None,
sample_rate=cfg.sample_rate,
use_audio=cfg.use_audio,
use_bucketing=cfg.use_bucketing,
preload_audios=cfg.preload_audios,
)
if cfg.shuffle and cfg.use_tarred_dataset:
logging.warning(f"Shuffling in dataloader is not supported for tarred dataset.")
shuffle = False
else:
shuffle = cfg.shuffle
return torch.utils.data.DataLoader(
dataset=dataset,
collate_fn=dataset.collate_fn,
batch_size=1 if cfg.use_bucketing else cfg.batch_size,
shuffle=shuffle,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
drop_last=cfg.drop_last,
persistent_workers=cfg.persistent_workers if cfg.num_workers > 0 else False,
)
def _setup_infer_dataloader(
self,
queries: List[str],
batch_size: int,
max_seq_length: int,
step: int,
margin: int,
dataloader_kwargs: Optional[Dict[str, Any]],
audio_queries: Optional[Union[List[bytes], List[str]]] = None,
target_sr: Optional[int] = None,
) -> torch.utils.data.DataLoader:
"""
Setup function for an infer data loader.
Args:
queries (:obj:`List[str]`): lower cased text without punctuation
batch_size (:obj:`int`): batch size to use during inference
max_seq_length (:obj:`int`): length of segments into which queries are split. ``max_seq_length`` includes
                ``[CLS]`` and ``[SEP]`` so every segment contains at most ``max_seq_length-2`` tokens from an input
                query.
step (:obj:`int`): number of tokens by which a segment is offset to a previous segment. Parameter ``step``
cannot be greater than ``max_seq_length-2``.
margin (:obj:`int`): number of tokens near the edge of a segment which label probabilities are not used in
final prediction computation.
audio_queries (:obj:`List[str]`, `optional`): paths to audio files.
target_sr (:obj:`int`, `optional`): target sample rate for audios.
Returns:
:obj:`torch.utils.data.DataLoader`: inference data loader
"""
if dataloader_kwargs is None:
dataloader_kwargs = {}
dataset = BertPunctuationCapitalizationInferDataset(
tokenizer=self.tokenizer,
queries=queries,
max_seq_length=max_seq_length,
step=step,
margin=margin,
audio_queries=audio_queries,
target_sr=target_sr,
)
return torch.utils.data.DataLoader(
dataset=dataset,
collate_fn=dataset.collate_fn,
batch_size=batch_size,
shuffle=False,
drop_last=False,
**dataloader_kwargs,
)
@staticmethod
def _remove_margins(tensor: torch.Tensor, margin_size: int, keep_left: bool, keep_right: bool) -> torch.Tensor:
tensor = tensor.detach().clone()
if not keep_left:
tensor = tensor[margin_size + 1 :] # remove left margin and CLS token
if not keep_right:
tensor = tensor[: tensor.shape[0] - margin_size - 1] # remove right margin and SEP token
return tensor
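    # Worked (comment-only) example for `_remove_margins`: with margin_size=1 and a middle segment
    # (keep_left=False, keep_right=False), a length-7 tensor laid out as
    # [CLS], m, a, b, c, m, [SEP] is trimmed to [a, b, c]: one margin element plus the special token is
    # dropped on each side.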
def _transform_logit_to_prob_and_remove_margins_and_extract_word_probs(
self,
punct_logits: torch.Tensor,
capit_logits: torch.Tensor,
subtokens_mask: torch.Tensor,
start_word_ids: Tuple[int],
margin: int,
is_first: Tuple[bool],
is_last: Tuple[bool],
) -> Tuple[List[np.ndarray], List[np.ndarray], List[int]]:
"""
Applies softmax to get punctuation and capitalization probabilities, applies ``subtokens_mask`` to extract
probabilities for words from probabilities for tokens, removes ``margin`` probabilities near edges of a segment.
Left margin of the first segment in a query and right margin of the last segment in a query are not removed.
        Calculates new ``start_word_ids`` taking the margins into account. If the left margin of a segment is
        removed, the corresponding start word index is increased by the number of words (the number of nonzero values
        in the corresponding ``subtokens_mask``) in the margin.
Args:
punct_logits: a float tensor of shape ``[batch_size, segment_length, number_of_punctuation_labels]``
capit_logits: a float tensor of shape ``[batch_size, segment_length, number_of_capitalization_labels]``
subtokens_mask: a float tensor of shape ``[batch_size, segment_length]``
start_word_ids: indices of segment first words in a query
margin: number of tokens near edges of a segment which probabilities are discarded
is_first: is segment the first segment in a query
is_last: is segment the last segment in a query
Returns:
b_punct_probs: list containing ``batch_size`` numpy arrays. The numpy arrays have shapes
                ``[number_of_words_in_this_segment, number_of_punctuation_labels]``. Word punctuation probabilities for
segments in the batch.
b_capit_probs: list containing ``batch_size`` numpy arrays. The numpy arrays have shapes
                ``[number_of_words_in_this_segment, number_of_capitalization_labels]``. Word capitalization
probabilities for segments in the batch.
new_start_word_ids: indices of segment first words in a query after margin removal
"""
new_start_word_ids = list(start_word_ids)
subtokens_mask = subtokens_mask > 0.5
b_punct_probs, b_capit_probs = [], []
for i, (first, last, pl, cl, stm) in enumerate(
zip(is_first, is_last, punct_logits, capit_logits, subtokens_mask)
):
if not first:
new_start_word_ids[i] += torch.count_nonzero(stm[: margin + 1]).numpy() # + 1 is for [CLS] token
stm = self._remove_margins(stm, margin, keep_left=first, keep_right=last)
for b_probs, logits in [(b_punct_probs, pl), (b_capit_probs, cl)]:
p = torch.nn.functional.softmax(
self._remove_margins(logits, margin, keep_left=first, keep_right=last)[stm], dim=-1,
)
b_probs.append(p.detach().cpu().numpy())
return b_punct_probs, b_capit_probs, new_start_word_ids
@staticmethod
def _move_acc_probs_to_token_preds(
pred: List[int], acc_prob: np.ndarray, number_of_probs_to_move: int
) -> Tuple[List[int], np.ndarray]:
"""
        The first ``number_of_probs_to_move`` rows are removed from ``acc_prob``. From every removed row the label
        with the largest probability is selected and appended to ``pred``.
Args:
pred: list with ready label indices for a query
acc_prob: numpy array of shape ``[number_of_words_for_which_probabilities_are_accumulated, number_of_labels]``
number_of_probs_to_move: int
Returns:
pred: list with ready label indices for a query
acc_prob: numpy array of shape
``[number_of_words_for_which_probabilities_are_accumulated - number_of_probs_to_move, number_of_labels]``
"""
if number_of_probs_to_move > acc_prob.shape[0]:
raise ValueError(
f"Not enough accumulated probabilities. Number_of_probs_to_move={number_of_probs_to_move} "
f"acc_prob.shape={acc_prob.shape}"
)
if number_of_probs_to_move > 0:
pred = pred + list(np.argmax(acc_prob[:number_of_probs_to_move], axis=-1))
acc_prob = acc_prob[number_of_probs_to_move:]
return pred, acc_prob
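    # Worked (comment-only) example for `_move_acc_probs_to_token_preds`: with pred=[2],
    # acc_prob=[[0.1, 0.9], [0.7, 0.3]] and number_of_probs_to_move=1, the argmax of the first row (1) is
    # appended, returning pred=[2, 1] and acc_prob=[[0.7, 0.3]].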
@staticmethod
def _update_accumulated_probabilities(acc_prob: np.ndarray, update: np.ndarray) -> np.ndarray:
"""
Args:
acc_prob: numpy array of shape ``[A, L]``
update: numpy array of shape ``[A + N, L]``
Returns:
numpy array of shape ``[A + N, L]``
"""
acc_prob = np.concatenate([acc_prob * update[: acc_prob.shape[0]], update[acc_prob.shape[0] :]], axis=0)
return acc_prob
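    # Worked (comment-only) example for `_update_accumulated_probabilities`: with acc_prob=[[0.2, 0.8]] (A=1)
    # and update=[[0.5, 0.5], [0.9, 0.1]] (A + N = 2), the overlapping row is multiplied elementwise and the
    # remaining row is appended unchanged, giving [[0.1, 0.4], [0.9, 0.1]].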
def _apply_punct_capit_predictions(self, query: str, punct_preds: List[int], capit_preds: List[int]) -> str:
"""
Restores punctuation and capitalization in ``query``.
Args:
query: a string without punctuation and capitalization
punct_preds: ids of predicted punctuation labels
capit_preds: ids of predicted capitalization labels
Returns:
a query with restored punctuation and capitalization
"""
query = query.strip().split()
assert len(query) == len(
punct_preds
), f"len(query)={len(query)} len(punct_preds)={len(punct_preds)}, query[:30]={query[:30]}"
assert len(query) == len(
capit_preds
), f"len(query)={len(query)} len(capit_preds)={len(capit_preds)}, query[:30]={query[:30]}"
punct_ids_to_labels = {v: k for k, v in self.punct_label_ids.items()}
capit_ids_to_labels = {v: k for k, v in self.capit_label_ids.items()}
query_with_punct_and_capit = ''
for j, word in enumerate(query):
punct_label = punct_ids_to_labels[punct_preds[j]]
capit_label = capit_ids_to_labels[capit_preds[j]]
if capit_label != self._cfg.common_dataset_parameters.pad_label:
word = word.capitalize()
query_with_punct_and_capit += word
if punct_label != self._cfg.common_dataset_parameters.pad_label:
query_with_punct_and_capit += punct_label
query_with_punct_and_capit += ' '
return query_with_punct_and_capit[:-1]
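    # Worked (comment-only) example for `_apply_punct_capit_predictions`, assuming hypothetical label maps
    # punct_label_ids={'O': 0, ',': 1, '.': 2, '?': 3}, capit_label_ids={'O': 0, 'U': 1} and pad_label='O':
    # query='how are you', punct_preds=[0, 0, 3], capit_preds=[1, 0, 0] produces 'How are you?'.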
def _get_labels(self, punct_preds: List[int], capit_preds: List[int]) -> str:
"""
Returns punctuation and capitalization labels in NeMo format for encoded punctuation ``punct_preds``
and ``capit_preds`` labels (see https://docs.nvidia.com/deeplearning/nemo/
user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#nemo-data-format).
Args:
punct_preds: ids of predicted punctuation labels
capit_preds: ids of predicted capitalization labels
Returns:
labels in NeMo format
"""
assert len(capit_preds) == len(
punct_preds
), f"len(capit_preds)={len(capit_preds)} len(punct_preds)={len(punct_preds)}"
punct_ids_to_labels = {v: k for k, v in self.punct_label_ids.items()}
capit_ids_to_labels = {v: k for k, v in self.capit_label_ids.items()}
result = ''
for capit_label, punct_label in zip(capit_preds, punct_preds):
punct_label = punct_ids_to_labels[punct_label]
capit_label = capit_ids_to_labels[capit_label]
result += punct_label + capit_label + ' '
return result[:-1]
def add_punctuation_capitalization(
self,
queries: List[str],
batch_size: int = None,
max_seq_length: int = 64,
step: int = 8,
margin: int = 16,
return_labels: bool = False,
dataloader_kwargs: Dict[str, Any] = None,
) -> List[str]:
"""
Adds punctuation and capitalization to the queries. Use this method for inference.
        Parameters ``max_seq_length``, ``step``, and ``margin`` control the way queries are split into segments
        which are processed by the model. Parameter ``max_seq_length`` is the length of a segment after tokenization
        including the special tokens [CLS] at the beginning and [SEP] at the end of a segment. Parameter ``step`` is
        the shift between consecutive segments. Parameter ``margin`` is used to exclude the negative effect of
        subtokens near segment borders which have context only on one side.
        If segments overlap, probabilities of overlapping predictions are multiplied and then the label corresponding
        to the maximum probability is selected.
Args:
            queries (:obj:`List[str]`): lower-cased text without punctuation.
            batch_size (:obj:`int`, `optional`): batch size to use during inference. If the ``batch_size`` parameter
                is not provided, then it will be equal to the length of the ``queries`` list.
max_seq_length (:obj:`int`, `optional`, defaults to :obj:`64`): maximum sequence length of a segment after
tokenization including :code:`[CLS]` and :code:`[SEP]` tokens.
            step (:obj:`int`, `optional`, defaults to :obj:`8`): relative shift of consecutive segments into which long
queries are split. Long queries are split into segments which can overlap. Parameter ``step`` controls
such overlapping. Imagine that queries are tokenized into characters, ``max_seq_length=5``, and
``step=2``. In such case, query ``"hello"`` is tokenized into segments
``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]``.
margin (:obj:`int`, `optional`, defaults to :obj:`16`): number of subtokens in the beginning and the end of
segments which are not used for prediction computation. The first segment does not have left margin and
the last segment does not have right margin. For example, if an input sequence is tokenized into
characters, ``max_seq_length=5``, ``step=1``, and ``margin=1``, then query ``"hello"`` will be
tokenized into segments ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'e', 'l', 'l', '[SEP]'],
['[CLS]', 'l', 'l', 'o', '[SEP]']]``. These segments are passed to the model. Before final predictions
                computation, margins are removed. In the next list, subtokens whose logits are not used for final
                predictions computation are marked with an asterisk: ``[['[CLS]'*, 'h', 'e', 'l'*, '[SEP]'*],
['[CLS]'*, 'e'*, 'l', 'l'*, '[SEP]'*], ['[CLS]'*, 'l'*, 'l', 'o', '[SEP]'*]]``.
return_labels (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to return labels in NeMo format
(see :ref:`nemo-data-format-label`) instead of queries with restored
punctuation and capitalization.
dataloader_kwargs (:obj:`Dict[str, Any]`, `optional`): an optional dictionary with parameters of PyTorch
data loader. May include keys: ``'num_workers'``, ``'pin_memory'``, ``'worker_init_fn'``,
``'prefetch_factor'``, ``'persistent_workers'``.
Returns:
:obj:`List[str]`: a list of queries with restored capitalization and punctuation if
``return_labels=False``, else a list of punctuation and capitalization labels strings for all queries
"""
if len(queries) == 0:
return []
if batch_size is None:
batch_size = len(queries)
logging.info(f'Using batch size {batch_size} for inference')
result: List[str] = []
mode = self.training
try:
self.eval()
infer_datalayer = self._setup_infer_dataloader(
queries, batch_size, max_seq_length, step, margin, dataloader_kwargs
)
# Predicted labels for queries. List of labels for every query
all_punct_preds: List[List[int]] = [[] for _ in queries]
all_capit_preds: List[List[int]] = [[] for _ in queries]
# Accumulated probabilities (or product of probabilities acquired from different segments) of punctuation
# and capitalization. Probabilities for words in a query are extracted using `subtokens_mask`. Probabilities
# for newly processed words are appended to the accumulated probabilities. If probabilities for a word are
# already present in `acc_probs`, old probabilities are replaced with a product of old probabilities
# and probabilities acquired from new segment. Segments are processed in an order they appear in an
# input query. When all segments with a word are processed, a label with the highest probability
# (or product of probabilities) is chosen and appended to an appropriate list in `all_preds`. After adding
# prediction to `all_preds`, probabilities for a word are removed from `acc_probs`.
acc_punct_probs: List[Optional[np.ndarray]] = [None for _ in queries]
acc_capit_probs: List[Optional[np.ndarray]] = [None for _ in queries]
d = self.device
for batch_i, batch in tqdm(
enumerate(infer_datalayer), total=ceil(len(infer_datalayer.dataset) / batch_size), unit="batch"
):
inp_ids, inp_type_ids, inp_mask, subtokens_mask, start_word_ids, query_ids, is_first, is_last = batch
punct_logits, capit_logits = self.forward(
input_ids=inp_ids.to(d), token_type_ids=inp_type_ids.to(d), attention_mask=inp_mask.to(d),
)
_res = self._transform_logit_to_prob_and_remove_margins_and_extract_word_probs(
punct_logits, capit_logits, subtokens_mask, start_word_ids, margin, is_first, is_last
)
punct_probs, capit_probs, start_word_ids = _res
for i, (q_i, start_word_id, bpp_i, bcp_i) in enumerate(
zip(query_ids, start_word_ids, punct_probs, capit_probs)
):
for all_preds, acc_probs, b_probs_i in [
(all_punct_preds, acc_punct_probs, bpp_i),
(all_capit_preds, acc_capit_probs, bcp_i),
]:
if acc_probs[q_i] is None:
acc_probs[q_i] = b_probs_i
else:
all_preds[q_i], acc_probs[q_i] = self._move_acc_probs_to_token_preds(
all_preds[q_i], acc_probs[q_i], start_word_id - len(all_preds[q_i]),
)
acc_probs[q_i] = self._update_accumulated_probabilities(acc_probs[q_i], b_probs_i)
for all_preds, acc_probs in [(all_punct_preds, acc_punct_probs), (all_capit_preds, acc_capit_probs)]:
for q_i, (pred, prob) in enumerate(zip(all_preds, acc_probs)):
if prob is not None:
all_preds[q_i], acc_probs[q_i] = self._move_acc_probs_to_token_preds(pred, prob, len(prob))
for i, query in enumerate(queries):
result.append(
self._get_labels(all_punct_preds[i], all_capit_preds[i])
if return_labels
else self._apply_punct_capit_predictions(query, all_punct_preds[i], all_capit_preds[i])
)
finally:
# set mode back to its original value
self.train(mode=mode)
return result
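    # Illustrative inference sketch (not part of the original source). The checkpoint name
    # comes from `list_available_models` below; queries and parameter values are hypothetical:
    #
    #   model = PunctuationCapitalizationModel.from_pretrained("punctuation_en_bert")
    #   model.add_punctuation_capitalization(
    #       ["how are you", "great how about you"],
    #       max_seq_length=64, step=8, margin=16,
    #   )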
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
:obj:`List[PretrainedModelInfo]`: a list of available pre-trained models.
"""
result = [
PretrainedModelInfo(
pretrained_model_name="punctuation_en_bert",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/punctuation_en_bert/versions/1.0.0rc1/"
"files/punctuation_en_bert.nemo",
description="The model was trained with NeMo BERT base uncased checkpoint on a subset of data from "
"the following sources: Tatoeba sentences, books from Project Gutenberg, Fisher transcripts.",
),
PretrainedModelInfo(
pretrained_model_name="punctuation_en_distilbert",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/punctuation_en_distilbert/versions/"
"1.0.0rc1/files/punctuation_en_distilbert.nemo",
description="The model was trained with DistilBERT base uncased checkpoint from HuggingFace on a "
"subset of data from the following sources: Tatoeba sentences, books from Project Gutenberg, "
"Fisher transcripts.",
),
]
return result
@property
def output_module(self):
return self
| NeMo-main | nemo/collections/nlp/models/token_classification/punctuation_capitalization_model.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, Optional
from omegaconf.omegaconf import MISSING, DictConfig, OmegaConf, open_dict
from nemo.collections.common.parts.adapter_modules import LinearAdapterConfig
from nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset import (
PunctuationCapitalizationEvalDataConfig,
PunctuationCapitalizationTrainDataConfig,
legacy_data_config_to_new_data_config,
)
from nemo.core.config import TrainerConfig
from nemo.core.config.modelPT import NemoConfig
from nemo.utils.exp_manager import ExpManagerConfig
@dataclass
class FreezeConfig:
is_enabled: bool = False
"""Freeze audio encoder weight and add Conformer Layers on top of it"""
d_model: Optional[int] = 256
"""`d_model` parameter of ``ConformerLayer``"""
d_ff: Optional[int] = 1024
"""``d_ff`` parameter of ``ConformerLayer``"""
num_layers: Optional[int] = 8
"""``num_layers`` number of ``ConformerLayer`` modules to add on top of audio encoder"""
@dataclass
class AdapterConfig:
config: Optional[LinearAdapterConfig] = None
"""Linear adapter config see ``collections.common.parts.LinearAdapterConfig``"""
enable: bool = False
"""Use adapters for audio encoder"""
@dataclass
class FusionConfig:
num_layers: Optional[int] = 4
""""Number of layers to use in fusion"""
num_attention_heads: Optional[int] = 4
"""Number of attention heads to use in fusion"""
inner_size: Optional[int] = 2048
"""Fusion inner size"""
@dataclass
class AudioEncoderConfig:
pretrained_model: str = MISSING
"""A configuration for restoring pretrained audio encoder"""
freeze: Optional[FreezeConfig] = None
adapter: Optional[AdapterConfig] = None
fusion: Optional[FusionConfig] = None
@dataclass
class TokenizerConfig:
"""A structure and default values of source text tokenizer."""
vocab_file: Optional[str] = None
"""A path to vocabulary file which is used in ``'word'``, ``'char'``, and HuggingFace tokenizers"""
tokenizer_name: str = MISSING
"""A name of the tokenizer used for tokenization of source sequences. Possible options are ``'sentencepiece'``,
``'word'``, ``'char'``, HuggingFace tokenizers (e.g. ``'bert-base-uncased'``). For more options see function
``nemo.collections.nlp.modules.common.get_tokenizer``. The tokenizer must have properties ``cls_id``, ``pad_id``,
``sep_id``, ``unk_id``."""
special_tokens: Optional[Dict[str, str]] = None
"""A dictionary with special tokens passed to constructors of ``'char'``, ``'word'``, ``'sentencepiece'``, and
various HuggingFace tokenizers."""
tokenizer_model: Optional[str] = None
"""A path to a tokenizer model required for ``'sentencepiece'`` tokenizer."""
@dataclass
class LanguageModelConfig:
"""
    A structure and default values of the language model configuration of the punctuation and capitalization model.
    BERT-like HuggingFace models are supported. Provide a valid ``pretrained_model_name`` and, optionally, you may
    reinitialize the model via ``config_file`` or ``config``.
Alternatively you can initialize the language model using ``lm_checkpoint``.
This config is a part of :class:`PunctuationCapitalizationModelConfig` config.
"""
pretrained_model_name: str = MISSING
"""A mandatory parameter containing name of HuggingFace pretrained model. For example, ``'bert-base-uncased'``."""
config_file: Optional[str] = None
"""A path to a file with HuggingFace model config which is used to reinitialize language model."""
config: Optional[Dict] = None
"""A HuggingFace config which is used to reinitialize language model."""
lm_checkpoint: Optional[str] = None
"""A path to a ``torch`` checkpoint of a language model."""
@dataclass
class HeadConfig:
"""
A structure and default values of configuration of capitalization or punctuation model head. This config defines a
multilayer perceptron which is applied to output of a language model. Number of units in the hidden layer is equal
to the dimension of the language model.
This config is a part of :class:`PunctuationCapitalizationModelConfig` config.
"""
num_fc_layers: int = 1
"""A number of hidden layers in a multilayer perceptron."""
fc_dropout: float = 0.1
"""A dropout used in an MLP."""
activation: str = 'relu'
"""An activation used in hidden layers."""
use_transformer_init: bool = True
"""Whether to initialize the weights of the classifier head with the approach that was used for language model
initialization."""
@dataclass
class ClassLabelsConfig:
"""
A structure and default values of a mandatory part of config which contains names of files which are saved in .nemo
    checkpoint. These files can also be used for passing label vocabularies to the model. To use them as label
    vocabularies, provide the path to the directory containing these files in the parameter
    ``model.common_dataset_parameters.label_vocab_dir``. Each line in a labels file contains one label. The labels are
    sorted so that ``<line number>==<label id>``, starting from ``0``. The label with id ``0`` must be the neutral
    label, which must be equal to ``model.common_dataset_parameters.pad_label``.
This config is a part of :class:`~CommonDatasetParametersConfig`.
"""
punct_labels_file: str = MISSING
"""A name of punctuation labels file."""
capit_labels_file: str = MISSING
"""A name of capitalization labels file."""
@dataclass
class CommonDatasetParametersConfig:
"""
A structure and default values of common dataset parameters config which includes label and loss mask information.
If you omit parameters ``punct_label_ids``, ``capit_label_ids``, ``label_vocab_dir``, then labels will be inferred
from a training dataset or loaded from a checkpoint.
Parameters ``ignore_extra_tokens`` and ``ignore_start_end`` are responsible for forming loss mask. A loss mask
defines on which tokens loss is computed.
This parameter is a part of config :class:`~PunctuationCapitalizationModelConfig`.
"""
pad_label: str = MISSING
"""A mandatory parameter which should contain label used for punctuation and capitalization label padding. It
also serves as a neutral label for both punctuation and capitalization. If any of ``punct_label_ids``,
``capit_label_ids`` parameters is provided, then ``pad_label`` must have ``0`` id in them. In addition, if ``label_vocab_dir``
is provided, then ``pad_label`` must be on the first lines in files ``class_labels.punct_labels_file`` and
``class_labels.capit_labels_file``."""
ignore_extra_tokens: bool = False
"""Whether to compute loss on not first tokens in words. If this parameter is ``True``, then loss mask is ``False``
for all tokens in a word except the first."""
ignore_start_end: bool = True
"""If ``False``, then loss is computed on [CLS] and [SEP] tokens."""
punct_label_ids: Optional[Dict[str, int]] = None
"""A dictionary with punctuation label ids. ``pad_label`` must have ``0`` id in this dictionary. You can omit this
    parameter and pass label ids through ``class_labels.punct_labels_file``, or let the model infer label ids from the
    dataset or load them from a checkpoint."""
capit_label_ids: Optional[Dict[str, int]] = None
"""A dictionary with capitalization label ids. ``pad_label`` must have ``0`` id in this dictionary. You can omit
    this parameter and pass label ids through ``class_labels.capit_labels_file``, or let the model infer label ids
    from the dataset or load them from a checkpoint."""
label_vocab_dir: Optional[str] = None
"""A path to directory which contains class labels files. See :class:`ClassLabelsConfig`. If this parameter is
provided, then labels will be loaded from files which are located in ``label_vocab_dir`` and have names specified
in ``model.class_labels`` configuration section. A label specified in ``pad_label`` has to be on the first lines
of ``model.class_labels`` files."""
@dataclass
class PunctuationCapitalizationModelConfig:
"""
A configuration of
:class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel`
model.
See an example of model config in
`nemo/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml
<https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml>`_
This config is a part of :class:`~PunctuationCapitalizationConfig`.
"""
class_labels: ClassLabelsConfig = ClassLabelsConfig()
"""A mandatory parameter containing a dictionary with names of label id files used in .nemo checkpoints.
These file names can also be used for passing label vocabularies to the model. If you wish to use ``class_labels``
    for passing vocabularies, please provide the path to the vocabulary files in the
``model.common_dataset_parameters.label_vocab_dir`` parameter."""
common_dataset_parameters: Optional[CommonDatasetParametersConfig] = CommonDatasetParametersConfig()
"""Label ids and loss mask information information."""
train_ds: Optional[PunctuationCapitalizationTrainDataConfig] = None
"""A configuration for creating training dataset and data loader."""
validation_ds: Optional[PunctuationCapitalizationEvalDataConfig] = None
"""A configuration for creating validation datasets and data loaders."""
test_ds: Optional[PunctuationCapitalizationEvalDataConfig] = None
"""A configuration for creating test datasets and data loaders."""
punct_head: HeadConfig = HeadConfig()
"""A configuration for creating punctuation MLP head that is applied to a language model outputs."""
capit_head: HeadConfig = HeadConfig()
"""A configuration for creating capitalization MLP head that is applied to a language model outputs."""
tokenizer: Any = TokenizerConfig()
"""A configuration for source text tokenizer."""
language_model: LanguageModelConfig = LanguageModelConfig()
"""A configuration of a BERT-like language model which serves as a model body."""
optim: Optional[Any] = None
"""A configuration of optimizer and learning rate scheduler. There is much variability in such config. For
description see `Optimizers
<https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/core/core.html#optimizers>`_ section in
    documentation and `primer <https://github.com/NVIDIA/NeMo/blob/main/tutorials/00_NeMo_Primer.ipynb>`_ tutorial."""
@dataclass
class PunctuationCapitalizationLexicalAudioModelConfig(PunctuationCapitalizationModelConfig):
"""
A configuration of
:class:`~nemo.collections.nlp.models.token_classification.punctuation_lexical_audio_capitalization_model.PunctuationCapitalizationLexicalAudioModel`
model.
See an example of model config in
`nemo/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml
<https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/conf/punctuation_capitalization_lexical_audio_config.yaml>`_
Audio encoder can be frozen during training with ``freeze_audio_encoder`` parameter.
Adapter can be added to audio encoder with ``use_adapters`` and ``adapter_config`` parameters.
More conformer layers can be added on top of pretrained audio encoder with ``frozen_conf_d_model``, ``frozen_conf_d_ff`` and ``frozen_conf_num_layers`` parameters.
"""
train_ds: Optional[PunctuationCapitalizationTrainDataConfig] = None
"""A configuration for creating training dataset and data loader."""
validation_ds: Optional[PunctuationCapitalizationEvalDataConfig] = None
"""A configuration for creating validation datasets and data loaders."""
test_ds: Optional[PunctuationCapitalizationEvalDataConfig] = None
"""A configuration for creating test datasets and data loaders."""
audio_encoder: Optional[AudioEncoderConfig] = None
restore_lexical_encoder_from: Optional[str] = None
""""Path to .nemo checkpoint to load weights from""" # add more comments
use_weighted_loss: Optional[bool] = False
"""If set to ``True`` CrossEntropyLoss will be weighted"""
@dataclass
class PunctuationCapitalizationConfig(NemoConfig):
"""
A config for punctuation model training and testing.
See an example of full config in
`nemo/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml
<https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml>`_
"""
pretrained_model: Optional[str] = None
"""Can be an NVIDIA's NGC cloud model or a path to a .nemo checkpoint. You can get list of possible cloud options
by calling method
:func:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel.list_available_models`.
"""
name: Optional[str] = 'Punctuation_and_Capitalization'
"""A name of the model. Used for naming output directories and ``.nemo`` checkpoints."""
do_training: bool = True
"""Whether to perform training of the model."""
do_testing: bool = False
"""Whether ot perform testing of the model."""
model: PunctuationCapitalizationModelConfig = PunctuationCapitalizationModelConfig()
"""A configuration for the
:class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel`
model."""
trainer: Optional[TrainerConfig] = TrainerConfig()
"""Contains ``Trainer`` Lightning class constructor parameters."""
exp_manager: Optional[ExpManagerConfig] = ExpManagerConfig(name=name, files_to_copy=[])
"""A configuration with various NeMo training options such as output directories, resuming from checkpoint,
tensorboard and W&B logging, and so on. For possible options see :ref:`exp-manager-label`."""
@dataclass
class PunctuationCapitalizationLexicalAudioConfig(PunctuationCapitalizationConfig):
model: PunctuationCapitalizationLexicalAudioModelConfig = PunctuationCapitalizationLexicalAudioModelConfig()
def is_legacy_model_config(model_cfg: DictConfig) -> bool:
"""
Test if model config is old style config. Old style configs are configs which were used before
``common_dataset_parameters`` item was added. Old style datasets use ``dataset`` instead of
``common_dataset_parameters``, ``batch_size`` instead of ``tokens_in_batch``. Old style configs do not support
tarred datasets.
Args:
model_cfg: model configuration
Returns:
whether ``model_config`` is legacy
"""
return 'common_dataset_parameters' not in model_cfg
def legacy_model_config_to_new_model_config(model_cfg: DictConfig) -> DictConfig:
"""
Transform old style config into
:class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_config.PunctuationCapitalizationModelConfig`.
Old style configs are configs which were used before ``common_dataset_parameters`` item was added. Old style
datasets use ``dataset`` instead of ``common_dataset_parameters``, ``batch_size`` instead of ``tokens_in_batch``.
Old style configs do not support tarred datasets.
Args:
model_cfg: old style config
Returns:
model config which follows dataclass
:class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_config.PunctuationCapitalizationModelConfig`
"""
train_ds = model_cfg.get('train_ds')
validation_ds = model_cfg.get('validation_ds')
test_ds = model_cfg.get('test_ds')
dataset = model_cfg.dataset
punct_head_config = model_cfg.get('punct_head', {})
capit_head_config = model_cfg.get('capit_head', {})
omega_conf = OmegaConf.structured(
PunctuationCapitalizationModelConfig(
class_labels=model_cfg.class_labels,
common_dataset_parameters=CommonDatasetParametersConfig(
pad_label=dataset.pad_label,
ignore_extra_tokens=dataset.get(
'ignore_extra_tokens', CommonDatasetParametersConfig.ignore_extra_tokens
),
ignore_start_end=dataset.get('ignore_start_end', CommonDatasetParametersConfig.ignore_start_end),
punct_label_ids=model_cfg.punct_label_ids,
capit_label_ids=model_cfg.capit_label_ids,
),
train_ds=None
if train_ds is None
else legacy_data_config_to_new_data_config(train_ds, dataset, train=True),
validation_ds=None
if validation_ds is None
else legacy_data_config_to_new_data_config(validation_ds, dataset, train=False),
test_ds=None if test_ds is None else legacy_data_config_to_new_data_config(test_ds, dataset, train=False),
punct_head=HeadConfig(
num_fc_layers=punct_head_config.get('punct_num_fc_layers', HeadConfig.num_fc_layers),
fc_dropout=punct_head_config.get('fc_dropout', HeadConfig.fc_dropout),
activation=punct_head_config.get('activation', HeadConfig.activation),
use_transformer_init=punct_head_config.get('use_transformer_init', HeadConfig.use_transformer_init),
),
capit_head=HeadConfig(
num_fc_layers=capit_head_config.get('capit_num_fc_layers', HeadConfig.num_fc_layers),
fc_dropout=capit_head_config.get('fc_dropout', HeadConfig.fc_dropout),
activation=capit_head_config.get('activation', HeadConfig.activation),
use_transformer_init=capit_head_config.get('use_transformer_init', HeadConfig.use_transformer_init),
),
tokenizer=model_cfg.tokenizer,
language_model=model_cfg.language_model,
optim=model_cfg.optim,
)
)
with open_dict(omega_conf):
retain_during_legacy_conversion = model_cfg.get('retain_during_legacy_conversion', {})
for key in retain_during_legacy_conversion.keys():
omega_conf[key] = retain_during_legacy_conversion[key]
return omega_conf
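if __name__ == "__main__":
    # Illustrative sketch (not part of the original NeMo source): build the default structured
    # config and check the legacy-config detector on a minimal dict. All values below are
    # hypothetical placeholders.
    default_cfg = OmegaConf.structured(PunctuationCapitalizationConfig())
    default_cfg.model.common_dataset_parameters.pad_label = 'O'
    default_cfg.model.language_model.pretrained_model_name = 'bert-base-uncased'
    print(OmegaConf.to_yaml(default_cfg.model.punct_head))
    legacy_like = OmegaConf.create({'dataset': {'pad_label': 'O'}})
    print(is_legacy_model_config(legacy_like))  # True: no 'common_dataset_parameters' key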
| NeMo-main | nemo/collections/nlp/models/token_classification/punctuation_capitalization_config.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Optional, Union
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from nemo.collections.common.losses import CrossEntropyLoss
from nemo.collections.nlp.data.data_utils.data_preprocessing import get_labels_to_labels_id_mapping
from nemo.collections.nlp.data.token_classification.token_classification_dataset import (
BertTokenClassificationDataset,
BertTokenClassificationInferDataset,
)
from nemo.collections.nlp.data.token_classification.token_classification_utils import get_label_ids
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common import TokenClassifier
from nemo.collections.nlp.parts.utils_funcs import get_classification_report, plot_confusion_matrix, tensor2list
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.utils import logging
__all__ = ['TokenClassificationModel']
class TokenClassificationModel(NLPModel):
"""Token Classification Model with BERT, applicable for tasks such as Named Entity Recognition"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
"""Initializes Token Classification Model."""
# extract str to int labels mapping if a mapping file provided
if isinstance(cfg.label_ids, str):
if os.path.exists(cfg.label_ids):
logging.info(f'Reusing label_ids file found at {cfg.label_ids}.')
label_ids = get_labels_to_labels_id_mapping(cfg.label_ids)
# update the config to store name to id mapping
cfg.label_ids = OmegaConf.create(label_ids)
else:
raise ValueError(f'{cfg.label_ids} not found.')
self.class_weights = None
super().__init__(cfg=cfg, trainer=trainer)
self.classifier = TokenClassifier(
hidden_size=self.hidden_size,
num_classes=len(self._cfg.label_ids),
num_layers=self._cfg.head.num_fc_layers,
activation=self._cfg.head.activation,
log_softmax=False,
dropout=self._cfg.head.fc_dropout,
use_transformer_init=self._cfg.head.use_transformer_init,
)
self.loss = self.setup_loss(class_balancing=self._cfg.dataset.class_balancing)
# setup to track metrics
self.classification_report = ClassificationReport(
len(self._cfg.label_ids), label_ids=self._cfg.label_ids, dist_sync_on_step=True
)
def update_data_dir(self, data_dir: str) -> None:
"""
        Update the data directory and get data stats with the Data Descriptor.
        Weights are later used to set up the loss.
Args:
data_dir: path to data directory
"""
self._cfg.dataset.data_dir = data_dir
logging.info(f'Setting model.dataset.data_dir to {data_dir}.')
def setup_loss(self, class_balancing: str = None):
"""Setup loss
Setup or update loss.
Args:
class_balancing: whether to use class weights during training
"""
if class_balancing not in ['weighted_loss', None]:
raise ValueError(f'Class balancing {class_balancing} is not supported. Choose from: [null, weighted_loss]')
if class_balancing == 'weighted_loss' and self.class_weights:
# you may need to increase the number of epochs for convergence when using weighted_loss
loss = CrossEntropyLoss(logits_ndim=3, weight=self.class_weights)
logging.debug(f'Using {class_balancing} class balancing.')
else:
loss = CrossEntropyLoss(logits_ndim=3)
            logging.debug('Using CrossEntropyLoss without class balancing.')
return loss
@typecheck()
def forward(self, input_ids, attention_mask, token_type_ids):
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)
if isinstance(hidden_states, tuple):
hidden_states = hidden_states[0]
logits = self.classifier(hidden_states=hidden_states)
return logits
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
input_ids, input_type_ids, input_mask, subtokens_mask, loss_mask, labels = batch
logits = self(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
loss = self.loss(logits=logits, labels=labels, loss_mask=loss_mask)
lr = self._optimizer.param_groups[0]['lr']
self.log('train_loss', loss)
self.log('lr', lr, prog_bar=True)
return {
'loss': loss,
'lr': lr,
}
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
input_ids, input_type_ids, input_mask, subtokens_mask, loss_mask, labels = batch
logits = self(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
val_loss = self.loss(logits=logits, labels=labels, loss_mask=loss_mask)
subtokens_mask = subtokens_mask > 0.5
preds = torch.argmax(logits, axis=-1)[subtokens_mask]
labels = labels[subtokens_mask]
tp, fn, fp, _ = self.classification_report(preds, labels)
loss = {'val_loss': val_loss, 'tp': tp, 'fn': fn, 'fp': fp}
self.validation_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self):
"""
        Called at the end of validation to aggregate the outputs collected in ``self.validation_step_outputs``.
"""
avg_loss = torch.stack([x['val_loss'] for x in self.validation_step_outputs]).mean()
# calculate metrics and classification report
precision, recall, f1, report = self.classification_report.compute()
logging.info(report)
self.log('val_loss', avg_loss, prog_bar=True)
self.log('precision', precision)
self.log('f1', f1)
self.log('recall', recall)
self.classification_report.reset()
self.validation_step_outputs.clear() # free memory
def test_step(self, batch, batch_idx):
input_ids, input_type_ids, input_mask, subtokens_mask, loss_mask, labels = batch
logits = self(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
val_loss = self.loss(logits=logits, labels=labels, loss_mask=loss_mask)
subtokens_mask = subtokens_mask > 0.5
preds = torch.argmax(logits, axis=-1)[subtokens_mask]
labels = labels[subtokens_mask]
tp, fn, fp, _ = self.classification_report(preds, labels)
loss = {'test_loss': val_loss, 'tp': tp, 'fn': fn, 'fp': fp}
self.test_step_outputs.append(loss)
return loss
def on_test_epoch_end(self):
avg_loss = torch.stack([x['test_loss'] for x in self.test_step_outputs]).mean()
# calculate metrics and classification report
precision, recall, f1, report = self.classification_report.compute()
logging.info(report)
self.log('test_loss', avg_loss, prog_bar=True)
self.log('precision', precision)
self.log('f1', f1)
self.log('recall', recall)
self.test_step_outputs.clear() # free memory
def setup_training_data(self, train_data_config: Optional[DictConfig] = None):
if train_data_config is None:
train_data_config = self._cfg.train_ds
labels_file = os.path.join(self._cfg.dataset.data_dir, train_data_config.labels_file)
        # for compatibility with older (pre-1.0.0b3) configs
if not hasattr(self._cfg, "class_labels") or self._cfg.class_labels is None:
OmegaConf.set_struct(self._cfg, False)
self._cfg.class_labels = {}
self._cfg.class_labels = OmegaConf.create({'class_labels_file': 'label_ids.csv'})
OmegaConf.set_struct(self._cfg, True)
label_ids, label_ids_filename, self.class_weights = get_label_ids(
label_file=labels_file,
is_training=True,
pad_label=self._cfg.dataset.pad_label,
label_ids_dict=self._cfg.label_ids,
get_weights=True,
class_labels_file_artifact=self._cfg.class_labels.class_labels_file,
)
# save label maps to the config
self._cfg.label_ids = OmegaConf.create(label_ids)
self.register_artifact('class_labels.class_labels_file', label_ids_filename)
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config)
def setup_validation_data(self, val_data_config: Optional[DictConfig] = None):
if val_data_config is None:
val_data_config = self._cfg.validation_ds
labels_file = os.path.join(self._cfg.dataset.data_dir, val_data_config.labels_file)
get_label_ids(
label_file=labels_file,
is_training=False,
pad_label=self._cfg.dataset.pad_label,
label_ids_dict=self._cfg.label_ids,
get_weights=False,
)
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config)
def setup_test_data(self, test_data_config: Optional[DictConfig] = None):
if test_data_config is None:
test_data_config = self._cfg.test_ds
labels_file = os.path.join(self._cfg.dataset.data_dir, test_data_config.labels_file)
get_label_ids(
label_file=labels_file,
is_training=False,
pad_label=self._cfg.dataset.pad_label,
label_ids_dict=self._cfg.label_ids,
get_weights=False,
)
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config)
def _setup_dataloader_from_config(self, cfg: DictConfig) -> DataLoader:
"""
Setup dataloader from config
Args:
cfg: config for the dataloader
Return:
Pytorch Dataloader
"""
dataset_cfg = self._cfg.dataset
data_dir = dataset_cfg.data_dir
if not os.path.exists(data_dir):
raise FileNotFoundError(f"Data directory is not found at: {data_dir}.")
text_file = os.path.join(data_dir, cfg.text_file)
labels_file = os.path.join(data_dir, cfg.labels_file)
if not (os.path.exists(text_file) and os.path.exists(labels_file)):
raise FileNotFoundError(
f'{text_file} or {labels_file} not found. The data should be split into 2 files: text.txt and \
labels.txt. Each line of the text.txt file contains text sequences, where words are separated with \
spaces. The labels.txt file contains corresponding labels for each word in text.txt, the labels are \
separated with spaces. Each line of the files should follow the format: \
[WORD] [SPACE] [WORD] [SPACE] [WORD] (for text.txt) and \
[LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).'
)
dataset = BertTokenClassificationDataset(
text_file=text_file,
label_file=labels_file,
max_seq_length=dataset_cfg.max_seq_length,
tokenizer=self.tokenizer,
num_samples=cfg.num_samples,
pad_label=dataset_cfg.pad_label,
label_ids=self._cfg.label_ids,
ignore_extra_tokens=dataset_cfg.ignore_extra_tokens,
ignore_start_end=dataset_cfg.ignore_start_end,
use_cache=dataset_cfg.use_cache,
)
return DataLoader(
dataset=dataset,
collate_fn=dataset.collate_fn,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=dataset_cfg.num_workers,
pin_memory=dataset_cfg.pin_memory,
drop_last=dataset_cfg.drop_last,
)
def _setup_infer_dataloader(self, queries: List[str], batch_size: int) -> 'torch.utils.data.DataLoader':
"""
Setup function for an infer data loader.
Args:
queries: text
batch_size: batch size to use during inference
Returns:
A pytorch DataLoader.
"""
dataset = BertTokenClassificationInferDataset(tokenizer=self.tokenizer, queries=queries, max_seq_length=-1)
return torch.utils.data.DataLoader(
dataset=dataset,
collate_fn=dataset.collate_fn,
batch_size=batch_size,
shuffle=False,
num_workers=self._cfg.dataset.num_workers,
pin_memory=self._cfg.dataset.pin_memory,
drop_last=False,
)
@torch.no_grad()
def _infer(self, queries: List[str], batch_size: int = None) -> List[int]:
"""
Get prediction for the queries
Args:
queries: text sequences
batch_size: batch size to use during inference.
Returns:
all_preds: model predictions
"""
# store predictions for all queries in a single list
all_preds = []
mode = self.training
try:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Switch model to evaluation mode
self.eval()
self.to(device)
infer_datalayer = self._setup_infer_dataloader(queries, batch_size)
for batch in infer_datalayer:
input_ids, input_type_ids, input_mask, subtokens_mask = batch
logits = self.forward(
input_ids=input_ids.to(device),
token_type_ids=input_type_ids.to(device),
attention_mask=input_mask.to(device),
)
subtokens_mask = subtokens_mask > 0.5
preds = tensor2list(torch.argmax(logits, axis=-1)[subtokens_mask])
all_preds.extend(preds)
finally:
# set mode back to its original value
self.train(mode=mode)
return all_preds
def add_predictions(
self, queries: Union[List[str], str], batch_size: int = 32, output_file: Optional[str] = None
) -> List[str]:
"""
Add predicted token labels to the queries. Use this method for debugging and prototyping.
Args:
queries: text
batch_size: batch size to use during inference.
output_file: file to save models predictions
Returns:
result: text with added entities
"""
if queries is None or len(queries) == 0:
return []
if isinstance(queries, str):
logging.info(f'Reading from {queries} file')
with open(queries, 'r') as f:
queries = f.readlines()
result = []
all_preds = self._infer(queries, batch_size)
queries = [q.strip().split() for q in queries]
num_words = [len(q) for q in queries]
if sum(num_words) != len(all_preds):
raise ValueError('Pred and words must have the same length')
ids_to_labels = {v: k for k, v in self._cfg.label_ids.items()}
start_idx = 0
end_idx = 0
for query in queries:
end_idx += len(query)
# extract predictions for the current query from the list of all predictions
preds = all_preds[start_idx:end_idx]
start_idx = end_idx
query_with_entities = ''
for j, word in enumerate(query):
# strip out the punctuation to attach the entity tag to the word not to a punctuation mark
# that follows the word
if word[-1].isalpha():
punct = ''
else:
punct = word[-1]
word = word[:-1]
query_with_entities += word
label = ids_to_labels[preds[j]]
if label != self._cfg.dataset.pad_label:
query_with_entities += '[' + label + ']'
query_with_entities += punct + ' '
result.append(query_with_entities.strip())
if output_file is not None:
with open(output_file, 'w') as f:
for r in result:
f.write(r + '\n')
logging.info(f'Predictions saved to {output_file}')
return result
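    # Illustrative example (not part of the original source), assuming a hypothetical NER
    # label set {'O': 0, 'B-PER': 1, 'B-LOC': 2, 'I-LOC': 3} with pad label 'O':
    #
    #   model.add_predictions(["jennifer lives in new york"])
    #   -> ["jennifer[B-PER] lives in new[B-LOC] york[I-LOC]"]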
def evaluate_from_file(
self,
output_dir: str,
text_file: str,
labels_file: Optional[str] = None,
add_confusion_matrix: Optional[bool] = False,
normalize_confusion_matrix: Optional[bool] = True,
batch_size: int = 1,
) -> None:
"""
Run inference on data from a file, plot confusion matrix and calculate classification report.
Use this method for final evaluation.
Args:
output_dir: path to output directory to store model predictions, confusion matrix plot (if set to True)
text_file: path to file with text. Each line of the text.txt file contains text sequences, where words
are separated with spaces: [WORD] [SPACE] [WORD] [SPACE] [WORD]
labels_file (Optional): path to file with labels. Each line of the labels_file should contain
labels corresponding to each word in the text_file, the labels are separated with spaces:
                [LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).
add_confusion_matrix: whether to generate confusion matrix
normalize_confusion_matrix: whether to normalize confusion matrix
batch_size: batch size to use during inference.
"""
output_dir = os.path.abspath(output_dir)
with open(text_file, 'r') as f:
queries = f.readlines()
all_preds = self._infer(queries, batch_size)
with_labels = labels_file is not None
if with_labels:
with open(labels_file, 'r') as f:
all_labels_str = f.readlines()
all_labels_str = ' '.join([labels.strip() for labels in all_labels_str])
        # write labels and predictions to a file in output_dir
os.makedirs(output_dir, exist_ok=True)
filename = os.path.join(output_dir, 'infer_' + os.path.basename(text_file))
try:
with open(filename, 'w') as f:
if with_labels:
f.write('labels\t' + all_labels_str + '\n')
                    logging.info(f'Labels saved to {filename}')
# convert labels from string label to ids
ids_to_labels = {v: k for k, v in self._cfg.label_ids.items()}
all_preds_str = [ids_to_labels[pred] for pred in all_preds]
f.write('preds\t' + ' '.join(all_preds_str) + '\n')
logging.info(f'Predictions saved to {filename}')
if with_labels and add_confusion_matrix:
all_labels = all_labels_str.split()
# convert labels from string label to ids
label_ids = self._cfg.label_ids
all_labels = [label_ids[label] for label in all_labels]
plot_confusion_matrix(
all_labels, all_preds, output_dir, label_ids=label_ids, normalize=normalize_confusion_matrix
)
logging.info(get_classification_report(all_labels, all_preds, label_ids))
except Exception:
logging.error(
                f'When providing a file with labels, check that all labels in {labels_file} were '
                f'seen during training.'
)
raise
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
model = PretrainedModelInfo(
pretrained_model_name="ner_en_bert",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/ner_en_bert/versions/1.10/files/ner_en_bert.nemo",
description="The model was trained on GMB (Groningen Meaning Bank) corpus for entity recognition and achieves 74.61 F1 Macro score.",
)
result.append(model)
return result
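if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original NeMo source): restore the NGC
    # checkpoint listed above and tag a hypothetical query.
    ner_model = TokenClassificationModel.from_pretrained("ner_en_bert")
    print(ner_model.add_predictions(["we bought a laptop from the nvidia store in santa clara"]))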
| NeMo-main | nemo/collections/nlp/models/token_classification/token_classification_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, List, Optional
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from nemo.collections.common.losses import AggregatorLoss, CrossEntropyLoss, SmoothedCrossEntropyLoss
from nemo.collections.common.metrics import Perplexity
from nemo.collections.nlp.data.language_modeling.lm_bert_dataset import (
BertPretrainingDataset,
BertPretrainingPreprocessedDataloader,
)
from nemo.collections.nlp.modules.common import BertPretrainingTokenClassifier, SequenceClassifier
from nemo.collections.nlp.modules.common.lm_utils import get_lm_model
from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.classes.modelPT import ModelPT
from nemo.core.neural_types import NeuralType
from nemo.utils import logging
__all__ = ["BERTLMModel"]
class BERTLMModel(ModelPT):
"""
BERT language model pretraining.
"""
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return self.bert_model.input_types
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
output_types_dict = {"mlm_log_probs": self.mlm_classifier.output_types["log_probs"]}
if not self.only_mlm_loss:
output_types_dict["nsp_logits"] = self.nsp_classifier.output_types["logits"]
return output_types_dict
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
vocab_file = None
config_dict = None
config_file = None
if cfg.tokenizer is not None:
if cfg.tokenizer.get('tokenizer_name') and cfg.tokenizer.get('tokenizer_model'):
self._setup_tokenizer(cfg.tokenizer)
if cfg.get('tokenizer.vocab_file'):
vocab_file = self.register_artifact('tokenizer.vocab_file', cfg.tokenizer.vocab_file)
else:
self.tokenizer = None
super().__init__(cfg=cfg, trainer=trainer)
if cfg.get('language_model.config'):
config_dict = OmegaConf.to_container(cfg.language_model.config)
if cfg.get('language_model.config_file'):
config_file = self.register_artifact('language_model.config_file', cfg.language_model.config_file)
self.bert_model = get_lm_model(
config_file=config_file, config_dict=config_dict, vocab_file=vocab_file, trainer=trainer, cfg=cfg,
)
self.hidden_size = self.bert_model.config.hidden_size
self.vocab_size = self.bert_model.config.vocab_size
self.only_mlm_loss = cfg.only_mlm_loss
self.mlm_classifier = BertPretrainingTokenClassifier(
hidden_size=self.hidden_size,
num_classes=self.vocab_size,
num_layers=cfg.num_tok_classification_layers,
activation="gelu",
log_softmax=True,
use_transformer_init=True,
)
self.mlm_loss = SmoothedCrossEntropyLoss()
if not self.only_mlm_loss:
self.nsp_classifier = SequenceClassifier(
hidden_size=self.hidden_size,
num_classes=2,
num_layers=cfg.num_seq_classification_layers,
log_softmax=False,
activation="tanh",
use_transformer_init=True,
)
self.nsp_loss = CrossEntropyLoss()
self.agg_loss = AggregatorLoss(num_inputs=2)
        # tie weights of MLM softmax layer and embedding layer of the encoder
if (
self.mlm_classifier.mlp.last_linear_layer.weight.shape
!= self.bert_model.embeddings.word_embeddings.weight.shape
):
raise ValueError("Final classification layer does not match embedding layer.")
self.mlm_classifier.mlp.last_linear_layer.weight = self.bert_model.embeddings.word_embeddings.weight
# create extra bias
# setup to track metrics
self.validation_perplexity = Perplexity()
self.setup_optimization(cfg.optim)
@typecheck()
def forward(self, input_ids, attention_mask, token_type_ids):
"""
No special modification required for Lightning, define it as you normally would
in the `nn.Module` in vanilla PyTorch.
"""
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask,
)
if isinstance(hidden_states, tuple):
hidden_states = hidden_states[0]
mlm_log_probs = self.mlm_classifier(hidden_states=hidden_states)
if self.only_mlm_loss:
return (mlm_log_probs,)
nsp_logits = self.nsp_classifier(hidden_states=hidden_states)
return mlm_log_probs, nsp_logits
def _compute_losses(self, mlm_log_probs, nsp_logits, output_ids, output_mask, labels):
mlm_loss = self.mlm_loss(log_probs=mlm_log_probs, labels=output_ids, output_mask=output_mask)
if self.only_mlm_loss:
loss, nsp_loss = mlm_loss, None
else:
nsp_loss = self.nsp_loss(logits=nsp_logits, labels=labels)
loss = self.agg_loss(loss_1=mlm_loss, loss_2=nsp_loss)
return mlm_loss, nsp_loss, loss
def _parse_forward_outputs(self, forward_outputs):
if self.only_mlm_loss:
return forward_outputs[0], None
else:
return forward_outputs
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
input_ids, input_type_ids, input_mask, output_ids, output_mask, labels = batch
forward_outputs = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
mlm_log_probs, nsp_logits = self._parse_forward_outputs(forward_outputs)
_, _, loss = self._compute_losses(mlm_log_probs, nsp_logits, output_ids, output_mask, labels)
lr = self._optimizer.param_groups[0]['lr']
self.log('train_loss', loss)
self.log('lr', lr, prog_bar=True)
return {"loss": loss, "lr": lr}
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
input_ids, input_type_ids, input_mask, output_ids, output_mask, labels = batch
forward_outputs = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
mlm_log_probs, nsp_logits = self._parse_forward_outputs(forward_outputs)
_, _, loss = self._compute_losses(mlm_log_probs, nsp_logits, output_ids, output_mask, labels)
self.validation_perplexity(logits=mlm_log_probs)
loss = {'val_loss': loss}
self.validation_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self):
"""Called at the end of validation to aggregate outputs.
Args:
outputs (list): The individual outputs of each validation step.
Returns:
dict: Validation loss and tensorboard logs.
"""
if self.validation_step_outputs:
avg_loss = torch.stack([x['val_loss'] for x in self.validation_step_outputs]).mean()
perplexity = self.validation_perplexity.compute()
logging.info(f"evaluation perplexity {perplexity.cpu().item()}")
self.log(f'val_loss', avg_loss)
self.validation_step_outputs.clear() # free memory
def setup_training_data(self, train_data_config: Optional[DictConfig]):
self._train_dl = (
self._setup_preprocessed_dataloader(train_data_config)
if self.tokenizer is None
else self._setup_dataloader(train_data_config)
)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
self._validation_dl = (
self._setup_preprocessed_dataloader(val_data_config)
if self.tokenizer is None
else self._setup_dataloader(val_data_config)
)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
pass
def _setup_preprocessed_dataloader(self, cfg: Optional[DictConfig]):
dataset = cfg.data_file
max_predictions_per_seq = cfg.max_predictions_per_seq
batch_size = cfg.batch_size
if os.path.isdir(dataset):
files = [os.path.join(dataset, f) for f in os.listdir(dataset) if os.path.isfile(os.path.join(dataset, f))]
else:
files = [dataset]
files.sort()
dl = BertPretrainingPreprocessedDataloader(
data_files=files, max_predictions_per_seq=max_predictions_per_seq, batch_size=batch_size,
)
return dl
def _setup_tokenizer(self, cfg: DictConfig):
tokenizer = get_tokenizer(
tokenizer_name=cfg.tokenizer_name,
tokenizer_model=cfg.tokenizer_model,
special_tokens=OmegaConf.to_container(cfg.special_tokens) if cfg.special_tokens else None,
vocab_file=cfg.vocab_file,
)
self.tokenizer = tokenizer
def _setup_dataloader(self, cfg: DictConfig):
dataset = BertPretrainingDataset(
tokenizer=self.tokenizer,
data_file=cfg.data_file,
max_seq_length=cfg.max_seq_length,
mask_prob=cfg.mask_prob,
short_seq_prob=cfg.short_seq_prob,
)
dl = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
collate_fn=dataset.collate_fn,
drop_last=cfg.get("drop_last", False),
shuffle=cfg.shuffle,
num_workers=cfg.get("num_workers", 0),
)
return dl
@classmethod
    def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
result.append(
PretrainedModelInfo(
pretrained_model_name="bertbaseuncased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/bertbaseuncased/versions/1.0.0rc1/files/bertbaseuncased.nemo",
description="The model was trained EN Wikipedia and BookCorpus on a sequence length of 512.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="bertlargeuncased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/bertlargeuncased/versions/1.0.0rc1/files/bertlargeuncased.nemo",
description="The model was trained EN Wikipedia and BookCorpus on a sequence length of 512.",
)
)
return result
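if __name__ == "__main__":
    # Illustrative sketch (not part of the original NeMo source): print the BERT pretraining
    # checkpoints that can be restored from NGC.
    for model_info in BERTLMModel.list_available_models():
        print(model_info.pretrained_model_name, model_info.location)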
| NeMo-main | nemo/collections/nlp/models/language_modeling/bert_lm_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
from functools import partial
from typing import Any, List, Optional, Union
import torch
from omegaconf.dictconfig import DictConfig
from omegaconf.omegaconf import open_dict
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer
from nemo.collections.nlp.data.language_modeling.megatron.gpt_prompt_learning_dataset import GPTPromptLearningDataset
from nemo.collections.nlp.metrics.prompt_learning_metrics import AccuracyScore, BLEUScore, ROUGEScores
from nemo.collections.nlp.models.language_modeling.megatron_base_prompt_learning_model import (
MegatronBasePromptLearningModel,
)
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common import VirtualPromptPlaceholderToken, VirtualPromptSource, VirtualPromptStyle
from nemo.collections.nlp.modules.common.megatron.utils import (
ApexGuardDefaults,
average_losses_across_data_parallel_group,
get_iterator_k_split,
)
from nemo.collections.nlp.modules.common.text_generation_utils import (
get_default_length_params,
get_default_sampling_params,
megatron_gpt_generate,
)
from nemo.collections.nlp.modules.common.transformer.text_generation import LengthParam, SamplingParam
from nemo.collections.nlp.parts.nlp_overrides import GradScaler, NLPSaveRestoreConnector
from nemo.collections.nlp.parts.utils_funcs import get_last_rank
from nemo.utils import AppState, logging
try:
from apex.transformer.pipeline_parallel.utils import get_micro_batch_size, get_num_microbatches
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import InferenceParams, ModelParallelConfig, parallel_state, tensor_parallel
from megatron.core.enums import ModelType
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
__all__ = ['MegatronGPTPromptLearningModel']
class MegatronGPTPromptLearningModel(MegatronBasePromptLearningModel):
"""
Model class for prompt-tuning or p-tuning a pretrained Megatron GPT model.
    Prompt Tuning initializes virtual prompt embeddings directly from a copy of
    certain token embeddings from the pretrained GPT model's vocabulary
    and directly tunes these embedding weights. The token embeddings used in
    initialization are specified by the user in the config file. The model can
    be prompt-tuned for multiple tasks at once. Virtual prompts are stored in a
prompt table and can be added or deleted without disrupting virtual prompts
for other tasks.
P-tuning initializes an LSTM encoder model that generates virtual prompt
embeddings for every task. Each task shares the same encoder. After ptuning
    is complete, the learned virtual prompts can be saved to the prompt table
using add_ptuned_prompts_to_prompt_table(). Thus, if a user wants to add a
new virtual prompt via p-tuning, they do not need to retrain on all previous
    tasks. This gives p-tuning the same task flexibility as prompt-tuning.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer)
self.inference_params = None
# init_model is called by parent class already.
# self.init_model(cfg, trainer)
def init_model(self, cfg: DictConfig, trainer: Trainer):
self.cfg = cfg
self.config: ModelParallelConfig = self.model_parallel_config
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.get('language_model_path')):
save_restore_connector.model_extracted_dir = cfg.get('language_model_path')
frozen_model_cfg = MegatronGPTModel.restore_from(
cfg.get('language_model_path'),
trainer=trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
# set hidden size in the model parallel config for pipeline parallel schedules
setattr(self.config, 'hidden_size', frozen_model_cfg.hidden_size)
# Need to overwrite some params in frozen model's config before restoring
with open_dict(frozen_model_cfg):
frozen_model_cfg.megatron_amp_O2 = False
frozen_model_cfg.optim.name = "fused_adam"
frozen_model_cfg.micro_batch_size = self.cfg.micro_batch_size
frozen_model_cfg.global_batch_size = self.cfg.global_batch_size
frozen_model_cfg.precision = trainer.precision
frozen_model_cfg.sequence_parallel = self.cfg.get("sequence_parallel", False)
frozen_model_cfg.activations_checkpoint_granularity = self.cfg.get(
"activations_checkpoint_granularity", None
)
frozen_model_cfg.activations_checkpoint_num_layers = self.cfg.get(
"activations_checkpoint_num_layers", None
)
frozen_model_cfg.activations_checkpoint_method = self.cfg.get("activations_checkpoint_method", None)
if cfg.get('language_model_path', None):
self.frozen_model = MegatronGPTModel.restore_from(
cfg.get('language_model_path'),
trainer=trainer,
save_restore_connector=save_restore_connector,
override_config_path=frozen_model_cfg,
).to(dtype=self.autocast_dtype)
self.megatron_amp_o2 = self.cfg.get('megatron_amp_O2', False)
self.pipeline_parallel = self.cfg.get('pipeline_model_parallel_size', 1) > 1
self.tokenizer = self.frozen_model.tokenizer
self.hidden_size = self.frozen_model.cfg.hidden_size
self.existing_tasks = list(self.cfg.get('existing_tasks', []))
self.new_tasks = list(self.cfg.get('new_tasks', []))
with open_dict(self.cfg):
self.cfg.existing_tasks = (
self.existing_tasks + self.new_tasks
) # TODO: for backward compatibility (@adithyare) in general these tasks lists should be deprecated
self.virtual_prompt_style = VirtualPromptStyle(cfg.virtual_prompt_style)
self.model_type = ModelType.encoder_or_decoder
self.enable_autocast = (
True if (not self.megatron_amp_o2) and (self.autocast_dtype in [torch.float16, torch.bfloat16]) else False
)
if self.pipeline_parallel:
assert (
self.cfg.optim.sched.get("min_lr", 0.0) == 0.0
), "Minimum lr must be 0.0 when pipeline parallel size is > 1"
# Load templates for assigning virtual prompt token positions
self.load_task_templates(self.cfg.task_templates)
if self.first_stage_of_pipeline() and self.virtual_prompt_style in [
VirtualPromptStyle.P_TUNING,
]:
if self.frozen_model.mcore_gpt:
self.word_embeddings = self.frozen_model.model.embedding.word_embeddings
else:
self.word_embeddings = self.frozen_model.model.language_model.embedding.word_embeddings
self.padded_vocab_size = self.frozen_model.padded_vocab_size
        # Prepare pseudo token ids for virtual prompt tokens
self.pseudo_tokens = get_pseudo_tokens(self.max_virtual_tokens)
if isinstance(self.tokenizer, SentencePieceTokenizer):
if not self.tokenizer.legacy:
if self.tokenizer.pad_id != -1:
self.tokenizer.pad_token = self.tokenizer.ids_to_tokens([self.tokenizer.pad_id])[0]
else:
self.tokenizer.pad_token = self.tokenizer.ids_to_tokens([self.tokenizer.eos_id])[0]
self.tokenizer.bos_token = self.tokenizer.ids_to_tokens([self.tokenizer.bos_id])[0]
self.tokenizer.eos_token = self.tokenizer.ids_to_tokens([self.tokenizer.eos_id])[0]
self.tokenizer.legacy = True
self.tokenizer.add_special_tokens(self.pseudo_tokens)
else:
self.tokenizer.add_special_tokens({'additional_special_tokens': self.pseudo_tokens})
self.pseudo_token_ids = self.tokenizer.tokens_to_ids(self.pseudo_tokens)
self.pseudo_token_ids_start = self.pseudo_token_ids[0] if self.pseudo_token_ids else None
self.pad_token_id = self.tokenizer.pad_id if self.tokenizer.pad_id is not None else self.tokenizer.unk_id
# P-Tuning uses an LSTM Encoder to produce virtual token embeddings
if self.virtual_prompt_style == VirtualPromptStyle.P_TUNING:
self.virtual_prompt_source = VirtualPromptSource.PROMPT_ENCODER
elif self.virtual_prompt_style == VirtualPromptStyle.NO_PROMPT:
self.virtual_prompt_source = VirtualPromptSource.NO_PROMPT
else:
raise ValueError(f"\nvirtual prompt style '{cfg.virtual_prompt_style}.'")
self._reduced_loss_buffer = []
self._inference_config = None
        # make sure to use the default pytorch lightning gradient clipping from the base model
self.grad_clip_pl_default = True
self.lowest_val_loss = None
self.prompt_encoder = None
# default inference related params -> for evaluation metrics
if hasattr(self.cfg, 'inference') and self.cfg.get("report_validation_metric", False):
self.length_params: LengthParam = {
"max_length": self.cfg.inference.get('tokens_to_generate', 30),
"min_length": self.cfg.inference.get('min_tokens_to_generate', 0),
}
self.sampling_params: SamplingParam = {
"use_greedy": self.cfg.inference.get('greedy', False),
"temperature": self.cfg.inference.get('temperature', 1.0),
"top_k": self.cfg.inference.get('tok_k', 0),
"top_p": self.cfg.inference.get('top_p', 0.9),
"repetition_penalty": self.cfg.inference.get('repetition_penalty', 1.2),
"add_BOS": True,
"all_probs": False,
"compute_logprob": False,
"end_strings": self.cfg.inference.get('end_strings', ["<|endoftext|>"]),
}
elif self.cfg.get("report_validation_metric", False) and not hasattr(self.cfg, 'inference'):
raise ValueError("Must provide inference parameters for reporting validation metric!")
# define validation metric
if self.cfg.get('report_validation_metric', False):
validation_metric = self.cfg.get('validation_metric', 'accuracy')
if validation_metric == 'accuracy':
self.validation_metric = AccuracyScore()
elif validation_metric == 'bleu':
self.validation_metric = BLEUScore()
elif validation_metric == 'rouge':
self.validation_metric = ROUGEScores()
def first_stage_of_pipeline(self):
return self.frozen_model.model.pre_process
def forward(
self,
input_ids,
position_ids,
attention_mask,
taskname_ids,
labels=None,
inference=True,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
inference_params=None,
):
"""
Special forward method for p-tuning/prompt-tuning pretrained
GPT style models. Bypasses the vocab token preprocessing done
in the MegatronGPT class.
"""
# Get embeddings for text tokens and insert virtual token embeddings
if self.first_stage_of_pipeline():
input_embeds = self.embed_input(input_ids, taskname_ids, use_cached_reps=inference)
if self.frozen_model.mcore_gpt and hasattr(self.frozen_model.model.embedding, "position_embeddings"):
position_embeddings = self.frozen_model.model.embedding.position_embeddings(position_ids)
encoder_input = input_embeds + position_embeddings
elif not self.frozen_model.mcore_gpt and hasattr(
self.frozen_model.model.language_model.embedding, "position_embeddings"
):
position_embeddings = self.frozen_model.model.language_model.embedding.position_embeddings(
position_ids
)
encoder_input = input_embeds + position_embeddings
else:
encoder_input = input_embeds
encoder_input = encoder_input.transpose(0, 1).contiguous()
if self.cfg.get("sequence_parallel", False):
encoder_input = tensor_parallel.mappings.scatter_to_sequence_parallel_region(encoder_input)
else:
encoder_input = None
# Call forward on GPT model with preprocessed embeddings
if self.frozen_model.mcore_gpt:
output = self.frozen_model.model(
input_ids=None,
position_ids=None,
decoder_input=encoder_input,
attention_mask=attention_mask,
labels=labels,
inference_params=inference_params,
)
else:
output = self.frozen_model.model(
input_ids=None,
position_ids=None,
encoder_input=encoder_input,
attention_mask=attention_mask,
labels=labels,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
)
return output
def fwd_bwd_step(self, dataloader_iter, batch_idx, forward_only):
"""
Dataloader produces a global batch which is turned into an iterator of microbatches.
The iterator of microbatches is then piped through the pipeline using Core's fwd/bwd functions.
"""
# Get seq length of batch
batch = next(dataloader_iter)
_, seq_length = batch[0].shape
data_iter = get_iterator_k_split(batch, get_num_microbatches())
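        # Worked example (illustrative): with global_batch_size=16, micro_batch_size=4
        # and data_parallel_size=1, get_num_microbatches() is 4, so each [16, seq_length]
        # tensor in the batch is split into 4 microbatch views of shape [4, seq_length].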
fwd_bwd_function = get_forward_backward_func()
losses_reduced_per_micro_batch = fwd_bwd_function(
forward_step_func=self.get_forward_output_and_loss_func(),
data_iterator=data_iter,
model=[self],
num_microbatches=get_num_microbatches(),
forward_only=forward_only,
seq_length=seq_length,
micro_batch_size=get_micro_batch_size(),
)
# only the last stages of the pipeline return losses
if losses_reduced_per_micro_batch:
# average loss across micro batches
loss_tensors_list = [loss_reduced['avg'] for loss_reduced in losses_reduced_per_micro_batch]
loss_tensor = torch.concat(loss_tensors_list)
loss_mean = loss_tensor.mean()
else:
# we're not on the last pipeline stage so no losses
loss_mean = torch.tensor(0.0).cuda()
return loss_mean
def training_step(self, dataloader_iter, batch_idx):
# we zero grads here because we also call backward in the megatron-core fwd/bwd functions
self._optimizer.zero_grad()
batch = next(dataloader_iter)
loss_mean = self.fwd_bwd_step(itertools.chain([batch]), batch_idx, forward_only=False)
self.allreduce_gradients()
## logging
# we can only log on one rank if it is rank zero so we broadcast from last rank
# we can avoid this broadcast by updating the PTL log function to accept specific ranks
torch.distributed.broadcast(loss_mean, get_last_rank())
if self.torch_dtype == torch.float16 and hasattr(self.trainer.precision_plugin.scaler, "_scale"):
loss_scale = self.trainer.precision_plugin.scaler._scale
if loss_scale is not None:
self.log('loss_scale', loss_scale, batch_size=1)
self.log('reduced_train_loss', loss_mean, prog_bar=True, rank_zero_only=True, batch_size=1)
lr = self._optimizer.param_groups[0]['lr']
self.log('lr', lr, rank_zero_only=True, batch_size=1)
self.log('global_step', self.trainer.global_step, prog_bar=True, rank_zero_only=True, batch_size=1)
return loss_mean
def backward(self, *args, **kwargs):
""" LightningModule hook to do backward.
We want this to do nothing since we run backward in the fwd/bwd functions from megatron-core.
No need to call it here.
"""
return
def optimizer_zero_grad(self, *args, **kwargs):
""" LightningModule hook to zero grad.
We want this to do nothing as we are zeroing grads during the training_step.
"""
return
def validation_step(self, dataloader_iter, batch_idx):
# Check if iterator is exhausted
dataloader_iter, done = self._val_iterator_done(dataloader_iter)
if done:
return
mode = 'test' if self.trainer.testing else 'val'
batch = next(dataloader_iter)
gbs = self.cfg.get('validation_global_batch_size', self.cfg.global_batch_size)
self._reconfigure_and_process_inference_batch(batch[0].size(0), gbs)
loss_mean = self.fwd_bwd_step(itertools.chain([batch]), batch_idx, forward_only=True)
        if loss_mean.item() == 0.0:
loss_mean = []
if self.cfg.get('report_validation_metric', False):
preds_text, labels_text = [], []
input_ids, labels, loss_mask, position_ids, attention_mask, taskname_ids = batch
            input_lengths = torch.argmax(loss_mask, 1, keepdim=True)
res = megatron_gpt_generate(
self.cuda(),
(
torch.cat(
(
input_ids,
torch.zeros(
input_ids.shape[0], self.length_params['max_length'], dtype=input_ids.dtype
).to(self.device),
),
axis=1,
),
                    input_lengths.squeeze(1),
),
self.tokenizer,
self.length_params,
self.sampling_params,
task_ids=taskname_ids,
)
for pred, label in zip(res['token_ids'], labels):
# ids_to_text ignores special tokens by default
pred = self.tokenizer.ids_to_text(pred)
label = self.tokenizer.ids_to_text(label)
preds_text.append(pred)
labels_text.append(label)
if mode == 'val':
self.validation_step_outputs.append(
{'loss': loss_mean, 'preds': preds_text, 'labels': labels_text,}
)
else:
self.test_step_outputs.append(
{'loss': loss_mean, 'preds': preds_text, 'labels': labels_text,}
)
return {
'loss': loss_mean,
'preds': preds_text,
'labels': labels_text,
}
self.validation_step_outputs.append({'loss': loss_mean}) if mode == 'val' else self.test_step_outputs.append(
{'loss': loss_mean}
)
return {'loss': loss_mean}
def on_train_epoch_start(self) -> None:
gbs = self.cfg.global_batch_size
mbs = self.cfg.micro_batch_size
self._reconfigure_batch_sizes(gbs, mbs)
return super().on_train_epoch_start()
def on_validation_epoch_start(self) -> None:
gbs = self.cfg.get('validation_global_batch_size', self.cfg.global_batch_size)
mbs = self.cfg.get('validation_micro_batch_size', self.cfg.micro_batch_size)
self._reconfigure_batch_sizes(gbs, mbs)
return super().on_validation_epoch_start()
def on_validation_epoch_end(self):
if not self.validation_step_outputs:
return
if parallel_state.is_pipeline_last_stage():
# only the last pipeline parallel stages return loss
averaged_loss = torch.stack([i['loss'] for i in self.validation_step_outputs]).mean()
else:
averaged_loss = torch.tensor(0.0).cuda()
# we can only log on one rank if it is rank zero so we broadcast from last rank
torch.distributed.broadcast(averaged_loss, get_last_rank())
self.log('val_loss', averaged_loss, prog_bar=True, rank_zero_only=True, batch_size=1)
logging.info(f'val_loss: {averaged_loss}')
if self.cfg.get("report_validation_metric", False):
gather_results = [None for _ in range(parallel_state.get_data_parallel_world_size())]
all_preds = list(itertools.chain(*[item['preds'] for item in self.validation_step_outputs]))
all_labels = list(itertools.chain(*[item['labels'] for item in self.validation_step_outputs]))
assert len(all_preds) == len(all_labels)
# Gather inputs, preds, labels from all workers
torch.distributed.all_gather_object(
gather_results,
[(pred, label) for (pred, label) in zip(all_preds, all_labels)],
group=parallel_state.get_data_parallel_group(),
)
# Deduplicate sentences that may have been distributed across multiple data parallel ranks.
if parallel_state.get_data_parallel_rank() == 0:
gather_results_dedup = list(set(itertools.chain(*gather_results)))
val_metric_dict = self.validation_metric.get_score(
[i[1] for i in gather_results_dedup], [i[0] for i in gather_results_dedup],
)
for metric, val in val_metric_dict.items():
logging.info(f'Validation {metric}: {val}')
val_metric = list(val_metric_dict.items())[0][1]
metric_name = list(val_metric_dict.items())[0][0]
else:
val_metric = torch.tensor(0.0).cuda()
metric_name = ''
self.log(f'val_{metric_name}', val_metric, prog_bar=True, rank_zero_only=True, batch_size=1)
gbs = self.cfg.global_batch_size
mbs = self.cfg.micro_batch_size
self._reconfigure_batch_sizes(gbs, mbs)
self.validation_step_outputs.clear() # free memory
def test_step(self, dataloader_iter, batch_idx):
return self.validation_step(dataloader_iter, batch_idx)
def on_test_epoch_end(self):
averaged_loss = average_losses_across_data_parallel_group(self.test_step_outputs)
logging.info(f'test_loss: {averaged_loss[0]}')
self.test_step_outputs.clear() # free memory
def setup(self, stage=None):
super().setup(stage)
if self.cfg.get('transformer_engine', False) or self.cfg.get('mcore_gpt', False):
self.frozen_model.setup_transformer_engine_tp_groups()
def setup_training_data(self, training_data_config=None):
if self.cfg.data.get('train_ds', None):
max_seq_length = self.frozen_model.cfg.encoder_seq_length
if "max_seq_length" in self.cfg.data and self.cfg.data.max_seq_length:
max_seq_length = min(self.cfg.data.max_seq_length, max_seq_length)
self._train_ds, self._train_dl = self.build_virtual_prompt_dataset(
data=self.cfg.data.train_ds,
batch_size=self.cfg.global_batch_size,
max_seq_length=max_seq_length,
min_seq_length=self.cfg.data.get('min_seq_length', 1),
add_bos=self.cfg.data.get('add_bos', False),
add_eos=self.cfg.data.get('add_eos', True),
for_train=True,
drop_last=True,
shuffle=True,
num_workers=self.cfg.data.num_workers,
pin_memory=True,
cache_data_path=self.cfg.data.get('train_cache_data_path', None),
load_cache=self.cfg.data.get('load_cache', False),
)
def setup_validation_data(self, validation_data_config=None):
if self.cfg.data.get('validation_ds', None):
max_seq_length = self.frozen_model.cfg.encoder_seq_length
if "max_seq_length" in self.cfg.data and self.cfg.data.max_seq_length:
max_seq_length = min(self.cfg.data.max_seq_length, max_seq_length)
self._validation_ds, self._validation_dl = self.build_virtual_prompt_dataset(
data=self.cfg.data.validation_ds,
batch_size=self.cfg.get('validation_global_batch_size', self.cfg.global_batch_size),
max_seq_length=max_seq_length,
min_seq_length=self.cfg.data.get('min_seq_length', 1),
add_bos=self.cfg.data.get('add_bos', False),
add_eos=self.cfg.data.get('add_eos', True),
for_train=True,
drop_last=self.cfg.get('validation_drop_last', True),
shuffle=False,
num_workers=self.cfg.data.num_workers,
pin_memory=True,
cache_data_path=self.cfg.data.get('validation_cache_data_path', None),
load_cache=self.cfg.data.get('load_cache', False),
)
def setup_test_data(self, test_data_config=None):
if self.cfg.data.get('test_ds', None):
self._test_ds, self._test_dl = self.build_virtual_prompt_dataset(
data=self.cfg.data.test_ds,
batch_size=self.cfg.get('validation_global_batch_size', self.cfg.global_batch_size),
max_seq_length=self.frozen_model.cfg.encoder_seq_length,
min_seq_length=self.cfg.data.get('min_seq_length', 1),
add_bos=self.cfg.data.get('add_bos', False),
add_eos=self.cfg.data.get('add_eos', True),
for_train=False,
drop_last=False,
shuffle=False,
num_workers=self.cfg.data.num_workers,
pin_memory=True,
cache_data_path=self.cfg.data.get('test_cache_data_path', None),
load_cache=self.cfg.data.get('load_cache', False),
)
def build_virtual_prompt_dataset(
self,
data,
batch_size,
max_seq_length=2048,
min_seq_length=1,
add_bos=False,
add_eos=False,
for_train=True,
drop_last=False,
shuffle=False,
num_workers=0,
pin_memory=False,
tokens_to_generate=None,
get_dataset_only=False,
cache_data_path=None,
load_cache=False,
):
dataset = GPTPromptLearningDataset(
data=data,
tokenizer=self.tokenizer,
virtual_prompt_source=self.virtual_prompt_source,
task_templates=self.task_templates,
pseudo_tokens=self.pseudo_tokens,
pad_token_id=self.pad_token_id,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
add_bos=add_bos,
add_eos=add_eos,
for_train=for_train,
tokens_to_generate=tokens_to_generate,
cache_data_path=cache_data_path,
load_cache=load_cache,
)
if get_dataset_only:
return dataset
# Make distributed dataloader
rank = parallel_state.get_data_parallel_rank()
data_parallel_size = parallel_state.get_data_parallel_world_size()
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, num_replicas=data_parallel_size, rank=rank, shuffle=shuffle, seed=self.cfg.seed
)
assert batch_size % data_parallel_size == 0, "Global batch size must be evenly divisible by data parallel size"
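        # Example (illustrative): a global batch_size of 16 with data_parallel_size=4
        # passes the assert above and gives each rank a DataLoader batch_size of 4 below.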
if for_train:
if self.cfg.get("sequence_parallel", False):
collate_fn = partial(
dataset.collate_fn, tp_workers=parallel_state.get_tensor_model_parallel_world_size()
)
else:
collate_fn = partial(dataset.collate_fn, tp_workers=0)
else:
collate_fn = dataset.inference_collate_fn
dataloader = torch.utils.data.DataLoader(
dataset,
collate_fn=collate_fn,
sampler=sampler,
batch_size=batch_size // data_parallel_size,
drop_last=drop_last,
num_workers=num_workers,
pin_memory=pin_memory,
persistent_workers=True
if num_workers > 0
else False, # (@adithyare and @eharper) We need this to make spawn=True to work.
)
return dataset, dataloader
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
self.frozen_model.model.set_input_tensor(input_tensor)
def get_forward_output_and_loss_func(self):
def fwd_output_and_loss_func(dataloader_iter, model):
batch = next(dataloader_iter)
batch = [x.cuda(non_blocking=True) for x in batch]
input_ids, labels, loss_mask, position_ids, attention_mask, taskname_ids = batch
output_tensor = model(input_ids, position_ids, attention_mask, taskname_ids, labels, inference=False)
if isinstance(output_tensor, tuple):
output_tensor, _ = output_tensor
def loss_func(output_tensor):
loss = self.frozen_model.loss_func(loss_mask, output_tensor)
reduced_loss = average_losses_across_data_parallel_group([loss])
return loss, {'avg': reduced_loss}
return output_tensor, loss_func
return fwd_output_and_loss_func
def get_forward_output_only_func(self):
"""
Used for generate method only for now.
"""
def fwd_output_only_func(dataloader_iter, model):
batch = next(dataloader_iter)
extra_arg = {}
(
tokens,
attention_mask,
position_ids,
task_ids,
set_inference_key_value_memory,
inference_max_sequence_len,
) = batch
tokens = tokens.cuda()
attention_mask = attention_mask.cuda()
position_ids = position_ids.cuda()
task_ids = task_ids.cuda()
if self.frozen_model.mcore_gpt:
                # if first step, then clear KV cache, otherwise reuse inference_params
if set_inference_key_value_memory[0].item():
self.inference_params = InferenceParams(
max_batch_size=tokens.size(0), max_sequence_length=inference_max_sequence_len[0].item()
)
extra_arg['inference_params'] = self.inference_params
else:
extra_arg['set_inference_key_value_memory'] = set_inference_key_value_memory[0].item()
extra_arg['inference_max_sequence_len'] = inference_max_sequence_len[0].item()
output_tensor = model(tokens, position_ids, attention_mask, task_ids, **extra_arg)
# Advance inference sequence offset.
if self.inference_params:
self.inference_params.sequence_len_offset += output_tensor.size(1)
def id_func(output_tensor):
return output_tensor, {'logits': output_tensor}
return output_tensor, id_func
return fwd_output_only_func
def generate(
self,
inputs: Union[List[str], torch.Tensor, List[dict]],
length_params: LengthParam,
sampling_params: SamplingParam = None,
batch_size: Optional[int] = 1,
):
# check whether the DDP is initialized
if parallel_state.is_unitialized():
def dummy():
return
if self.trainer.strategy.launcher is not None:
self.trainer.strategy.launcher.launch(dummy, trainer=self.trainer)
self.trainer.strategy.setup_environment()
# set the default sampling params if it is None.
# default do greedy sampling
if sampling_params is None:
sampling_params = get_default_sampling_params()
sampling_params["add_BOS"] = self.cfg.data.get("add_bos", False)
if length_params is None:
length_params = get_default_length_params()
max_input_length = self.frozen_model.cfg.encoder_seq_length - length_params["max_length"]
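        # Example (illustrative): encoder_seq_length=2048 and length_params["max_length"]=30
        # leave up to 2018 tokens of budget for the prompt text.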
# input dicts are either dataset paths or already loaded example dicts
if "taskname" not in inputs[0].keys():
data = [path["data_path"] for path in inputs]
else:
data = inputs
dataset = self.build_virtual_prompt_dataset(
data=data,
batch_size=batch_size,
max_seq_length=max_input_length,
min_seq_length=self.cfg.data.get('min_seq_length', 1),
add_bos=sampling_params["add_BOS"],
add_eos=False,
for_train=False,
tokens_to_generate=length_params["max_length"],
get_dataset_only=True,
)
full_dataset = [dataset[i] for i in range(len(dataset))]
task_ids, processed_inputs = dataset.inference_collate_fn(full_dataset)
self.frozen_model.model.parallel_output = False
# Call same generate code as in MegatronGPT
return megatron_gpt_generate(
self.cuda(), processed_inputs, self.tokenizer, length_params, sampling_params, task_ids=task_ids
)
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
inference_config = self.get_inference_config()
if inference_config is None:
return None
else:
length_params: LengthParam = {
"max_length": inference_config["tokens_to_generate"],
"min_length": inference_config["min_tokens_to_generate"],
}
sampling_params: SamplingParam = {
"use_greedy": inference_config["greedy"],
"temperature": inference_config["temperature"],
"top_k": inference_config["top_k"],
"top_p": inference_config["top_p"],
"repetition_penalty": inference_config["repetition_penalty"],
"add_BOS": inference_config["add_BOS"],
"all_probs": inference_config["all_probs"],
"compute_logprob": inference_config["compute_logprob"],
"compute_attention_mask": inference_config.get("compute_attention_mask", True),
"end_strings": inference_config.get('end_strings', ["<|endoftext|>"]),
}
task_ids, processed_inputs = batch
self.frozen_model.model.parallel_output = False
# Call same generate code as in MegatronGPT
return megatron_gpt_generate(
self.cuda(), processed_inputs, self.tokenizer, length_params, sampling_params, task_ids=task_ids
)
@classmethod
def list_available_models(cls):
pass
def get_pseudo_tokens(num_virtual_tokens):
"""
Takes in an integer and returns a list of strings where each string
is a numbered virtual token placeholder. If
num_virtual_tokens = 3, then this function returns:
["<prompt_0>", "<prompt_1>", "<prompt_2>"]
Args:
num_virtual_tokens: (int) Number of virtual token strings you want to make
    Returns: a list of placeholder token strings.
"""
pseudo_tokens = [
VirtualPromptPlaceholderToken.BASE.value + str(i) + VirtualPromptPlaceholderToken.END.value
for i in range(num_virtual_tokens)
]
return pseudo_tokens
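# Usage sketch (illustrative; mirrors how init_model above registers these
# placeholders with a non-SentencePiece tokenizer):
#   pseudo_tokens = get_pseudo_tokens(10)
#   tokenizer.add_special_tokens({'additional_special_tokens': pseudo_tokens})
#   pseudo_token_ids = tokenizer.tokens_to_ids(pseudo_tokens)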
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Any, List, Optional, Union
import torch
from omegaconf import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.data.language_modeling.megatron.data_samplers import (
MegatronPretrainingRandomSampler,
MegatronPretrainingSampler,
)
from nemo.collections.nlp.data.language_modeling.megatron.retro_dataset import (
build_mock_train_valid_test_datasets,
build_train_valid_test_datasets,
)
from nemo.collections.nlp.models.language_modeling.megatron_base_model import MegatronBaseModel
from nemo.collections.nlp.modules.common.megatron.module import Float16Module
from nemo.collections.nlp.modules.common.megatron.mup.init import normal_
from nemo.collections.nlp.modules.common.megatron.mup.shape import set_base_shapes
from nemo.collections.nlp.modules.common.megatron.retrieval_token_level_encoder_decoder import (
MegatronRetrievalTokenLevelEncoderDecoderModule,
)
from nemo.collections.nlp.modules.common.megatron.utils import (
average_losses_across_data_parallel_group,
build_position_ids,
get_params_for_weight_decay_optimization,
)
from nemo.collections.nlp.modules.common.text_generation_strategy import model_inference_strategy_dispatcher
from nemo.collections.nlp.modules.common.text_generation_utils import (
generate,
get_computeprob_response,
get_default_length_params,
get_default_sampling_params,
megatron_gpt_generate,
)
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.collections.nlp.modules.common.transformer.text_generation import (
LengthParam,
OutputType,
SamplingParam,
TextGeneration,
)
from nemo.collections.nlp.parts.nlp_overrides import GradScaler
from nemo.utils import AppState, logging
try:
from megatron.core import parallel_state
from megatron.core.enums import ModelType
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
__all__ = ["MegatronRetrievalModel"]
class MegatronRetrievalModel(MegatronBaseModel, TextGeneration):
"""
Megatron Retrieval enhanced language model
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer=trainer)
# TODO does not support PP yet
self.model = self.model_provider_func(pre_process=True, post_process=True, add_encoder=True, add_decoder=True)
self.megatron_amp_o2 = cfg.get('megatron_amp_O2', False)
if self.megatron_amp_o2:
if not self.with_distributed_adam:
# Pre-allocate the model on GPU to have master parameters allocated on the same device with matching data type
self.model.cuda(torch.cuda.current_device())
# Model wrapper to convert both model and inputs to half precision
self.model = Float16Module(
config=self.model_parallel_config, module=self.model, precision=self.cfg.precision
)
# self.setup_optimizer_param_groups()
self.model.model_type = ModelType.encoder_and_decoder
self.enable_autocast = (
True if (not self.megatron_amp_o2) and (self.autocast_dtype in [torch.float16, torch.bfloat16]) else False
)
if hasattr(self.cfg, "shape_file"):
set_base_shapes(self, self.register_artifact("shape_file", self.cfg.shape_file), rescale_params=False)
            # here manually initialize all the named parameters with the muTransfer normal initializer
for name, tensor in self.named_parameters():
if name.endswith('.dense_4h_to_h.weight') or name.endswith('.dense.weight'):
# initialize all the output dense matrix weight
# match the megatron lm model
std = self.cfg.init_method_std / math.sqrt(2.0 * 12.0)
normal_(tensor, 0, std)
elif name.endswith('layernorm.weight'):
# initialize all the layer norm weight
if tensor.std() != 0 and tensor.mean() != 1:
raise ValueError(f'need to check {name} init')
normal_(tensor, 1, 0)
elif name.endswith('.weight'):
# initialize all the other dense matrix weight
normal_(tensor, 0, self.cfg.init_method_std)
else:
if tensor.std() != 0 and tensor.mean() != 0:
raise ValueError(f'need to check {name} init')
# here manually overwrite the norm factor
            # note: model.apply_query_key_layer_scaling has to be turned off
assert not self.cfg.apply_query_key_layer_scaling
for name, layer in self.named_modules():
if (
name.endswith('.self_attention')
or name.endswith('.inter_attention')
or name.endswith('.cross_attention')
or name.endswith('.core_attention')
):
if hasattr(layer, 'norm_factor') and hasattr(layer, 'hidden_size_per_attention_head'):
layer.norm_factor = (
layer.hidden_size_per_attention_head / 8.0
                        )  # divide by 8 to make it consistent with the ADLR setting
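                        # Example (illustrative): hidden_size_per_attention_head=128 gives
                        # norm_factor=16 here, whereas the usual sqrt(d_head) scaling would give
                        # roughly 11.3; the two coincide only at d_head=64.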
else:
if hasattr(layer, 'norm_factor') or hasattr(layer, 'hidden_size_per_attention_head'):
logging.error(
                                f'module {name} has a norm_factor attribute but its name does not end with attention, need to double check'
)
def _build_tokenizer(self):
self.tokenizer = get_nmt_tokenizer(
library=self._cfg.tokenizer.library,
model_name=self._cfg.tokenizer.type,
tokenizer_model=self.register_artifact("tokenizer.model", self._cfg.tokenizer.model),
vocab_file=self.register_artifact("tokenizer.vocab_file", self._cfg.tokenizer.vocab_file),
merges_file=self.register_artifact("tokenizer.merge_file", self._cfg.tokenizer.merge_file),
delimiter=self.cfg.tokenizer.get('delimiter', None),
legacy=False,
)
# add pad special token
if not hasattr(self.tokenizer, "pad_id"):
self.tokenizer.add_special_tokens({'pad_token': '<pad>'})
elif hasattr(self.tokenizer, "pad_id") and (self.tokenizer.pad_id is None or self.tokenizer.pad_id < 0):
self.tokenizer.add_special_tokens({'pad_token': '<pad>'})
def model_provider_func(self, pre_process, post_process, add_encoder, add_decoder):
        # TODO: create get_encoder_decoder_model() here for different losses (e.g., nll, vae, mim)
model = MegatronRetrievalTokenLevelEncoderDecoderModule(
config=self.model_parallel_config,
vocab_size=self.padded_vocab_size,
hidden_size=self.cfg.hidden_size,
max_position_embeddings=self.cfg.max_position_embeddings,
num_attention_heads=self.cfg.num_attention_heads,
ffn_hidden_size=self.cfg.ffn_hidden_size,
apply_query_key_layer_scaling=self.cfg.get('apply_query_key_layer_scaling', True),
kv_channels=self.cfg.get('kv_channels', None),
num_tokentypes=0,
parallel_output=True,
pre_process=pre_process,
post_process=post_process,
init_method_std=self.cfg.get('init_method_std', 0.02),
fp16_cross_entropy=self.cfg.get('fp16_lm_cross_entropy', False),
hidden_dropout=self.cfg.get('hidden_dropout', 0.1),
attention_dropout=self.cfg.get('attention_dropout', 0.1),
precision=self.cfg.get('precision', 16),
fp32_residual_connection=self.cfg.get('fp32_residual_connection', False),
activations_checkpoint_method=self.cfg.get('activations_checkpoint_method', None),
activations_checkpoint_num_layers=self.cfg.get('activations_checkpoint_num_layers', 1),
layernorm_epsilon=self.cfg.get('layernorm_epsilon', 1e-5),
persist_layer_norm=self.cfg.get('persist_layer_norm', False),
bias_gelu_fusion=self.cfg.get('bias_gelu_fusion', True),
bias_dropout_add_fusion=self.cfg.get('bias_dropout_add_fusion', True),
masked_softmax_fusion=self.cfg.get('masked_softmax_fusion', True),
onnx_safe=self.cfg.get('onnx_safe', False),
activation=self.cfg.get('activation', 'gelu'),
bias=self.cfg.get('bias', True),
normalization=self.cfg.get('normalization', 'layernorm'),
headscale=self.cfg.get('headscale', False),
transformer_block_type=self.cfg.get('transformer_block_type', 'pre_ln'),
add_encoder=add_encoder,
add_decoder=add_decoder,
            chunk_size=self.cfg.get('chunk_size', 64),  # the chunk size used to retrieve
enc_num_layers=self.cfg.get('enc_num_layers', 4), # total number of encoder layers
dec_num_layers=self.cfg.get('dec_num_layers', 6), # total number of decoder layers
enc_cross_attention=self.cfg.get('enc_cross_attention', [3]), # layer numbers for cross attention
dec_cross_attention=self.cfg.get(
'dec_cross_attention', [3, 5]
), # layer numbers for chunked cross attention
add_position_embedding=self.cfg.get(
'add_position_embedding', False
            ),  # whether to use the absolute position encoding
tokenizer=self.tokenizer,
activations_checkpoint_granularity=self.cfg.get('activations_checkpoint_granularity', None),
megatron_lm_compatible=self.cfg.get('megatron_lm_compatible', False),
version=self.cfg.get('version', 1),
)
return model
def forward(
self,
input_ids,
input_attn_mask,
retrieved_ids,
retrieved_attn_mask,
token_type_ids=None,
labels=None,
input_emb=None,
position_ids=None,
):
output_tensor = self.model(
input_ids=input_ids,
input_attn_mask=input_attn_mask,
retrieved_ids=retrieved_ids,
retrieved_attn_mask=retrieved_attn_mask,
token_type_ids=token_type_ids,
labels=labels,
input_emb=input_emb,
position_ids=position_ids,
)
return output_tensor
def training_step(self, batch, batch_idx):
input_tokens_id = batch['tokens']
input_attn_mask = batch['tokens_mask']
loss_mask = batch['loss_mask']
retrieved_ids = batch['retrieved_ids']
retrieved_attn_mask = batch['retrieved_emb_mask']
labels = batch['labels']
if self.cfg.get('add_position_embedding', False):
input_position_ids = build_position_ids(input_tokens_id)
else:
input_position_ids = None
loss = self(
input_tokens_id,
input_attn_mask,
retrieved_ids,
retrieved_attn_mask,
labels=labels,
position_ids=input_position_ids,
)
loss_mask = loss_mask.float()
lm_loss = torch.sum(loss.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
reduced_loss = average_losses_across_data_parallel_group([lm_loss])
self._reduced_loss_buffer.append(reduced_loss[0])
if self.torch_dtype == torch.float16:
loss_scale = self.trainer.precision_plugin.scaler._scale
if loss_scale is not None:
self.log('loss_scale', loss_scale, batch_size=1)
if self.with_distributed_adam:
# gradients are reduced internally in distributed optimizer
pass
elif self.megatron_amp_o2:
# while async grad allreduce is enabled, bprop will keep moving forward without waiting for
# the finish of async grad AR works. Hence, to guarantee the correctness of grads reduction,
# we cannot start weight update until all async grad AR works are done.
if self.cfg.get('pipeline_model_parallel_size', 1) == 1:
torch.cuda.synchronize()
# when using pipeline parallelism grads must be reduced after the pipeline (not asynchronously)
if self.cfg.get('pipeline_model_parallel_size', 1) > 1:
# main grads are stored in the MainParamsOptimizer wrapper
self._optimizer.allreduce_main_grads()
else:
# async grad allreduce is not currently implemented for O1/autocasting mixed precision training
# no pipeline, so use the default pytorch lightning way of doing all_reduce
# self.allreduce_gradients() # @sangkug we think this is causing memory to blow up (hurts perf)
pass
if (batch_idx + 1) % self.trainer.accumulate_grad_batches == 0:
# Reduced loss for logging.
average_reduced_loss = sum(self._reduced_loss_buffer) / len(self._reduced_loss_buffer)
self.log('reduced_train_loss', average_reduced_loss, prog_bar=True, batch_size=1)
lr = self._optimizer.param_groups[0]['lr']
self.log('lr', lr, batch_size=1)
self.log('global_step', self.trainer.global_step, prog_bar=True, batch_size=1)
self.log(
'consumed_samples', self._compute_consumed_samples_after_training_step(), prog_bar=True, batch_size=1,
)
self._reduced_loss_buffer = []
return lm_loss
def validation_step(self, batch, batch_idx):
prefix = "test" if self.trainer.testing else "val"
input_tokens_id = batch['tokens']
input_attn_mask = batch['tokens_mask']
loss_mask = batch['loss_mask']
retrieved_ids = batch['retrieved_ids']
retrieved_attn_mask = batch['retrieved_emb_mask']
labels = batch['labels']
if self.cfg.get('add_position_embedding', False):
input_position_ids = build_position_ids(input_tokens_id)
else:
input_position_ids = None
loss = self(
input_tokens_id,
input_attn_mask,
retrieved_ids,
retrieved_attn_mask,
labels=labels,
position_ids=input_position_ids,
)
loss_mask = loss_mask.float()
lm_loss = torch.sum(loss.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
reduced_loss = average_losses_across_data_parallel_group([lm_loss])
if prefix == 'val':
self.validation_step_outputs.append(reduced_loss)
else:
            self.test_step_outputs.append(reduced_loss)
return reduced_loss
def on_validation_epoch_end(self):
if len(self.validation_step_outputs) == 0:
return None
averaged_loss = torch.stack(self.validation_step_outputs).mean()
self.log('val_loss', averaged_loss, prog_bar=True, batch_size=1)
# formula to compute the perplexity
# https://towardsdatascience.com/the-relationship-between-perplexity-and-entropy-in-nlp-f81888775ccc
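        # Worked example: an average loss of 2.0 nats corresponds to a perplexity of exp(2.0) ~= 7.39.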
self.log('perplexity', torch.exp(averaged_loss), prog_bar=True, batch_size=1)
self.validation_step_outputs.clear() # free memory
return averaged_loss
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def on_test_epoch_end(self):
averaged_loss = torch.stack(self.test_step_outputs).mean()
self.log('test_loss', averaged_loss, prog_bar=True, batch_size=1)
logging.info(f'test_loss: {averaged_loss} ')
self.log('perplexity', torch.exp(averaged_loss), prog_bar=True, batch_size=1)
self.test_step_outputs.clear() # free memory
return averaged_loss
def build_train_valid_test_datasets(self):
logging.info('Building RETRO datasets.')
global_batch_size = self.trainer.world_size * self.cfg.micro_batch_size // self.cfg.tensor_model_parallel_size
        # Compute training micro-batch steps: total_global_batch_steps x grad_accums_per_global_batch
max_train_steps = self.trainer.max_steps * self.trainer.accumulate_grad_batches
eval_iters = (max_train_steps // self.trainer.val_check_interval + 1) * self.trainer.limit_val_batches
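        # Example (illustrative): max_train_steps=1000, val_check_interval=100 and
        # limit_val_batches=50 give eval_iters = (1000 // 100 + 1) * 50 = 550.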
test_iters = self.trainer.limit_test_batches
train_valid_test_num_samples = [
max_train_steps * global_batch_size,
eval_iters * global_batch_size,
test_iters * global_batch_size,
]
if self.cfg.data.get('mock', False):
self._train_ds, self._validation_ds, self._test_ds = build_mock_train_valid_test_datasets(
cfg=self.cfg,
trainer=self.trainer,
splits_string=self.cfg.data.splits_string,
tokenizer=self.tokenizer,
mock_data_size=self.cfg.data.get('mock_data_size', 10000),
)
else:
self._train_ds, self._validation_ds, self._test_ds = build_train_valid_test_datasets(
cfg=self.cfg,
trainer=self.trainer,
data_prefix=self.cfg.data.data_prefix,
data_impl=self.cfg.data.data_impl,
splits_string=self.cfg.data.splits_string,
train_valid_test_num_samples=train_valid_test_num_samples,
seq_length=self.cfg.data.seq_length,
seed=self.cfg.seed,
skip_warmup=self.cfg.data.get('skip_warmup', True),
tokenizer=self.tokenizer,
retrieval_prefix=self.cfg.data.retrieval_prefix,
knn_map_path=self.cfg.data.knn_index,
)
if self._train_ds is not None:
logging.info(f'Length of train dataset: {len(self._train_ds)}')
if self._validation_ds is not None:
logging.info(f'Length of val dataset: {len(self._validation_ds)}')
if self._test_ds is not None:
logging.info(f'Length of test dataset: {len(self._test_ds)}')
logging.info(f'Finished building RETRO datasets.')
return self._train_ds, self._validation_ds, self._test_ds
def build_pretraining_data_loader(self, dataset, consumed_samples):
"""Buld dataloader given an input dataset."""
if dataset is None:
return None
logging.info(f'Building dataloader with consumed samples: {consumed_samples}')
# Megatron sampler
if hasattr(self.cfg.data, 'dataloader_type') and self.cfg.data.dataloader_type is not None:
if self.cfg.data.dataloader_type == 'single':
batch_sampler = MegatronPretrainingSampler(
total_samples=len(dataset),
consumed_samples=consumed_samples,
micro_batch_size=self.cfg.micro_batch_size,
data_parallel_rank=parallel_state.get_data_parallel_rank(),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
elif self.cfg.data.dataloader_type == 'cyclic':
batch_sampler = MegatronPretrainingRandomSampler(
total_samples=len(dataset),
consumed_samples=consumed_samples,
micro_batch_size=self.cfg.micro_batch_size,
data_parallel_rank=parallel_state.get_data_parallel_rank(),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
else:
raise ValueError('cfg.data.dataloader_type must be "single" or "cyclic"')
else:
raise ValueError('cfg.data.dataloader_type not found. Must be "single" or "cyclic"')
# Torch dataloader.
return torch.utils.data.DataLoader(
dataset, batch_sampler=batch_sampler, num_workers=self.cfg.data.num_workers, pin_memory=True,
)
    def setup(self, stage=None):
        """A PTL method to setup the training, validation and test datasets."""
        resume_checkpoint_path = self.trainer.ckpt_path
        if resume_checkpoint_path:
            init_consumed_samples = self._extract_consumed_samples_from_ckpt(resume_checkpoint_path)
        else:
            init_consumed_samples = 0
        self.init_consumed_samples = init_consumed_samples
if stage == 'predict':
return
if self._train_dl is not None and self._validation_dl is not None:
return
self.build_train_valid_test_datasets()
self.setup_training_data(self._cfg.data)
self.setup_validation_data(self._cfg.data)
self.setup_test_data(self._cfg.data)
def set_inference_config(self, inference_config, retrieval_config):
self._inference_config = inference_config
self.inference_strategy = model_inference_strategy_dispatcher(self, **retrieval_config)
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
inference_config = self._inference_config
if inference_config is None:
return None
else:
            # need to overwrite some configuration, so work on a copy to keep the original untouched
inference_config = inference_config.copy()
compute_logprob = inference_config['compute_logprob']
if compute_logprob:
inference_config['inputs'] = batch
inference_config['tokens_to_generate'] = 1
inference_config['all_probs'] = True
inference_config["add_BOS"] = False
inference_config['greedy'] = True
response = generate(self, **inference_config, strategy=self.inference_strategy)
compute_prob_response = get_computeprob_response(self.tokenizer, response, batch)
return compute_prob_response
else:
inference_config['inputs'] = batch
return generate(self, **inference_config, strategy=self.inference_strategy)
def generate(
self,
inputs: Union[List[str], torch.Tensor, List[dict]],
length_params: LengthParam,
sampling_params: SamplingParam = None,
**args,
) -> OutputType:
# check whether the DDP is initialized
if parallel_state.is_unitialized():
def dummy():
return
if self.trainer.strategy.launcher is not None:
self.trainer.strategy.launcher.launch(dummy, trainer=self.trainer)
self.trainer.strategy.setup_environment()
# set the default sampling params if it is None.
# default do greedy sampling
if sampling_params is None:
sampling_params = get_default_sampling_params()
        # set the default length params if it is None.
if length_params is None:
length_params = get_default_length_params()
return megatron_gpt_generate(self.cuda(), inputs, self.tokenizer, length_params, sampling_params, **args)
def get_forward_output_only_func(self):
"""
Used for generate method only.
"""
def fwd_output_only_func(dataloader_iter, model):
batch = next(dataloader_iter)
extra_arg = {}
(
tokens,
attention_mask,
retrieved,
retrieved_mask,
set_inference_key_value_memory,
inference_max_sequence_len,
neighbors,
position_ids,
) = batch
if len(retrieved.shape) == 1:
retrieved = None
retrieved_mask = None
else:
retrieved = retrieved.cuda()
retrieved_mask = retrieved_mask.cuda()
extra_arg['set_inference_key_value_memory'] = set_inference_key_value_memory[0].item()
extra_arg['inference_max_sequence_len'] = inference_max_sequence_len[0].item()
extra_arg['neighbors'] = neighbors[0].item()
extra_arg['position_ids'] = position_ids
output_tensor = model(tokens, attention_mask, retrieved, retrieved_mask, **extra_arg)
def id_func(output_tensor):
return output_tensor, {'logits': output_tensor}
return output_tensor, id_func
return fwd_output_only_func
def setup_training_data(self, cfg):
if hasattr(self, '_train_ds'):
consumed_samples = self.compute_consumed_samples(0)
self._train_dl = self.build_pretraining_data_loader(self._train_ds, consumed_samples)
def setup_validation_data(self, cfg):
if hasattr(self, '_validation_ds'):
consumed_samples = 0
self._validation_dl = self.build_pretraining_data_loader(self._validation_ds, consumed_samples)
def setup_test_data(self, cfg):
if hasattr(self, '_test_ds'):
consumed_samples = 0
self._test_dl = self.build_pretraining_data_loader(self._test_ds, consumed_samples)
def compute_consumed_samples(self, steps_since_resume=0):
app_state = AppState()
consumed_samples = (
self.init_consumed_samples
+ steps_since_resume
* app_state.data_parallel_size
* self.cfg.micro_batch_size
* self.trainer.accumulate_grad_batches
)
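        # Worked example (illustrative): data_parallel_size=2, micro_batch_size=4,
        # accumulate_grad_batches=8 and steps_since_resume=100 add
        # 100 * 2 * 4 * 8 = 6400 samples on top of init_consumed_samples.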
return int(consumed_samples)
def setup_optimizer_param_groups(self):
"""ModelPT override. Optimizer will get self._optimizer_param_groups"""
self._optimizer_param_groups = get_params_for_weight_decay_optimization([self.model])
def list_available_models(self):
pass
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_retrieval_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from functools import partial
import torch
from omegaconf import DictConfig, ListConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.common.data import ConcatMapDataset
from nemo.collections.common.metrics import MetricStringToTorchMetric
from nemo.collections.common.metrics.classification_accuracy import ExactStringPerCategoryMatchMetric
from nemo.collections.nlp.data.common.sequence_to_sequence_dataset import SequenceToSequenceDataset
from nemo.collections.nlp.data.language_modeling.megatron.base_dataset_utils import (
get_datasets_weights_and_num_samples,
get_train_valid_test_split_,
)
from nemo.collections.nlp.data.language_modeling.megatron.blendable_dataset import BlendableDataset
from nemo.collections.nlp.data.language_modeling.megatron.megatron_batch_samplers import (
MegatronPretrainingBatchSampler,
)
from nemo.collections.nlp.data.language_modeling.megatron.retro_fine_tune_dataset import RetroQAFineTuneDataset
from nemo.collections.nlp.models.language_modeling.megatron_retrieval_model import MegatronRetrievalModel
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model, T5Sentinel
from nemo.collections.nlp.parts.nlp_overrides import GlobalBatchDataFetcher
from nemo.utils import AppState, logging
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
__all__ = ['MegatronRetroFinetuneModel']
def build_all_datasets(
cfg, tokenizer, train_valid_test_num_samples,
):
"""Build train, valid, and test RETRO datasets.
    There is a one-to-one mapping between data_prefix and knn_map_path.
Currently only supports one retrieval dataset.
"""
train_dataset = RetroQAFineTuneDataset(
cfg.train_ds.get('file_name'),
tokenizer,
cfg.train_ds.get('answer_only_loss'),
tokenizer.pad_id,
cfg.train_ds.get('seq_length'),
cfg.train_ds.get('add_bos'),
cfg.train_ds.get('add_eos'),
train_valid_test_num_samples[0],
cfg.train_ds.get('seed'),
cfg.train_ds.get('neighbors'),
)
val_dataset = RetroQAFineTuneDataset(
cfg.val_ds.get('file_name'),
tokenizer,
cfg.val_ds.get('answer_only_loss'),
tokenizer.pad_id,
cfg.val_ds.get('seq_length'),
cfg.val_ds.get('add_bos'),
cfg.val_ds.get('add_eos'),
train_valid_test_num_samples[1],
cfg.val_ds.get('seed'),
cfg.val_ds.get('neighbors'),
)
test_dataset = RetroQAFineTuneDataset(
cfg.test_ds.get('file_name'),
tokenizer,
cfg.test_ds.get('answer_only_loss'),
tokenizer.pad_id,
cfg.test_ds.get('seq_length'),
cfg.test_ds.get('add_bos'),
cfg.test_ds.get('add_eos'),
train_valid_test_num_samples[2],
cfg.test_ds.get('seed'),
cfg.test_ds.get('neighbors'),
)
return train_dataset, val_dataset, test_dataset
class MegatronRetroFinetuneModel(MegatronRetrievalModel):
"""Finetune RETRO Model """
def build_train_valid_test_datasets(self):
logging.info('Building RETRO datasets.')
global_batch_size = self.trainer.world_size * self.cfg.micro_batch_size // self.cfg.tensor_model_parallel_size
        # Compute training micro-batch steps: total_global_batch_steps x grad_accums_per_global_batch
max_train_steps = self.trainer.max_steps * self.trainer.accumulate_grad_batches
eval_iters = (max_train_steps // self.trainer.val_check_interval + 1) * self.trainer.limit_val_batches
test_iters = int(self.trainer.limit_test_batches)
train_valid_test_num_samples = [
max_train_steps * global_batch_size,
eval_iters * global_batch_size,
test_iters * global_batch_size,
]
self._train_ds, self._validation_ds, self._test_ds = build_all_datasets(
cfg=self.cfg.data, tokenizer=self.tokenizer, train_valid_test_num_samples=train_valid_test_num_samples,
)
if self._train_ds is not None:
logging.info(f'Length of train dataset: {len(self._train_ds)}')
if self._validation_ds is not None:
logging.info(f'Length of val dataset: {len(self._validation_ds)}')
if self._test_ds is not None:
logging.info(f'Length of test dataset: {len(self._test_ds)}')
logging.info(f'Finished building RETRO datasets.')
return self._train_ds, self._validation_ds, self._test_ds
def build_pretraining_data_loader(self, dataset, consumed_samples):
if isinstance(dataset, BlendableDataset):
collate_fun = dataset.datasets[0].collate_fn
else:
collate_fun = dataset.collate_fn
collate_fn = partial(collate_fun, tp_workers=0)
global_batch_size = self.trainer.world_size * self.cfg.micro_batch_size // self.cfg.tensor_model_parallel_size
batch_sampler = MegatronPretrainingBatchSampler(
total_samples=len(dataset),
consumed_samples=consumed_samples,
micro_batch_size=self.cfg.micro_batch_size,
global_batch_size=global_batch_size,
data_parallel_rank=parallel_state.get_data_parallel_rank(),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
drop_last=True,
)
return torch.utils.data.DataLoader(
dataset, batch_sampler=batch_sampler, collate_fn=collate_fn, num_workers=0, pin_memory=True,
)
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_retro_fine_tune_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
from typing import Dict, List
import torch
from omegaconf import DictConfig, ListConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.common.data import ConcatMapDataset
from nemo.collections.common.metrics import MetricStringToTorchMetric
from nemo.collections.common.metrics.classification_accuracy import ExactStringPerCategoryMatchMetric
from nemo.collections.nlp.data.common.sequence_to_sequence_dataset import SequenceToSequenceDataset
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model, T5Sentinel
from nemo.collections.nlp.modules.common.megatron.utils import get_iterator_k_split
from nemo.collections.nlp.parts.utils_funcs import get_last_rank
from nemo.utils import AppState, logging
try:
from apex.transformer.pipeline_parallel.utils import (
_reconfigure_microbatch_calculator,
get_current_global_batch_size,
get_micro_batch_size,
get_num_microbatches,
)
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import parallel_state
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
__all__ = ['MegatronT5FinetuneModel']
class MegatronT5FinetuneModel(MegatronT5Model):
"""Finetune Model that Inherits from MegatronT5Model instead."""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer=trainer)
self.val_metric, self.val_metric_name = self.setup_metric(self.cfg.data.validation_ds)
self.val_metric = torch.nn.ModuleList(self.val_metric)
if hasattr(self.cfg.data, "test_ds"):
self.test_metric, self.test_metric_name = self.setup_metric(self.cfg.data.test_ds)
self.test_metric = torch.nn.ModuleList(self.test_metric)
def setup_metric(self, data_cfg):
# XNLI is a special case.
metric_name = "exact_string_match"
if hasattr(self.cfg, "eval_languages"):
metric = [ExactStringPerCategoryMatchMetric(self.cfg.eval_languages)]
else:
if not hasattr(data_cfg, "metric"):
metric = MetricStringToTorchMetric["exact_string_match"]
else:
if not hasattr(data_cfg.metric, "name"):
raise ValueError("Metric name is not provided in the metric config.")
if data_cfg.metric.name not in MetricStringToTorchMetric:
raise KeyError(
f"{data_cfg.metric.name} is not supported. List of supported metrics: {MetricStringToTorchMetric.keys()}"
)
if data_cfg.metric.name in self._metrics_require_string2category_map:
if data_cfg.metric.average is None:
raise ValueError(
f"{data_cfg.metric.name} requires specifying whether you want to compute a micro or macro average. Found None."
)
if (
data_cfg.metric.get('labels_are_strings', False)
and data_cfg.metric.name in self._metrics_require_string2category_map
):
if data_cfg.metric.num_classes is None:
raise ValueError(
"Number of classes is not provided in the metric section within the data config. "
f"Please provide the number of classes in the data config to use the {data_cfg.metric.name} metric."
)
if data_cfg.metric.get('class_labels', None) is None or not isinstance(
data_cfg.metric.get('class_labels', None), ListConfig
):
raise ValueError(
"Class labels are not provided properly in the metric section witnin the data config. "
f"Please provide the class labels as a list of strings in the data config to use the {data_cfg.metric.name} metric."
)
if len(data_cfg.metric.get('class_labels', None)) != data_cfg.metric.num_classes:
raise ValueError(
f"Number of class labels {len(data_cfg.metric.get('class_labels', None))} does not match `num_classes` : {data_cfg.metric.num_classes}"
)
metric_name = data_cfg.metric.name
metric_class = MetricStringToTorchMetric[metric_name]
# GLUE will not have a "src_file_name" attribute and will always have only a single metric.
if hasattr(data_cfg, "src_file_name") or hasattr(data_cfg, "file_names"):
if (
hasattr(data_cfg, "src_file_name")
and isinstance(data_cfg.src_file_name, ListConfig)
and metric_name != 'rouge'
):
metric = [
metric_class(average=data_cfg.metric.average, num_classes=data_cfg.metric.num_classes)
for _ in range(len(data_cfg.src_file_name))
]
elif (
hasattr(data_cfg, "file_names")
and isinstance(data_cfg.file_names, ListConfig)
and metric_name != 'rouge'
):
metric = [
metric_class(average=data_cfg.metric.average, num_classes=data_cfg.metric.num_classes)
for _ in range(len(data_cfg.file_names))
]
elif hasattr(data_cfg, "src_file_name") and isinstance(data_cfg.src_file_name, ListConfig):
metric = [metric_class() for _ in range(len(data_cfg.src_file_name))]
elif hasattr(data_cfg, "file_names") and isinstance(data_cfg.file_names, ListConfig):
metric = [metric_class() for _ in range(len(data_cfg.file_names))]
else:
metric = [metric_class(average=data_cfg.metric.average, num_classes=data_cfg.metric.num_classes)]
else:
                    metric = [metric_class()]  # GLUE does not need to specify average or num_classes.
return metric, metric_name
@property
def _metrics_require_string2category_map(self):
return set(["f1", "accuracy", "average_precision"])
def setup(self, stage=None):
# This is just to keep the parent class happy since we override its setup() method.
self.init_consumed_samples = 0
self.init_global_step = 0
if stage == 'predict':
return
# NOTE: PTL uses the same stage string "test" for both testing and validation.
self.build_train_valid_test_datasets(stage=stage)
if hasattr(self, '_validation_ds'):
self.setup_validation_data()
if hasattr(self, '_test_ds'):
self.setup_test_data()
if hasattr(self, '_train_ds'):
self.setup_training_data()
def on_validation_epoch_start(self):
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=self.cfg.data.validation_ds.global_batch_size,
micro_batch_size=self.cfg.data.validation_ds.micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
return super().on_validation_epoch_start()
def on_test_epoch_start(self):
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=self.cfg.data.test_ds.global_batch_size,
micro_batch_size=self.cfg.data.test_ds.micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
return super().on_test_epoch_start()
def on_train_epoch_start(self) -> None:
        # Same logic as validation epoch end, but this may be needed if there is no validation sanity check to trigger validation_epoch_end()
# Commenting as on_validation_epoch_end was a no-op in PTL 1.9
# self.on_validation_epoch_end()
return super().on_train_epoch_start()
def cast_for_metric(self, pred, label, metric_name, class_labels=None, labels_are_strings=False):
        # NOTE: a membership test is required here; `metric_name == 'exact_string_match' or 'rouge'` is always truthy.
        if metric_name in ('exact_string_match', 'rouge'):
            return pred, label
pred = pred.replace(' ', '')
label = label.replace(' ', '')
# Correlation metrics require casting to float.
if metric_name in ['pearson_corr_coef', 'spearman_corr_coef']:
# Text-to-text model predictions may not always be valid floating point numbers.
try:
pred = float(pred)
except ValueError:
pred = 0.0
try:
label = float(label)
except ValueError:
raise ValueError(f'Could not convert {label} to float.')
pred = torch.FloatTensor([pred]).to(self.device)
label = torch.FloatTensor([label]).to(self.device)
# Other metrics require casting to integers.
elif metric_name in self._metrics_require_string2category_map and not labels_are_strings:
# Text-to-text model predictions may not always be valid integers.
try:
pred = int(pred)
except ValueError:
pred = 0
try:
label = int(label)
except ValueError:
raise ValueError(f'Could not convert {label} to int.')
pred = torch.LongTensor([pred]).to(self.device)
label = torch.LongTensor([label]).to(self.device)
# If labels are strings, we need to convert them to indices for some metrics.
elif metric_name in self._metrics_require_string2category_map and labels_are_strings:
# Cast string labels to integers before computing the metric.
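            # For example (hypothetical labels): with class_labels=['negative', 'positive'], a predicted
            # string 'positive' maps to index 1, an out-of-vocabulary prediction falls back to index 0,
            # and an out-of-vocabulary ground-truth label raises a ValueError below.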
if pred not in class_labels:
pred = 0 # If the prediction is not in the class labels, use the first class label.
else:
pred = class_labels.index(pred)
if label not in class_labels:
raise ValueError(f"Ground truth label {label} is not in the class labels list : {class_labels}")
label = class_labels.index(label)
pred = torch.LongTensor([pred]).to(self.device)
label = torch.LongTensor([label]).to(self.device)
else:
raise ValueError(f'Metric {metric_name} not supported.')
return pred, label
def _reconfigure_and_process_inference_batch(self, batch, ds_config):
global_batch_size_per_gpu = batch['text_enc'].size(0)
# This should happen only on the last batch of the dataset.
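        # For example, if each rank normally receives 16 samples per step but only 8 remain in the final
        # batch, the microbatch calculator is rebuilt below so that no gradient accumulation is assumed
        # for that smaller batch.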
if (
global_batch_size_per_gpu
!= get_current_global_batch_size() // parallel_state.get_data_parallel_world_size()
):
# NOTE: This is reconfiguring to make sure there is no grad-acc for validation batches.
if (
global_batch_size_per_gpu
!= ds_config.global_batch_size // parallel_state.get_data_parallel_world_size()
):
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=global_batch_size_per_gpu * parallel_state.get_data_parallel_world_size(),
micro_batch_size=global_batch_size_per_gpu,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
# NOTE: need to explicitly handle resetting for multi-validation
else:
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=ds_config.global_batch_size,
micro_batch_size=ds_config.micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
def fwd_bwd_step(self, dataloader_iter, batch_idx, forward_only):
"""
Dataloader produces a global batch which is turned into a list of microbatches.
The list of microbatches is then piped through the pipeline using Apex fwd/bwd functions.
"""
batch = next(dataloader_iter)
if isinstance(batch, dict):
# convert to list if not already converted.
batch = self._process_batch(batch)
# Get seq length of batch
encoder_seq_length = batch[0].size(1)
decoder_seq_length = batch[1].size(1)
tensor_shape = [encoder_seq_length, get_micro_batch_size(), self.cfg.encoder.hidden_size]
data_iter = get_iterator_k_split(batch, get_num_microbatches())
return self._execute_fwd_bwd_function(
data_iterator=data_iter,
forward_only=forward_only,
tensor_shape=tensor_shape,
decoder_seq_length=decoder_seq_length,
)
def inference_step(self, dataloader_iter, batch_idx: int, mode: str, dataloader_idx=0):
# Check if iterator is exhausted
dataloader_iter, done = self._val_iterator_done(dataloader_iter)
if done:
return
# Regular finetuning datasets will return a list of dicts for each microbatch.
# But T0 datasets will return a single dict for the global batch.
batch = next(dataloader_iter)
batch_has_lang_information = isinstance(batch, list) and len(batch[0]) == 7
data_cfg = self.cfg.data.validation_ds if mode == 'validation' else self.cfg.data.test_ds
self._reconfigure_and_process_inference_batch(batch, data_cfg)
        # NOTE: There could be extra keys in the batch dictionary, such as "langs" for XNLI;
        # these will be ignored.
loss = self.fwd_bwd_step(itertools.chain([batch]), batch_idx, forward_only=True)
predicted_token_ids, _ = self.decode(
tokens_enc=batch['text_enc'],
enc_mask=batch['enc_mask'],
num_tokens_to_generate=30,
bos_id=self.tokenizer.pad_id if data_cfg.get('replace_bos_with_pad', False) else self.tokenizer.bos_id,
)
# Special ids to text function to handle stripping <eos> and special tokens with sentencepiece tokenizers.
preds_text = MegatronT5FinetuneModel.ids_to_text(predicted_token_ids, self.tokenizer)
labels_text = MegatronT5FinetuneModel.ids_to_text(batch['labels'], self.tokenizer)
input_text = MegatronT5FinetuneModel.ids_to_text(batch['text_enc'], self.tokenizer)
if not batch_has_lang_information:
categories = [None] * len(preds_text)
else:
categories = batch['lang']
metric = self.val_metric[dataloader_idx] if mode == 'validation' else self.test_metric[dataloader_idx]
assert len(categories) == len(preds_text) == len(labels_text)
for _, (pred, label, category) in enumerate(zip(preds_text, labels_text, categories)):
# To compute metrics like pearson or spearman correlation, we need to cast the predicted string and labels to floats.
pred, label = self.cast_for_metric(
pred=pred,
label=label,
metric_name=self.val_metric_name if mode == 'validation' else self.test_metric_name,
class_labels=data_cfg.metric.get('class_labels', None),
labels_are_strings=data_cfg.metric.get('labels_are_strings', False),
)
if batch_has_lang_information:
_ = metric(pred, label, category)
else:
_ = metric(pred, label)
outputs = {
'preds': preds_text,
'labels': labels_text,
'categories': categories,
'inputs': input_text,
}
if isinstance(loss, dict):
outputs.update(loss)
else:
outputs['loss'] = loss
if mode == 'validation':
if type(self.trainer.val_dataloaders) == list and len(self.trainer.val_dataloaders) > 1:
self.validation_step_outputs[dataloader_idx].append(outputs)
else:
self.validation_step_outputs.append(outputs)
else:
if type(self.trainer.test_dataloaders) == list and len(self.trainer.test_dataloaders) > 1:
self.test_step_outputs[dataloader_idx].append(outputs)
else:
self.test_step_outputs.append(outputs)
return outputs
@classmethod
def ids_to_text(cls, batch_ids, tokenizer):
batch_ids = batch_ids.cpu().numpy().tolist()
texts = []
for ids in batch_ids:
if tokenizer.eos_id in ids:
idx = ids.index(tokenizer.eos_id)
ids = ids[:idx]
if (
len(tokenizer.text_to_ids(T5Sentinel.END.value)) == 1
and tokenizer.text_to_ids(T5Sentinel.END.value)[0] in ids
):
idx = ids.index(tokenizer.text_to_ids(T5Sentinel.END.value)[0])
ids = ids[:idx]
# Legacy sentencepiece detokenization still preserves special tokens which messes up exact string match.
if hasattr(tokenizer, 'special_token_to_id'):
ids = [id for id in ids if id not in tokenizer.special_token_to_id.values()]
text = tokenizer.ids_to_text(ids)
texts.append(text)
return texts
def _determine_log_key(self, data_config, dataloader_idx, metric_name, mode):
        # Function that determines the key used for logging, based on the user-provided dataset name or the dataloader index.
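        # For example (hypothetical names), with mode='validation', metric_name='exact_string_match' and
        # data_config.names=['squad', 'xnli'], dataloader_idx=1 yields 'validation_exact_string_match_xnli';
        # without user-provided names it falls back to 'validation_exact_string_match_dataloader1'.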
base_key = f"{mode}_{metric_name}_" if metric_name is not None else f"{mode}_"
# If the user provided names for each validation/test dataset, use those.
if hasattr(data_config, "names") and data_config.names is not None:
# With only a single validation/test dataset, the name is not a list.
if not isinstance(data_config.names, ListConfig):
name = data_config.names
else:
name = data_config.names[dataloader_idx]
return base_key + name
else:
return base_key + f"dataloader{dataloader_idx}"
def inference_epoch_end(self, outputs, mode, data_cfg):
# Parent class will handle logging of the loss.
if isinstance(outputs[0], dict):
outputs = [outputs]
averaged_loss = []
averaged_metric = []
metric_name = self.val_metric_name if mode == 'validation' else self.test_metric_name
# Log metrics for each provided validation/test dataset.
for dataloader_idx, output in enumerate(outputs):
            # Expand on_validation_epoch_end from parent class MegatronLMEncoderDecoderModel as it doesn't take arg outputs
# loss = super().validation_epoch_end([x['loss'] for x in output])
loss_vals = [x['loss'] for x in output]
# NOTE: we need to make sure outputs is not empty (this is a workaround for a bug in pytorch lightning (?))
if len(loss_vals) == 0:
logging.warning("validation_epoch_end: outputs is empty")
return
if parallel_state.is_pipeline_last_stage():
# only the last pipeline parallel stages return loss
loss = torch.stack(loss_vals).mean()
else:
loss = torch.tensor(0.0).cuda()
# we can only log on one rank if it is rank zero so we broadcast from last rank
torch.distributed.broadcast(loss, get_last_rank())
self.log('val_loss', loss, prog_bar=True, rank_zero_only=True, batch_size=1)
self.log('global_step', self.trainer.global_step, prog_bar=True, rank_zero_only=True, batch_size=1)
# Determine the key used to log the loss based on the user provided name of the dataset or the dataloader index.
loss_log_key = self._determine_log_key(data_cfg, dataloader_idx, "loss", mode)
# Determine the key used to log the eval metric based on the user provided name of the dataset or the dataloader index.
metric_log_key = self._determine_log_key(data_cfg, dataloader_idx, metric_name, mode)
self.log(loss_log_key, loss, batch_size=1)
metric_object = (
self.val_metric[dataloader_idx] if mode == 'validation' else self.test_metric[dataloader_idx]
)
metric = metric_object.compute()
if metric_name == 'rouge':
metric = metric['rouge1_fmeasure']
# Handle logging of GLUE/XNLI separately here. XNLI has a separate metric per language.
if isinstance(metric, dict):
# GLUE case:
if len(metric) == 1 and 'acc' in metric:
metric = metric['acc']
self.log(metric_log_key, metric, batch_size=1)
logging.info(f"{mode} {metric_name}: {metric}")
# XNLI case where the metric dictionary contains the language and the computed metric as values.
else:
for k, v in metric.items():
if k != 'acc' and 'total' not in k:
self.log(metric_log_key + f'_{k}', v, batch_size=1)
logging.info(f"{mode} {metric_name} lang {k} : {v}")
if metric_name != 'rouge':
metric = metric['acc']
else:
self.log(metric_log_key, metric, batch_size=1)
logging.info(f"{metric_log_key}: {metric}")
metric_object.reset()
averaged_loss.append(loss)
averaged_metric.append(metric)
# Write predictions, labels, and inputs to a file for each validation/test dataset.
if data_cfg.get("write_predictions_to_file", False):
# Check if the user provided a prefix path to the file(s) they want to write.
if not hasattr(data_cfg, "output_file_path_prefix") or data_cfg.output_file_path_prefix is None:
raise ValueError(
f"Cannot write predictions to file when output_file_path_prefix is not set or present in the yaml config file."
)
# Gather the outputs object from all data parallel ranks since we are using the DistributedSampler which splits data across DDP ranks.
gathered_outputs = [None for _ in range(parallel_state.get_data_parallel_world_size())]
torch.distributed.all_gather_object(
gathered_outputs,
[
{
'preds': x['preds'],
'labels': x['labels'],
'categories': x['categories'],
'inputs': x['inputs'],
}
for x in output
],
group=parallel_state.get_data_parallel_group(),
)
# Figure out what the suffix of the file should be.
filename_log_key = self._determine_log_key(data_cfg, dataloader_idx, None, mode)
# Keep a set of ground truths and inputs to write deduplicated predictions. Distributed Sampler may duplicate examples.
gt_inp_set = set()
deduplicated_outputs = {
'preds': [],
'labels': [],
'categories': [],
'inputs': [],
}
# PTL models have a self.global_rank attribute and we want to write to disk only on global rank 0.
if self.global_rank == 0:
for rank in range(0, parallel_state.get_data_parallel_world_size()):
for batch in gathered_outputs[rank]:
for pred, label, input, category in zip(
batch['preds'], batch['labels'], batch['inputs'], batch['categories']
):
                                if input + label not in gt_inp_set:
                                    gt_inp_set.add(input + label)
                                    deduplicated_outputs['preds'].append(pred)
                                    deduplicated_outputs['labels'].append(label)
                                    deduplicated_outputs['categories'].append(category)
                                    deduplicated_outputs['inputs'].append(input)
self.write_predictions_to_file(
deduplicated_outputs, f"{data_cfg.output_file_path_prefix}_{filename_log_key}"
)
torch.distributed.barrier()
outputs[dataloader_idx].clear() # free memory
# Logging of the averaged metrics:
averaged_loss = sum(averaged_loss) / len(averaged_loss)
averaged_metric = sum(averaged_metric) / len(averaged_metric)
# Handle case where metrics can be nan or inf. This can break checkpoint save/load.
if torch.isinf(averaged_metric) or torch.isnan(averaged_metric):
app_state = AppState()
monitor_mode = app_state.checkpoint_callback_params.mode
assert monitor_mode in ['min', 'max']
averaged_metric = 0.0 if monitor_mode == 'max' else 1e5
if mode == 'validation':
self.log("validation_loss", averaged_loss, batch_size=1)
self.log(f"validation_{self.val_metric_name}", averaged_metric, batch_size=1)
elif mode == 'test':
self.log("test_loss", averaged_loss, batch_size=1)
self.log(f"test_{self.test_metric_name}", averaged_metric, batch_size=1)
app_state = AppState()
if hasattr(self, "_train_ds"):
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=self.cfg.data.train_ds.global_batch_size,
micro_batch_size=self.cfg.data.train_ds.micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
# When running `trainer.validate()`, the training dataset is not available.
else:
logging.warning('No training data found, reconfiguring microbatches based on validation batch sizes.')
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=data_cfg.global_batch_size,
micro_batch_size=data_cfg.micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
return averaged_loss, averaged_metric
def write_predictions_to_file(self, outputs, output_file_path_prefix):
with open(output_file_path_prefix + "_inputs_preds_labels.jsonl", "w") as f_json:
assert len(outputs['inputs']) == len(outputs['preds']) == len(outputs['labels'])
for i, p, l in zip(outputs['inputs'], outputs['preds'], outputs['labels']):
f_json.write(json.dumps({'input': i, 'pred': p, 'label': l}) + '\n')
def validation_step(self, dataloader_iter, batch_idx, dataloader_idx=0):
return self.inference_step(dataloader_iter, batch_idx, 'validation', dataloader_idx)
def on_validation_epoch_end(self):
_ = self.inference_epoch_end(self.validation_step_outputs, 'validation', self.cfg.data.validation_ds)
# Commenting as on_validation_epoch_end was a no-op in PTL 1.9
# return super().on_validation_epoch_end()
def test_step(self, dataloader_iter, batch_idx, dataloader_idx=0):
return self.inference_step(dataloader_iter, batch_idx, 'test', dataloader_idx)
def on_test_epoch_end(self):
_ = self.inference_epoch_end(self.test_step_outputs, 'test', self.cfg.data.test_ds)
# Commenting as on_test_epoch_end was a no-op in PTL 1.9
# return super().on_test_epoch_end()
def build_data_loader(
self, dataset, global_batch_size, shuffle, num_workers, pin_memory, drop_last,
):
"""Buld dataloader given an input dataset."""
if dataset is None:
return None
rank = parallel_state.get_data_parallel_rank()
world_size = parallel_state.get_data_parallel_world_size()
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, num_replicas=world_size, rank=rank, shuffle=shuffle
)
if isinstance(dataset, ConcatMapDataset):
collate_fn = dataset.datasets[0].collate_fn
else:
collate_fn = dataset.collate_fn
# Data loader. Note that batch size is the per GPU batch size.
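        # For example (hypothetical numbers), global_batch_size=32 with a data parallel size of 8 gives a
        # per-rank batch_size of 4; the DistributedSampler above ensures each rank reads its own shard.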
return torch.utils.data.DataLoader(
dataset,
collate_fn=collate_fn,
sampler=sampler,
batch_size=global_batch_size // parallel_state.get_data_parallel_world_size(),
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=drop_last,
)
def setup_training_data(self):
if not self.cfg.data.train_ds.drop_last:
raise AttributeError(
"`drop_last` is required for the training dataset to ensure each batch is the same micro-batch size."
"To set this, set the variable `data.train_ds.drop_last=True` in the config."
)
self._train_dl = self.build_data_loader(
self._train_ds,
global_batch_size=self.cfg.data.train_ds.global_batch_size,
shuffle=self.cfg.data.train_ds.shuffle,
num_workers=self.cfg.data.train_ds.num_workers,
pin_memory=self.cfg.data.train_ds.pin_memory,
drop_last=self.cfg.data.train_ds.drop_last,
)
def setup_eval_data(self, datasets, data_cfg):
dataloaders = []
for dataset in datasets:
eval_dl = self.build_data_loader(
dataset,
global_batch_size=self.cfg.data.train_ds.global_batch_size,
shuffle=data_cfg.shuffle,
num_workers=data_cfg.num_workers,
pin_memory=data_cfg.pin_memory,
drop_last=data_cfg.drop_last,
)
dataloaders.append(eval_dl)
return dataloaders
def setup_validation_data(self):
self._validation_dl = self.setup_eval_data(self._validation_ds, self.cfg.data.validation_ds)
def setup_test_data(self):
self._test_dl = self.setup_eval_data(self._test_ds, self.cfg.data.test_ds)
def _build_train_dataset(self, data_cfg):
"""Build the training dataset."""
if (
data_cfg.drop_last is False
and data_cfg.global_batch_size > data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size()
):
raise ValueError(
f"Cannot use drop_last=False in your training data with gradient accumulation found grad acc of {data_cfg.global_batch_size // (data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size())} with global_batch_size {data_cfg.global_batch_size}, micro_batch_size {data_cfg.micro_batch_size}, data parallel size {parallel_state.get_data_parallel_world_size()}"
)
datasets = []
# Determine if we are using a single dataset or a list of datasets.
is_src_list_config = isinstance(data_cfg.src_file_name, ListConfig)
is_tgt_list_config = isinstance(data_cfg.tgt_file_name, ListConfig)
if (is_src_list_config and not is_tgt_list_config) or (is_tgt_list_config and not is_src_list_config):
raise ValueError("src_list and tgt_list must both be either a ListConfig or a string. ")
if is_src_list_config:
if len(data_cfg.src_file_name) != len(data_cfg.tgt_file_name):
raise ValueError("src_file_name and tgt_file_name must have the same number of elements. ")
else:
data_cfg.src_file_name = [data_cfg.src_file_name]
data_cfg.tgt_file_name = [data_cfg.tgt_file_name]
for src, tgt in zip(data_cfg.src_file_name, data_cfg.tgt_file_name):
dataset = SequenceToSequenceDataset(
src_file_name=src,
tgt_file_name=tgt,
src_tokenizer=self.tokenizer,
tgt_tokenizer=self.tokenizer,
max_src_seq_length=data_cfg.max_src_seq_length,
max_tgt_seq_length=data_cfg.max_tgt_seq_length,
add_bos_to_input=data_cfg.get('add_bos_to_input', True),
add_eos_to_input=data_cfg.get('add_eos_to_input', True),
replace_bos_with_pad=data_cfg.get('replace_bos_with_pad', False),
)
datasets.append(dataset)
if len(datasets) > 1:
dataset = ConcatMapDataset(
datasets=datasets,
sampling_technique=data_cfg.get('concat_sampling_technique', 'temperature'),
sampling_temperature=data_cfg.get('concat_sampling_temperature', 5),
sampling_probabilities=data_cfg.get(
'concat_sampling_probabilities', [1 / len(datasets)] * len(datasets)
),
)
return dataset
else:
return datasets[0]
def _build_eval_dataset(self, data_cfg):
"""Build the evaluation dataset."""
if data_cfg.global_batch_size > data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size():
raise ValueError(
f'You are trying to use "implicit gradient accumulation" of {data_cfg.global_batch_size // (data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size())} in your validation/test datasets. This is not supported. Please set global_batch_size equal to micro_batch_size * data_parallel_world_size.'
)
datasets = []
# Determine if we are using a single dataset or a list of datasets.
is_src_list_config = isinstance(data_cfg.src_file_name, ListConfig)
is_tgt_list_config = isinstance(data_cfg.tgt_file_name, ListConfig)
is_names_list_config = False
if hasattr(data_cfg, "names"):
if isinstance(data_cfg.names, ListConfig):
is_names_list_config = True
if (is_src_list_config and not is_tgt_list_config) or (is_tgt_list_config and not is_src_list_config):
raise ValueError("src_list and tgt_list must both be either a ListConfig or a string. ")
if is_src_list_config:
if len(data_cfg.src_file_name) != len(data_cfg.tgt_file_name):
raise ValueError("src_file_name and tgt_file_name must have the same number of elements. ")
if is_names_list_config and len(data_cfg.names) != len(data_cfg.src_file_name):
raise ValueError(
"If you are providing names for each src/tgt file, they must have the same number of elements."
)
else:
data_cfg.src_file_name = [data_cfg.src_file_name]
data_cfg.tgt_file_name = [data_cfg.tgt_file_name]
for src, tgt in zip(data_cfg.src_file_name, data_cfg.tgt_file_name):
dataset = SequenceToSequenceDataset(
src_file_name=src,
tgt_file_name=tgt,
src_tokenizer=self.tokenizer,
tgt_tokenizer=self.tokenizer,
max_src_seq_length=data_cfg.max_src_seq_length,
max_tgt_seq_length=data_cfg.max_tgt_seq_length,
add_bos_to_input=data_cfg.get('add_bos_to_input', True),
add_eos_to_input=data_cfg.get('add_eos_to_input', True),
replace_bos_with_pad=data_cfg.get('replace_bos_with_pad', False),
)
datasets.append(dataset)
return datasets
def build_train_valid_test_datasets(self, stage):
logging.info('Building datasets ...')
if stage != 'test':
self._validation_ds = self._build_eval_dataset(self.cfg.data.validation_ds)
if stage != 'validate':
if hasattr(self.cfg.data, 'test_ds'):
self._test_ds = self._build_eval_dataset(self.cfg.data.test_ds)
if stage == 'validate' or stage == 'test':
return
self._train_ds = self._build_train_dataset(self.cfg.data.train_ds)
logging.info(f'Finished building datasets ...')
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_finetune_model.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
__all__ = ["MegatronBARTModel"]
class MegatronBARTModel(MegatronT5Model):
"""
Megatron BART pretraining
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer=trainer)
@property
def model_name(self):
"""Allows child classes to implement models with different data regime"""
return "BART"
def _validate_cfg(self):
"""Class-specific cfg validation"""
if self._cfg.data.get('dataset_type', None) != 'bart':
raise ValueError(
f"cfg.data.dataset_type = {self._cfg.data.get('dataset_type', None)} but 'bart' is expected"
)
if self.num_sentinel_tokens != 0:
raise ValueError(
f"cfg.tokenizer.num_sentinel_tokens = {self.num_sentinel_tokens} but 0 is expected for 'bart'"
)
@property
def _build_train_valid_test_datasets_kwargs(self):
"""allows child classes to add kwargs to dataset building"""
return dict(delete_mask_prob=self._cfg.data.get('delete_mask_prob', 0.0),)
def list_available_models(self):
pass
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_bart_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Any, List
import torch
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
from omegaconf.omegaconf import open_dict
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.data.language_modeling.megatron.t5_prompt_learning_dataset import T5PromptLearningDataset
from nemo.collections.nlp.models.language_modeling.megatron_base_prompt_learning_model import (
MegatronBasePromptLearningModel,
)
from nemo.collections.nlp.models.language_modeling.megatron_finetune_model import MegatronT5FinetuneModel
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.modules.common.megatron.utils import (
average_losses_across_data_parallel_group,
get_iterator_k_split,
)
from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
from nemo.collections.nlp.parts.utils_funcs import get_last_rank
from nemo.utils import AppState, logging
try:
from apex.transformer.pipeline_parallel.utils import (
_reconfigure_microbatch_calculator,
get_micro_batch_size,
get_num_microbatches,
)
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import parallel_state
from megatron.core.enums import ModelType
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
__all__ = ['MegatronT5PromptLearningModel']
class MegatronT5PromptLearningModel(MegatronBasePromptLearningModel):
"""
Model class for prompt-tuning or p-tuning a pretrained Megatron T5 model.
    Prompt Tuning initializes virtual prompt embeddings directly from a copy of
    certain token embeddings from the pretrained T5 model's vocabulary
    and directly tunes these embedding weights. The token embeddings used in
    initialization are specified by the user in the config file. The model can
be prompt-tuned for multiple tasks at once. Virtual prompts are stored in a
prompt table and can be added or deleted without disrupting virtual prompts
for other tasks.
P-tuning initializes an LSTM encoder model that generates virtual prompt
embeddings for every task. Each task shares the same encoder. After p-tuning
    is complete, the learned virtual prompts can be saved to the prompt table
using add_ptuned_prompts_to_prompt_table(). Thus, if a user wants to add a
new virtual prompt via p-tuning, they do not need to retrain on all previous
    tasks. This gives p-tuning the same task flexibility as prompt-tuning.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer)
self.model_type = ModelType.encoder_and_decoder
def first_stage_of_pipeline(self):
if self.frozen_model.enc_dec_model.pre_process and parallel_state.get_pipeline_model_parallel_rank() == 0:
return True
return False
def forward(
self, input_ids, dec_input, enc_mask, dec_mask, position_ids, taskname_ids, labels=None, inference=False,
):
"""
Special forward method for p-tuning/prompt-tuning pretrained
T5 style models.
"""
batch_size, seq_length = input_ids.shape
if self.first_stage_of_pipeline():
# Get embeddings for text tokens and insert virtual token embeddings
input_embeds = self.embed_input(input_ids, taskname_ids, inference)
# TODO: This check needs to be revisited with PP support.
if hasattr(self.frozen_model.enc_dec_model.encoder_embedding, 'position_embeddings'):
position_embeddings = self.frozen_model.enc_dec_model.encoder_embedding.position_embeddings(
position_ids
)
encoder_input = input_embeds + position_embeddings
else:
encoder_input = input_embeds
else:
encoder_input = None
# If the decoder input starts with <pad> instead of <bos>, which is the case for huggingface T5 models, we don't want to mask the first token.
# For NeMo-Megatron, the sequence starts with <bos>, which is never masked so we can always set index 0 to be unmasked.
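        # For example, a HuggingFace-style decoder input [<pad>, y1, y2, ...] would otherwise have position 0
        # zeroed out in dec_mask (pad tokens are normally masked); forcing index 0 to 1 keeps the first
        # decoder position visible under both conventions.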
dec_mask[:, 0] = 1
# Call forward on T5 model with preprocessed embeddings
if self.autocast_dtype == torch.float32:
output = self.frozen_model.enc_dec_model(
enc_input_ids=None,
enc_attn_mask=enc_mask,
dec_input_ids=dec_input,
dec_attn_mask=dec_mask,
token_type_ids=None,
labels=labels,
output_enc_hidden_only=False,
enc_input=encoder_input,
)
else:
with torch.autocast(device_type="cuda", dtype=self.autocast_dtype):
output = self.frozen_model.enc_dec_model(
enc_input_ids=None,
enc_attn_mask=enc_mask,
dec_input_ids=dec_input,
dec_attn_mask=dec_mask,
token_type_ids=None,
labels=labels,
output_enc_hidden_only=False,
enc_input=encoder_input,
)
return output, encoder_input
def load_frozen_model(self, cfg, trainer):
self.megatron_amp_o2 = cfg.get('megatron_amp_O2', False)
# TODO: Fix this once apex patches FusedScaledMaskedSoftmax.
# This is a workaround for the fact that `masked_softmax_fusion` has issues with certain input sizes that may be present while finetuning.
t5_cfg = MegatronT5Model.restore_from(cfg.get('language_model_path'), trainer=trainer, return_config=True)
OmegaConf.set_struct(t5_cfg, True)
with open_dict(t5_cfg):
if hasattr(t5_cfg, 'encoder') and hasattr(t5_cfg, 'decoder'):
t5_cfg.encoder.masked_softmax_fusion = False
t5_cfg.decoder.masked_softmax_fusion = False
else:
t5_cfg.masked_softmax_fusion = False
t5_cfg.megatron_amp_O2 = self.megatron_amp_o2
# hack to make the _GLOBAL_NUM_MICROBATCHES_CALCULATOR initialize
t5_cfg.micro_batch_size = cfg.get('micro_batch_size', 4)
t5_cfg.global_batch_size = cfg.get('global_batch_size', 4)
t5_cfg.precision = trainer.precision
self.frozen_model = MegatronT5Model.restore_from(
cfg.get('language_model_path'),
trainer=trainer,
override_config_path=t5_cfg,
save_restore_connector=NLPSaveRestoreConnector(),
)
def fwd_bwd_step(self, dataloader_iter, batch_idx, forward_only):
"""
Dataloader produces a global batch which is turned into a list of microbatches.
The list of microbatches is then piped through the pipeline using megatron-core fwd/bwd functions.
"""
# Get seq length of batch
batch = next(dataloader_iter)
_, seq_length = batch[0].shape
_, dec_seq_length = batch[1].shape
data_iter = get_iterator_k_split(batch, get_num_microbatches())
fwd_bwd_function = get_forward_backward_func()
losses_reduced_per_micro_batch = fwd_bwd_function(
forward_step_func=self.get_forward_output_and_loss_func(),
data_iterator=data_iter,
model=[self],
num_microbatches=get_num_microbatches(),
forward_only=forward_only,
seq_length=seq_length,
micro_batch_size=get_micro_batch_size(),
decoder_seq_length=dec_seq_length,
)
# only the last stages of the pipeline return losses
if losses_reduced_per_micro_batch:
# average loss across micro batches
loss_tensors_list = [loss_reduced['loss'] for loss_reduced in losses_reduced_per_micro_batch]
loss_tensor = torch.concat(loss_tensors_list)
loss_mean = loss_tensor.mean()
else:
# we're not on the last pipeline stage so no losses
loss_mean = torch.tensor(0.0).cuda()
return loss_mean
def get_forward_output_and_loss_func(self):
# FIXME: consolidate this method into MegatronLMEncoderDecoderModel (or have a common base class)
def fwd_output_and_loss_func(dataloader_iter, model):
batch = next(dataloader_iter)
batch = [x.cuda(non_blocking=True) for x in batch]
enc_input, dec_input, labels, loss_mask, enc_mask, dec_mask, position_ids, taskname_ids = batch
output_tensor, encoder_input = model(
enc_input, dec_input, enc_mask, dec_mask, position_ids, taskname_ids, labels, inference=False
)
output_tensor = output_tensor.contiguous()
def loss_func(output_tensor):
loss = self.frozen_model.loss_func(loss_mask, output_tensor)
reduced_loss = average_losses_across_data_parallel_group([loss])
return loss, {'loss': reduced_loss}
return output_tensor, loss_func
return fwd_output_and_loss_func
def backward(self, *args, **kwargs):
""" LightningModule hook to do backward.
We want this to do nothing since we run backward in the fwd/bwd functions from megatron-core.
No need to call it here.
"""
return
def optimizer_zero_grad(self, *args, **kwargs):
""" LightningModule hook to zero grad.
We want this to do nothing as we are zeroing grads during the training_step.
"""
return
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When using pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
self.frozen_model.enc_dec_model.set_input_tensor(input_tensor)
def on_train_epoch_start(self) -> None:
gbs = self.cfg.global_batch_size
mbs = self.cfg.micro_batch_size
self._reconfigure_batch_sizes(gbs, mbs)
return super().on_train_epoch_start()
def on_validation_epoch_start(self) -> None:
gbs = self.cfg.get('validation_global_batch_size', self.cfg.global_batch_size)
mbs = self.cfg.get('validation_micro_batch_size', self.cfg.micro_batch_size)
self._reconfigure_batch_sizes(gbs, mbs)
return super().on_validation_epoch_start()
def training_step(self, dataloader_iter, batch_idx):
self._optimizer.zero_grad()
batch = next(dataloader_iter)
loss_mean = self.fwd_bwd_step(itertools.chain([batch]), batch_idx, forward_only=False)
self.allreduce_gradients()
## logging
# we can only log on one rank if it is rank zero so we broadcast from last rank
# we can avoid this broadcast by updating the PTL log function to accept specific ranks
torch.distributed.broadcast(loss_mean, get_last_rank())
if self.torch_dtype == torch.float16 and hasattr(self.trainer.precision_plugin.scaler, "_scale"):
loss_scale = self.trainer.precision_plugin.scaler._scale
if loss_scale is not None:
self.log('loss_scale', loss_scale, batch_size=1)
self.log('reduced_train_loss', loss_mean, prog_bar=True, rank_zero_only=True, batch_size=1)
lr = self._optimizer.param_groups[0]['lr']
self.log('lr', lr, rank_zero_only=True, batch_size=1)
self.log('global_step', self.trainer.global_step, prog_bar=True, rank_zero_only=True, batch_size=1)
return loss_mean
def get_predictions(self, input_ids, enc_mask, encoder_input, labels):
predicted_token_ids, log_probs = self.frozen_model.decode(
tokens_enc=input_ids,
enc_mask=enc_mask,
num_tokens_to_generate=self.decoder_seq_length,
encoder_input=encoder_input,
bos_id=self.tokenizer.pad_id
if self.cfg.data.get('decoder_starts_with_pad', False)
else self.tokenizer.bos_id,
)
# Special ids to text function to handle stripping <eos> and special tokens with sentencepiece tokenizers.
preds_text = MegatronT5FinetuneModel.ids_to_text(predicted_token_ids, self.tokenizer)
labels_text = MegatronT5FinetuneModel.ids_to_text(labels, self.tokenizer)
input_text = MegatronT5FinetuneModel.ids_to_text(input_ids, self.tokenizer)
return {
'predicted_token_ids': preds_text,
'labels': labels_text,
'enc_inputs': input_text,
}
def validation_step(self, batch, batch_idx, inference=False):
prefix = "test" if self.trainer.testing else "val"
input_ids, dec_input, labels, loss_mask, enc_mask, dec_mask, position_ids, taskname_ids = batch
# does not use dataloader_iter due to device placement issues arising from PTL
mode = self.training
self.eval()
gbs = self.cfg.get('validation_global_batch_size', self.cfg.global_batch_size)
self._reconfigure_and_process_inference_batch(input_ids.size(0), gbs)
loss_mean = self.fwd_bwd_step(itertools.chain([batch]), batch_idx, forward_only=True)
if self.first_stage_of_pipeline():
# Get embeddings for text tokens and insert virtual token embeddings
input_embeds = self.embed_input(input_ids, taskname_ids, False)
if hasattr(self.frozen_model.enc_dec_model.encoder_embedding, 'position_embeddings'):
position_embeddings = self.frozen_model.enc_dec_model.encoder_embedding.position_embeddings(
position_ids
)
encoder_input = input_embeds + position_embeddings
else:
encoder_input = input_embeds
else:
encoder_input = None
if self.cfg.get("report_validation_metric", False):
metrics = self.get_predictions(input_ids, enc_mask, encoder_input, labels)
metrics['loss'] = loss_mean
else:
metrics = {'loss': loss_mean}
self.train(mode=mode)
self.frozen_model.eval()
self.validation_step_outputs.append(metrics) if prefix == 'val' else self.test_step_outputs.append(metrics)
return metrics
def on_validation_epoch_end(self):
prefix = "test" if self.trainer.testing else "val"
outputs = self.validation_step_outputs if prefix == 'val' else self.test_step_outputs
if self.cfg.get('pipeline_model_parallel_size', 1) > 1:
if parallel_state.is_pipeline_last_stage():
# only the last pipeline parallel stages return loss
averaged_loss = torch.stack([i['loss'] for i in outputs]).mean()
else:
averaged_loss = torch.tensor(0.0).cuda()
# we can only log on one rank if it is rank zero so we broadcast from last rank
torch.distributed.broadcast(averaged_loss, get_last_rank())
self.log('val_loss', averaged_loss, prog_bar=True, rank_zero_only=True, batch_size=1)
logging.info(f'Validation loss: {averaged_loss}')
else:
averaged_loss = torch.stack([item['loss'] for item in outputs]).mean()
logging.info(f'Validation loss: {averaged_loss}')
self.log('val_loss', averaged_loss, prog_bar=True, rank_zero_only=True, batch_size=1)
if self.cfg.get("report_validation_metric", False):
gather_results = [None for _ in range(parallel_state.get_data_parallel_world_size())]
all_preds = list(itertools.chain(*[item['predicted_token_ids'] for item in outputs]))
all_labels = list(itertools.chain(*[item['labels'] for item in outputs]))
all_inputs = list(itertools.chain(*[item['enc_inputs'] for item in outputs]))
assert len(all_preds) == len(all_labels)
assert len(all_preds) == len(all_inputs)
# Gather inputs, preds, labels from all workers
torch.distributed.all_gather_object(
gather_results,
[(input, pred, label) for (input, pred, label) in zip(all_inputs, all_preds, all_labels)],
group=parallel_state.get_data_parallel_group(),
)
# Deduplicate sentences that may have been distributed across multiple data parallel ranks.
if parallel_state.get_data_parallel_rank() == 0:
gather_results_dedup = list(set(itertools.chain(*gather_results)))
val_metric_dict = self.validation_metric.get_score(
[i[2] for i in gather_results_dedup], [i[1] for i in gather_results_dedup],
)
for metric, val in val_metric_dict.items():
logging.info(f'Validation {metric}: {val}')
val_metric = list(val_metric_dict.items())[0][1]
metric_name = list(val_metric_dict.items())[0][0]
else:
val_metric = torch.tensor(0.0).cuda()
metric_name = ''
self.log(f'val_{metric_name}', val_metric, prog_bar=True, rank_zero_only=True, batch_size=1)
gbs = self.cfg.global_batch_size
mbs = self.cfg.micro_batch_size
self._reconfigure_batch_sizes(gbs, mbs)
self.validation_step_outputs.clear() if prefix == 'val' else self.test_step_outputs.clear() # free memory
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def on_test_epoch_end(self):
self.on_validation_epoch_end()
def build_virtual_prompt_dataset(
self, dataset_paths, batch_size, for_train, drop_last, shuffle, num_workers, pin_memory
):
dataset = T5PromptLearningDataset(
datasets=dataset_paths,
tokenizer=self.tokenizer,
virtual_prompt_source=self.virtual_prompt_source,
task_templates=self.task_templates,
pseudo_tokens=self.pseudo_tokens,
pad_token_id=self.pad_token_id,
max_seq_length=self.cfg.data.get('max_seq_length', self.frozen_model.cfg.max_position_embeddings),
min_seq_length=self.cfg.data.get('min_seq_length', 1),
add_bos=self.cfg.data.get('add_bos', False),
add_eos=self.cfg.data.get('add_eos', True),
decoder_starts_with_pad=self.cfg.data.get('decoder_starts_with_pad', False),
add_eos_to_decoder_output=self.cfg.data.get('add_eos_to_decoder_output', True),
add_sentinel_to_input=self.cfg.data.get('add_sentinel_to_input', True),
ul2_prompt_token=self.cfg.data.get('ul2_prompt_token', None),
for_train=for_train,
)
rank = parallel_state.get_data_parallel_rank()
world_size = parallel_state.get_data_parallel_world_size()
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, num_replicas=world_size, rank=rank, shuffle=shuffle, seed=self.cfg.seed
)
dataloader = torch.utils.data.DataLoader(
dataset,
collate_fn=dataset.collate_fn,
sampler=sampler,
batch_size=batch_size // world_size,
drop_last=drop_last,
num_workers=num_workers,
pin_memory=pin_memory,
persistent_workers=True
if num_workers > 0
else False, # (@adithyare and @eharper) We need to set this to True to get around issues with spawn=True
)
        logging.info(f'build success: {len(dataloader)} batches, dataset paths: {dataset_paths}')
return dataset, dataloader
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
input_ids, dec_input, labels, loss_mask, enc_mask, dec_mask, position_ids, taskname_ids = batch
batch_size, seq_length = input_ids.shape
if self.first_stage_of_pipeline():
input_embeds = self.embed_input(input_ids, taskname_ids, use_cached_reps=True)
# TODO: This check needs to be revisited with PP support.
if hasattr(self.frozen_model.enc_dec_model.encoder_embedding, 'position_embeddings'):
position_embeddings = self.frozen_model.enc_dec_model.encoder_embedding.position_embeddings(
position_ids
)
encoder_input = input_embeds + position_embeddings
else:
encoder_input = input_embeds
else:
encoder_input = torch.zeros((batch_size, seq_length, self.hidden_size), dtype=self.autocast_dtype).cuda()
predicted_token_ids, log_probs = self.frozen_model.decode(
tokens_enc=input_ids,
enc_mask=enc_mask,
num_tokens_to_generate=self.decoder_seq_length,
encoder_input=encoder_input,
bos_id=self.tokenizer.pad_id
if self.cfg.data.get('decoder_starts_with_pad', False)
else self.tokenizer.bos_id,
)
# Special ids to text function to handle stripping <eos> and special tokens with sentencepiece tokenizers.
preds_text = MegatronT5FinetuneModel.ids_to_text(predicted_token_ids, self.tokenizer)
input_text = MegatronT5FinetuneModel.ids_to_text(input_ids, self.tokenizer)
if labels is not None:
labels_text = MegatronT5FinetuneModel.ids_to_text(labels, self.tokenizer)
else:
labels_text = [None] * len(preds_text)
return {
'input_text': input_text,
'preds_text': preds_text,
'labels_text': labels_text,
}
def on_predict_epoch_end(self) -> None:
        # PTL 2.0 removes the outputs arg from on_predict_epoch_end; predictions are retrieved via self.trainer.predict_loop.predictions
outputs = self.trainer.predict_loop.predictions
gather_results = [None for _ in range(parallel_state.get_data_parallel_world_size())]
        # With a single dataloader, self.trainer.predict_loop.predictions has the format [{dict1}, {dict2}],
        # vs. [[{dict1}, {dict2}]] for `outputs` in PTL 1.9.
        # The [0] index is therefore removed to avoid a TypeError (https://github.com/Lightning-AI/lightning/pull/16655/files)
all_preds = list(itertools.chain(*[item['preds_text'] for item in outputs]))
all_labels = list(itertools.chain(*[item['labels_text'] for item in outputs]))
all_inputs = list(itertools.chain(*[item['input_text'] for item in outputs]))
assert len(all_preds) == len(all_labels)
assert len(all_preds) == len(all_inputs)
# Gather inputs, predictions, and ground truths from all workers
torch.distributed.all_gather_object(
gather_results,
[(input, pred, label) for (input, pred, label) in zip(all_inputs, all_preds, all_labels)],
group=parallel_state.get_data_parallel_group(),
)
# Deduplicate sentences that may have been distributed across multiple data parallel ranks.
if parallel_state.get_data_parallel_rank() == 0:
gather_results_dedup = list(set(itertools.chain(*gather_results)))
input_prediction_pair = []
correct = 0
for (input, pred, label) in gather_results_dedup:
input_prediction_pair.append((input, pred))
if label:
if pred == label:
correct += 1
acc = correct / len(gather_results_dedup) if all_labels[0] else None
            logging.info(f'Prediction accuracy: {acc}')
            logging.info('Test finished.')
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_t5_prompt_learning_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.language_modeling.bert_lm_model import BERTLMModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import (
MegatronGPTPromptLearningModel,
)
from nemo.collections.nlp.models.language_modeling.megatron_retrieval_model import MegatronRetrievalModel
from nemo.collections.nlp.models.language_modeling.transformer_lm_model import TransformerLMModel
| NeMo-main | nemo/collections/nlp/models/language_modeling/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import queue
import warnings
from dataclasses import fields
from functools import partial
from typing import Any, Dict, Iterator, List, Optional, Union
import torch
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.accelerators import CPUAccelerator
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.data.language_modeling.megatron.data_samplers import (
MegatronPretrainingRandomSampler,
MegatronPretrainingSampler,
)
from nemo.collections.nlp.data.language_modeling.megatron.gpt_dataset import build_train_valid_test_datasets
from nemo.collections.nlp.models.language_modeling.megatron.gpt_model import GPTModel
from nemo.collections.nlp.models.language_modeling.megatron_base_model import MegatronBaseModel
from nemo.collections.nlp.modules.common.megatron.build_model import build_model
from nemo.collections.nlp.modules.common.megatron.module import Float16Module
from nemo.collections.nlp.modules.common.megatron.utils import (
ApexGuardDefaults,
average_losses_across_data_parallel_group,
get_all_params_for_weight_decay_optimization,
get_ltor_masks_and_position_ids,
get_params_for_weight_decay_optimization,
)
from nemo.collections.nlp.modules.common.text_generation_utils import (
generate,
get_computeprob_response,
get_default_length_params,
get_default_sampling_params,
megatron_gpt_generate,
)
from nemo.collections.nlp.modules.common.transformer.text_generation import (
LengthParam,
OutputType,
SamplingParam,
TextGeneration,
)
from nemo.collections.nlp.parts import utils_funcs
from nemo.collections.nlp.parts.utils_funcs import activation_to_func, get_last_rank
from nemo.core.classes import Exportable
from nemo.core.classes.common import PretrainedModelInfo
from nemo.core.neural_types import ChannelType, NeuralType
from nemo.utils import logging
try:
import apex.transformer.pipeline_parallel.utils
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import InferenceParams, parallel_state
from megatron.core.models.gpt import GPTModel as MCoreGPTModel
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
from megatron.core.transformer.module import Float16Module as MCoreFloat16Module
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.utils import init_method_normal, scaled_init_method_normal
# TODO @tmoon: Use once available in Megatron-LM
# from megatron.core.pipeline_parallel.schedules import DataIteratorList
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
TransformerConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
try:
import transformer_engine
from transformer_engine.pytorch import module as te_module
HAVE_TE = True
except (ImportError, ModuleNotFoundError):
HAVE_TE = False
class MegatronGPTExportableModel(torch.nn.Module, Exportable):
"""
Megatron GPT Wrapper for ONNX export
"""
def __init__(self, model):
super().__init__()
self.model = model
self.fp8_enabled = model.cfg.get('fp8', False)
self.fp8_recipe = None
if self.fp8_enabled and HAVE_TE:
self.fp8_recipe = transformer_engine.common.recipe.DelayedScaling(
margin=0, interval=1, fp8_format=transformer_engine.common.recipe.Format.E4M3
)
self.dtype = utils_funcs.torch_dtype_from_precision(model.cfg.precision)
def forward(self, tokens, position_ids, attention_mask):
if self.fp8_enabled and HAVE_TE:
with transformer_engine.pytorch.onnx_export(self.fp8_enabled), transformer_engine.pytorch.fp8_autocast(
enabled=self.fp8_enabled, fp8_recipe=self.fp8_recipe
), torch.no_grad(), torch.inference_mode(), torch.autocast(
'cuda', dtype=self.dtype
), warnings.catch_warnings():
warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning, module=r'.*')
assert tokens.shape == position_ids.shape
assert attention_mask.shape[2] == attention_mask.shape[3] == tokens.shape[1] == position_ids.shape[1]
output_tensor = self.model.forward(
tokens=tokens.cuda(),
text_position_ids=position_ids.cuda(),
attention_mask=attention_mask.cuda(),
labels=None,
)
else:
with torch.no_grad(), torch.inference_mode(), torch.autocast(
'cuda', dtype=self.dtype
), warnings.catch_warnings():
warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning, module=r'.*')
assert tokens.shape == position_ids.shape
assert attention_mask.shape[2] == attention_mask.shape[3] == tokens.shape[1] == position_ids.shape[1]
output_tensor = self.model.forward(
tokens=tokens.cuda(),
text_position_ids=position_ids.cuda(),
attention_mask=attention_mask.cuda(),
labels=None,
)
return output_tensor
def freeze(self):
for param in self.parameters():
param.requires_grad = False
def input_example(self, max_batch=1, max_dim=768, seq_len=6):
ids = [self.model.tokenizer.text_to_ids(text) for text in ["how is the weather on Sunday"]]
id_tensors = [torch.unsqueeze(torch.LongTensor(id_list), dim=0) for id_list in ids]
masks_and_position_ids = [
get_ltor_masks_and_position_ids(id_tensor, self.model.tokenizer.eos_id, False, False, False)
for id_tensor in id_tensors
]
for tokens, attn_mask_and_pos_ids in zip(id_tensors, masks_and_position_ids):
attn_mask, _, pos_ids = attn_mask_and_pos_ids
return tokens, pos_ids, attn_mask
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"position_ids": NeuralType(('B', 'T'), ChannelType()),
"attention_mask": NeuralType(('D', 'D', 'T', 'T'), ChannelType()),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"logits": NeuralType(('B', 'T', 'D'), ChannelType())}
@property
def input_names(self) -> List[str]:
return ['input_ids', 'position_ids', 'attention_mask']
@property
def output_names(self) -> List[str]:
return ['logits']
class MegatronGPTModel(MegatronBaseModel, TextGeneration):
"""
Megatron GPT pretraining
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
if not HAVE_APEX:
raise ImportError(
"Apex was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
if not HAVE_MEGATRON_CORE:
raise ImportError(
"megatron-core was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
# this prevents base constructor from initializing tokenizer
self.tokenizer = None
super().__init__(cfg, trainer=trainer, no_lm_init=True)
self._validate_trainer()
# build the transformer config
# TODO: add type hint once pip package is out
self.transformer_config = self.build_transformer_config()
self.megatron_amp_o2 = cfg.get('megatron_amp_O2', False)
self.mcore_gpt = cfg.get('mcore_gpt', False)
self.rampup_batch_size = self.cfg.get('rampup_batch_size', None)
if self.rampup_batch_size:
self.prev_consumed_samples = 0
self.if_first_step = 0
self.prev_global_batch_size = None
if not self.megatron_amp_o2 and self.cfg.get('virtual_pipeline_model_parallel_size', None):
raise ValueError('Virtual pipeline model parallel is only supported when using megatron_amp_O2')
# build_model returns a list of modules which are used for interleaved pipeline parallelism
if isinstance(self.trainer.accelerator, CPUAccelerator):
self.model = build_model(
model_provider_func=self.model_provider_func,
wrap_with_ddp=False,
on_cpu=True,
virtual_pipeline_model_parallel_size=self.cfg.get('virtual_pipeline_model_parallel_size', None),
)
else:
self.model = build_model(
model_provider_func=self.model_provider_func,
wrap_with_ddp=False,
virtual_pipeline_model_parallel_size=self.cfg.get('virtual_pipeline_model_parallel_size', None),
)
# if we're not using interleaved, then self.model is a module.
if self.cfg.get('virtual_pipeline_model_parallel_size', None) is None:
self.model = self.model[0]
if self.megatron_amp_o2:
if not self.with_distributed_adam:
# Pre-allocate the model on GPU to have master parameters allocated on the same device with matching data type
if isinstance(self.model, list):
for module in self.model:
module.cuda(torch.cuda.current_device())
else:
self.model.cuda(torch.cuda.current_device())
self._wrap_model_for_O2()
self.enable_autocast = (
True if (not self.megatron_amp_o2) and (self.autocast_dtype in [torch.float16, torch.bfloat16]) else False
)
self.transformer_engine = cfg.get('transformer_engine', False)
# configuration used for inference
self._inference_config = None
# Convert the global-batch-based profile index to micro-batch index
if hasattr(self, '_nsys_profile_enabled'):
mp_size = cfg.get('tensor_model_parallel_size', 1) * cfg.get('pipeline_model_parallel_size', 1)
data_parallel_world_size = trainer.world_size // mp_size
grad_accum_steps = cfg.get('global_batch_size') // (cfg.get('micro_batch_size') * data_parallel_world_size)
self._nsys_profile_start_step *= grad_accum_steps
self._nsys_profile_end_step *= grad_accum_steps
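            # For example (hypothetical numbers), with global_batch_size=256, micro_batch_size=4 and a data
            # parallel world size of 16, grad_accum_steps = 256 // (4 * 16) = 4, so a profiling window given
            # in global steps is stretched 4x to cover the corresponding micro-batch steps.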
self.get_attention_mask_from_fusion = self.cfg.get('get_attention_mask_from_fusion', True)
self.initialize_ub = self.cfg.get('ub_tp_comm_overlap', False)
self.inference_params = None
# default to false since this doesn't work with sequence parallelism currently
self.use_loss_mask = self.cfg.get('use_loss_mask', False)
if self.use_loss_mask and self.transformer_config.sequence_parallel:
raise ValueError('Loss mask is not supported with sequence parallelism.')
def get_gpt_module_list(self):
if isinstance(self.model, list):
return [
model.module if isinstance(model, (Float16Module, MCoreFloat16Module)) else model
for model in self.model
]
elif isinstance(self.model, (Float16Module, MCoreFloat16Module)):
return [self.model.module]
else:
return [self.model]
def set_inference_config(self, inference_config):
self._inference_config = inference_config
def get_inference_config(self):
return self._inference_config
def model_provider_func(self, pre_process, post_process):
"""Model depends on pipeline paralellism."""
if self.mcore_gpt:
model = MCoreGPTModel(
config=self.transformer_config,
vocab_size=self.cfg.get('override_vocab_size', self.padded_vocab_size),
max_sequence_length=self.cfg.get('encoder_seq_length', 512),
pre_process=pre_process,
post_process=post_process,
parallel_output=True,
share_embeddings_and_output_weights=self.cfg.get('share_embeddings_and_output_weights', True),
position_embedding_type=self.cfg.get('position_embedding_type', 'learned_absolute'),
rotary_percent=self.cfg.get('rotary_percentage', 1.0),
seq_len_interpolation_factor=self.cfg.get('seq_len_interpolation_factor', None),
)
else:
assert self.cfg.get('num_query_groups', None) is None or self.cfg.get(
'num_query_groups', None
) == self.cfg.get(
'num_attention_heads', None
), "Group Query Attention is only supported in Megatron Core. Set 'mcore_gpt' to use GQA."
model = GPTModel(
config=self.model_parallel_config,
vocab_size=self.cfg.get('override_vocab_size', self.padded_vocab_size),
hidden_size=self.cfg.hidden_size,
max_position_embeddings=self.cfg.max_position_embeddings,
num_layers=self.cfg.num_layers,
num_attention_heads=self.cfg.num_attention_heads,
apply_query_key_layer_scaling=self.cfg.get('apply_query_key_layer_scaling', True),
kv_channels=self.cfg.get('kv_channels', None),
ffn_hidden_size=self.cfg.ffn_hidden_size,
num_tokentypes=0,
parallel_output=True,
pre_process=pre_process,
post_process=post_process,
init_method_std=self.cfg.get('init_method_std', 0.02),
use_scaled_init_method=self.cfg.get('use_scaled_init_method', True),
fp16_lm_cross_entropy=self.cfg.get('fp16_lm_cross_entropy', False),
megatron_amp_O2=self.cfg.get('megatron_amp_O2', False),
hidden_dropout=self.cfg.get('hidden_dropout', 0.1),
attention_dropout=self.cfg.get('attention_dropout', 0.1),
ffn_dropout=self.cfg.get('ffn_dropout', 0.0),
precision=self.cfg.get('precision', 16),
fp32_residual_connection=self.cfg.get('fp32_residual_connection', False),
activations_checkpoint_granularity=self.cfg.get('activations_checkpoint_granularity', None),
activations_checkpoint_method=self.cfg.get('activations_checkpoint_method', None),
activations_checkpoint_num_layers=self.cfg.get('activations_checkpoint_num_layers', 1),
activations_checkpoint_layers_per_pipeline=self.cfg.get(
'activations_checkpoint_layers_per_pipeline', None
),
normalization=self.cfg.get('normalization', 'layernorm'),
layernorm_epsilon=self.cfg.get('layernorm_epsilon', 1e-5),
onnx_safe=self.cfg.get('onnx_safe', False),
bias=self.cfg.get('bias', True),
bias_activation_fusion=self.cfg.get('bias_activation_fusion', True),
bias_dropout_add_fusion=self.cfg.get('bias_dropout_add_fusion', True),
activation=self.cfg.get('activation', 'gelu'),
headscale=self.cfg.get('headscale', False),
transformer_block_type=self.cfg.get('transformer_block_type', 'pre_ln'),
openai_gelu=self.cfg.get('openai_gelu', False),
normalize_attention_scores=self.cfg.get('normalize_attention_scores', True),
position_embedding_type=self.cfg.get('position_embedding_type', 'learned_absolute'),
rotary_percentage=self.cfg.get('rotary_percentage', 1.0),
share_embeddings_and_output_weights=self.cfg.get('share_embeddings_and_output_weights', True),
attention_type=self.cfg.get('attention_type', 'multihead'),
masked_softmax_fusion=self.cfg.get('masked_softmax_fusion', True),
persist_layer_norm=self.cfg.get('persist_layer_norm', False),
transformer_engine=self.cfg.get('transformer_engine', False),
fp8=self.cfg.get('fp8', False),
fp8_e4m3=self.cfg.get('fp8_e4m3', False),
fp8_hybrid=self.cfg.get('fp8_hybrid', False),
fp8_margin=self.cfg.get('fp8_margin', 0),
fp8_interval=self.cfg.get('fp8_interval', 1),
fp8_amax_history_len=self.cfg.get('fp8_amax_history_len', 1),
fp8_amax_compute_algo=self.cfg.get('fp8_amax_compute_algo', 'most_recent'),
reduce_amax=self.cfg.get('reduce_amax', True),
use_emha=self.cfg.get('use_emha', False),
ub_tp_comm_overlap=self.cfg.get('ub_tp_comm_overlap', False),
use_flash_attention=self.cfg.get('use_flash_attention', False),
megatron_legacy=self.cfg.get('megatron_legacy', False),
seq_len_interpolation_factor=self.cfg.get('seq_len_interpolation_factor', None),
)
return model
def setup_optimizer_param_groups(self):
"""ModelPT override. Optimizer will get self._optimizer_param_groups"""
if self.cfg.get('do_layer_norm_weight_decay', False):
if isinstance(self.model, list):
self._optimizer_param_groups = get_all_params_for_weight_decay_optimization(self.model)
else:
self._optimizer_param_groups = get_all_params_for_weight_decay_optimization([self.model])
else:
self._optimizer_param_groups = get_params_for_weight_decay_optimization(self.model)
def configure_optimizers(self):
if self.with_distributed_adam:
# Disable overlapped grad sync for embedding grad when
# pipeline parallelism is enabled
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
modules = self.get_gpt_module_list()
if parallel_state.is_pipeline_first_stage(ignore_virtual=True):
if len(modules) > 1:
module = modules[0] # only the first virtual rank has the embeddings
else:
module = modules[0]
if self.cfg.get('share_embeddings_and_output_weights', True):
param = (
module.shared_embedding_or_output_weight()
if self.mcore_gpt
else module.word_embeddings_weight()
)
param._disable_greedy_grad_copy = not self.megatron_amp_o2
param._disable_overlap_grad_sync = True
if parallel_state.is_pipeline_last_stage(ignore_virtual=True):
if len(modules) > 1:
module = modules[-1] # only the last virtual rank has the embeddings
else:
module = modules[0]
if self.cfg.get('share_embeddings_and_output_weights', True):
param = (
module.shared_embedding_or_output_weight()
if self.mcore_gpt
else module.word_embeddings_weight()
)
param._disable_greedy_grad_copy = not self.megatron_amp_o2
param._disable_overlap_grad_sync = True
# Disable overlapped grad sync for layer norm grads when
# sequence parallelism is enabled
for param in self.parameters():
if getattr(param, 'sequence_parallel', False):
param._disable_greedy_grad_copy = not self.megatron_amp_o2
param._disable_overlap_grad_sync = True
# Initialize parameter buckets for overlapped grad and param syncs
# Note: Params with disabled overlapping are put in the
# last param bucket
buckets = []
if self.cfg.get('virtual_pipeline_model_parallel_size', None) is not None:
# Initialize a bucket for each virtual pipeline stage
for module in self.model:
if isinstance(module, (Float16Module, MCoreFloat16Module)):
module = module.module
stage_bucket = []
layers = module.decoder.layers if self.mcore_gpt else module.language_model.encoder.layers
for layer in layers:
stage_bucket.extend(
p for p in layer.parameters() if not getattr(p, '_disable_overlap_grad_sync', False)
)
buckets.append(stage_bucket)
else:
# Initialize a bucket for each Transformer layer
modules = self.model if isinstance(self.model, list) else [self.model]
for module in modules:
if isinstance(module, (Float16Module, MCoreFloat16Module)):
module = module.module
layers = module.decoder.layers if self.mcore_gpt else module.language_model.encoder.layers
for layer in layers:
buckets.append(
[p for p in layer.parameters() if not getattr(p, '_disable_overlap_grad_sync', False)]
)
buckets.reverse()
used_params = set()
for bucket in buckets:
used_params.update(bucket)
remaining_params = [p for p in self.parameters() if p not in used_params]
if remaining_params:
buckets.append(remaining_params)
self.distributed_adam_buckets = buckets
return super().configure_optimizers()
def forward(self, tokens, text_position_ids, attention_mask, labels):
output_tensor = self.model(tokens, text_position_ids, attention_mask, labels=labels)
return output_tensor
def fwd_bwd_step(self, dataloader_iter, batch_idx, forward_only):
# handle asynchronous grad reduction
no_sync_func = None
grad_sync_func = None
param_sync_func = None
if not forward_only and self.with_distributed_adam:
no_sync_func = partial(self._optimizer.no_sync, greedy_grad_copy=self.megatron_amp_o2,)
grad_sync_func = self.reduce_overlap_gradients
param_sync_func = self.sync_overlap_parameters
# pipeline schedules will get these from self.model.config
for module in self.get_gpt_module_list():
module.config.no_sync_func = no_sync_func
module.config.grad_sync_func = grad_sync_func
module.config.param_sync_func = param_sync_func
# run forward and backwards passes for an entire global batch
# we do this inside training_step to support pipeline parallelism
fwd_bwd_function = get_forward_backward_func()
# TODO @akhattar: add num_micro_batches_with_partial_activation_checkpoints when ready
losses_reduced_per_micro_batch = fwd_bwd_function(
forward_step_func=self.get_forward_output_and_loss_func(forward_only),
data_iterator=self._make_data_iterator_list(dataloader_iter),
model=self.model,
num_microbatches=get_num_microbatches(),
forward_only=forward_only,
seq_length=self.cfg.encoder_seq_length,
micro_batch_size=self.cfg.micro_batch_size,
)
# only the last stages of the pipeline return losses
if losses_reduced_per_micro_batch:
if (not forward_only) or self.cfg.data.get('validation_drop_last', True):
# average loss across micro batches
loss_tensors_list = [loss_reduced['avg'] for loss_reduced in losses_reduced_per_micro_batch]
loss_tensor = torch.concat(loss_tensors_list)
loss_mean = loss_tensor.mean()
else:
                # Get the total loss since micro batch sizes are not uniform
loss_sum_tensors_list = [
loss_sum['loss_sum_and_ub_size']
for loss_sum in losses_reduced_per_micro_batch
if loss_sum['loss_sum_and_ub_size'][1] > 0
]
loss_sum = (
torch.vstack(loss_sum_tensors_list).sum(axis=0)
if len(loss_sum_tensors_list) > 0
else torch.tensor([0.0, 0.0]).cuda()
)
return loss_sum
else:
# we're not on the last pipeline stage so no losses
if forward_only:
loss_mean = []
else:
loss_mean = torch.tensor(0.0).cuda()
return loss_mean
def initialize_ub_func(self):
ub_cfgs = self.cfg.get('ub_tp_comm_overlap_cfg', None)
if ub_cfgs is None:
warnings.warn(
"Couldn't find TP config. Please check the path correctness. Initializing TP comm overlap with the default config."
)
input_shape = [
self.cfg.get('encoder_seq_length') * self.cfg.get('micro_batch_size'),
self.cfg.get('hidden_size'),
]
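        # Illustrative (assumed values): with encoder_seq_length=2048, micro_batch_size=1
        # and hidden_size=5120, input_shape = [2048 * 1, 5120] = [2048, 5120].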
te_module.base.initialize_ub(
shape=input_shape,
tp_size=self.cfg.get('tensor_model_parallel_size'),
use_fp8=self.cfg.get('fp8'),
ub_cfgs=ub_cfgs,
)
self.initialize_ub = False
def training_step(self, dataloader_iter, batch_idx):
"""
We pass the dataloader iterator function to the micro-batch scheduler.
            The batch for each micro-batch is fetched inside the micro-batch forward
            function using this iterator.
"""
# Initialize userbuffer communicators.
if self.initialize_ub:
self.initialize_ub_func()
if self.rampup_batch_size:
num_microbatch_calculator = apex.transformer.pipeline_parallel.utils._GLOBAL_NUM_MICROBATCHES_CALCULATOR
current_global_batch_size = num_microbatch_calculator.current_global_batch_size
# do validation and save the checkpoint when gbs is changed
if self.prev_global_batch_size != current_global_batch_size and self.prev_global_batch_size:
self.trainer.should_stop = True
# we zero grads here because we also call backward in the megatron-core fwd/bwd functions
self._optimizer.zero_grad()
if self.with_distributed_adam:
# hack to enable overlapping param sync and forward compute
# note: the distributed optimizer monkey-patches each
# parameter's __getattribute__ function so that it can
# launch parameter all-gathers the first time the
# parameter is accessed after the optimizer step. However,
            # PyTorch directly passes embedding parameters into a C++ function,
# bypassing this process. A quick-and-dirty hack is to
# manually interact with the parameter.
modules = self.model if isinstance(self.model, list) else [self.model]
for module in modules:
if isinstance(module, (Float16Module, MCoreFloat16Module)):
module = module.module
if not self.mcore_gpt:
module = module.language_model
if hasattr(module, 'embedding'):
for param in module.embedding.parameters():
param.data_ptr()
loss_mean = self.fwd_bwd_step(dataloader_iter, batch_idx, False)
# when using sequence parallelism, the sequence parallel layernorm grads must be all-reduced
if self.cfg.get('tensor_model_parallel_size', 1) > 1 and self.cfg.get('sequence_parallel', False):
self.allreduce_sequence_parallel_gradients()
if self.with_distributed_adam:
# synchronize asynchronous grad reductions
# note: not necessary, but reduces performance degradation
# from multiple simultaneous NCCL calls
self._optimizer._finish_bucket_grad_sync()
elif self.megatron_amp_o2:
# when using pipeline parallelism grads must be all-reduced after the pipeline (not asynchronously)
if self.cfg.get('pipeline_model_parallel_size', 1) > 1 or self.cfg.get('sequence_parallel', False):
# main grads are stored in the MainParamsOptimizer wrapper
self._optimizer.allreduce_main_grads()
else:
# async grad allreduce is not currently implemented for O1/autocasting mixed precision training
# so we all-reduce gradients after the pipeline
self.allreduce_gradients() # @sangkug we think this is causing memory to blow up (hurts perf)
if self.cfg.get('pipeline_model_parallel_size', 1) > 1 and self.cfg.get(
'share_embeddings_and_output_weights', True
):
# when using pipeline parallelism the first and last stage must keep embeddings in sync
self.allreduce_first_last_embeddings()
## logging
        # we can only log on rank zero, so we broadcast the loss from the last pipeline rank
# we can avoid this broadcast by updating the PTL log function to accept specific ranks
torch.distributed.broadcast(loss_mean, get_last_rank())
# (@adithyare) we need to check for the _scaler attribute to enable pp>1 for adapter training
if self.torch_dtype == torch.float16 and hasattr(self.trainer.precision_plugin.scaler, "_scale"):
loss_scale = self.trainer.precision_plugin.scaler._scale
if loss_scale is not None:
self.log('loss_scale', loss_scale, batch_size=1)
self.log('reduced_train_loss', loss_mean, prog_bar=True, rank_zero_only=True, batch_size=1)
lr = self._optimizer.param_groups[0]['lr']
self.log('lr', lr, rank_zero_only=True, batch_size=1)
self.log(
'global_step', self.trainer.global_step, prog_bar=True, rank_zero_only=True, batch_size=1,
)
consumed_samples = self._compute_consumed_samples_after_training_step()
# TODO: make sure compute_consumed_samples works for pipeline parallelism
self.log(
'consumed_samples', consumed_samples, prog_bar=True, rank_zero_only=True, batch_size=1,
)
if self.rampup_batch_size:
self.prev_global_batch_size = current_global_batch_size
self.prev_consumed_samples = consumed_samples
num_microbatch_calculator.update(
consumed_samples=consumed_samples, consistency_check=False,
)
current_global_batch_size = num_microbatch_calculator.current_global_batch_size
self.log('global_batch_size', current_global_batch_size, prog_bar=True, rank_zero_only=True, batch_size=1)
self.if_first_step = 1
return loss_mean
def backward(self, *args, **kwargs):
""" LightningModule hook to do backward.
We want this to do nothing since we run backward in the fwd/bwd functions from megatron-core.
No need to call it here.
"""
return
def optimizer_zero_grad(self, *args, **kwargs):
""" LightningModule hook to zero grad.
We want this to do nothing as we are zeroing grads during the training_step.
"""
return
def _append_sequence_parallel_module_grads(self, module, grads):
""" Helper method for allreduce_sequence_parallel_gradients"""
for param in module.parameters():
sequence_parallel_param = getattr(param, 'sequence_parallel', False) or getattr(
param, 'sequence_parallel_enabled', False
)
# (@adithyare) adapter training now extends MegatronGPTModel
# so we have to add this check here to ensure we do not
# perform all_reduce when grad is None.
# grad can be None when performing PeFT training.
if sequence_parallel_param and param.requires_grad:
if self.megatron_amp_o2:
grad = param.main_grad
else:
grad = param.grad
grads.append(grad.data)
def allreduce_sequence_parallel_gradients(self):
""" All-reduce layernorm parameters across model parallel nodes when sequence parallelism is used.
Modified from megatron-lm:
https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/blob/3f91f09bb2ab32f9904b47f46f19d2fc3f518ed8/megatron/training.py#L425
"""
grads = []
if isinstance(self.model, list):
for module in self.model:
self._append_sequence_parallel_module_grads(module, grads)
else:
self._append_sequence_parallel_module_grads(self.model, grads)
coalesced = torch._utils._flatten_dense_tensors(grads)
torch.distributed.all_reduce(coalesced, group=parallel_state.get_tensor_model_parallel_group())
for buf, synced in zip(grads, torch._utils._unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
def allreduce_first_last_embeddings(self):
# Modified from megatron-lm: https://github.com/NVIDIA/Megatron-LM/blob/d41696840ed0a7edb7e0499eb82a48ae112d9bb3/megatron/training.py#L407
# All-reduce word_embeddings' grad across first and last stages to ensure
# that word_embeddings parameters stay in sync.
# This should only run for models that support pipelined model parallelism
# (BERT and GPT-2).
if parallel_state.get_pipeline_model_parallel_world_size() > 1 and (
parallel_state.is_pipeline_first_stage(ignore_virtual=True)
or parallel_state.is_pipeline_last_stage(ignore_virtual=True)
):
module_list = self.get_gpt_module_list()
if parallel_state.is_pipeline_first_stage(ignore_virtual=True):
module = module_list[0] # only the first virtual rank has the embeddings
elif parallel_state.is_pipeline_last_stage(ignore_virtual=True):
module = module_list[-1] # only the last virtual rank has the embeddings
share_embeddings = (
module.share_embeddings_and_output_weights if self.mcore_gpt else module.share_token_embeddings
)
if share_embeddings:
word_embeddings_weight = (
module.shared_embedding_or_output_weight() if self.mcore_gpt else module.word_embeddings_weight()
)
# (@adithyare) adapter training now extends MegatronGPTModel so we have to add this check here to ensure we do not perform all_reduce when grad is None.
# grad can be None when performing PeFT training.
if word_embeddings_weight.requires_grad:
if self.megatron_amp_o2:
# O2 recipe stores a "main" copy of weights and grads
grad = word_embeddings_weight.main_grad
else:
grad = word_embeddings_weight.grad
torch.distributed.all_reduce(grad, group=parallel_state.get_embedding_group())
def _make_data_iterator_list(self, data_iterator: Iterator) -> List[Iterator]:
""" Convert data iterator into form expected by Megatron
With interleaved pipeline parallelism, Megatron expects a
list of one data iterator per model chunk. Each model
chunk independently gets data from its data iterator, so
we need to interact with the data iterator multiple times
for each microbatch step. Instead of incorporating this
logic into the data loader, we cache the iterator's output
to the first model chunk and reuse it in the other model
chunks.
"""
if not isinstance(self.model, list) or len(self.model) == 1:
return data_iterator # TODO @tmoon: Remove
# TODO @tmoon: Use once available in Megatron-LM
# return DataIteratorList([data_iterator])
class CachingIterator:
"""Iterator wrapper that caches values"""
class Proxy:
"""Returns values from caching iterator wrapper
Assumed to never advance past the caching iterator.
"""
def __init__(self):
self.cache = queue.Queue()
def __iter__(self):
return self
def __next__(self):
return self.cache.get_nowait()
def __init__(self, iterator: Iterator):
self.iterator = iterator
self.proxies = []
def make_proxy(self):
self.proxies.append(CachingIterator.Proxy())
return self.proxies[-1]
def __iter__(self):
return self
def __next__(self):
val = next(self.iterator)
for proxy in self.proxies:
proxy.cache.put(val)
return val
# Make list of iterator wrappers
iters = [CachingIterator(data_iterator)]
while len(iters) < len(self.model):
iters.append(iters[0].make_proxy())
return iters # TODO @tmoon: Remove
# TODO @tmoon: Use once available in Megatron-LM
# return DataIteratorList(iters)
def get_forward_output_and_loss_func(self, validation_step=False):
def fwd_output_and_loss_func(dataloader_iter, model, checkpoint_activations_all_layers=None):
# Get data batch
batch = next(dataloader_iter)
# Transfer needed data to GPU
required_keys = set()
if parallel_state.get_pipeline_model_parallel_world_size() == 1:
required_keys.update(batch.keys())
else:
required_keys.add('attention_mask')
if parallel_state.is_pipeline_first_stage():
required_keys.update(('tokens', 'position_ids'))
if parallel_state.is_pipeline_last_stage():
required_keys.update(('labels', 'loss_mask'))
if self.get_attention_mask_from_fusion:
required_keys.remove('attention_mask')
batch = {key: val.cuda(non_blocking=True) if key in required_keys else None for key, val in batch.items()}
# Model forward pass
forward_args = {
'input_ids': batch['tokens'],
'position_ids': batch['position_ids'],
'attention_mask': batch['attention_mask'],
'labels': batch['labels'],
'loss_mask': batch['loss_mask'],
}
if not self.mcore_gpt:
forward_args['checkpoint_activations_all_layers'] = checkpoint_activations_all_layers
if not self.use_loss_mask:
forward_args.pop('loss_mask')
else:
# TODO: @eharper can we add this to mcore?
forward_args.pop('loss_mask')
output_tensor = model(**forward_args)
def loss_func(output_tensor):
# Loss for a micro-batch (ub)
loss_for_ub = self.loss_func(batch['loss_mask'], output_tensor)
if validation_step and not self.cfg.data.get('validation_drop_last', True):
num_valid_tokens_in_ub = batch['loss_mask'].sum()
if loss_for_ub.isnan():
assert batch['loss_mask'].count_nonzero() == 0, 'Got NaN loss with non-empty input'
loss_sum_for_ub = torch.zeros_like(num_valid_tokens_in_ub)
else:
loss_sum_for_ub = num_valid_tokens_in_ub * loss_for_ub
loss_sum_and_ub_size_all_gpu = torch.cat(
[
loss_sum_for_ub.clone().detach().view(1),
torch.tensor([num_valid_tokens_in_ub]).cuda().clone().detach(),
]
)
# Could potentially reduce num_valid_samples_in_microbatch and use that to aggregate instead of len(self._validation_ds)
torch.distributed.all_reduce(
loss_sum_and_ub_size_all_gpu, group=parallel_state.get_data_parallel_group()
)
return loss_for_ub, {'loss_sum_and_ub_size': loss_sum_and_ub_size_all_gpu}
else:
reduced_loss = average_losses_across_data_parallel_group([loss_for_ub])
return loss_for_ub, {'avg': reduced_loss}
return output_tensor, loss_func
return fwd_output_and_loss_func
def get_forward_output_only_func(self):
def fwd_output_only_func(dataloader_iter, model):
batch = next(dataloader_iter)
extra_arg = {}
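            # The batch arrives in one of two layouts: a 3-tuple
            # (tokens, attention_mask, position_ids) when scoring full sequences, or a
            # 5-tuple with two extra per-batch scalars that control whether the KV cache
            # is (re)initialized and the maximum cached sequence length for incremental
            # decoding.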
if len(batch) == 3:
batch = [x.cuda() for x in batch]
tokens, attention_mask, position_ids = batch
attention_mask = attention_mask[0:1]
else:
(
tokens,
attention_mask,
position_ids,
set_inference_key_value_memory,
inference_max_sequence_len,
) = batch
tokens = tokens.cuda()
position_ids = position_ids.cuda()
if attention_mask is not None:
attention_mask = attention_mask.cuda()
attention_mask = attention_mask[0:1]
if self.mcore_gpt:
                # if this is the first step, clear the KV cache; otherwise reuse inference_params
if set_inference_key_value_memory[0].item():
self.inference_params = InferenceParams(
max_batch_size=tokens.size(0), max_sequence_length=inference_max_sequence_len[0].item()
)
extra_arg['inference_params'] = self.inference_params
else:
extra_arg['set_inference_key_value_memory'] = set_inference_key_value_memory[0].item()
extra_arg['inference_max_sequence_len'] = inference_max_sequence_len[0].item()
output_tensor = model(tokens, position_ids, attention_mask, **extra_arg)
# Advance inference sequence offset.
if self.inference_params:
# if last stage, then (final) output is [b, s, h], otherwise it's [s, b, h]
if parallel_state.is_pipeline_last_stage():
self.inference_params.sequence_len_offset += output_tensor.size(1)
else:
self.inference_params.sequence_len_offset += output_tensor.size(0)
def id_func(output_tensor):
return output_tensor, {'logits': output_tensor}
return output_tensor, id_func
return fwd_output_only_func
def validation_step(self, dataloader_iter, batch_idx):
"""
            Our dataloaders produce micro-batches; depending on the global batch size and
            model parallel size, we fetch the required number of micro-batches from the
            dataloader to produce a list of microbatches.
The list of microbatches is then piped through the pipeline using megatron-core fwd/bwd functions.
"""
# Check if iterator is exhausted
dataloader_iter, done = self._val_iterator_done(dataloader_iter)
if done:
return
mode = 'test' if self.trainer.testing else 'val'
# Initialize userbuffer communicators.
if self.initialize_ub:
self.initialize_ub_func()
if isinstance(self.model, list):
for model_module in self.model:
model_module.eval()
loss = self.fwd_bwd_step(dataloader_iter, batch_idx, True)
if isinstance(self.model, list):
for model_module in self.model:
model_module.train()
self.validation_step_outputs.append(loss) if mode == 'val' else self.test_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self):
if parallel_state.is_pipeline_last_stage():
# only the last pipeline parallel stages return loss with their batch size
if self.cfg.data.get('validation_drop_last', True):
averaged_loss = torch.stack(self.validation_step_outputs).mean()
else:
# Compute the avg loss by total_loss across all samples / total number of samples
total_loss_and_total_samples = torch.vstack(self.validation_step_outputs).sum(axis=0)
avg_loss = total_loss_and_total_samples[0] / total_loss_and_total_samples[1]
averaged_loss = avg_loss.type(torch.float32).cuda()
else:
averaged_loss = torch.tensor(0.0, dtype=torch.float32).cuda()
        # we can only log on rank zero, so we broadcast the averaged loss from the last pipeline rank
torch.distributed.broadcast(averaged_loss, get_last_rank())
self.log('val_loss', averaged_loss, prog_bar=True, rank_zero_only=True, batch_size=1)
self.validation_step_outputs.clear() # free memory
return averaged_loss
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def on_test_epoch_end(self):
averaged_loss = average_losses_across_data_parallel_group(self.test_step_outputs)
logging.info(f'test_loss: {averaged_loss[0]}')
self.test_step_outputs.clear() # free memory
def loss_func(self, loss_mask, output_tensor):
losses = output_tensor.float()
loss_mask = loss_mask.view(-1).float()
# TODO: add nemo version here
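        # Masked token-level mean: positions where loss_mask == 0 contribute nothing,
        # and the sum is normalized by the number of unmasked tokens rather than by the
        # full sequence length.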
loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum() # sequence level nll
return loss
def build_train_valid_test_datasets(self):
        # Override limit_val_batches to be a multiple of num microbatches to prevent val_step from exiting in the middle of a step
self._reconfigure_val_batches()
logging.info('Building GPT datasets.')
if self.trainer.limit_val_batches > 1.0 and isinstance(self.trainer.limit_val_batches, float):
raise ValueError("limit_val_batches must be an integer or float less than or equal to 1.0.")
global_batch_size = self.cfg.global_batch_size
max_train_steps = self.trainer.max_steps
eval_iters = (max_train_steps // self.trainer.val_check_interval + 1) * self.trainer.limit_val_batches
test_iters = self.trainer.limit_test_batches
train_valid_test_num_samples = [
max_train_steps * global_batch_size,
eval_iters * global_batch_size,
test_iters * global_batch_size,
]
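        # Illustrative arithmetic (all values are assumptions): with max_steps=1000,
        # global_batch_size=256, val_check_interval=100, limit_val_batches=50 and
        # limit_test_batches=25, this yields
        # [1000 * 256, (1000 // 100 + 1) * 50 * 256, 25 * 256] = [256000, 140800, 6400].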
if self.trainer.limit_val_batches <= 1.0 and isinstance(self.trainer.limit_val_batches, float):
train_valid_test_num_samples[
1
] = 1 # This is to make sure we only have one epoch on every validation iteration
self._train_ds, self._validation_ds, self._test_ds = build_train_valid_test_datasets(
cfg=self.cfg,
trainer=self.trainer,
data_prefix=self.cfg.data.data_prefix,
data_impl=self.cfg.data.data_impl,
splits_string=self.cfg.data.splits_string,
train_valid_test_num_samples=train_valid_test_num_samples,
seq_length=self.cfg.data.seq_length,
seed=self.cfg.seed,
skip_warmup=self.cfg.data.get('skip_warmup', True),
tokenizer=self.tokenizer,
)
if self._train_ds is not None:
logging.info(f'Length of train dataset: {len(self._train_ds)}')
if self._validation_ds is not None:
logging.info(f'Length of val dataset: {len(self._validation_ds)}')
if self._test_ds is not None:
logging.info(f'Length of test dataset: {len(self._test_ds)}')
logging.info(f'Finished building GPT datasets.')
return self._train_ds, self._validation_ds, self._test_ds
def build_pretraining_data_loader(
self, dataset, consumed_samples, dataset_type=None, drop_last=True, pad_samples_to_global_batch_size=False
):
"""Buld dataloader given an input dataset."""
logging.info(f'Building dataloader with consumed samples: {consumed_samples}')
# Megatron sampler
if hasattr(self.cfg.data, 'dataloader_type') and self.cfg.data.dataloader_type is not None:
if self.cfg.data.dataloader_type == 'single':
batch_sampler = MegatronPretrainingSampler(
total_samples=len(dataset),
consumed_samples=consumed_samples,
micro_batch_size=self.cfg.micro_batch_size,
data_parallel_rank=parallel_state.get_data_parallel_rank(),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
drop_last=drop_last,
global_batch_size=self.cfg.global_batch_size,
rampup_batch_size=self.cfg.get('rampup_batch_size', None),
pad_samples_to_global_batch_size=pad_samples_to_global_batch_size,
)
elif self.cfg.data.dataloader_type == 'cyclic':
batch_sampler = MegatronPretrainingRandomSampler(
total_samples=len(dataset),
consumed_samples=consumed_samples,
micro_batch_size=self.cfg.micro_batch_size,
data_parallel_rank=parallel_state.get_data_parallel_rank(),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
drop_last=self.cfg.get('drop_last', True),
)
else:
raise ValueError('cfg.data.dataloader_type must be "single" or "cyclic"')
else:
raise ValueError('cfg.data.dataloader_type not found. Must be "single" or "cyclic"')
return torch.utils.data.DataLoader(
dataset,
batch_sampler=batch_sampler,
num_workers=self.cfg.data.num_workers,
pin_memory=True,
persistent_workers=True if self.cfg.data.num_workers > 0 else False,
)
def setup(self, stage=None):
""" PTL hook that is executed after DDP spawns.
            We set up datasets here as megatron datasets require DDP to instantiate.
See https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#setup for more information.
Args:
stage (str, optional): Can be 'fit', 'validate', 'test' or 'predict'. Defaults to None.
"""
num_parameters_on_device, total_num_parameters = self._get_total_params_across_model_parallel_groups_gpt_bert(
self.model
)
logging.info(
f'Pipeline model parallel rank: {parallel_state.get_pipeline_model_parallel_rank()}, '
f'Tensor model parallel rank: {parallel_state.get_tensor_model_parallel_rank()}, '
f'Number of model parameters on device: {num_parameters_on_device:.2e}. '
f'Total number of model parameters: {total_num_parameters:.2e}.'
)
resume_checkpoint_path = self.trainer.ckpt_path
if resume_checkpoint_path:
init_consumed_samples = self._extract_consumed_samples_from_ckpt(resume_checkpoint_path)
else:
init_consumed_samples = 0
self.init_consumed_samples = init_consumed_samples
self.init_global_step = self.trainer.global_step
if self.rampup_batch_size:
optimizer = self.cfg.optim.get('name', None)
assert (
optimizer == 'fused_adam'
), f'{optimizer} optimizer is not supported yet with rampup batch size. Please, use fused_adam optimizer instead.'
num_microbatch_calculator = apex.transformer.pipeline_parallel.utils._GLOBAL_NUM_MICROBATCHES_CALCULATOR
num_microbatch_calculator.update(self.init_consumed_samples, consistency_check=False)
self.prev_consumed_samples = self.init_consumed_samples
if stage == 'predict':
return
else:
# TODO: consider adding a ModelPT guard to check if model is being restored.
# allowing restored models to optionally setup datasets
self.build_train_valid_test_datasets()
self.setup_training_data(self.cfg.data)
self.setup_validation_data(self.cfg.data)
self.setup_test_data(self.cfg.data)
if stage == 'fit':
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
if self.cfg.get('share_embeddings_and_output_weights', True):
for index, module in enumerate(self.get_gpt_module_list()):
if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None:
parallel_state.set_virtual_pipeline_model_parallel_rank(index)
sync_embeddings = (
module.initialize_last_stage_with_word_embeddings
if self.mcore_gpt
else module.sync_initial_word_embeddings
)
sync_embeddings()
if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None:
parallel_state.set_virtual_pipeline_model_parallel_rank(0)
if self.cfg.get('transformer_engine', False) or self.cfg.get('mcore_gpt', False):
self.setup_transformer_engine_tp_groups()
def setup_training_data(self, cfg):
if hasattr(self, '_train_ds'):
consumed_samples = self.compute_consumed_samples(0)
logging.info(
                f'Setting up train dataloader with len(self._train_ds): {len(self._train_ds)} and consumed samples: {consumed_samples}'
)
self._train_dl = self.build_pretraining_data_loader(self._train_ds, consumed_samples)
def setup_validation_data(self, cfg):
if hasattr(self, '_validation_ds'):
consumed_samples = 0
logging.info(
                f'Setting up validation dataloader with len(self._validation_ds): {len(self._validation_ds)} and consumed samples: {consumed_samples}'
)
drop_last = True
if not self.cfg.data.get('validation_drop_last', True):
logging.info(f'Drop last in validation dataset is set to False')
drop_last = False
pad_samples_to_global_batch_size = False
if self.cfg.data.get('pad_samples_to_global_batch_size', False):
logging.info('pad_samples_to_global_batch_size set to True')
pad_samples_to_global_batch_size = True
self._validation_dl = self.build_pretraining_data_loader(
self._validation_ds, consumed_samples, "validation", drop_last, pad_samples_to_global_batch_size
)
def setup_test_data(self, cfg):
if hasattr(self, '_test_ds'):
consumed_samples = 0
logging.info(
                f'Setting up test dataloader with len(self._test_ds): {len(self._test_ds)} and consumed samples: {consumed_samples}'
)
self._test_dl = self.build_pretraining_data_loader(self._test_ds, consumed_samples)
def generate(
self,
inputs: Union[List[str], torch.Tensor, List[dict]],
length_params: LengthParam,
sampling_params: SamplingParam = None,
) -> OutputType:
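        # Example usage (a sketch; the dict keys follow the LengthParam / SamplingParam
        # TypedDicts and the concrete values below are only assumptions):
        #   length_params = {"min_length": 0, "max_length": 64}
        #   sampling_params = get_default_sampling_params()
        #   output = model.generate(["Deep learning is"], length_params, sampling_params)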
# check whether the DDP is initialized
if parallel_state.is_unitialized():
def dummy():
return
if self.trainer.strategy.launcher is not None:
self.trainer.strategy.launcher.launch(dummy, trainer=self.trainer)
self.trainer.strategy.setup_environment()
if self.cfg.get('transformer_engine', False):
self.setup_transformer_engine_tp_groups()
# set the default sampling params if it is None.
# default do greedy sampling
if sampling_params is None:
sampling_params = get_default_sampling_params()
# set the default length params if it is None.
# default do greedy sampling
if length_params is None:
length_params = get_default_length_params()
return megatron_gpt_generate(self.cuda(), inputs, self.tokenizer, length_params, sampling_params)
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
inference_config = self.get_inference_config()
if inference_config is None:
return None
else:
# need to overwrite some configuration, make it immutable
inference_config = inference_config.copy()
compute_logprob = inference_config['compute_logprob']
if compute_logprob:
inference_config['inputs'] = batch
inference_config['tokens_to_generate'] = 1
inference_config['all_probs'] = True
inference_config["add_BOS"] = False
inference_config['greedy'] = True
response = generate(self, **inference_config)
compute_prob_response = get_computeprob_response(self.tokenizer, response, batch)
return compute_prob_response
else:
inference_config['inputs'] = batch
return generate(self, **inference_config)
def list_available_models(self):
return None
def transfer_batch_to_device(self, batch: Any, device: torch.device, dataloader_idx: int) -> Any:
""" PTL hook: https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#transfer-batch-to-device
When using pipeline parallelism, we need the global batch to remain on the CPU,
since the memory overhead will be too high when using a large number of microbatches.
Microbatches are transferred from CPU to GPU inside the pipeline.
"""
return batch
def _validate_trainer(self):
""" Certain trainer configurations can break training.
Here we try to catch them and raise an error.
"""
if self.trainer.accumulate_grad_batches > 1:
raise ValueError(
f'Gradient accumulation is done within training_step. trainer.accumulate_grad_batches must equal 1'
)
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
result.append(
PretrainedModelInfo(
pretrained_model_name="megatron_gpt_345m",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/megatron_gpt_345m/versions/1/files/megatron_gpt_345m.nemo",
description="345M parameter GPT generative Megatron model.",
)
)
return result
def setup_transformer_engine_tp_groups(self):
""" This should be called after model parallel groups have been initialized
and only needs to be called when using Transformer Engine.
"""
for module in self.get_gpt_module_list():
"""Set TP group
Copied from: https://github.com/NVIDIA/TransformerEngine/blob/main/transformer_engine/pytorch/transformer.py#L398
"""
# Deep iterate but skip self to avoid infinite recursion.
for index, child in enumerate(module.modules()):
if index == 0:
continue
if hasattr(child, "set_tensor_parallel_group"):
tp_group = parallel_state.get_tensor_model_parallel_group()
child.set_tensor_parallel_group(tp_group)
def on_save_checkpoint(self, checkpoint) -> None:
"""LightningModule hook:
https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html#on-save-checkpoint
"""
# mcore uses distributed checkpointing
if self.mcore_gpt:
checkpoint['sharded_state_dict'] = self.sharded_state_dict()
# legacy checkpointing for interleaved
else:
if isinstance(self.model, list):
for i in range(len(self.model)):
parallel_state.set_virtual_pipeline_model_parallel_rank(i)
checkpoint[f'model{i}'] = self.model[i].module.state_dict_for_save_checkpoint()
parallel_state.set_virtual_pipeline_model_parallel_rank(0)
def on_load_checkpoint(self, checkpoint) -> None:
"""LightningModule hook:
https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html#on-load-checkpoint
"""
# mcore uses distributed checkpointing
if self.mcore_gpt:
if 'state_dict' in checkpoint:
for index, module in enumerate(self.get_gpt_module_list()):
if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None:
checkpoint_state_dict = checkpoint['state_dict'][f'model_{index}']
else:
checkpoint_state_dict = checkpoint['state_dict']
# checkpoint_state_dict has "model." but module does not so we need to remove it when loading
checkpoint_state_dict = {
key.replace('model.', ''): checkpoint_state_dict.pop(key)
for key in list(checkpoint_state_dict.keys())
}
module.load_state_dict(checkpoint_state_dict, strict=True)
else:
# when restoring a distributed checkpoint from a ptl checkpoint we need to defer loading the state_dict
# see NLPModel.on_load_checkpoint
checkpoint['state_dict'] = {}
# legacy checkpointing for interleaved
else:
if isinstance(self.model, list):
for i in range(len(self.model)):
parallel_state.set_virtual_pipeline_model_parallel_rank(i)
self.model[i].module.load_state_dict(checkpoint[f'model{i}'], strict=True)
parallel_state.set_virtual_pipeline_model_parallel_rank(0)
def sharded_state_dict(self, prefix: str = '') -> Dict[str, Any]:
"""
Creates the sharded state dict which is used by dist_checkpoint to save the sharded tensors to disk.
        When given the sharded_state_dict, dist_checkpoint.load will load the tensors corresponding to
self.state_dict().
The sharded tensor mapping is defined in the GPTModel class from mcore.
"""
if self.mcore_gpt:
module_prefix = f'{prefix}model.'
sharded_state_dict = {}
for index, module in enumerate(self.get_gpt_module_list()):
if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None:
                    # virtual pipeline rank must be set so that GPTModel returns the correct sharded state dict
parallel_state.set_virtual_pipeline_model_parallel_rank(index)
module_sharded_state_dict = module.sharded_state_dict(prefix=module_prefix)
sharded_state_dict[f'model_{index}'] = module_sharded_state_dict
else:
module_sharded_state_dict = module.sharded_state_dict(prefix=module_prefix)
sharded_state_dict.update(module_sharded_state_dict)
# reset vp rank
if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None:
parallel_state.set_virtual_pipeline_model_parallel_rank(0)
return sharded_state_dict
def parameters(self):
if isinstance(self.model, list):
return itertools.chain.from_iterable(module.parameters() for module in self.model)
else:
return self.model.parameters()
@property
def mgpt_wrapper(self):
return MegatronGPTExportableModel(self)
def list_export_subnets(self):
return ['mgpt_wrapper']
def _reset_activation_checkpointing_args(self):
""" Disables activation checkpointing completely and saves the values so that
_restore_activation_checkpointing_args can restore them later. This function must always be
called before _restore_activation_checkpointing_args.
"""
# Store values to restore them later.
self.last_activations_checkpoint_granularity = self.cfg.activations_checkpoint_granularity
self.last_activations_checkpoint_method = self.cfg.activations_checkpoint_method
self.last_activations_checkpoint_num_layers = self.cfg.activations_checkpoint_num_layers
self.last_activations_checkpoint_layers_per_pipeline = self.cfg.activations_checkpoint_layers_per_pipeline
# Reset config values. Needed for calling generate.
self.cfg.activations_checkpoint_granularity = None
self.cfg.activations_checkpoint_method = None
self.cfg.activations_checkpoint_num_layers = None
self.cfg.activations_checkpoint_layers_per_pipeline = None
# Reset model parameters.
for module in self.get_gpt_module_list():
if self.cfg.get('mcore_gpt', False):
module.decoder.config.recompute_granularity = None
module.decoder.config.recompute_method = None
module.decoder.config.recompute_num_layers = None
else:
module.language_model.encoder.activations_checkpoint_granularity = None
module.language_model.encoder.activations_checkpoint_method = None
module.language_model.encoder.activations_checkpoint_num_layers = None
module.language_model.encoder.activations_checkpoint_layers_per_pipeline = None
def _restore_activation_checkpointing_args(self):
""" Restores the activation checkpointing parameters using the values saved by
_reset_activation_checkpointing_args. This function must never be called before
_reset_activation_checkpointing_args.
"""
# Restore config values.
self.cfg.activations_checkpoint_granularity = self.last_activations_checkpoint_granularity
self.cfg.activations_checkpoint_method = self.last_activations_checkpoint_method
self.cfg.activations_checkpoint_num_layers = self.last_activations_checkpoint_num_layers
self.cfg.activations_checkpoint_layers_per_pipeline = self.last_activations_checkpoint_layers_per_pipeline
# Restore model parameters.
for module in self.get_gpt_module_list():
if self.cfg.get('mcore_gpt', False):
module.decoder.config.recompute_granularity = self.last_activations_checkpoint_granularity
module.decoder.config.recompute_method = self.last_activations_checkpoint_method
module.decoder.config.recompute_num_layers = self.last_activations_checkpoint_num_layers
else:
module.language_model.encoder.activations_checkpoint_granularity = (
self.last_activations_checkpoint_granularity
)
module.language_model.encoder.activations_checkpoint_method = self.last_activations_checkpoint_method
module.language_model.encoder.activations_checkpoint_num_layers = (
self.last_activations_checkpoint_num_layers
)
module.language_model.encoder.activations_checkpoint_layers_per_pipeline = (
self.last_activations_checkpoint_layers_per_pipeline
)
def _reset_sequence_parallelism_args(self):
""" Disables sequence parallelism completely and saves the values so that
_restore_sequence_parallelism_args can restore them later. This function must always be
called before _restore_sequence_parallelism_args.
"""
# Store values to restore them later.
self.last_sequence_parallel = self.cfg.sequence_parallel
# Reset config values. Needed for calling generate.
self.cfg.sequence_parallel = False
self.model_parallel_config.sequence_parallel = False
self.transformer_config.sequence_parallel = False
# Reset model parameters.
for module in self.get_gpt_module_list():
for mod in module.modules():
if hasattr(mod, "sequence_parallel"):
mod.sequence_parallel = False
def _restore_sequence_parallelism_args(self):
""" Restores the sequence parallelism parameters using the values saved by
_reset_sequence_parallelism_args. This function must never be called before
_reset_sequence_parallelism_args.
"""
# Restore config values.
self.cfg.sequence_parallel = self.last_sequence_parallel
self.model_parallel_config.sequence_parallel = self.last_sequence_parallel
self.transformer_config.sequence_parallel = self.last_sequence_parallel
# Restore model parameters.
for module in self.get_gpt_module_list():
for mod in module.modules():
if hasattr(mod, "sequence_parallel"):
mod.sequence_parallel = self.last_sequence_parallel
def build_transformer_config(self) -> TransformerConfig:
""" Builds the megatron core gpt transformer config for the model.
For attributes in the nemo model config that are the same
as the megatron core TransformerConfig, we will use the value from the nemo model config.
For attributes in TransformerConfig that are not in the nemo model config, we add custom logic.
"""
# create a dictionary copy of the model config
cfg = OmegaConf.to_container(self.cfg, resolve=True)
# create a dict to store the transformer config arguments
transformer_config_dict = {}
# get model parallel configs from the base class
model_parallel_config = self.build_model_parallel_config()
add_bias_linear = self.cfg.get('bias', True)
activation = self.cfg.get('activation', 'gelu')
# TODO: need to check which activation functions are supported in mcore
gated_linear_unit = activation.endswith('glu')
activation_func = activation_to_func(activation)
normalization = self.cfg.get('normalization', 'layernorm')
if normalization == 'layernorm':
normalization = 'LayerNorm'
elif normalization == 'rmsnorm':
normalization = 'RMSNorm'
else:
logging.warning(
f"The normalization type: {normalization} might not be supported in megatron core."
f"Supported types are LayerNorm and RMSNorm."
)
init_method_std = self.cfg.get('init_method_std', 0.02)
# default used in mcore
init_method = init_method_normal(init_method_std)
output_layer_init_method = init_method
num_layers = self.cfg.get('num_layers', 1)
use_scaled_init_method = self.cfg.get('use_scaled_init_method', True)
if use_scaled_init_method:
output_layer_init_method = scaled_init_method_normal(init_method_std, num_layers=num_layers)
attention_softmax_in_fp32 = False # not currently used in NeMo unless apply_query_key_layer_scaling is True
apply_query_key_layer_scaling = self.cfg.get('apply_query_key_layer_scaling', False)
if apply_query_key_layer_scaling:
attention_softmax_in_fp32 = True
bias_activation_fusion = self.cfg.get('bias_activation_fusion', True)
bias_gelu_fusion = True if bias_activation_fusion else False
bias_dropout_fusion = self.cfg.get('bias_dropout_add_fusion', True)
# TODO: need to check if recompute APIs are matching up properly
recompute_granularity = self.cfg.get('activations_checkpoint_granularity', None)
recompute_method = self.cfg.get('activations_checkpoint_method', None)
recompute_num_layers = self.cfg.get('activations_checkpoint_num_layers', None)
if not self.cfg.get('fp8', False):
fp8 = None
elif self.cfg.get('fp8_e4m3', False):
fp8 = 'e4m3'
elif self.cfg.get('fp8_hybrid', False):
fp8 = 'hybrid'
else:
raise ValueError(f"fp8 enabled but fp8_format (fp8_e4m3 | fp8_hybrid) is not set.")
# any configs that are not in the nemo model config will be added here
config_mapping = {
'apply_residual_connection_post_layernorm': False, # we don't use this in NeMo
'layernorm_zero_centered_gamma': False, # not currently used in NeMo
'add_bias_linear': add_bias_linear,
'gated_linear_unit': gated_linear_unit,
'activation_func': activation_func,
'normalization': normalization,
'init_method': init_method,
'output_layer_init_method': output_layer_init_method,
'attention_softmax_in_fp32': attention_softmax_in_fp32,
'bias_gelu_fusion': bias_gelu_fusion,
'bias_dropout_fusion': bias_dropout_fusion,
'recompute_granularity': recompute_granularity,
'recompute_method': recompute_method,
'recompute_num_layers': recompute_num_layers,
'distribute_saved_activations': False, # not currently used in NeMo
'fp8': fp8,
}
# populate the transformer config dict
for field in fields(TransformerConfig):
# config mapping has priority
if field.name in config_mapping:
transformer_config_dict[field.name] = config_mapping[field.name]
# then config
elif field.name in cfg:
transformer_config_dict[field.name] = cfg[field.name]
# then model parallel config
elif field in fields(model_parallel_config):
transformer_config_dict[field.name] = getattr(model_parallel_config, field.name)
else:
logging.warning(
f"The model: {self} does not have field.name: {field.name} in its cfg. "
f"Add this key to cfg or config_mapping to make to make it configurable."
)
transformer_config = TransformerConfig(**transformer_config_dict)
return transformer_config
def _wrap_model_for_O2(self):
""" Wraps self.model in a float16 wrapper if the model is using megatron amp O2.
Args:
model: The model to wrap. Can be a list of modules or a single module.
Returns:
The wrapped model. Returns a list of wrapped modules or a single wrapped module.
"""
Float16Wrapper = MCoreFloat16Module if self.mcore_gpt else Float16Module
nemo_args = {
'config': self.model_parallel_config,
'precision': self.cfg.precision,
'share_token_embeddings': self.cfg.get('share_embeddings_and_output_weights', True),
}
mcore_args = {
'config': self.transformer_config,
}
args = mcore_args if self.mcore_gpt else nemo_args
# Model wrapper to convert both model and inputs to half precision
if isinstance(self.model, list):
converted_model = []
for module in self.model:
args['module'] = module
converted_model.append(Float16Wrapper(**args))
self.model = converted_model
else:
args['module'] = self.model
self.model = Float16Wrapper(**args)
args.pop('module')
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from omegaconf import DictConfig, ListConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.data.language_modeling.megatron.base_dataset_utils import (
get_datasets_weights_and_num_samples,
)
from nemo.collections.nlp.data.language_modeling.megatron.blendable_dataset import BlendableDataset
from nemo.collections.nlp.data.language_modeling.megatron.megatron_batch_samplers import (
MegatronPretrainingBatchSampler,
)
from nemo.collections.nlp.data.language_modeling.t0_dataset import T0Dataset
from nemo.collections.nlp.models.language_modeling.megatron_finetune_model import MegatronT5FinetuneModel
from nemo.utils import AppState, logging
try:
from apex.transformer.pipeline_parallel.utils import _reconfigure_microbatch_calculator
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
__all__ = ['MegatronT0Model']
class MegatronT0Model(MegatronT5FinetuneModel):
"""T0 (https://arxiv.org/abs/2110.08207) Model that Inherits from MegatronT5FinetuneModel and overrides the dataset building."""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer=trainer)
def setup(self, stage=None):
        # NOTE: super().__init__ will try to set up train/val/test datasets, but we sidestep this using an "if self._train_ds is not None" condition
# We then set things up for real only once setup() of this class is called.
resume_checkpoint_path = self.trainer.ckpt_path
if resume_checkpoint_path:
init_consumed_samples = self._extract_consumed_samples_from_ckpt(resume_checkpoint_path)
else:
init_consumed_samples = 0
self.init_consumed_samples = init_consumed_samples
if stage == 'predict':
return
# If the user wants to manually override train and validation dataloaders before calling `.fit()`
if self._train_dl is not None and self._validation_dl is not None:
return
self.build_train_valid_test_datasets(stage=stage)
if hasattr(self, '_train_ds'):
self.setup_training_dataloader()
if hasattr(self, '_validation_ds'):
self._validation_dl = self.setup_eval_dataloader(self._validation_ds, self.cfg.data.validation_ds)
if hasattr(self.cfg.data, 'test_ds'):
self._test_dl = self.setup_test_data(self._test_ds, self.cfg.data.test_ds)
# when using pipeline model parallel the final stage need to initialize word embeddings
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
self.enc_dec_model.sync_initial_word_embeddings()
# Only synchronize position embeddings if using absolute position embeddings in both the encoder and decoder.
if (
self.cfg.encoder.get("position_embedding_type", "learned_absolute") == "learned_absolute"
and self.cfg.decoder.get("position_embedding_type", "learned_absolute") == "learned_absolute"
):
self.enc_dec_model.sync_initial_position_embeddings()
def _build_dataset(self, data_cfg, check_implict_grad_acc=False, is_train=True):
if (
check_implict_grad_acc
and data_cfg.global_batch_size > data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size()
):
raise ValueError(
f'You are trying to use "implicit gradient accumulation" of {data_cfg.global_batch_size // (data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size())} in your validation/test datasets. This is not supported. Please set global_batch_size equal to micro_batch_size * data_parallel_world_size.'
)
datasets = []
# Determine if we are using a single dataset or a list of datasets.
is_list_config = isinstance(data_cfg.file_names, ListConfig)
if not is_list_config:
raise ValueError(f"T0 train/validation datasets must be provided as a list of individual JSONL files.")
if is_train:
# Construct the data prefix list for `get_datasets_weights_and_num_samples()` that is of the format [weight1,file_name1,weight2,file_name2,...]
if data_cfg.concat_sampling_probabilities is None or not isinstance(
data_cfg.concat_sampling_probabilities, ListConfig
):
raise ValueError(
f"concat_sampling_probabilities must be a ListConfig with the same number of files in file_names. Found: {data_cfg.concat_sampling_probabilities}"
)
if len(data_cfg.get('concat_sampling_probabilities', None)) != len(data_cfg.file_names):
raise ValueError(
f"concat_sampling_probabilities must be of the same size as file_names. Provided size {len(data_cfg.concat_sampling_probabilities)}, number of datasets {len(data_cfg.file_names)}"
)
data_prefix = []
for weight, prefix in zip(data_cfg.concat_sampling_probabilities, data_cfg.file_names):
data_prefix.append(weight)
data_prefix.append(prefix)
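            # Illustrative result (file names are made up): with
            # concat_sampling_probabilities=[0.3, 0.7] and file_names=['a.jsonl', 'b.jsonl'],
            # data_prefix becomes [0.3, 'a.jsonl', 0.7, 'b.jsonl'], the flat format
            # expected by get_datasets_weights_and_num_samples().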
if self.trainer.max_steps is None or self.trainer.max_steps <= 0:
raise ValueError(
f'Trainer max_steps must be set to a positive integer. Found {self.trainer.max_steps}'
)
num_train_samples = [self.trainer.max_steps * self.cfg.global_batch_size]
_, _, num_train_samples_per_dataset = get_datasets_weights_and_num_samples(data_prefix, num_train_samples)
num_train_samples_after_blend = sum([x[0] for x in num_train_samples_per_dataset])
else:
num_train_samples_per_dataset = [[None]] * len(data_cfg.file_names)
for file_path, num_samples in zip(data_cfg.file_names, num_train_samples_per_dataset):
dataset = T0Dataset(
file_path=file_path,
tokenizer=self.tokenizer,
max_src_seq_length=data_cfg.max_src_seq_length,
max_tgt_seq_length=data_cfg.max_tgt_seq_length,
add_bos_to_input=data_cfg.get('add_bos_to_input', False),
add_eos_to_input=data_cfg.get('add_eos_to_input', False),
replace_bos_with_pad=data_cfg.get('replace_bos_with_pad', False),
max_num_samples=num_samples[0],
seed=data_cfg.get('seed', 1234),
)
datasets.append(dataset)
if is_train:
dataset = BlendableDataset(
datasets=datasets, weights=data_cfg.concat_sampling_probabilities, size=num_train_samples_after_blend
)
return dataset
else:
return datasets
def training_step(self, dataloader_iter, batch_idx):
return super(MegatronT5FinetuneModel, self).training_step(dataloader_iter, batch_idx)
# Override the parent batch reconfiguring logic.
def _reconfigure_and_process_inference_batch(self, batch):
global_batch_per_gpu = batch['text_enc'].size(0)
# This should happen only on the last batch of the validation/test dataset with drop_last=False.
if global_batch_per_gpu != self.cfg.data.validation_ds.global_batch_size:
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=global_batch_per_gpu * parallel_state.get_data_parallel_world_size(),
micro_batch_size=global_batch_per_gpu,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
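            # Hypothetical illustration: if drop_last=False leaves only 3 samples per GPU in the
            # final batch while validation_ds.global_batch_size assumed a larger per-GPU batch,
            # the calculator above is rebuilt with micro_batch_size=3 and
            # global_batch_size = 3 * data_parallel_world_size.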
return batch
def build_train_valid_test_datasets(self, stage):
if stage != 'test':
logging.info('Building T0 validation datasets.')
# Wrap this in a list since the general finetuning parent class supports multi-validation.
self._validation_ds = self._build_dataset(
self.cfg.data.validation_ds, check_implict_grad_acc=True, is_train=False
)
logging.info(f'Length of val dataset: {len(self._validation_ds[0])}')
if stage != 'validate':
if hasattr(self.cfg.data, 'test_ds'):
logging.info('Building T0 test datasets.')
# Wrap this in a list since the general finetuning parent class supports multi-validation.
self._test_ds = self._build_dataset(self.cfg.data.test_ds, check_implict_grad_acc=True, is_train=False)
logging.info(f'Length of test dataset: {len(self._test_ds[0])}')
if stage == 'validate' or stage == 'test':
return
        logging.info('Building T0 training datasets.')
self._train_ds = self._build_dataset(self.cfg.data.train_ds, check_implict_grad_acc=False)
logging.info(f'Length of train dataset: {len(self._train_ds)}')
def build_data_loader(
self, dataset, data_cfg, consumed_samples=0,
):
"""Buld dataloader given an input dataset."""
logging.info(f'Building dataloader with consumed samples: {consumed_samples}')
if isinstance(dataset, BlendableDataset):
collate_fn = dataset.datasets[0].collate_fn
else:
collate_fn = dataset.collate_fn
batch_sampler = MegatronPretrainingBatchSampler(
total_samples=len(dataset),
consumed_samples=consumed_samples,
micro_batch_size=data_cfg.micro_batch_size,
global_batch_size=data_cfg.global_batch_size,
data_parallel_rank=parallel_state.get_data_parallel_rank(),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
drop_last=True,
)
return torch.utils.data.DataLoader(
dataset,
batch_sampler=batch_sampler,
collate_fn=collate_fn,
num_workers=data_cfg.num_workers,
pin_memory=data_cfg.pin_memory,
)
def setup_training_dataloader(self):
if hasattr(self, '_train_ds'):
consumed_samples = self.compute_consumed_samples(0)
self._train_dl = self.build_data_loader(
dataset=self._train_ds, data_cfg=self.cfg.data.train_ds, consumed_samples=consumed_samples,
)
def setup_eval_dataloader(self, datasets, data_cfg):
dataloaders = []
for dataset in datasets:
eval_dl = self.build_data_loader(dataset=dataset, data_cfg=data_cfg, consumed_samples=0,)
dataloaders.append(eval_dl)
return dataloaders
    # TODO: Temporary overrides of finetune model. This needs to be removed in the finetune model.
def on_train_start(self) -> None:
super(MegatronT5FinetuneModel, self).on_train_start()
def on_validation_start(self) -> None:
super(MegatronT5FinetuneModel, self).on_validation_start()
def on_test_start(self) -> None:
super(MegatronT5FinetuneModel, self).on_test_start()
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_t0_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
from typing import Dict, Optional
import numpy as np
import torch
import torch.utils.data as pt_data
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from nemo.collections.common.losses import SmoothedCrossEntropyLoss
from nemo.collections.common.metrics import GlobalAverageLossMetric
from nemo.collections.common.parts import transformer_weights_init
from nemo.collections.nlp.data import SentenceDataset, TarredSentenceDataset
from nemo.collections.nlp.metrics import SequencePerplexity
from nemo.collections.nlp.modules.common import TokenClassifier
from nemo.collections.nlp.modules.common.lm_utils import get_transformer
from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer
from nemo.core.classes.common import typecheck
from nemo.core.classes.modelPT import ModelPT
from nemo.utils import logging, model_utils
__all__ = ["TransformerLMModel"]
class TransformerLMModel(ModelPT):
"""
Left-to-right Transformer language model.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# Get global rank and total number of GPU workers for IterableDataset partitioning, if applicable
self.world_size = 1
if trainer is not None:
self.world_size = trainer.num_nodes * trainer.num_devices
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
        # Instantiate the tokenizer and register it to be saved with the NeMo Model archive.
        # After this call, self.tokenizer can convert between tokens and token ids.
self.setup_tokenizer(
tokenizer_name=cfg.tokenizer.get("tokenizer_name", "yttm"),
tokenizer_model=cfg.tokenizer.get("tokenizer_model", None),
vocab_file=cfg.tokenizer.get("vocab_file", None),
bpe_dropout=cfg.tokenizer.get("bpe_dropout", 0.0),
)
# init superclass
super().__init__(cfg=cfg, trainer=trainer)
# make vocabulary size divisible by 8 for fast fp16 training
vocab_size = 8 * math.ceil(self.tokenizer.vocab_size / 8)
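        # e.g. (hypothetical size) a tokenizer with 37005 entries is padded to vocab_size = 37008,
        # the next multiple of 8.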
# encoder from NeMo, Megatron-LM, or HuggingFace
encoder_cfg_dict = OmegaConf.to_container(cfg.get('encoder'))
encoder_cfg_dict['vocab_size'] = vocab_size
library = encoder_cfg_dict.pop('library', 'nemo')
model_name = encoder_cfg_dict.pop('model_name', None)
pretrained = encoder_cfg_dict.pop('pretrained', False)
self.encoder = get_transformer(
library=library,
model_name=model_name,
pretrained=pretrained,
config_dict=encoder_cfg_dict,
encoder=True,
pre_ln_final_layer_norm=encoder_cfg_dict.get(
'pre_ln_final_layer_norm', encoder_cfg_dict.get('pre_ln', True)
),
)
self.log_softmax = TokenClassifier(
hidden_size=self.encoder.hidden_size,
num_classes=vocab_size,
activation=cfg.head.activation,
log_softmax=cfg.head.log_softmax,
dropout=cfg.head.dropout,
use_transformer_init=cfg.head.use_transformer_init,
)
# tie weights of embedding and softmax matrices
self.log_softmax.mlp.layer0.weight = self.encoder.embedding.token_embedding.weight
std_init_range = 1 / self.encoder.hidden_size ** 0.5
# initialize weights if not using pretrained encoder
if not self._cfg.encoder.get('pretrained', False):
self.encoder.apply(lambda module: transformer_weights_init(module, std_init_range))
self.log_softmax.apply(lambda module: transformer_weights_init(module, std_init_range))
self.loss_fn = SmoothedCrossEntropyLoss(pad_id=self.tokenizer.pad_id, label_smoothing=cfg.label_smoothing)
self.eval_loss_fn = SmoothedCrossEntropyLoss(pad_id=self.tokenizer.pad_id)
self.eval_loss = GlobalAverageLossMetric(dist_sync_on_step=False, take_avg_loss=True)
self.eval_ppl = SequencePerplexity()
@typecheck()
def forward(self, input_ids, attention_mask):
"""
No special modification required for Lightning, define it as you normally would
in the `nn.Module` in vanilla PyTorch.
"""
hidden_states = self.encoder(input_ids=input_ids, encoder_mask=attention_mask)
log_probs = self.log_softmax(hidden_states=hidden_states)
return log_probs
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
# forward pass
for i in range(len(batch)):
if batch[i].ndim == 3:
# Dataset returns already batched data and the first dimension of size 1
# added by DataLoader is excess.
batch[i] = batch[i].squeeze(dim=0)
ids, mask = batch
input_ids, labels = ids[:, :-1], ids[:, 1:]
input_mask = mask[:, :-1]
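        # Worked example of the shift-by-one construction above (hypothetical token ids):
        #   ids       = [[<bos>, 17, 42, 9, <eos>]]
        #   input_ids = ids[:, :-1] -> [[<bos>, 17, 42, 9]]
        #   labels    = ids[:, 1:]  -> [[17, 42, 9, <eos>]]
        # so each position is trained to predict the next token.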
log_probs = self(input_ids=input_ids, attention_mask=input_mask)
train_loss = self.loss_fn(log_probs=log_probs, labels=labels)
tensorboard_logs = {
"train_loss": train_loss,
"lr": self._optimizer.param_groups[0]["lr"],
}
return {"loss": train_loss, "log": tensorboard_logs}
def eval_step(self, batch, batch_idx):
mode = 'test' if self.trainer.testing else 'val'
for i in range(len(batch)):
if batch[i].ndim == 3:
# Dataset returns already batched data and the first dimension of size 1
# added by DataLoader is excess.
batch[i] = batch[i].squeeze(dim=0)
ids, mask = batch
input_ids, labels = ids[:, :-1], ids[:, 1:]
input_mask, output_mask = mask[:, :-1], mask[:, 1:]
log_probs = self(input_ids=input_ids, attention_mask=input_mask)
eval_loss = self.eval_loss_fn(log_probs=log_probs, labels=labels)
self.eval_loss(loss=eval_loss, num_measurements=log_probs.shape[0] * log_probs.shape[1])
self.eval_ppl(log_probs=log_probs, labels=labels, mask=output_mask)
        if mode == 'val':
            self.validation_step_outputs.append({})
        else:
            self.test_step_outputs.append({})
return {}
def test_step(self, batch, batch_idx):
return self.eval_step(batch, batch_idx)
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
return self.eval_step(batch, batch_idx)
def eval_epoch_end(self, outputs, mode):
eval_loss = self.eval_loss.compute()
eval_ppl = self.eval_ppl.compute()
self.log(f"{mode}_loss", eval_loss, sync_dist=True)
self.log(f"{mode}_PPL", eval_ppl, sync_dist=True)
dataset_name = "Validation" if mode == 'val' else "Test"
logging.info(f"\n\n\n\n{dataset_name} PPL: {np.round(eval_ppl.item(), 2)}")
def on_validation_epoch_end(self):
"""
        Called at the end of validation to aggregate the outputs collected in
        `self.validation_step_outputs` across the individual validation steps.
"""
self.eval_epoch_end(self.validation_step_outputs, 'val')
self.validation_step_outputs.clear() # free memory
self.eval_loss.reset()
self.eval_ppl.reset()
def on_test_epoch_end(self):
self.eval_epoch_end(self.test_step_outputs, 'test')
self.test_step_outputs.clear() # free memory
def setup_tokenizer(
self, tokenizer_name=None, tokenizer_model=None, vocab_file=None, bpe_dropout=0.0,
):
supported_tokenizers = ['yttm', 'huggingface', 'sentencepiece', 'word']
if tokenizer_name not in supported_tokenizers:
raise NotImplementedError(f"Currently we only support tokenizers in {supported_tokenizers}.")
self.tokenizer = get_tokenizer(
tokenizer_name=tokenizer_name,
tokenizer_model=self.register_artifact("cfg.tokenizer.tokenizer_model", tokenizer_model),
vocab_file=vocab_file,
bpe_dropout=bpe_dropout,
special_tokens=None,
use_fast=False,
)
def setup_training_data(self, train_data_config: Optional[DictConfig]):
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config)
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if 'use_tarred_dataset' in train_data_config and train_data_config['use_tarred_dataset']:
# We also need to check if limit_train_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # training batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if self._trainer is not None and isinstance(self._trainer.limit_train_batches, float):
self._trainer.limit_train_batches = int(
self._trainer.limit_train_batches * math.ceil(len(self._train_dl.dataset) / self.world_size)
)
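                # e.g. with limit_train_batches=1.0, a tarred dataset reporting len() == 1_000_000
                # and world_size == 8 (hypothetical numbers), the limit becomes
                # int(1.0 * ceil(1_000_000 / 8)) = 125_000.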
elif self._trainer is None:
logging.warning(
"Model Trainer was not set before constructing the dataset, incorrect number of "
"training batches will be used. Please set the trainer and rebuild the dataset."
)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config)
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if 'use_tarred_dataset' in val_data_config and val_data_config['use_tarred_dataset']:
# We also need to check if limit_val_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # validation batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if self._trainer is not None and isinstance(self._trainer.limit_val_batches, float):
self._trainer.limit_val_batches = int(
self._trainer.limit_val_batches * math.ceil(len(self._validation_dl.dataset) / self.world_size)
)
elif self._trainer is None:
logging.warning(
"Model Trainer was not set before constructing the dataset, incorrect number of "
"validation batches will be used. Please set the trainer and rebuild the dataset."
)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config)
def _setup_dataloader_from_config(self, cfg: DictConfig, predict_last_k=0):
if cfg.get("use_tarred_dataset", False):
if cfg.get("metadata_file") is None:
raise FileNotFoundError("Trying to use tarred data set but could not find metadata path in config.")
else:
metadata_file = cfg.get('metadata_file')
with open(metadata_file) as metadata_reader:
metadata = json.load(metadata_reader)
if cfg.get('tar_files') is None:
tar_files = metadata.get('tar_files')
if tar_files is not None:
logging.info(f'Loading from tarred dataset {tar_files}')
else:
raise FileNotFoundError("Could not find tarred dataset in config or metadata.")
else:
tar_files = cfg.get('tar_files')
if metadata.get('tar_files') is not None:
raise ValueError(
'Tar files specified in config and in metadata file. Tar files should only be specified once.'
)
dataset = TarredSentenceDataset(
text_tar_filepaths=tar_files,
metadata_path=metadata_file,
tokenizer=self.tokenizer,
shuffle_n=cfg.get("tar_shuffle_n", 100),
shard_strategy=cfg.get("shard_strategy", "scatter"),
global_rank=self.global_rank,
world_size=self.world_size,
)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=1,
num_workers=cfg.get("num_workers", 2),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
else:
dataset = SentenceDataset(
tokenizer=self.tokenizer,
dataset=cfg.file_name,
tokens_in_batch=cfg.tokens_in_batch,
clean=cfg.get("clean", False),
max_seq_length=cfg.get("max_seq_length", 512),
min_seq_length=cfg.get("min_seq_length", 1),
cache_ids=cfg.get("cache_ids", False),
)
if cfg.shuffle:
sampler = pt_data.RandomSampler(dataset)
else:
sampler = pt_data.SequentialSampler(dataset)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=1,
sampler=sampler,
num_workers=cfg.get("num_workers", 2),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
@classmethod
def list_available_models(cls) -> Optional[Dict[str, str]]:
pass
| NeMo-main | nemo/collections/nlp/models/language_modeling/transformer_lm_model.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import math
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import build_train_valid_test_datasets
from nemo.collections.nlp.models.language_modeling.megatron_lm_encoder_decoder_model import (
MegatronLMEncoderDecoderModel,
)
from nemo.utils import logging
__all__ = ["MegatronT5Model"]
class T5Sentinel(enum.Enum):
FIRST = '<extra_id_0>'
END = '<extra_id_1>'
class MegatronT5Model(MegatronLMEncoderDecoderModel):
"""
Megatron T5 pretraining
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer=trainer)
# validate cfg
self._validate_cfg()
@property
def model_name(self):
"""Allows child classes to implement models with different data regime"""
return "T5"
def _validate_cfg(self):
"""Class-specific cfg validation"""
# Make sure the user specifies dataset type as either 't5' or 't5_prefix_lm' only.
if self._cfg.data.get('dataset_type', None) is not None:
if self._cfg.data.get('dataset_type') not in ['t5', 't5_prefix_lm', 'ul2']:
raise ValueError(
f"dataset_type must be either 't5', 't5_prefix_lm' or 'ul2'. found {self._cfg.data.get('dataset_type')}"
)
if hasattr(self._cfg.data, 'seq_length_dec') and self._cfg.data.get('dataset_type') == 't5':
if self._cfg.data.seq_length_dec < self._cfg.data.seq_length * self._cfg.data.masked_lm_prob:
raise ValueError(
f"Cannot have decoder max sequence length ({self._cfg.data.seq_length_dec}) less than encoder sequence length ({self._cfg.data.seq_length}) * masked_lm_prob ({self._cfg.data.masked_lm_prob})"
)
if self._cfg.data.get("dataset_type", "t5") == "ul2":
if self._cfg.data.seq_length_dec != self._cfg.data.seq_length:
raise ValueError(
f"Encoder and decoder sequence lengths must be the same while using the UL2 dataset type. Found encoder length {self._cfg.data.seq_length} and decoder length {self._cfg.data.seq_length_dec}"
)
if (
self._cfg.tokenizer.num_sentinel_tokens
< self._cfg.data.seq_length * self._cfg.data.extreme_masked_lm_prob
):
raise ValueError(
f"Not enough sentinel tokens specified. Need at least {math.ceil(self._cfg.data.seq_length * self._cfg.data.extreme_masked_lm_prob)} sentinel tokens. Found {self._cfg.tokenizer.num_sentinel_tokens}"
)
@property
def _build_train_valid_test_datasets_kwargs(self):
"""allows child classes to add kwargs to dataset building"""
return dict(max_seq_length_dec=self._cfg.data.seq_length_dec,)
def _build_vocab(self):
self.num_sentinel_tokens = self._cfg.tokenizer.num_sentinel_tokens
MegatronT5Model.add_special_tokens_to_tokenizer(
tokenizer=self.tokenizer,
tokenizer_cfg=self._cfg.tokenizer,
dataset_type=self._cfg.data.get("dataset_type", "t5"),
add_sentinel_tokens_in_reverse_order=self._cfg.tokenizer.get(
"add_sentinel_tokens_in_reverse_order", False
),
add_sentinel_tokens_first=self._cfg.tokenizer.get("add_sentinel_tokens_first", False),
)
super()._build_vocab()
@classmethod
def _add_sentinel_tokens(cls, tokenizer, num_sentinel_tokens, add_sentinel_tokens_in_reverse_order):
# Special check to see if <extra_id_{}> is already present in the tokenizer. If it is, only modify the additional_special_tokens function.
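        # e.g. (hypothetical setting) with num_sentinel_tokens=3 and
        # add_sentinel_tokens_in_reverse_order=True, the loop below processes
        # <extra_id_2>, <extra_id_1>, <extra_id_0> in that order.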
for i in range(num_sentinel_tokens):
if add_sentinel_tokens_in_reverse_order:
i = num_sentinel_tokens - i - 1
if len(tokenizer.text_to_ids(f'<extra_id_{i}>')) == 1:
tokenizer.special_token_to_id[f'<extra_id_{i}>'] = tokenizer.text_to_ids(f'<extra_id_{i}>')[0]
else:
tokenizer.add_special_tokens([f'<extra_id_{i}>'])
@classmethod
def _add_base_special_tokens(cls, tokenizer, is_huggingface_converted_model):
# Need to add cls, sep, mask tokens to the tokenizer if they don't exist.
# If cls, sep and mask are not attributes of the tokenizer, add it.
if not hasattr(tokenizer, 'cls_token'):
tokenizer.add_special_tokens({'cls_token': '<cls>'})
if not hasattr(tokenizer.tokenizer, 'sep_id'):
tokenizer.add_special_tokens({'sep_token': '<sep>'})
if not hasattr(tokenizer.tokenizer, 'mask_id'):
tokenizer.add_special_tokens({'mask_token': '<mask>'})
# bos, eos, pad and unk may be present in the provided spm .model file, if they are, use it.
if not hasattr(tokenizer, 'pad_token'):
# TODO: Figure out how to do backward compat with pad_id > 0 and >= 0.
if is_huggingface_converted_model:
if hasattr(tokenizer.tokenizer, 'pad_id') and tokenizer.tokenizer.pad_id() >= 0:
tokenizer.pad_token = tokenizer.tokenizer.id_to_piece(tokenizer.tokenizer.pad_id())
else:
tokenizer.add_special_tokens({'pad_token': '<pad>'})
else:
if hasattr(tokenizer.tokenizer, 'pad_id') and tokenizer.tokenizer.pad_id() > 0:
tokenizer.pad_token = tokenizer.tokenizer.id_to_piece(tokenizer.tokenizer.pad_id())
else:
tokenizer.add_special_tokens({'pad_token': '<pad>'})
else:
tokenizer.add_special_tokens({'pad_token': '<pad>'})
if not hasattr(tokenizer, 'bos_token'):
if hasattr(tokenizer.tokenizer, 'bos_id') and tokenizer.tokenizer.bos_id() > 0:
tokenizer.bos_token = tokenizer.tokenizer.id_to_piece(tokenizer.tokenizer.bos_id())
else:
tokenizer.add_special_tokens({'bos_token': '<bos>'})
else:
tokenizer.add_special_tokens({'bos_token': '<s>'})
if not hasattr(tokenizer, 'eos_token'):
if hasattr(tokenizer.tokenizer, 'eos_id') and tokenizer.tokenizer.eos_id() > 0:
tokenizer.eos_token = tokenizer.tokenizer.id_to_piece(tokenizer.tokenizer.eos_id())
else:
tokenizer.add_special_tokens({'eos_token': '<eos>'})
else:
tokenizer.add_special_tokens({'eos_token': '</s>'})
@classmethod
def add_special_tokens_to_tokenizer(
cls,
tokenizer,
tokenizer_cfg,
dataset_type="t5",
add_sentinel_tokens_in_reverse_order=False,
add_sentinel_tokens_first=False,
):
# T5-related construction
if tokenizer_cfg.library == 'huggingface' or tokenizer_cfg.library == 'megatron':
additional_tokens = {
'additional_special_tokens': [
f'<extra_id_{i}>' for i in range(tokenizer_cfg.get('num_sentinel_tokens', 0))
]
}
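            # e.g. (hypothetical setting) num_sentinel_tokens=100 yields
            # ['<extra_id_0>', ..., '<extra_id_99>']; for the 'ul2' dataset type the branch below
            # additionally appends '<extra_id_r>', '<extra_id_s>' and '<extra_id_x>'.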
if dataset_type == "ul2":
mask_types = ['r', 's', 'x']
for mask_type in mask_types:
additional_tokens['additional_special_tokens'].extend([f'<extra_id_{mask_type}>'])
if additional_tokens['additional_special_tokens']:
tokenizer.add_special_tokens(additional_tokens)
if tokenizer_cfg.library == 'sentencepiece':
# NOTE: This is an ugly way to support both NeMo-Megatron trained checkpoints and huggingface checkpoints.
            # Huggingface and Google checkpoints add sentinel tokens first (right after the base vocabulary), but in NeMo-Megatron we add <cls>, <sep>, <mask>, <pad>, <bos> etc. before the sentinel tokens <extra_id_xx>.
if add_sentinel_tokens_first:
if tokenizer_cfg.get('num_sentinel_tokens', 0) > 0:
cls._add_sentinel_tokens(
tokenizer, tokenizer_cfg.num_sentinel_tokens, add_sentinel_tokens_in_reverse_order
)
cls._add_base_special_tokens(tokenizer, is_huggingface_converted_model=True)
else:
cls._add_base_special_tokens(tokenizer, is_huggingface_converted_model=False)
if tokenizer_cfg.get('num_sentinel_tokens', 0) > 0:
cls._add_sentinel_tokens(
tokenizer, tokenizer_cfg.num_sentinel_tokens, add_sentinel_tokens_in_reverse_order
)
if dataset_type == "ul2":
for mask_type in ['r', 's', 'x']:
if len(tokenizer.text_to_ids(f'▁<extra_id_{mask_type}>')) == 1:
tokenizer.special_token_to_id[f'<extra_id_{mask_type}>'] = tokenizer.text_to_ids(
f'<extra_id_{mask_type}>'
)[0]
else:
tokenizer.add_special_tokens([f'<extra_id_{mask_type}>'])
def build_train_valid_test_datasets(self):
# Override limit_val_batches to be a multiple of num microbatches to prevent val_step from exiting in between a step
self._reconfigure_val_batches()
logging.info(f'Building {self.model_name} datasets.')
if self.trainer.limit_val_batches > 1.0 and isinstance(self.trainer.limit_val_batches, float):
raise ValueError("limit_val_batches must be an integer or float less than or equal to 1.0.")
global_batch_size = self._cfg.global_batch_size
eval_iters = (self.trainer.max_steps // self.trainer.val_check_interval + 1) * self.trainer.limit_val_batches
test_iters = self.trainer.limit_test_batches
train_valid_test_num_samples = [
self.trainer.max_steps * global_batch_size,
eval_iters * global_batch_size,
test_iters * global_batch_size,
]
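        # Worked example with hypothetical trainer settings: max_steps=100_000, global_batch_size=2048,
        # val_check_interval=1000, limit_val_batches=50, limit_test_batches=500 gives
        # eval_iters = (100_000 // 1000 + 1) * 50 = 5050, so the list above becomes
        # [204_800_000, 10_342_400, 1_024_000].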
if self.trainer.limit_val_batches <= 1.0 and isinstance(self.trainer.limit_val_batches, float):
train_valid_test_num_samples[
1
] = 1 # This is to make sure we only have one epoch on every validation iteration
self._train_ds, self._validation_ds, self._test_ds = build_train_valid_test_datasets(
cfg=self._cfg,
trainer=self.trainer,
tokenizer=self.tokenizer,
data_prefix=self._cfg.data.data_prefix,
data_impl=self._cfg.data.data_impl,
splits_string=self._cfg.data.splits_string,
train_valid_test_num_samples=train_valid_test_num_samples,
max_seq_length=self._cfg.data.seq_length,
masked_lm_prob=self._cfg.data.masked_lm_prob,
short_seq_prob=self._cfg.data.short_seq_prob,
seed=self._cfg.seed,
skip_warmup=self._cfg.data.skip_warmup,
dataset_type=self._cfg.data.get('dataset_type', self.model_name.lower()),
max_ngram_size=self._cfg.data.get('max_ngram_size', 10),
mean_ngram_size=self._cfg.data.get('mean_ngram_size', None),
geometric_dist=self._cfg.data.get('geometric_dist', True),
permutation=self._cfg.data.get('permutation', False),
whole_word_masking=self._cfg.data.get('whole_word_masking', True),
favor_long_ngrams=self._cfg.data.get('favor_long_ngrams', False),
respect_document_boundaries=self._cfg.data.get('respect_document_boundaries', True),
data_impl_kwargs=self._cfg.data.get('data_impl_kwargs', {}),
# additional arguments from child classes
**self._build_train_valid_test_datasets_kwargs,
)
logging.info(f'Length of train dataset: {len(self._train_ds)}')
logging.info(f'Length of val dataset: {len(self._validation_ds)}')
logging.info(f'Length of test dataset: {len(self._test_ds)}')
logging.info(f'Finished building {self.model_name} datasets.')
return self._train_ds, self._validation_ds, self._test_ds
def list_available_models(self):
pass
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_t5_model.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code has been adapted from the following private repo: https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/tree/prompt-learning/prefix_tuning_v2
# Adapted by: @adithyare
import itertools
from typing import Any
import torch
from omegaconf.dictconfig import DictConfig
from omegaconf.omegaconf import open_dict
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.common.parts.adapter_modules import LinearAdapterConfig
from nemo.collections.nlp.models.language_modeling.megatron_finetune_model import MegatronT5FinetuneModel
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.models.language_modeling.megatron_t5_prompt_learning_model import (
MegatronT5PromptLearningModel,
)
from nemo.collections.nlp.modules.common import VirtualPromptStyle
from nemo.collections.nlp.modules.common.megatron.adapters.parallel_adapters import (
AdapterName,
InfusedAdapterConfig,
LoraKQVAdapterConfig,
LoraKVAdapterConfig,
LoraQAdapterConfig,
MLPInfusedAdapterConfig,
ParallelLinearAdapterConfig,
)
from nemo.collections.nlp.parts.utils_funcs import get_last_rank
from nemo.core.classes.mixins import adapter_mixins
from nemo.utils import logging, model_utils
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
class MegatronT5BaseAdapterModel(MegatronT5PromptLearningModel):
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer)
self.adapter_name_keys = []
def forward(
self, input_ids, dec_input, enc_mask, dec_mask, position_ids, taskname_ids, labels=None, inference=False,
):
# Call forward on T5 model with preprocessed embeddings
if self.autocast_dtype == torch.float32:
output = self.frozen_model.enc_dec_model(
enc_input_ids=input_ids,
enc_attn_mask=enc_mask,
dec_input_ids=dec_input,
dec_attn_mask=dec_mask,
token_type_ids=None,
labels=labels,
output_enc_hidden_only=False,
enc_input=None,
)
else:
with torch.autocast(device_type="cuda", dtype=self.autocast_dtype):
output = self.frozen_model.enc_dec_model(
enc_input_ids=input_ids,
enc_attn_mask=enc_mask,
dec_input_ids=dec_input,
dec_attn_mask=dec_mask,
token_type_ids=None,
labels=labels,
output_enc_hidden_only=False,
enc_input=None,
)
return output, None
def setup(self, stage=None):
if stage == 'predict':
self.frozen_model.freeze()
return
self.setup_test_data()
if stage == 'test':
return
self.setup_training_data()
self.setup_validation_data()
logging.info(f'setup completed:\n{self.frozen_model.summarize()}')
def on_train_end(self):
# Save the best nemo model
self.save_to(save_path=self.cfg.nemo_path)
def compute_accuracy(self, enc_input, enc_mask, encoder_input, labels):
predicted_token_ids, log_probs = self.frozen_model.decode(
tokens_enc=enc_input,
enc_mask=enc_mask,
num_tokens_to_generate=self.decoder_seq_length,
encoder_input=encoder_input,
)
processed_inputs, processed_preds, processed_labels = [], [], []
preds = predicted_token_ids.cpu().numpy().tolist()
labels = labels.cpu().numpy().tolist()
enc_inputs = enc_input.cpu().numpy().tolist()
for i, (enc_input, pred, label) in enumerate(zip(enc_inputs, preds, labels)):
if self.tokenizer.eos_id in pred:
idx = pred.index(self.tokenizer.eos_id)
pred = pred[:idx]
additional_special_tokens_ids = []
if hasattr(self.tokenizer.tokenizer, "additional_special_tokens_ids"):
additional_special_tokens_ids = self.tokenizer.tokenizer.additional_special_tokens_ids
pred = [id for id in pred if id not in additional_special_tokens_ids]
label = [id for id in label if id not in additional_special_tokens_ids]
enc_input = [id for id in enc_input if id not in additional_special_tokens_ids]
pred = self.tokenizer.ids_to_text(pred)
label = self.tokenizer.ids_to_text(label)
enc_input = self.tokenizer.ids_to_text(enc_input)
processed_preds.append(pred)
processed_labels.append(label)
processed_inputs.append(enc_input)
return {
'predicted_token_ids': processed_preds,
'labels': processed_labels,
'enc_inputs': processed_inputs,
}
def validation_step(self, dataloader_iter, batch_idx, inference=False):
# Check if iterator is exhausted
dataloader_iter, done = self._val_iterator_done(dataloader_iter)
if done:
return
batch = next(dataloader_iter)
enc_input, dec_input, labels, loss_mask, enc_mask, dec_mask, position_ids, taskname_ids = batch
mode = self.training
self.eval()
gbs = self.cfg.get('validation_global_batch_size', self.cfg.global_batch_size)
self._reconfigure_and_process_inference_batch(enc_input.size(0), gbs)
loss_mean = self.fwd_bwd_step(itertools.chain([batch]), batch_idx, forward_only=True)
if self.cfg.get('report_validation_metric', False):
            # Pass encoder_input=None (no prompt embeddings are prepended here), matching predict_step.
            metrics = self.compute_accuracy(enc_input, enc_mask, None, labels)
metrics['loss'] = loss_mean
else:
metrics = {'loss': loss_mean}
self.validation_step_outputs.append(metrics)
self.train(mode=mode)
return metrics
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
enc_input, dec_input, labels, loss_mask, enc_mask, dec_mask, position_ids, taskname_ids = batch
gbs = self.cfg.get('validation_global_batch_size', self.cfg.global_batch_size)
self._reconfigure_and_process_inference_batch(enc_input.size(0), gbs)
predicted_token_ids, log_probs = self.frozen_model.decode(
tokens_enc=enc_input,
enc_mask=enc_mask,
num_tokens_to_generate=self.decoder_seq_length,
encoder_input=None,
)
# Special ids to text function to handle stripping <eos> and special tokens with sentencepiece tokenizers.
preds_text = MegatronT5FinetuneModel.ids_to_text(predicted_token_ids, self.tokenizer)
input_text = MegatronT5FinetuneModel.ids_to_text(enc_input, self.tokenizer)
if labels is not None:
labels_text = MegatronT5FinetuneModel.ids_to_text(labels, self.tokenizer)
else:
labels_text = [None] * len(preds_text)
return {
'input_text': input_text,
'preds_text': preds_text,
'labels_text': labels_text,
}
def setup_optimizer_param_groups(self):
"""
ModelPT override. Optimizer will get self._optimizer_param_groups.
        Freezes the entire base model and builds a single optimizer param group
        containing only the parameters of the enabled adapter modules. The frozen
        model's weights therefore receive no updates, while gradients can still be
        passed through them as needed for pipeline parallel models; only the adapters
        are trained with the learning rate set by the user.
"""
self.frozen_model.freeze() # Freeze the entire model
opt_params = []
for _, module in self.frozen_model.named_modules():
if isinstance(module, adapter_mixins.AdapterModuleMixin) and module.is_adapter_available():
module.set_enabled_adapters(enabled=True)
module.unfreeze_enabled_adapters() # selectively unfreeze the adapter modules.
opt_params += [p for p in module.parameters()]
self._optimizer_param_groups = [{'params': opt_params}]
logging.info(f'Optimizer groups set:\n{self.frozen_model.summarize()}')
def get_forward_output_only_func(self):
"""
Used for generate method only for now.
"""
def fwd_output_only_func(dataloader_iter, model):
batch = next(dataloader_iter)
extra_arg = {}
(
tokens,
attention_mask,
position_ids,
task_ids,
set_inference_key_value_memory,
inference_max_sequence_len,
) = batch
tokens = tokens.cuda()
attention_mask = attention_mask.cuda()
position_ids = position_ids.cuda()
task_ids = task_ids.cuda()
extra_arg['set_inference_key_value_memory'] = set_inference_key_value_memory[0].item()
extra_arg['inference_max_sequence_len'] = inference_max_sequence_len[0].item()
output_tensor = model(tokens, position_ids, attention_mask, task_ids, **extra_arg)
def id_func(output_tensor):
return output_tensor, {'logits': output_tensor}
return output_tensor, id_func
return fwd_output_only_func
def state_dict(self, destination=None, prefix=None, keep_vars=False):
"""
Creates a state_dict using only the adapter parameters.
This ensures that this wrapper class will only checkpoint the adapter
weights and not the rest of the base GPT Model.
"""
state_dict_ = {}
for name, module in self.frozen_model.named_modules():
if isinstance(module, adapter_mixins.AdapterModuleMixin) and module.is_adapter_available():
for adapter_key in self.adapter_name_keys:
adapter_module = module.get_adapter_module(adapter_key)
if adapter_module:
state_adapter_key = ':'.join([name, adapter_key])
state_dict_[state_adapter_key] = adapter_module.state_dict()
module.set_enabled_adapters(enabled=True)
return state_dict_
def load_state_dict(self, state_dict, strict: bool = True):
"""
Loads a state_dict expecting the state_dict to contain key,values
only for the adapter parameters.
"""
for name, module in self.frozen_model.named_modules():
if isinstance(module, adapter_mixins.AdapterModuleMixin) and module.is_adapter_available():
for adapter_key in self.adapter_name_keys:
adapter_module = module.get_adapter_module(adapter_key)
if adapter_module:
state_adapter_key = ':'.join([name, adapter_key])
adapter_module.load_state_dict(state_dict[state_adapter_key], strict)
module.set_enabled_adapters(enabled=True)
def on_validation_epoch_end(self):
if self.cfg.get('pipeline_model_parallel_size', 1) > 1:
if parallel_state.is_pipeline_last_stage():
# only the last pipeline parallel stages return loss
averaged_loss = torch.stack([i['loss'] for i in self.validation_step_outputs]).mean()
else:
averaged_loss = torch.tensor(0.0).cuda()
# we can only log on one rank if it is rank zero so we broadcast from last rank
torch.distributed.broadcast(averaged_loss, get_last_rank())
self.log('val_loss', averaged_loss, prog_bar=True, rank_zero_only=True, batch_size=1)
logging.info(f'Validation loss: {averaged_loss}')
else:
averaged_loss = torch.stack([item['loss'] for item in self.validation_step_outputs]).mean()
logging.info(f'Validation loss: {averaged_loss}')
self.log('val_loss', averaged_loss, prog_bar=True, rank_zero_only=True, batch_size=1)
if self.cfg.get('report_validation_accuracy', False):
gather_results = [None for _ in range(parallel_state.get_data_parallel_world_size())]
all_preds = list(itertools.chain(*[item['predicted_token_ids'] for item in self.validation_step_outputs]))
all_labels = list(itertools.chain(*[item['labels'] for item in self.validation_step_outputs]))
all_inputs = list(itertools.chain(*[item['enc_inputs'] for item in self.validation_step_outputs]))
assert len(all_preds) == len(all_labels)
assert len(all_preds) == len(all_inputs)
# Gather inputs, preds, labels from all workers
torch.distributed.all_gather_object(
gather_results,
[(input, pred, label) for (input, pred, label) in zip(all_inputs, all_preds, all_labels)],
group=parallel_state.get_data_parallel_group(),
)
# Deduplicate sentences that may have been distributed across multiple data parallel ranks.
if parallel_state.get_data_parallel_rank() == 0:
gather_results_dedup = list(set(itertools.chain(*gather_results)))
correct = 0
for (input, pred, label) in gather_results_dedup:
if pred == label:
correct += 1
val_acc = correct / len(gather_results_dedup)
val_acc = torch.tensor(val_acc).cuda()
logging.info(f'Validation accuracy: {val_acc}')
else:
val_acc = torch.tensor(0.0).cuda()
self.log('val_acc', val_acc, prog_bar=True, rank_zero_only=True, batch_size=1)
gbs = self.cfg.global_batch_size
mbs = self.cfg.micro_batch_size
self._reconfigure_batch_sizes(gbs, mbs)
self.validation_step_outputs.clear() # free memory
class MegatronT5AdapterLearningModel(MegatronT5BaseAdapterModel):
"""
TODO (@adithyare)
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer)
assert cfg.adapter_tuning.get('adapter_dim', 0) > 0, "adapter_dim has not been set."
assert (
cfg.adapter_tuning.adapter_dim % cfg.tensor_model_parallel_size == 0
), "The adapter dim should be divisible by tensor_model_parallel_size."
assert cfg.adapter_tuning.type in [
'linear_adapter',
'parallel_adapter',
], "Adapter type should be 'linear_adapter' or 'parallel_adapter'"
self.adapter_name_keys = [AdapterName.PRE_ATTN_ADAPTER, AdapterName.POST_ATTN_ADAPTER]
frozen_model_cfg = MegatronT5Model.restore_from(
cfg.get('language_model_path'), trainer=trainer, return_config=True
)
for _, layer in self.frozen_model.named_modules():
if hasattr(layer, 'activations_checkpoint_method'):
layer.activations_checkpoint_method = (
None # (@adithyare) adapter learning does not support activations checkpointing atm.
)
self.frozen_model.freeze()
logging.info(f'Before adding adapters:\n{self.frozen_model.summarize()}')
encoder = self.frozen_model.enc_dec_model.enc_dec_model.encoder
decoder = self.frozen_model.enc_dec_model.enc_dec_model.decoder
if encoder:
encoder_cfg = self._get_component_cfg('encoder', frozen_model_cfg, cfg)
self._add_adapters_to_component(encoder, encoder_cfg, self.adapter_name_keys)
logging.info(f'Adding encoder adapters:\n{self.frozen_model.summarize()}')
if decoder:
decoder_cfg = self._get_component_cfg('decoder', frozen_model_cfg, cfg)
self._add_adapters_to_component(decoder, decoder_cfg, self.adapter_name_keys)
logging.info(f'Adding decoder adapters:\n{self.frozen_model.summarize()}')
def _add_adapters_to_component(self, component, component_cfg, adapter_name_keys):
for _, module in component.named_modules():
if isinstance(module, adapter_mixins.AdapterModuleMixin):
for adapter_key in adapter_name_keys:
adapter_cfg = self._get_adapter_cfg(component_cfg)
if model_utils.import_class_by_path(adapter_cfg._target_) in module.get_accepted_adapter_types():
module.add_adapter(name=adapter_key, cfg=adapter_cfg)
def _get_component_cfg(self, component_name, frozen_model_cfg, cfg):
if component_name in frozen_model_cfg:
component_cfg = frozen_model_cfg.get(component_name)
with open_dict(component_cfg):
component_cfg.tensor_model_parallel_size = frozen_model_cfg.tensor_model_parallel_size
component_cfg.adapter_tuning = cfg.adapter_tuning
else:
component_cfg = frozen_model_cfg
with open_dict(component_cfg):
component_cfg.adapter_tuning = cfg.adapter_tuning
return component_cfg
def _get_adapter_cfg(self, component_cfg):
if component_cfg.adapter_tuning.type == "parallel_adapter":
adapter_cfg = ParallelLinearAdapterConfig(
in_features=component_cfg.hidden_size,
out_features=component_cfg.hidden_size,
dim=component_cfg.adapter_tuning.adapter_dim,
norm_position=component_cfg.adapter_tuning.get('norm_position', 'pre'),
norm_type=component_cfg.adapter_tuning.get('norm_type', 'mixedfusedlayernorm'),
column_init_method=component_cfg.adapter_tuning.get('column_init_method', 'xavier'),
row_init_method=component_cfg.adapter_tuning.get('row_init_method', 'zero'),
dropout=component_cfg.adapter_tuning.adapter_dropout,
)
else:
adapter_cfg = LinearAdapterConfig(
in_features=component_cfg.hidden_size,
dim=component_cfg.adapter_tuning.adapter_dim,
norm_position=component_cfg.adapter_tuning.get('norm_position', 'pre'),
dropout=component_cfg.adapter_tuning.adapter_dropout,
)
return adapter_cfg
@classmethod
def list_available_models(cls):
pass
class MegatronT5LoraModel(MegatronT5BaseAdapterModel):
"""
TODO (@adithyare)
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer)
# assert cfg.lora_tuning.get('adapter_dim', 0) > 0, "adapter_dim has not been set."
# assert (
# cfg.lora_tuning.adapter_dim % cfg.tensor_model_parallel_size == 0
# ), "The adapter dim should be divisible by tensor_model_parallel_size."
encoder_adapter_name_keys = [AdapterName.LORA_KQV_ADAPTER]
decoder_adapter_name_keys = [
AdapterName.LORA_KQV_ADAPTER,
AdapterName.LORA_KV_ADAPTER,
AdapterName.LORA_Q_ADAPTER,
]
# add adapter keys to the list -> to update state dict
self.adapter_name_keys = encoder_adapter_name_keys + decoder_adapter_name_keys
frozen_model_cfg = MegatronT5Model.restore_from(
cfg.get('language_model_path'), trainer=trainer, return_config=True
)
for _, layer in self.frozen_model.named_modules():
if hasattr(layer, 'activations_checkpoint_method'):
layer.activations_checkpoint_method = (
None # (@adithyare) adapter learning does not support activations checkpointing atm.
)
self.frozen_model.freeze()
logging.info(f'Before adding adapters:\n{self.frozen_model.summarize()}')
encoder = self.frozen_model.enc_dec_model.enc_dec_model.encoder
decoder = self.frozen_model.enc_dec_model.enc_dec_model.decoder
if encoder:
encoder_cfg = self._get_component_cfg('encoder', frozen_model_cfg, cfg)
self._add_adapters_to_component(encoder, encoder_cfg, encoder_adapter_name_keys)
logging.info(f'Adding encoder adapters:\n{self.frozen_model.summarize()}')
if decoder:
decoder_cfg = self._get_component_cfg('decoder', frozen_model_cfg, cfg)
self._add_adapters_to_component(decoder, decoder_cfg, decoder_adapter_name_keys)
logging.info(f'Adding decoder adapters:\n{self.frozen_model.summarize()}')
def _add_adapters_to_component(self, component, component_cfg, adapter_name_keys):
for _, module in component.named_modules():
if isinstance(module, adapter_mixins.AdapterModuleMixin):
for adapter_key in adapter_name_keys:
adapter_cfg = self._get_adapter_cfg(component_cfg, adapter_key)
if model_utils.import_class_by_path(adapter_cfg._target_) in module.get_accepted_adapter_types():
module.add_adapter(name=adapter_key, cfg=adapter_cfg)
print(f"in adding {adapter_key}")
def _get_component_cfg(self, component_name, frozen_model_cfg, cfg):
if component_name in frozen_model_cfg:
component_cfg = frozen_model_cfg.get(component_name)
with open_dict(component_cfg):
component_cfg.tensor_model_parallel_size = frozen_model_cfg.tensor_model_parallel_size
component_cfg.lora_tuning = cfg.lora_tuning
else:
component_cfg = frozen_model_cfg
with open_dict(component_cfg):
component_cfg.lora_tuning = cfg.lora_tuning
return component_cfg
def _get_adapter_cfg(self, component_cfg, adapter_key):
if component_cfg.kv_channels is None:
assert (
component_cfg.hidden_size % component_cfg.num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = component_cfg.hidden_size // component_cfg.num_attention_heads
else:
kv_channels = component_cfg.kv_channels
projection_size = kv_channels * component_cfg.num_attention_heads
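        # e.g. (hypothetical sizes) hidden_size=1024, num_attention_heads=16 and kv_channels unset
        # gives kv_channels=64 and projection_size=1024, so the LoRA KQV adapter below
        # maps 1024 -> 3 * 1024 = 3072.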
if adapter_key == AdapterName.LORA_KQV_ADAPTER:
adapter_cfg = LoraKQVAdapterConfig(
in_features=component_cfg.hidden_size,
out_features=3 * projection_size,
dim=component_cfg.lora_tuning.kqv_adapter_dim,
norm_position="none",
norm_type="none",
activation="identity",
column_init_method=component_cfg.lora_tuning.get("column_init_method", "normal"),
row_init_method=component_cfg.lora_tuning.get("row_init_method", "zero"),
gather_output=False,
dropout=0.0,
)
elif adapter_key == AdapterName.LORA_KV_ADAPTER:
adapter_cfg = LoraKVAdapterConfig(
in_features=component_cfg.hidden_size,
out_features=2 * projection_size,
dim=component_cfg.lora_tuning.kv_adapter_dim,
norm_position="none",
norm_type="none",
activation="identity",
column_init_method=component_cfg.lora_tuning.get("column_init_method", "normal"),
row_init_method=component_cfg.lora_tuning.get("row_init_method", "zero"),
gather_output=False,
dropout=0.0,
)
elif adapter_key == AdapterName.LORA_Q_ADAPTER:
adapter_cfg = LoraQAdapterConfig(
in_features=component_cfg.hidden_size,
out_features=1 * projection_size,
dim=component_cfg.lora_tuning.q_adapter_dim,
norm_position="none",
norm_type="none",
activation="identity",
column_init_method=component_cfg.lora_tuning.get("column_init_method", "normal"),
row_init_method=component_cfg.lora_tuning.get("row_init_method", "zero"),
gather_output=False,
dropout=0.0,
)
else:
raise RuntimeError("Unexpected adapter key name..")
return adapter_cfg
@classmethod
def list_available_models(cls):
pass
class MegatronT5InfusedAdapterModel(MegatronT5BaseAdapterModel):
"""
    MegatronT5InfusedAdapterModel is a model that combines a base model (MegatronT5Model) with "Infused Adapters that Inhibit and Amplify Inner Activations", known as IA3.
    This class supports the addition of IA3 into a transformer-based LM as described in Liu et al. (https://arxiv.org/pdf/2205.05638.pdf)
    Three adapters are inserted into each Transformer layer of the base T5 model. Each adapter is basically a vector that simply scales the key, value, or ffn hidden representations.
    It is assumed that this set of adapters will then be trained for a specific task.
    Once trained, the adapter weights are saved and can be re-loaded
    and infused into the same T5 model for inference.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer)
frozen_model_cfg = MegatronT5Model.restore_from(
cfg.get('language_model_path'), trainer=trainer, return_config=True
)
for _, layer in self.frozen_model.named_modules():
if hasattr(layer, 'activations_checkpoint_method'):
layer.activations_checkpoint_method = (
None # (@adithyare) adapter learning does not support activations checkpointing atm.
)
self.adapter_name_keys = [AdapterName.KEY_INFUSED, AdapterName.VALUE_INFUSED, AdapterName.MLP_INFUSED]
self.frozen_model.freeze()
logging.info(f'Before adding adapters:\n{self.frozen_model.summarize()}')
encoder = self.frozen_model.enc_dec_model.enc_dec_model.encoder
decoder = self.frozen_model.enc_dec_model.enc_dec_model.decoder
if encoder:
encoder_cfg = self._get_component_cfg('encoder', frozen_model_cfg)
self._add_adapters_to_component(encoder, encoder_cfg, self.adapter_name_keys)
logging.info(f'After adding encoder adapters:\n{self.frozen_model.summarize()}')
if decoder:
decoder_cfg = self._get_component_cfg('decoder', frozen_model_cfg)
self._add_adapters_to_component(decoder, decoder_cfg, self.adapter_name_keys)
logging.info(f'After adding all adapters:\n{self.frozen_model.summarize()}')
def _add_adapters_to_component(self, component, component_cfg, adapter_name_keys):
for _, module in component.named_modules():
if isinstance(module, adapter_mixins.AdapterModuleMixin):
for adapter_key in adapter_name_keys:
adapter_cfg = self._get_adapter_cfg(component_cfg, adapter_key)
if model_utils.import_class_by_path(adapter_cfg._target_) in module.get_accepted_adapter_types():
module.add_adapter(name=adapter_key, cfg=adapter_cfg)
def _get_component_cfg(self, component_name, frozen_model_cfg):
if component_name in frozen_model_cfg:
component_cfg = frozen_model_cfg.get(component_name)
with open_dict(component_cfg):
component_cfg.tensor_model_parallel_size = frozen_model_cfg.tensor_model_parallel_size
else:
component_cfg = frozen_model_cfg
return component_cfg
def _get_adapter_cfg(self, component_cfg, adapter_key):
if adapter_key == AdapterName.MLP_INFUSED:
cfg = MLPInfusedAdapterConfig(
in_features=component_cfg.ffn_hidden_size // component_cfg.tensor_model_parallel_size
)
elif adapter_key in [AdapterName.KEY_INFUSED, AdapterName.VALUE_INFUSED]:
if component_cfg.get('kv_channels', None):
cfg = InfusedAdapterConfig(
in_features=component_cfg.kv_channels
* component_cfg.num_attention_heads
// component_cfg.tensor_model_parallel_size
)
else:
cfg = InfusedAdapterConfig(
in_features=component_cfg.hidden_size // component_cfg.tensor_model_parallel_size
)
else:
raise ValueError(f"Adapter Key {adapter_key} is unknown.")
return cfg
def _component_state_dict(self, component_name, component, adapter_name_keys):
state_dict_ = {}
for name, module in component.named_modules():
if isinstance(module, adapter_mixins.AdapterModuleMixin) and module.is_adapter_available():
for adapter_key in adapter_name_keys:
adapter_module = module.get_adapter_module(adapter_key)
if adapter_module:
state_adapter_key = ':'.join([component_name, name, adapter_key])
state_dict_[state_adapter_key] = adapter_module.state_dict()
module.set_enabled_adapters(enabled=True)
return state_dict_
def _load_component_state_dict(
self, component_name, component, adapter_name_keys, state_dict, strict: bool = True
):
for name, module in component.named_modules():
if isinstance(module, adapter_mixins.AdapterModuleMixin) and module.is_adapter_available():
for adapter_key in adapter_name_keys:
adapter_module = module.get_adapter_module(adapter_key)
if adapter_module:
state_adapter_key = ':'.join([component_name, name, adapter_key])
adapter_module.load_state_dict(state_dict[state_adapter_key], strict)
module.set_enabled_adapters(enabled=True)
def state_dict(self, destination=None, prefix=None, keep_vars=False):
"""
Creates a state_dict using only the adapter parameters.
This ensures that this wrapper class will only checkpoint the adapter
weights and not the rest of the base GPT Model.
"""
encoder = self.frozen_model.enc_dec_model.enc_dec_model.encoder
decoder = self.frozen_model.enc_dec_model.enc_dec_model.decoder
encoder_state_dict = self._component_state_dict('encoder', encoder, self.adapter_name_keys) if encoder else {}
decoder_state_dict = self._component_state_dict('decoder', decoder, self.adapter_name_keys) if decoder else {}
state_dict_ = {
**encoder_state_dict,
**decoder_state_dict,
} # merge the two state dicts (does not check for collisions in keys)
return state_dict_
def load_state_dict(self, state_dict, strict: bool = True):
"""
Loads a state_dict expecting the state_dict to contain key,values
only for the adapter parameters.
"""
encoder = self.frozen_model.enc_dec_model.enc_dec_model.encoder
decoder = self.frozen_model.enc_dec_model.enc_dec_model.decoder
if encoder:
self._load_component_state_dict('encoder', encoder, self.adapter_name_keys, state_dict, strict)
if decoder:
self._load_component_state_dict('decoder', decoder, self.adapter_name_keys, state_dict, strict)
@classmethod
def list_available_models(cls):
pass
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_t5_adapter_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
from functools import partial
from typing import Any, Optional
import torch
from omegaconf import DictConfig, ListConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.common.metrics import MetricStringToTorchMetric
from nemo.collections.nlp.data.language_modeling.megatron.base_dataset_utils import (
get_datasets_weights_and_num_samples,
)
from nemo.collections.nlp.data.language_modeling.megatron.blendable_dataset import BlendableDataset
from nemo.collections.nlp.data.language_modeling.megatron.gpt_sft_chat_dataset import GPTSFTChatDataset
from nemo.collections.nlp.data.language_modeling.megatron.gpt_sft_dataset import GPTSFTDataset
from nemo.collections.nlp.data.language_modeling.megatron.megatron_batch_samplers import (
MegatronPretrainingBatchSampler,
)
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.utils import get_iterator_k_split
from nemo.collections.nlp.modules.common.text_generation_utils import (
LengthParam,
SamplingParam,
generate,
get_computeprob_response,
megatron_gpt_generate,
)
from nemo.collections.nlp.parts.utils_funcs import get_last_rank
from nemo.utils import AppState, logging
try:
from apex.transformer.pipeline_parallel.utils import (
_reconfigure_microbatch_calculator,
get_current_global_batch_size,
get_micro_batch_size,
get_num_microbatches,
)
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import parallel_state
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
__all__ = ['MegatronGPTSFTModel']
class MegatronGPTSFTModel(MegatronGPTModel):
"""
Megatron GPT Supervised Fine-Tuning
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
if not HAVE_APEX:
raise ImportError(
"Apex was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
super().__init__(cfg, trainer=trainer)
self.sep_id = cfg.get('sep_id', 49704)
if hasattr(self.cfg.data, "validation_ds"):
self.val_metric, self.val_metric_name = self.setup_metric(self.cfg.data.validation_ds)
self.val_metric = torch.nn.ModuleList(self.val_metric) if self.val_metric is not None else None
            # Use other keys from the metadata to calculate metrics
if hasattr(self.cfg.data.validation_ds, "metric"):
self.val_metric_label_key = self.cfg.data.validation_ds.metric.get('label_key', 'labels')
if hasattr(self.cfg.data, "test_ds"):
self.test_metric, self.test_metric_name = self.setup_metric(self.cfg.data.test_ds)
self.test_metric = torch.nn.ModuleList(self.test_metric) if self.test_metric is not None else None
            # Use other keys from the metadata to calculate metrics
if hasattr(self.cfg.data.test_ds, "metric"):
self.test_metric_label_key = self.cfg.data.test_ds.metric.get('label_key', 'labels')
if self.cfg.get('megatron_amp_O2', False):
base_module = self.model.module
else:
base_module = self.model
self._reset_activation_checkpointing_args()
self._reset_sequence_parallelism_args()
self.virtual_tokens = 0
def setup_metric(self, data_cfg):
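        # Illustrative sketch of a `metric` block this method can parse (hypothetical values;
        # the real schema comes from the SFT YAML config):
        #   metric:
        #     name: accuracy
        #     average: micro
        #     num_classes: 3
        #     labels_are_strings: True
        #     class_labels: ['entailment', 'neutral', 'contradiction']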
metric_name = "exact_string_match"
if not hasattr(data_cfg, "metric"):
metric = MetricStringToTorchMetric["exact_string_match"]
else:
if not hasattr(data_cfg.metric, "name"):
raise ValueError("Metric name is not provided in the metric config.")
if data_cfg.metric.name == "loss":
return None, "loss"
if data_cfg.metric.name not in MetricStringToTorchMetric:
raise KeyError(
f"{data_cfg.metric.name} is not supported. List of supported metrics: {MetricStringToTorchMetric.keys()}"
)
if data_cfg.metric.name in self._metrics_require_string2category_map:
if data_cfg.metric.average is None:
raise ValueError(
f"{data_cfg.metric.name} requires specifying whether you want to compute a micro or macro average. Found None."
)
if (
data_cfg.metric.get('labels_are_strings', False)
and data_cfg.metric.name in self._metrics_require_string2category_map
):
if data_cfg.metric.num_classes is None:
raise ValueError(
"Number of classes is not provided in the metric section within the data config. "
f"Please provide the number of classes in the data config to use the {data_cfg.metric.name} metric."
)
if data_cfg.metric.get('class_labels', None) is None or not isinstance(
data_cfg.metric.get('class_labels', None), ListConfig
):
raise ValueError(
"Class labels are not provided properly in the metric section witnin the data config. "
f"Please provide the class labels as a list of strings in the data config to use the {data_cfg.metric.name} metric."
)
if len(data_cfg.metric.get('class_labels', None)) != data_cfg.metric.num_classes:
raise ValueError(
f"Number of class labels {len(data_cfg.metric.get('class_labels', None))} does not match `num_classes` : {data_cfg.metric.num_classes}"
)
metric_name = data_cfg.metric.name
metric = MetricStringToTorchMetric[metric_name]
if isinstance(data_cfg.file_names, ListConfig):
if 'rouge' not in data_cfg.metric.name:
metric = [
metric(average=data_cfg.metric.average, num_classes=data_cfg.metric.num_classes)
for _ in range(len(data_cfg.file_names))
]
else:
metric = [metric() for _ in range(len(data_cfg.file_names))]
else:
if 'rouge' not in data_cfg.metric.name:
metric = [metric(average=data_cfg.metric.average, num_classes=data_cfg.metric.num_classes)]
else:
metric = [metric()]
return metric, metric_name
@property
def _metrics_require_string2category_map(self):
return set(["f1", "accuracy", "average_precision"])
def setup(self, stage=None):
        # NOTE: super().__init__ will try and setup train/val/test datasets, but we sidestep this using an `if self._train_ds is not None` condition
# We then set things up for real only once setup() of this class is called.
resume_checkpoint_path = self.trainer.ckpt_path
if resume_checkpoint_path:
init_consumed_samples = self._extract_consumed_samples_from_ckpt(resume_checkpoint_path)
else:
init_consumed_samples = 0
self.init_consumed_samples = init_consumed_samples
if stage == 'predict':
return
# If the user wants to manually override train and validation dataloaders before calling `.fit()`
if self._train_dl is not None and self._validation_dl is not None:
return
self.build_train_valid_test_datasets(stage=stage)
if hasattr(self, '_train_ds'):
self.setup_training_dataloader()
if hasattr(self, '_validation_ds'):
self._validation_dl = self.setup_eval_dataloader(self._validation_ds, self.cfg.data.validation_ds)
if hasattr(self.cfg.data, 'test_ds') and self.cfg.data.test_ds.get('file_names', None) is not None:
self._test_dl = self.setup_eval_dataloader(self._test_ds, self.cfg.data.test_ds)
# Raise error if using multiple dataloaders
if type(self._validation_dl) == list and len(self._validation_dl) > 1:
raise NotImplementedError('Lightning 2.0 does not support multiple dataloaders with dataloader_iter')
if type(self._test_dl) == list and len(self._test_dl) > 1:
raise NotImplementedError('Lightning 2.0 does not support multiple dataloaders with dataloader_iter')
# when using pipeline model parallel the final stage need to initialize word embeddings
if not self.cfg.get('mcore_gpt', False):
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
if isinstance(self.model, list):
for i, module in enumerate(self.model):
parallel_state.set_virtual_pipeline_model_parallel_rank(i)
module.sync_initial_word_embeddings()
parallel_state.set_virtual_pipeline_model_parallel_rank(0)
else:
self.model.sync_initial_word_embeddings()
if self.cfg.get('transformer_engine', False):
self.setup_transformer_engine_tp_groups()
def _build_dataset(self, data_cfg, is_train=True):
datasets = []
# Determine if we are using a single dataset or a list of datasets.
is_list_config = isinstance(data_cfg.file_names, ListConfig)
if not is_list_config:
raise ValueError(f"SFT train/validation datasets must be provided as a list of individual JSONL files.")
if is_train:
# Construct the data prefix list for `get_datasets_weights_and_num_samples()`
# that is of the format [weight1,file_name1,weight2,file_name2,...]
if data_cfg.concat_sampling_probabilities is None or not isinstance(
data_cfg.concat_sampling_probabilities, ListConfig
):
raise ValueError(
(
f"concat_sampling_probabilities must be a ListConfig with the same number of files in file_names."
f"Found: {data_cfg.concat_sampling_probabilities}"
)
)
if len(data_cfg.get('concat_sampling_probabilities', None)) != len(data_cfg.file_names):
                raise ValueError(
                    f"concat_sampling_probabilities must be of the same size as file_names. "
                    f"Provided size {len(data_cfg.concat_sampling_probabilities)}, number of datasets {len(data_cfg.file_names)}"
                )
data_prefix = []
for weight, prefix in zip(data_cfg.concat_sampling_probabilities, data_cfg.file_names):
data_prefix.append(weight)
data_prefix.append(prefix)
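            # Example (assumed values): concat_sampling_probabilities=[0.3, 0.7] and
            # file_names=['a.jsonl', 'b.jsonl'] yield data_prefix=[0.3, 'a.jsonl', 0.7, 'b.jsonl'].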
if self.trainer.max_steps is None or self.trainer.max_steps <= 0:
raise ValueError(
f'Trainer max_steps must be set to a positive integer. Found {self.trainer.max_steps}'
)
num_train_samples = [self.trainer.max_steps * data_cfg.global_batch_size]
_, _, num_train_samples_per_dataset = get_datasets_weights_and_num_samples(data_prefix, num_train_samples)
num_train_samples_after_blend = sum([x[0] for x in num_train_samples_per_dataset])
else:
num_train_samples_per_dataset = [[None]] * len(data_cfg.file_names)
        # Check dataset max_seq_length against max_position_embeddings size
if (
self.cfg.get('position_embedding_type', None) in [None, 'learned_absolute']
and data_cfg.max_seq_length > self.cfg.max_position_embeddings
):
logging.warning(
f"Set dataset max_seq_length to max_position_embeddings {self.cfg.max_position_embeddings} if using learned_absolute position embedding"
)
data_cfg.max_seq_length = self.cfg.max_position_embeddings
for file_path, num_samples in zip(data_cfg.file_names, num_train_samples_per_dataset):
if self.cfg.data.get("chat", False):
dataset_cls = GPTSFTChatDataset
else:
dataset_cls = GPTSFTDataset
dataset = dataset_cls(
file_path=file_path,
tokenizer=self.tokenizer,
max_seq_length=data_cfg.max_seq_length,
min_seq_length=data_cfg.min_seq_length,
add_bos=data_cfg.get('add_bos', False),
add_eos=data_cfg.get('add_eos', True),
add_sep=data_cfg.get('add_sep', False),
sep_id=self.sep_id,
max_num_samples=num_samples[0],
seed=data_cfg.get('seed', 1234),
label_key=data_cfg.get('label_key', 'answer'),
answer_only_loss=self.cfg.get('answer_only_loss', True),
truncation_field=data_cfg.get('truncation_field', 'text'),
pad_to_max_length=data_cfg.get('pad_to_max_length', False),
index_mapping_dir=data_cfg.get('index_mapping_dir', None),
prompt_template=data_cfg.get('prompt_template', None),
virtual_tokens=self.virtual_tokens,
tokens_to_generate=data_cfg.get(
'tokens_to_generate', 0
                ),  # used at inference time to allocate tensor positions for tokens that will be generated by the inference procedure.
memmap_workers=data_cfg.get(
'memmap_workers', None
), # used to set num. of workers to create the memmap index files
hf_dataset=data_cfg.get(
'hf_dataset', False
                ),  # Whether to load the json file with the HuggingFace dataset; otherwise, the jsonl file is loaded with JSONLMemMapDataset.
truncation_method=data_cfg.get(
'truncation_method', 'right'
), # used to choose truncation method. Options: ['random', 'left', 'right']
)
datasets.append(dataset)
if is_train:
dataset = BlendableDataset(
datasets=datasets, weights=data_cfg.concat_sampling_probabilities, size=num_train_samples_after_blend
)
return dataset
else:
return datasets
def _determine_log_key(self, data_config, dataloader_idx, metric_name, mode):
# Function that determines whether to log based on the user provided name of the dataset or the dataloader index.
base_key = f"{mode}_{metric_name}_" if metric_name is not None else f"{mode}_"
# If the user provided names for each validation/test dataset, use those.
if hasattr(data_config, "names") and data_config.names is not None:
# With only a single validation/test dataset, the name is not a list.
if not isinstance(data_config.names, ListConfig):
name = data_config.names
else:
name = data_config.names[dataloader_idx]
return base_key + name
else:
return base_key + f"dataloader{dataloader_idx}"
def fwd_bwd_step(self, dataloader_iter, batch_idx, forward_only):
batch = next(dataloader_iter)
        # Keep only torch.Tensor values to prevent errors when the batch is processed by get_iterator_k_split()
batch = {k: v for k, v in batch.items() if isinstance(v, torch.Tensor)}
_, seq_length = batch['tokens'].shape
data_iter = get_iterator_k_split(batch, get_num_microbatches())
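        # The per-rank global batch is split into `num_microbatches` equal slices so the
        # pipeline schedule can consume one micro-batch at a time.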
# handle asynchronous grad reduction
no_sync_func = None
grad_sync_func = None
param_sync_func = None
if not forward_only and self.with_distributed_adam:
no_sync_func = partial(self._optimizer.no_sync, greedy_grad_copy=self.megatron_amp_o2,)
grad_sync_func = self.reduce_overlap_gradients
param_sync_func = self.sync_overlap_parameters
self.model.config.no_sync_func = no_sync_func
self.model.config.grad_sync_func = grad_sync_func
self.model.config.param_sync_func = param_sync_func
fwd_bwd_function = get_forward_backward_func()
losses_reduced_per_micro_batch = fwd_bwd_function(
forward_step_func=self.get_forward_output_and_loss_func(),
data_iterator=data_iter,
model=[self.model],
num_microbatches=get_num_microbatches(),
forward_only=forward_only,
seq_length=seq_length,
micro_batch_size=get_micro_batch_size(),
)
# only the last stages of the pipeline return losses
if losses_reduced_per_micro_batch:
if (not forward_only) or self.cfg.data.get('validation_drop_last', True):
# average loss across micro batches
loss_tensors_list = [loss_reduced['avg'] for loss_reduced in losses_reduced_per_micro_batch]
loss_tensor = torch.concat(loss_tensors_list)
loss_mean = loss_tensor.mean()
else:
# Get the total loss since micro batches sizes are not uniform
loss_sum_tensors_list = [
loss_sum['loss_sum_and_ub_size']
for loss_sum in losses_reduced_per_micro_batch
if loss_sum['loss_sum_and_ub_size'][1] > 0
]
loss_sum = (
torch.vstack(loss_sum_tensors_list).sum(axis=0)
if len(loss_sum_tensors_list) > 0
else torch.tensor([0.0, 0.0]).cuda()
)
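                # loss_sum is a 2-element tensor [sum of losses, number of samples]; it is
                # turned into an average later in inference_epoch_end().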
return loss_sum
else:
# we're not on the last pipeline stage so no losses
if forward_only:
loss_mean = []
else:
loss_mean = torch.tensor(0.0).cuda()
return loss_mean
def validation_step(self, dataloader_iter, batch_idx, dataloader_idx=0):
return self.inference_step(dataloader_iter, batch_idx, 'validation', dataloader_idx)
def test_step(self, dataloader_iter, batch_idx, dataloader_idx=0):
        # dataloader_iter in PTL 2.0 doesn't catch the end of iterables, so inference_step checks for exhaustion explicitly
return self.inference_step(dataloader_iter, batch_idx, 'test', dataloader_idx)
def inference_step(self, dataloader_iter, batch_idx, mode, dataloader_idx=0):
# Check if iterator is exhausted
dataloader_iter, done = self._val_iterator_done(dataloader_iter)
if done:
return
batch = next(dataloader_iter)
data_cfg = self.cfg.data.validation_ds if mode == 'validation' else self.cfg.data.test_ds
self._reconfigure_and_process_inference_batch(batch, data_cfg)
# Meta data from dataset
metadata = batch.get('metadata', [{}] * len(batch['tokens']))
loss = super().validation_step(itertools.chain([batch]), batch_idx)
if data_cfg.get("write_predictions_to_file", False) or data_cfg.metric.name != 'loss':
# We need _inference_config to get generation params
# add_BOS and tokens_to_generate are set in dataset
if self.get_inference_config() is None:
self.set_inference_config(inference_config={})
self._inference_config['add_BOS'] = data_cfg.add_bos
self._inference_config['tokens_to_generate'] = data_cfg.get('tokens_to_generate')
output = self.predict_step(batch, batch_idx, dataloader_idx)
inputs_text = [self.tokenizer.ids_to_text(c.tolist()) for c in batch['contexts']]
labels_text = [self.tokenizer.ids_to_text(a.tolist()) for a in batch['answers']]
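            # Strip the prompt: keep only the tokens generated after the context, up to
            # `tokens_to_generate` new tokens per sample.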
preds_text = [
self.tokenizer.ids_to_text(t[l.item() :][: data_cfg.get('tokens_to_generate')])
for t, l in zip(output['token_ids'], batch['context_lengths'])
]
else:
inputs_text, labels_text, preds_text = [], [], []
outputs = {
'loss': loss,
'preds': preds_text, # [str]
'labels': labels_text, # [str]
'inputs': inputs_text, # [str]
'metadata': metadata, # [dict]
}
if mode == 'validation':
if type(self.trainer.val_dataloaders) == list and len(self.trainer.val_dataloaders) > 1:
# super().validation_step appends just loss to self.validation_step_outputs, replace the last appended loss with the outputs dict
self.validation_step_outputs[dataloader_idx][-1] = outputs
else:
# super().validation_step appends just loss to self.validation_step_outputs, replace the last appended loss with the outputs dict
self.validation_step_outputs[-1] = outputs
else:
if type(self.trainer.test_dataloaders) == list and len(self.trainer.test_dataloaders) > 1:
self.test_step_outputs[dataloader_idx][-1] = outputs
else:
self.test_step_outputs[-1] = outputs
return outputs
def inference_epoch_end(self, outputs, mode, data_cfg):
# Parent class will handle logging of the loss.
if not outputs:
return
if isinstance(outputs[0], dict):
outputs = [outputs]
averaged_loss = []
averaged_metric = []
# Log metrics for each provided validation/test dataset.
for dataloader_idx, output in enumerate(outputs):
            # Expand on_validation_epoch_end from parent class MegatronGPTModel as on_validation_epoch_end doesn't take an outputs arg
# loss = super().on_validation_epoch_end([x['loss'] for x in output])
loss_vals = [x['loss'] for x in output]
if parallel_state.is_pipeline_last_stage():
# only the last pipeline parallel stages return loss with their batch size
if self.cfg.data.get('validation_drop_last', True):
loss = torch.stack(loss_vals).mean()
else:
# Compute the avg loss by total_loss across all samples / total number of samples
total_loss_and_total_samples = torch.vstack(loss_vals).sum(axis=0)
avg_loss = total_loss_and_total_samples[0] / total_loss_and_total_samples[1]
loss = avg_loss.type(torch.float32).cuda()
else:
loss = torch.tensor(0.0, dtype=torch.float32).cuda()
# we can only log on one rank if it is rank zero so we broadcast from last rank
torch.distributed.broadcast(loss, get_last_rank())
self.log('val_loss', loss, prog_bar=True, rank_zero_only=True, batch_size=1)
# Determine the key used to log the loss based on the user provided name of the dataset or the dataloader index.
loss_log_key = self._determine_log_key(data_cfg, dataloader_idx, "loss", mode)
self.log(loss_log_key, loss, batch_size=1)
averaged_loss.append(loss)
# Gather the outputs object from all data parallel ranks since we are using the DistributedSampler which splits data across DDP ranks.
gathered_outputs = [None for _ in range(parallel_state.get_data_parallel_world_size())]
torch.distributed.all_gather_object(
gathered_outputs,
[
{'preds': x['preds'], 'labels': x['labels'], 'inputs': x['inputs'], 'metadata': x['metadata']}
for x in output
],
group=parallel_state.get_data_parallel_group(),
)
# Remove duplicate examples due to distributed sampler.
inp_label_set = set()
deduplicated_outputs = {
'preds': [],
'labels': [],
'inputs': [],
'metadata': [],
}
total_size = 0
for rank in range(0, parallel_state.get_data_parallel_world_size()):
for batch in gathered_outputs[rank]:
for pred, label, input, metadata in zip(
batch['preds'], batch['labels'], batch['inputs'], batch['metadata']
):
key = input + label
total_size += 1
if key not in inp_label_set:
inp_label_set.add(key)
deduplicated_outputs['preds'].append(pred)
deduplicated_outputs['labels'].append(label)
deduplicated_outputs['inputs'].append(input)
deduplicated_outputs['metadata'].append(metadata)
# Compute metric score
metric_name = self.val_metric_name if mode == 'validation' else self.test_metric_name
metric_label_key = self.val_metric_label_key if mode == 'validation' else self.test_metric_label_key
if metric_name != 'loss':
metric_log_key = self._determine_log_key(data_cfg, dataloader_idx, metric_name, mode)
metric_fn = (
self.val_metric[dataloader_idx] if mode == 'validation' else self.test_metric[dataloader_idx]
)
if metric_label_key in deduplicated_outputs['metadata'][0]:
labels = [m[metric_label_key] for m in deduplicated_outputs['metadata']]
else:
labels = deduplicated_outputs['labels']
for pred, label in zip(deduplicated_outputs['preds'], labels):
_ = metric_fn(pred, label)
metric_result = metric_fn.compute()
if metric_name == 'rouge':
for k, v in metric_result.items():
if 'fmeasure' in k:
self.log(metric_log_key + f'_{k}', v.item(), sync_dist=True)
logging.info(f"{mode} {metric_name} {k}: {v.item()}")
metric_result = metric_result['rouge1_fmeasure']
else:
self.log(metric_log_key, metric_result.item(), sync_dist=True)
logging.info(f"{mode} {metric_name}: {metric_result.item()}")
metric_fn.reset()
averaged_metric.append(metric_result)
# Write predictions to file
if self.global_rank == 0 and data_cfg.get("write_predictions_to_file", False):
logging.info(
f"Total deduplicated inference data size: {total_size} to {len(deduplicated_outputs['inputs'])}"
)
# Check if the user provided a prefix path to the file(s) they want to write.
if not hasattr(data_cfg, "output_file_path_prefix") or data_cfg.output_file_path_prefix is None:
raise ValueError(
f"Cannot write predictions to file when output_file_path_prefix is not set or present in the yaml config file."
)
filename_log_key = self._determine_log_key(data_cfg, dataloader_idx, None, mode)
self.write_predictions_to_file(
deduplicated_outputs, f"{data_cfg.output_file_path_prefix}_{filename_log_key}"
)
torch.distributed.barrier(group=parallel_state.get_data_parallel_group())
outputs[dataloader_idx].clear() # free memory
# Logging of the averaged metrics:
averaged_loss = sum(averaged_loss) / len(averaged_loss)
        averaged_metric = sum(averaged_metric) / len(averaged_metric) if len(averaged_metric) >= 1 else None
# Handle case where metrics can be nan or inf. This can break checkpoint save/load.
if averaged_metric is not None and (torch.isinf(averaged_metric) or torch.isnan(averaged_metric)):
app_state = AppState()
monitor_mode = app_state.checkpoint_callback_params.mode
assert monitor_mode in ['min', 'max']
averaged_metric = 0.0 if monitor_mode == 'max' else 1e5
if mode == 'validation':
self.log("validation_loss", averaged_loss, batch_size=1)
if averaged_metric is not None:
self.log(f"validation_{self.val_metric_name}", averaged_metric)
elif mode == 'test':
self.log("test_loss", averaged_loss, batch_size=1)
if averaged_metric is not None:
self.log(f"test_{self.test_metric_name}", averaged_metric)
# Merge the functionality of previous on_inference_epoch_end() within inference_epoch_end() func here
app_state = AppState()
self._restore_activation_checkpointing_args()
self._restore_sequence_parallelism_args()
if hasattr(self, "_train_ds"):
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=self.cfg.data.train_ds.global_batch_size,
micro_batch_size=self.cfg.data.train_ds.micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
# When running `trainer.validate()`, the training dataset is not available.
else:
logging.warning('No training data found, reconfiguring microbatches based on validation batch sizes.')
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=data_cfg.global_batch_size,
micro_batch_size=data_cfg.micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
return averaged_loss, averaged_metric
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
inference_config = self.get_inference_config()
        # work on a copy so the stored inference config is not mutated by the overrides below
inference_config = inference_config.copy()
global_batch_size_per_gpu = batch['tokens'].size(0)
num_micro_batches_before_decode = get_num_microbatches()
compute_logprob = inference_config.get('compute_logprob', False)
if compute_logprob:
inference_config['inputs'] = batch
inference_config['tokens_to_generate'] = 1
inference_config['all_probs'] = True
inference_config["add_BOS"] = False
inference_config['greedy'] = True
response = generate(self, **inference_config)
response = get_computeprob_response(self.tokenizer, response, batch)
else:
# for megatron_gpt_eval.py
if isinstance(batch, list):
inference_config['inputs'] = batch
else:
# peft_eval.py
inference_config['inputs'] = (batch['contexts'].cuda(), batch['context_lengths'].cuda())
response = generate(self, **inference_config)
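        # Generation can reconfigure the micro-batch calculator, so restore it below to the
        # batch geometry captured before decoding.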
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=global_batch_size_per_gpu * parallel_state.get_data_parallel_world_size(),
micro_batch_size=global_batch_size_per_gpu // num_micro_batches_before_decode,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
return response
def write_predictions_to_file(self, outputs, output_file_path_prefix):
output_file_path = output_file_path_prefix + "_inputs_preds_labels.jsonl"
with open(output_file_path, "w") as f_json:
assert (
len(outputs['inputs']) == len(outputs['preds']) == len(outputs['labels']) == len(outputs['metadata'])
)
for i, p, l, m in zip(outputs['inputs'], outputs['preds'], outputs['labels'], outputs['metadata']):
json_string = {'input': i, 'pred': p, 'label': l}
for k, v in m.items():
if k not in json_string:
json_string[k] = v
f_json.write(json.dumps(json_string) + '\n')
logging.info(f'Predictions saved to {output_file_path}')
def cast_for_metric(self, pred, label, metric_name, class_labels=None, labels_are_strings=False):
if metric_name == 'exact_string_match' or 'rouge' in metric_name:
return pred, label
pred = pred.replace(' ', '')
label = label.replace(' ', '')
# Correlation metrics require casting to float.
if metric_name in ['pearson_corr_coef', 'spearman_corr_coef']:
# Text-to-text model predictions may not always be valid floating point numbers.
try:
pred = float(pred)
except ValueError:
pred = 0.0
try:
label = float(label)
except ValueError:
raise ValueError(f'Could not convert {label} to float.')
pred = torch.FloatTensor([pred]).to(self.device)
label = torch.FloatTensor([label]).to(self.device)
# Other metrics require casting to integers.
elif metric_name in self._metrics_require_string2category_map and not labels_are_strings:
# Text-to-text model predictions may not always be valid integers.
try:
pred = int(pred)
except ValueError:
pred = 0
try:
label = int(label)
except ValueError:
raise ValueError(f'Could not convert {label} to int.')
pred = torch.LongTensor([pred]).to(self.device)
label = torch.LongTensor([label]).to(self.device)
# If labels are strings, we need to convert them to indices for some metrics.
elif metric_name in self._metrics_require_string2category_map and labels_are_strings:
# Cast string labels to integers before computing the metric.
if pred not in class_labels:
pred = 0 # If the prediction is not in the class labels, use the first class label.
else:
pred = class_labels.index(pred)
if label not in class_labels:
raise ValueError(f"Ground truth labe; {label} is not in the class labels list : {class_labels}")
label = class_labels.index(label)
pred = torch.LongTensor([pred]).to(self.device)
label = torch.LongTensor([label]).to(self.device)
else:
raise ValueError(f'Metric {metric_name} not supported.')
return pred, label
# Override the parent batch reconfiguring logic.
def _reconfigure_and_process_inference_batch(self, batch, data_cfg):
global_batch_size_per_gpu = batch['tokens'].size(0)
# This should happen only on the last batch of the dataset.
if (
global_batch_size_per_gpu
!= get_current_global_batch_size() // parallel_state.get_data_parallel_world_size()
):
# NOTE: This is reconfiguring to make sure there is no grad-acc for validation batches.
if (
global_batch_size_per_gpu
!= data_cfg.global_batch_size // parallel_state.get_data_parallel_world_size()
):
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=global_batch_size_per_gpu * parallel_state.get_data_parallel_world_size(),
micro_batch_size=global_batch_size_per_gpu,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
# NOTE: need to explicitly handle resetting for multi-validation
else:
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=data_cfg.global_batch_size,
micro_batch_size=data_cfg.micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
def build_train_valid_test_datasets(self, stage):
if stage != 'test':
logging.info('Building GPT SFT validation datasets.')
# Wrap this in a list since the general finetuning parent class supports multi-validation.
self._validation_ds = self._build_dataset(self.cfg.data.validation_ds, is_train=False)
logging.info(f'Length of val dataset: {len(self._validation_ds[0])}')
if stage != 'validate':
if hasattr(self.cfg.data, 'test_ds') and self.cfg.data.test_ds.get('file_names', None) is not None:
logging.info('Building GPT SFT test datasets.')
# Wrap this in a list since the general finetuning parent class supports multi-validation.
self._test_ds = self._build_dataset(self.cfg.data.test_ds, is_train=False)
logging.info(f'Length of test dataset: {len(self._test_ds[0])}')
if stage == 'validate' or stage == 'test':
return
        logging.info('Building GPT SFT training datasets.')
self._train_ds = self._build_dataset(self.cfg.data.train_ds)
logging.info(f'Length of train dataset: {len(self._train_ds)}')
def build_data_loader(self, dataset, data_cfg, consumed_samples=0):
"""Buld dataloader given an input dataset."""
logging.info(f'Building dataloader with consumed samples: {consumed_samples}')
if isinstance(dataset, BlendableDataset):
collate_fn = dataset.datasets[0].collate_fn
else:
collate_fn = dataset.collate_fn
batch_sampler = MegatronPretrainingBatchSampler(
total_samples=len(dataset),
consumed_samples=consumed_samples,
micro_batch_size=data_cfg.micro_batch_size,
global_batch_size=data_cfg.global_batch_size,
data_parallel_rank=parallel_state.get_data_parallel_rank(),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
drop_last=data_cfg.drop_last,
pad_samples_to_global_batch_size=not data_cfg.drop_last,
)
return torch.utils.data.DataLoader(
dataset,
batch_sampler=batch_sampler,
collate_fn=collate_fn,
num_workers=data_cfg.num_workers,
pin_memory=data_cfg.pin_memory,
)
def setup_training_dataloader(self):
if hasattr(self, '_train_ds'):
consumed_samples = self.compute_consumed_samples(0)
self._train_dl = self.build_data_loader(
dataset=self._train_ds, data_cfg=self.cfg.data.train_ds, consumed_samples=consumed_samples,
)
def setup_eval_dataloader(self, datasets, data_cfg):
dataloaders = []
for dataset in datasets:
eval_dl = self.build_data_loader(dataset=dataset, data_cfg=data_cfg, consumed_samples=0,)
dataloaders.append(eval_dl)
return dataloaders
def on_validation_epoch_start(self):
self._reset_activation_checkpointing_args()
self._reset_sequence_parallelism_args()
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=self.cfg.data.validation_ds.global_batch_size,
micro_batch_size=self.cfg.data.validation_ds.micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
return super().on_validation_epoch_start()
def on_test_epoch_start(self):
self._reset_activation_checkpointing_args()
self._reset_sequence_parallelism_args()
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=self.cfg.data.test_ds.global_batch_size,
micro_batch_size=self.cfg.data.test_ds.micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
return super().on_test_epoch_start()
def on_test_epoch_end(self):
_ = self.inference_epoch_end(self.test_step_outputs, 'test', self.cfg.data.test_ds)
# Commenting as on_test_epoch_end was a no-op in PTL 1.9
# return super().on_test_epoch_end()
def on_validation_epoch_end(self):
_ = self.inference_epoch_end(self.validation_step_outputs, 'validation', self.cfg.data.validation_ds)
# Commenting as on_validation_epoch_end was a no-op in PTL 1.9
# return super().on_validation_epoch_end()
def on_train_epoch_start(self) -> None:
        # Same logic as validation epoch end, but this may be needed if there is no validation sanity check to trigger on_validation_epoch_end()
self.on_validation_epoch_end()
return super().on_train_epoch_start()
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_gpt_sft_model.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Any, Dict, List, Optional, Union
import torch
import torch.nn.functional as F
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.data.language_modeling.megatron import dataset_utils
from nemo.collections.nlp.data.language_modeling.megatron.data_samplers import (
MegatronPretrainingRandomSampler,
MegatronPretrainingSampler,
)
from nemo.collections.nlp.models.language_modeling.megatron.bert_model import BertModel
from nemo.collections.nlp.models.language_modeling.megatron_base_model import MegatronBaseModel
from nemo.collections.nlp.modules.common.megatron.build_model import build_model
from nemo.collections.nlp.modules.common.megatron.module import Float16Module
from nemo.collections.nlp.modules.common.megatron.utils import (
average_losses_across_data_parallel_group,
get_params_for_weight_decay_optimization,
)
from nemo.collections.nlp.parts.nlp_overrides import GradScaler
from nemo.collections.nlp.parts.utils_funcs import get_last_rank
from nemo.core.classes.common import PretrainedModelInfo
from nemo.core.neural_types import ChannelType, MaskType, NeuralType
from nemo.utils import AppState, logging
try:
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
import logging
from lddl.torch_mp import get_bert_pretrain_data_loader
HAVE_LDDL = True
except (ImportError, ModuleNotFoundError):
HAVE_LDDL = False
try:
from megatron.core import parallel_state
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
class MegatronBertModel(MegatronBaseModel):
"""
Megatron Bert pretraining.
Model returns [batch, seq, hidden] shape
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
if not HAVE_MEGATRON_CORE:
raise ImportError(
"megatron-core was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
self.megatron_amp_o2 = cfg.get('megatron_amp_O2', False)
self.cfg = cfg
if not self.megatron_amp_o2 and self.cfg.get('virtual_pipeline_model_parallel_size', None):
raise ValueError('Virtual pipeline model parallel is only supported when using megatron_amp_O2')
super().__init__(cfg, trainer=trainer, no_lm_init=False)
self._validate_trainer()
        self.enable_autocast = (not self.megatron_amp_o2) and (self.autocast_dtype in [torch.float16, torch.bfloat16])
# used in NVIDIA NGC PyTorch containers
# buffer used during train_step for logging average loss over gradient accumulation steps
self._reduced_lm_loss_buffer = []
self._reduced_sop_loss_buffer = []
# build_model returns a list of modules which are used for interleaved pipeline parallelism
self.model = build_model(
model_provider_func=self.model_provider_func,
wrap_with_ddp=False,
virtual_pipeline_model_parallel_size=self.cfg.get('virtual_pipeline_model_parallel_size', None),
)
# if we're not using interleaved, then self.model is a module.
if self.cfg.get('virtual_pipeline_model_parallel_size', None) is None:
self.model = self.model[0]
if self.megatron_amp_o2:
if not self.with_distributed_adam:
# Pre-allocate the model on GPU to have master parameters allocated on the same device with matching data type
if isinstance(self.model, list):
for module in self.model:
module.cuda(torch.cuda.current_device())
else:
self.model.cuda(torch.cuda.current_device())
# Model wrapper to convert both model and inputs to half precision
if isinstance(self.model, list):
converted_model = []
for module in self.model:
converted_model.append(
Float16Module(config=self.model_parallel_config, module=module, precision=self.cfg.precision)
)
self.model = converted_model
else:
self.model = Float16Module(
config=self.model_parallel_config, module=self.model, precision=self.cfg.precision
)
if hasattr(self, '_nsys_profile_enabled'):
mp_size = cfg.get('tensor_model_parallel_size', 1) * cfg.get('pipeline_model_parallel_size', 1)
data_parallel_world_size = trainer.world_size // mp_size
grad_accum_steps = cfg.get('global_batch_size') // (cfg.get('micro_batch_size') * data_parallel_world_size)
self._nsys_profile_start_step *= grad_accum_steps
self._nsys_profile_end_step *= grad_accum_steps
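            # Example (assumed values): global_batch_size=64, micro_batch_size=4, world_size=32,
            # TP=2, PP=2 -> data_parallel_world_size=8 and grad_accum_steps=64 // (4 * 8) = 2, so the
            # configured nsys start/end steps are doubled to count micro-batch steps.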
def model_provider_func(self, pre_process, post_process):
cfg = self.cfg
num_tokentypes = 2 if cfg.bert_binary_head else 0
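        # Two token-type (segment) embeddings are only needed when the binary (sentence-order)
        # head is enabled, since inputs are then pairs of segments A and B.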
model = BertModel(
config=self.model_parallel_config,
vocab_size=self.padded_vocab_size,
hidden_size=cfg.hidden_size,
max_position_embeddings=cfg.max_position_embeddings,
num_layers=cfg.num_layers,
num_attention_heads=cfg.num_attention_heads,
apply_query_key_layer_scaling=cfg.get('apply_query_key_layer_scaling', True),
kv_channels=cfg.get('kv_channels', None),
ffn_hidden_size=cfg.ffn_hidden_size,
num_tokentypes=num_tokentypes,
parallel_output=True,
pre_process=pre_process,
post_process=post_process,
init_method_std=cfg.get('init_method_std', 0.02),
fp16_lm_cross_entropy=cfg.get('fp16_lm_cross_entropy', False),
megatron_amp_O2=self.cfg.get('megatron_amp_O2', False),
hidden_dropout=cfg.get('hidden_dropout', 0.1),
precision=cfg.get('precision', 16),
fp32_residual_connection=cfg.get('fp32_residual_connection', False),
activations_checkpoint_granularity=self.cfg.get('activations_checkpoint_granularity', None),
activations_checkpoint_method=self.cfg.get('activations_checkpoint_method', None),
activations_checkpoint_num_layers=self.cfg.get('activations_checkpoint_num_layers', 1),
activations_checkpoint_layers_per_pipeline=self.cfg.get(
'activations_checkpoint_layers_per_pipeline', None
),
layernorm_epsilon=cfg.get('layernorm_epsilon', 1e-5),
masked_softmax_fusion=cfg.get('masked_softmax_fusion', True),
bias_gelu_fusion=cfg.get('bias_gelu_fusion', True),
bias_dropout_add_fusion=cfg.get("bias_dropout_add_fusion", True),
onnx_safe=cfg.get('onnx_safe', False),
add_binary_head=cfg.bert_binary_head,
megatron_legacy=cfg.get('megatron_legacy', False),
position_embedding_type=self.cfg.get("position_embedding_type", "learned_absolute"),
)
return model
def _validate_trainer(self):
""" Certain trainer configurations can break training.
Here we try to catch them and raise an error.
"""
if self.trainer.accumulate_grad_batches > 1:
raise ValueError(
f'Gradient accumulation is done within training_step. trainer.accumulate_grad_batches must equal 1'
)
def get_forward_output_and_loss_func(self):
def fwd_output_and_loss_func(dataloader_iter, model, checkpoint_activations_all_layers=None):
if parallel_state.get_pipeline_model_parallel_world_size() == 1:
batch = next(dataloader_iter)
tokens, types, sentence_order, loss_mask, lm_labels, padding_mask = (
batch['text'].cuda(non_blocking=True),
batch['types'].cuda(non_blocking=True),
batch['is_random'].cuda(non_blocking=True),
batch['loss_mask'].cuda(non_blocking=True),
batch['labels'].cuda(non_blocking=True),
batch['padding_mask'].cuda(non_blocking=True),
)
else:
batch = next(dataloader_iter)
if parallel_state.is_pipeline_first_stage():
tokens = batch['text'].cuda(non_blocking=True)
types = batch['types'].cuda(non_blocking=True)
sentence_order = batch['is_random'].cuda(non_blocking=True)
padding_mask = batch['padding_mask'].cuda(non_blocking=True)
loss_mask, lm_labels = None, None
elif parallel_state.is_pipeline_last_stage():
loss_mask = batch['loss_mask'].cuda(non_blocking=True)
lm_labels = batch['labels'].cuda(non_blocking=True)
sentence_order = batch['is_random'].cuda(non_blocking=True)
padding_mask = batch['padding_mask'].cuda(non_blocking=True)
tokens, types = None, None
else:
padding_mask = batch['padding_mask'].cuda(non_blocking=True)
sentence_order = batch['is_random'].cuda(non_blocking=True)
tokens, types, loss_mask, lm_labels = None, None, None, None
if not self.cfg.bert_binary_head:
types = None
output_tensor = self.forward(
tokens,
padding_mask,
types,
lm_labels,
checkpoint_activations_all_layers=checkpoint_activations_all_layers,
model=model,
)
def loss_func(output_tensor):
loss_dict = self.loss_func(loss_mask, sentence_order, output_tensor)
if 'sop loss' in loss_dict:
lm_loss = loss_dict['lm loss']
sop_loss = loss_dict['sop loss']
loss = lm_loss + sop_loss
reduced_loss = average_losses_across_data_parallel_group([loss, lm_loss, sop_loss])
else:
lm_loss = loss_dict['lm loss']
loss = lm_loss
reduced_loss = average_losses_across_data_parallel_group([loss, lm_loss])
return loss, {'loss': reduced_loss}
return output_tensor, loss_func
return fwd_output_and_loss_func
def forward(
self,
input_ids,
attention_mask,
token_type_ids,
lm_labels=None,
checkpoint_activations_all_layers=None,
model=None,
):
if model is None:
model = self.model
output_tensor = model(
input_ids,
attention_mask,
token_type_ids=token_type_ids,
lm_labels=lm_labels,
checkpoint_activations_all_layers=checkpoint_activations_all_layers,
)
if parallel_state.is_pipeline_last_stage():
# Return the output tensor of encoder and transpose from [seq_len, batch, hidden] to [batch, seq_len, hidden]
if torch.is_tensor(output_tensor):
output_tensor = output_tensor.transpose(1, 0).contiguous()
else:
lm_loss_, sop_logits = output_tensor
lm_loss_ = lm_loss_.transpose(1, 0).contiguous()
if sop_logits is not None:
sop_logits = sop_logits.transpose(1, 0).contiguous()
output_tensor = (lm_loss_, sop_logits)
return output_tensor
def training_step(self, dataloader_iter, batch_idx):
self._optimizer.zero_grad()
if self.with_distributed_adam:
# hack to enable overlapping param sync and forward compute
# note: the distributed optimizer monkey-patches each
# parameter's __getattribute__ function so that it can
# launch parameter all-gathers the first time the
# parameter is accessed after the optimizer step. However,
            # PyTorch directly passes embedding parameters into a C++ function,
# bypassing this process. A quick-and-dirty hack is to
# manually interact with the parameter.
modules = self.model if isinstance(self.model, list) else [self.model]
for module in modules:
if isinstance(module, Float16Module):
module = module.module
module = module.language_model
if hasattr(module, 'embedding'):
for param in module.embedding.parameters():
param.data_ptr()
if self.cfg.data.dataloader_type == "LDDL":
# this is of type bert dataset
seq_length = dataloader_iter.iterator.loaders.get_seqlen()
else:
seq_length = self.cfg.encoder_seq_length
# run forward and backwards passes for an entire global batch
# we do this inside training_step to support pipeline parallelism
fwd_bwd_function = get_forward_backward_func()
losses_reduced_per_micro_batch = fwd_bwd_function(
forward_step_func=self.get_forward_output_and_loss_func(),
data_iterator=dataloader_iter,
model=[self.model],
num_microbatches=get_num_microbatches(),
forward_only=False,
seq_length=seq_length,
micro_batch_size=self.cfg.micro_batch_size,
)
if losses_reduced_per_micro_batch:
loss_tensors_list = [loss_reduced['loss'] for loss_reduced in losses_reduced_per_micro_batch]
loss_tensor = torch.vstack(loss_tensors_list)
loss_mean = loss_tensor.mean(axis=0)
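            # loss_mean holds [total loss, lm loss, sop loss] when the binary (SOP) head is
            # enabled, otherwise [total loss, lm loss].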
else:
            if self.cfg.bert_binary_head:
loss_mean = torch.tensor([0.0, 0.0, 0.0]).cuda()
else:
loss_mean = torch.tensor([0.0, 0.0]).cuda()
# when using sequence parallelism, the sequence parallel layernorm grads must be all-reduced
if self.cfg.get('tensor_model_parallel_size', 1) > 1 and self.cfg.get('sequence_parallel', False):
self.allreduce_sequence_parallel_gradients()
if self.with_distributed_adam:
# synchronize asynchronous grad reductions
# note: not necessary, but reduces performance degradation
# from multiple simultaneous NCCL calls
self._optimizer._finish_bucket_grad_sync()
elif self.megatron_amp_o2:
if self.cfg.get('pipeline_model_parallel_size', 1) > 1 or self.cfg.get('sequence_parallel', False):
# when using pipeline parallelism grads must be all-reduced after the pipeline (not asynchronously)
self._optimizer.allreduce_main_grads()
else:
# async grad allreduce is not currently implemented for O1/autocasting mixed precision training
# so we all-reduce gradients after the pipeline
self.allreduce_gradients() # @sangkug we think this is causing memory to blow up (hurts perf)
if self.cfg.get('pipeline_model_parallel_size', 1) > 1:
# when using pipeline parallelism the first and last stage must keep embeddings in sync
self.allreduce_first_last_embeddings()
torch.distributed.broadcast(loss_mean, get_last_rank())
if self.torch_dtype == torch.float16:
loss_scale = self.trainer.precision_plugin.scaler._scale
if loss_scale is not None:
self.log('loss_scale', loss_scale, batch_size=1)
if (batch_idx + 1) % self.trainer.accumulate_grad_batches == 0:
# Reduced loss for logging.
self.log('reduced_train_loss', loss_mean[0], prog_bar=True, batch_size=1)
if len(loss_mean) > 2:
self.log('reduced_lm_train_loss', loss_mean[1], prog_bar=True, batch_size=1)
self.log('reduced_sop_train_loss', loss_mean[2], prog_bar=True, batch_size=1)
lr = self._optimizer.param_groups[0]['lr']
self.log('lr', lr, batch_size=1)
self.log('global_step', self.trainer.global_step, prog_bar=True, batch_size=1)
self.log(
'consumed_samples', self._compute_consumed_samples_after_training_step(), prog_bar=True, batch_size=1,
)
return loss_mean[0]
def allreduce_first_last_embeddings(self):
# Modified from megatron-lm: https://github.com/NVIDIA/Megatron-LM/blob/d41696840ed0a7edb7e0499eb82a48ae112d9bb3/megatron/training.py#L407
# All-reduce word_embeddings' grad across first and last stages to ensure
# that word_embeddings parameters stay in sync.
# This should only run for models that support pipelined model parallelism
# (BERT and GPT-2).
if parallel_state.get_pipeline_model_parallel_world_size() > 1 and (
parallel_state.is_pipeline_first_stage(ignore_virtual=True)
or parallel_state.is_pipeline_last_stage(ignore_virtual=True)
):
if parallel_state.is_pipeline_first_stage(ignore_virtual=True):
if isinstance(self.model, list):
module = self.model[0] # only the first virtual rank has the embeddings
else:
module = self.model
if parallel_state.is_pipeline_last_stage(ignore_virtual=True):
if isinstance(self.model, list):
module = self.model[-1] # only the last virtual rank has the embeddings
else:
module = self.model
if module.share_token_embeddings:
word_embeddings_weight = module.word_embeddings_weight()
if self.megatron_amp_o2:
# O2 recipe stores a "main" copy of weights and grads
grad = word_embeddings_weight.main_grad
else:
grad = word_embeddings_weight.grad
torch.distributed.all_reduce(grad, group=parallel_state.get_embedding_group())
def validation_step(self, dataloader_iter, batch_idx):
# Check if iterator is exhausted
dataloader_iter, done = self._val_iterator_done(dataloader_iter)
if done:
return
prefix = "test" if self.trainer.testing else "val"
if self.cfg.data.dataloader_type == "LDDL":
seq_length = dataloader_iter.iterator.get_seqlen()
else:
seq_length = self.cfg.encoder_seq_length
fwd_bwd_function = get_forward_backward_func()
losses_reduced_per_micro_batch = fwd_bwd_function(
forward_step_func=self.get_forward_output_and_loss_func(),
data_iterator=dataloader_iter,
model=[self.model],
num_microbatches=get_num_microbatches(),
forward_only=True,
seq_length=seq_length,
micro_batch_size=self.cfg.micro_batch_size,
)
if losses_reduced_per_micro_batch:
loss_tensors_list = [loss_reduced['loss'] for loss_reduced in losses_reduced_per_micro_batch]
loss_tensor = torch.vstack(loss_tensors_list)
loss_mean = loss_tensor.mean(axis=0)
else:
loss_mean = torch.tensor([0.0]).cuda()
loss = loss_mean[0]
self.validation_step_outputs.append(loss) if prefix == 'val' else self.test_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self):
if parallel_state.is_pipeline_last_stage():
averaged_loss = torch.stack(self.validation_step_outputs).mean()
else:
averaged_loss = torch.tensor(0.0, dtype=torch.float32).cuda()
torch.distributed.broadcast(averaged_loss, get_last_rank())
self.log('val_loss', averaged_loss, prog_bar=True, batch_size=1)
self.validation_step_outputs.clear() # free memory
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def on_test_epoch_end(self):
averaged_loss = average_losses_across_data_parallel_group(self.test_step_outputs)
logging.info(f'test_loss: {averaged_loss[0]}')
def loss_func(self, loss_mask, sentence_order, output_tensor):
lm_loss_, sop_logits = output_tensor
lm_loss_ = lm_loss_.float()
loss_mask = loss_mask.float()
# Sometimes when the number of tokens is very small, none of the tokens get masked for prediction. In that case loss mask is all zeros
        # i.e., this happens when the entire batch is masked out (practically when MBS=1 or 2 and the number of tokens in each batch is < 7)
if loss_mask.sum() == 0:
lm_loss = torch.sum(lm_loss_.view(-1)) * 0.0
else:
lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
if sop_logits is not None:
sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), sentence_order.view(-1), ignore_index=-1)
sop_loss = sop_loss.float()
return {'lm loss': lm_loss, 'sop loss': sop_loss}
# loss = lm_loss + sop_loss
# averaged_losses = average_losses_across_data_parallel_group(
# [lm_loss, sop_loss])
# return loss, {'lm loss': averaged_losses[0],
# 'sop loss': averaged_losses[1]}
else:
return {'lm loss': lm_loss}
# loss = lm_loss
# averaged_losses = average_losses_across_data_parallel_group(
# [lm_loss])
# return loss, {'lm loss': averaged_losses[0]}
def build_LDDL_data(self, cfg):
if not HAVE_LDDL:
raise ImportError(
"LDDL was not found. Please see the LDDL README for installation instructions: https://github.com/NVIDIA/LDDL#installation."
)
logging.info(f'Starting building LDDL Dataloaders')
self._train_ds = None
self._validation_ds = None
self._test_ds = None
data_parallel_size = parallel_state.get_data_parallel_world_size()
num_micro_batches = self.cfg.global_batch_size // (self.cfg.micro_batch_size * data_parallel_size)
global_batch_size_on_this_data_parallel_rank = num_micro_batches * self.cfg.micro_batch_size
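        # Example (assumed values): global_batch_size=256, micro_batch_size=4, data_parallel_size=8
        # -> num_micro_batches=8 and each data-parallel rank's loader batch size is 32.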
samples_consumed_dploader = self.compute_consumed_samples(0) // data_parallel_size
        # We assume data_prefix[0] is the path prefix for the LDDL dataloader
train_lddl_data_path = self.cfg.data.data_prefix[0]
self._train_dl = get_bert_pretrain_data_loader(
train_lddl_data_path,
dp_rank=parallel_state.get_data_parallel_rank(),
local_rank=self.local_rank,
shuffle_buffer_size=16384,
shuffle_buffer_warmup_factor=16,
vocab_file=self.cfg.tokenizer.vocab_file,
data_loader_kwargs={
'batch_size': global_batch_size_on_this_data_parallel_rank,
'num_workers': self.cfg.data.num_workers,
'prefetch_factor': 2,
},
mlm_probability=0.15,
base_seed=self.cfg.seed,
log_level=logging.CRITICAL,
log_dir="/tmp/log",
return_raw_samples=False,
start_epoch=0,
sequence_length_alignment=8,
ignore_index=-1,
samples_seen=samples_consumed_dploader,
micro_batch_size=self.cfg.micro_batch_size,
)
logging.info(f'Completed build train LDDL Dataloader')
if len(self.cfg.data.data_prefix) > 1:
val_lddl_data_path = self.cfg.data.data_prefix[1]
self._validation_dl = get_bert_pretrain_data_loader(
val_lddl_data_path,
dp_rank=parallel_state.get_data_parallel_rank(),
local_rank=self.local_rank,
shuffle_buffer_size=16384,
shuffle_buffer_warmup_factor=16,
vocab_file=self.cfg.tokenizer.vocab_file,
data_loader_kwargs={
'batch_size': global_batch_size_on_this_data_parallel_rank,
'num_workers': self.cfg.data.num_workers,
'prefetch_factor': 2,
},
mlm_probability=0.15,
base_seed=self.cfg.seed,
log_level=logging.CRITICAL,
log_dir="/tmp/log",
return_raw_samples=False,
start_epoch=0,
sequence_length_alignment=8,
ignore_index=-1,
micro_batch_size=self.cfg.micro_batch_size,
)
if len(self.cfg.data.data_prefix) > 2:
test_lddl_data_path = self.cfg.data.data_prefix[2]
self._test_dl = get_bert_pretrain_data_loader(
test_lddl_data_path,
dp_rank=parallel_state.get_data_parallel_rank(),
local_rank=self.local_rank,
shuffle_buffer_size=16384,
shuffle_buffer_warmup_factor=16,
vocab_file=self.cfg.tokenizer.vocab_file,
data_loader_kwargs={
'batch_size': global_batch_size_on_this_data_parallel_rank,
'num_workers': self.cfg.data.num_workers,
'prefetch_factor': 2,
},
mlm_probability=0.15,
base_seed=self.cfg.seed,
log_level=logging.CRITICAL,
log_dir="/tmp/log",
return_raw_samples=False,
start_epoch=0,
sequence_length_alignment=8,
ignore_index=-1,
micro_batch_size=self.cfg.micro_batch_size,
)
logging.info(f'Finished building LDDL Dataloaders')
def build_train_valid_test_datasets(self):
# Override limit_val_batches to be a multiple of num microbatches to prevent val_step from exiting in between a step
self._reconfigure_val_batches()
logging.info('Building Bert datasets.')
if self.trainer.limit_val_batches > 1.0 and isinstance(self.trainer.limit_val_batches, float):
raise ValueError("limit_val_batches must be an integer or float less than or equal to 1.0.")
global_batch_size = self.cfg.global_batch_size
        # Compute training micro-batch steps: total_global_batch_steps x grad_accums_per_global_batch
max_train_steps = self.trainer.max_steps
eval_iters = (max_train_steps // self.trainer.val_check_interval + 1) * self.trainer.limit_val_batches
test_iters = self.trainer.limit_test_batches
train_valid_test_num_samples = [
max_train_steps * global_batch_size,
eval_iters * global_batch_size,
test_iters * global_batch_size,
]
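        # Example (assumed values): max_steps=1000, global_batch_size=256, val_check_interval=100,
        # limit_val_batches=50, limit_test_batches=50 -> eval_iters=(1000 // 100 + 1) * 50 = 550 and
        # train_valid_test_num_samples = [256000, 140800, 12800].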
if self.trainer.limit_val_batches <= 1.0 and isinstance(self.trainer.limit_val_batches, float):
            train_valid_test_num_samples[1] = 1  # This is to make sure we only have one epoch on every validation iteration
self._train_ds, self._validation_ds, self._test_ds = dataset_utils.build_train_valid_test_datasets(
cfg=self.cfg,
trainer=self.trainer,
data_prefix=self.cfg.data.data_prefix,
data_impl=self.cfg.data.data_impl,
splits_string=self.cfg.data.splits_string,
train_valid_test_num_samples=train_valid_test_num_samples,
max_seq_length=self.cfg.data.seq_length,
masked_lm_prob=self.cfg.data.masked_lm_prob,
short_seq_prob=self.cfg.data.short_seq_prob,
seed=self.cfg.seed,
skip_warmup=self.cfg.data.get('skip_warmup', True),
binary_head=self.cfg.bert_binary_head,
max_seq_length_dec=None,
dataset_type='standard_bert',
tokenizer=self.tokenizer.tokenizer,
)
if self._train_ds is not None:
logging.info(f'Length of train dataset: {len(self._train_ds)}')
if self._validation_ds is not None:
logging.info(f'Length of val dataset: {len(self._validation_ds)}')
if self._test_ds is not None:
logging.info(f'Length of test dataset: {len(self._test_ds)}')
logging.info(f'Finished building Bert datasets.')
return self._train_ds, self._validation_ds, self._test_ds
def backward(self, *args, **kwargs):
""" LightningModule hook to do backward.
We want this to do nothing since we run backward in the fwd/bwd functions from megatron-core.
No need to call it here.
"""
return
def optimizer_zero_grad(self, *args, **kwargs):
""" LightningModule hook to zero grad.
We want this to do nothing as we are zeroing grads during the training_step.
"""
return
def _append_sequence_parallel_module_grads(self, module, grads):
""" Helper method for allreduce_sequence_parallel_gradients"""
for param in module.parameters():
sequence_parallel_param = getattr(param, 'sequence_parallel', False)
if sequence_parallel_param:
if self.megatron_amp_o2:
grad = param.main_grad
else:
grad = param.grad
grads.append(grad.data)
def setup(self, stage=None):
""" PTL hook that is executed after DDP spawns.
We setup datasets here as megatron datasets require DDP to instantiate.
See https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#setup for more information.
Args:
stage (str, optional): Can be 'fit', 'validate', 'test' or 'predict'. Defaults to None.
"""
num_parameters_on_device, total_num_parameters = self._get_total_params_across_model_parallel_groups_gpt_bert(
self.model
)
logging.info(
f'Pipeline model parallel rank: {parallel_state.get_pipeline_model_parallel_rank()}, '
f'Tensor model parallel rank: {parallel_state.get_tensor_model_parallel_rank()}, '
f'Number of model parameters on device: {num_parameters_on_device:.2e}. '
f'Total number of model parameters: {total_num_parameters:.2e}.'
)
resume_checkpoint_path = self.trainer.ckpt_path
if resume_checkpoint_path:
init_consumed_samples = self._extract_consumed_samples_from_ckpt(resume_checkpoint_path)
else:
init_consumed_samples = 0
self.init_consumed_samples = init_consumed_samples
self.init_global_step = self.trainer.global_step
if stage == 'predict':
return
else:
# TODO: consider adding a ModelPT guard to check if model is being restored.
# allowing restored models to optionally setup datasets
if self.cfg.data.dataloader_type == "LDDL":
self.build_LDDL_data(self.cfg.data)
torch.distributed.barrier()
else:
self.build_train_valid_test_datasets()
self.setup_training_data(self.cfg.data)
self.setup_validation_data(self.cfg.data)
self.setup_test_data(self.cfg.data)
# when using pipeline model parallel the final stage need to initialize word embeddings
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
if isinstance(self.model, list):
for i, module in enumerate(self.model):
parallel_state.set_virtual_pipeline_model_parallel_rank(i)
module.sync_initial_word_embeddings()
parallel_state.set_virtual_pipeline_model_parallel_rank(0)
else:
self.model.sync_initial_word_embeddings()
if self.cfg.get('transformer_engine', False):
self.setup_transformer_engine_tp_groups()
def allreduce_sequence_parallel_gradients(self):
""" All-reduce layernorm parameters across model parallel nodes when sequence parallelism is used.
Modified from megatron-lm:
https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/blob/3f91f09bb2ab32f9904b47f46f19d2fc3f518ed8/megatron/training.py#L425
"""
grads = []
if isinstance(self.model, list):
for module in self.model:
self._append_sequence_parallel_module_grads(module, grads)
else:
self._append_sequence_parallel_module_grads(self.model, grads)
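        # Flatten all sequence-parallel grads into one buffer, all-reduce it with a single NCCL
        # call, then copy the reduced values back into the individual grad tensors.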
coalesced = torch._utils._flatten_dense_tensors(grads)
torch.distributed.all_reduce(coalesced, group=parallel_state.get_tensor_model_parallel_group())
for buf, synced in zip(grads, torch._utils._unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
def build_pretraining_data_loader(self, dataset, consumed_samples):
"""Buld dataloader given an input dataset."""
if dataset is None:
return None
# Megatron sampler
if hasattr(self.cfg.data, 'dataloader_type') and self.cfg.data.dataloader_type is not None:
if self.cfg.data.dataloader_type == 'single':
batch_sampler = MegatronPretrainingSampler(
total_samples=len(dataset),
consumed_samples=consumed_samples,
micro_batch_size=self.cfg.micro_batch_size,
global_batch_size=self.cfg.global_batch_size,
data_parallel_rank=parallel_state.get_data_parallel_rank(),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
drop_last=self.cfg.get('drop_last', True),
)
elif self.cfg.data.dataloader_type == 'cyclic':
batch_sampler = MegatronPretrainingRandomSampler(
total_samples=len(dataset),
consumed_samples=consumed_samples,
micro_batch_size=self.cfg.micro_batch_size,
data_parallel_rank=parallel_state.get_data_parallel_rank(),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
drop_last=self.cfg.get('drop_last', True),
)
else:
raise ValueError('cfg.data.dataloader_type must be "single" or "cyclic"')
else:
raise ValueError('cfg.data.dataloader_type not found. Must be "single" or "cyclic"')
# Torch dataloader.
return torch.utils.data.DataLoader(
dataset,
batch_sampler=batch_sampler,
num_workers=self.cfg.data.num_workers,
pin_memory=True,
persistent_workers=True if self.cfg.data.num_workers > 0 else False,
)
def setup_training_data(self, cfg):
if hasattr(self, '_train_ds'):
consumed_samples = self.compute_consumed_samples(0)
logging.info(
                f'Setting up train dataloader with len(self._train_ds): {len(self._train_ds)} and consumed samples: {consumed_samples}'
)
self._train_dl = self.build_pretraining_data_loader(self._train_ds, consumed_samples)
def setup_validation_data(self, cfg):
if hasattr(self, '_validation_ds'):
consumed_samples = 0
logging.info(
                f'Setting up validation dataloader with len(self._validation_ds): {len(self._validation_ds)} and consumed samples: {consumed_samples}'
)
self._validation_dl = self.build_pretraining_data_loader(self._validation_ds, consumed_samples)
def setup_test_data(self, cfg):
if hasattr(self, '_test_ds'):
consumed_samples = 0
logging.info(
                f'Setting up test dataloader with len(self._test_ds): {len(self._test_ds)} and consumed samples: {consumed_samples}'
)
self._test_dl = self.build_pretraining_data_loader(self._test_ds, consumed_samples)
def transfer_batch_to_device(self, batch: Any, device: torch.device, dataloader_idx: int) -> Any:
""" PTL hook: https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#transfer-batch-to-device
When using pipeline parallelism, we need the global batch to remain on the CPU,
since the memory overhead will be too high when using a large number of microbatches.
Microbatches are transferred from CPU to GPU inside the pipeline.
"""
return batch
def parameters(self):
if isinstance(self.model, list):
return itertools.chain.from_iterable(module.parameters() for module in self.model)
else:
return self.model.parameters()
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
for vocab in ['cased', 'uncased']:
result.append(
PretrainedModelInfo(
pretrained_model_name=f"megatron_bert_345m_{vocab}",
location=f"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/megatron_bert_345m_{vocab}/versions/1/files/megatron_bert_345m_{vocab}.nemo",
description=f"345M parameter BERT Megatron model with {vocab} vocab.",
)
)
for vocab_size in ['50k', '30k']:
for vocab in ['cased', 'uncased']:
result.append(
PretrainedModelInfo(
pretrained_model_name=f"biomegatron345m_biovocab_{vocab_size}_{vocab}",
location=f"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/biomegatron345m_biovocab_{vocab_size}_{vocab}/versions/1/files/BioMegatron345m-biovocab-{vocab_size}-{vocab}.nemo",
description="Megatron 345m parameters model with biomedical vocabulary ({vocab_size} size) {vocab}, pre-trained on PubMed biomedical text corpus.",
)
)
for vocab in ['cased', 'uncased']:
result.append(
PretrainedModelInfo(
pretrained_model_name=f"biomegatron-bert-345m-{vocab}",
location=f"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/biomegatron345m{vocab}/versions/1/files/BioMegatron345m{vocab.capitalize()}.nemo",
description=f"Megatron pretrained on {vocab} biomedical dataset PubMed with 345 million parameters.",
)
)
return result
def setup_optimizer_param_groups(self):
"""ModelPT override. Optimizer will get self._optimizer_param_groups"""
self._optimizer_param_groups = get_params_for_weight_decay_optimization(self.model)
def configure_optimizers(self):
if self.with_distributed_adam:
# Disable overlapped grad sync for embedding grad when
# pipeline parallelism is enabled
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
if parallel_state.is_pipeline_first_stage(ignore_virtual=True):
if isinstance(self.model, list):
module = self.model[0] # only the first virtual rank has the embeddings
else:
module = self.model
if module.share_token_embeddings:
param = module.word_embeddings_weight()
param._disable_greedy_grad_copy = not self.megatron_amp_o2
param._disable_overlap_grad_sync = True
if parallel_state.is_pipeline_last_stage(ignore_virtual=True):
if isinstance(self.model, list):
module = self.model[-1] # only the last virtual rank has the embeddings
else:
module = self.model
if module.share_token_embeddings:
param = module.word_embeddings_weight()
param._disable_greedy_grad_copy = not self.megatron_amp_o2
param._disable_overlap_grad_sync = True
# Disable overlapped grad sync for layer norm grads when
# sequence parallelism is enabled
for param in self.parameters():
if getattr(param, 'sequence_parallel', False):
param._disable_greedy_grad_copy = not self.megatron_amp_o2
param._disable_overlap_grad_sync = True
# Initialize parameter buckets for overlapped grad and param syncs
# Note: Params with disabled overlapping are put in the
# last param bucket
buckets = []
if self.cfg.get('virtual_pipeline_model_parallel_size', None) is not None:
# Initialize a bucket for each virtual pipeline stage
for module in self.model:
if isinstance(module, Float16Module):
module = module.module
stage_bucket = []
for layer in module.language_model.encoder.layers:
stage_bucket.extend(
p for p in layer.parameters() if not getattr(p, '_disable_overlap_grad_sync', False)
)
buckets.append(stage_bucket)
else:
# Initialize a bucket for each Transformer layer
modules = self.model if isinstance(self.model, list) else [self.model]
for module in modules:
if isinstance(module, Float16Module):
module = module.module
for layer in module.language_model.encoder.layers:
buckets.append(
[p for p in layer.parameters() if not getattr(p, '_disable_overlap_grad_sync', False)]
)
buckets.reverse()
used_params = set()
for bucket in buckets:
used_params.update(bucket)
buckets[-1].extend(p for p in self.parameters() if p not in used_params)
self.distributed_adam_buckets = buckets
return super().configure_optimizers()
# Required for ONNX export
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"attention_mask": NeuralType(('B', 'T'), MaskType(), optional=True),
"token_type_ids": NeuralType(('B', 'T'), ChannelType(), optional=True),
}
# Required for ONNX export
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
sample = next(self.parameters())
sz = (max_batch, max_dim)
input_ids = torch.randint(low=0, high=2048, size=sz, device=sample.device)
token_type_ids = torch.randint(low=0, high=1, size=sz, device=sample.device)
attention_mask = torch.randint(low=0, high=1, size=sz, device=sample.device)
input_dict = {"input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids}
return tuple([input_dict])
def on_save_checkpoint(self, checkpoint) -> None:
"""LightningModule hook:
https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html#on-save-checkpoint
"""
if isinstance(self.model, list):
for i in range(len(self.model)):
parallel_state.set_virtual_pipeline_model_parallel_rank(i)
checkpoint[f'model{i}'] = self.model[i].module.state_dict_for_save_checkpoint()
parallel_state.set_virtual_pipeline_model_parallel_rank(0)
def on_load_checkpoint(self, checkpoint) -> None:
"""LightningModule hook:
https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html#on-load-checkpoint
"""
if isinstance(self.model, list):
for i in range(len(self.model)):
parallel_state.set_virtual_pipeline_model_parallel_rank(i)
self.model[i].module.load_state_dict(checkpoint[f'model{i}'], strict=True)
parallel_state.set_virtual_pipeline_model_parallel_rank(0)
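# --- Hedged usage sketch (illustrative only, not part of the original model code) ---
# Shows how the `list_available_models` classmethod defined above could be used to print
# the NGC checkpoint names and download locations. The helper takes the model class as an
# argument (e.g. the BERT model class defined in this module) and is not called anywhere.
def _example_print_available_checkpoints(model_cls):
    for info in model_cls.list_available_models():
        print(f"{info.pretrained_model_name}: {info.location}")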
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_bert_model.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code has been adapted from the following private repo: https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/tree/prompt-learning/prefix_tuning_v2
# Adapted by: @adithyare
import itertools
import os
import torch
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.common.parts.adapter_modules import LinearAdapterConfig
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import (
MegatronGPTPromptLearningModel,
)
from nemo.collections.nlp.modules.common import VirtualPromptStyle
from nemo.collections.nlp.modules.common.megatron.adapters.parallel_adapters import (
AdapterName,
InfusedAdapterConfig,
MLPInfusedAdapterConfig,
ParallelLinearAdapterConfig,
)
from nemo.collections.nlp.modules.common.megatron.utils import average_losses_across_data_parallel_group
from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
from nemo.collections.nlp.parts.utils_funcs import get_last_rank
from nemo.core.classes.mixins import adapter_mixins
from nemo.utils import logging, model_utils
class MegatronGPTBaseAdapterModel(MegatronGPTPromptLearningModel):
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer)
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.get('language_model_path')):
save_restore_connector.model_extracted_dir = cfg.get('language_model_path')
self.frozen_model_cfg = MegatronGPTModel.restore_from(
cfg.get('language_model_path'),
trainer=trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
self.adapter_name_keys = []
def forward(
self,
input_ids,
position_ids,
attention_mask,
taskname_ids,
labels=None,
inference=True,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
):
if self.autocast_dtype == torch.float32:
output = self.frozen_model.model(
input_ids=input_ids,
position_ids=position_ids,
encoder_input=None,
attention_mask=attention_mask,
labels=labels,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
)
else:
with torch.autocast(device_type="cuda", dtype=self.autocast_dtype):
output = self.frozen_model.model(
input_ids=input_ids,
position_ids=position_ids,
encoder_input=None,
attention_mask=attention_mask,
labels=labels,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
)
return output
def setup(self, stage=None):
if stage == 'predict':
self.frozen_model.freeze()
return
self.setup_test_data()
if stage == 'test':
return
self.setup_training_data()
self.setup_validation_data()
logging.info(f'setup completed:\n{self.frozen_model.summarize()}')
def on_train_end(self):
# Save the best nemo model
self.save_to(save_path=self.cfg.nemo_path)
def get_forward_output_only_func(self):
"""
Used for generate method only for now.
"""
def fwd_output_only_func(dataloader_iter, model):
batch = next(dataloader_iter)
extra_arg = {}
(
tokens,
attention_mask,
position_ids,
task_ids,
set_inference_key_value_memory,
inference_max_sequence_len,
) = batch
tokens = tokens.cuda()
attention_mask = attention_mask.cuda()
position_ids = position_ids.cuda()
task_ids = task_ids.cuda()
extra_arg['set_inference_key_value_memory'] = set_inference_key_value_memory[0].item()
extra_arg['inference_max_sequence_len'] = inference_max_sequence_len[0].item()
output_tensor = model(tokens, position_ids, attention_mask, task_ids, **extra_arg)
def id_func(output_tensor):
return output_tensor, {'logits': output_tensor}
return output_tensor, id_func
return fwd_output_only_func
def state_dict(self, destination=None, prefix=None, keep_vars=False):
"""
Creates a state_dict using only the adapter parameters.
This ensures that this wrapper class will only checkpoint the adapter
weights and not the rest of the base GPT Model.
"""
state_dict_ = {}
for name, module in self.frozen_model.named_modules():
if isinstance(module, adapter_mixins.AdapterModuleMixin) and module.is_adapter_available():
for adapter_key in self.adapter_name_keys:
adapter_module = module.get_adapter_module(adapter_key)
if adapter_module:
state_adapter_key = ':'.join([name, adapter_key])
state_dict_[state_adapter_key] = adapter_module.state_dict()
module.set_enabled_adapters(enabled=True)
return state_dict_
def load_state_dict(self, state_dict, strict: bool = True):
"""
Loads a state_dict expecting the state_dict to contain key,values
only for the adapter parameters.
"""
for name, module in self.frozen_model.named_modules():
if isinstance(module, adapter_mixins.AdapterModuleMixin) and module.is_adapter_available():
for adapter_key in self.adapter_name_keys:
adapter_module = module.get_adapter_module(adapter_key)
if adapter_module:
state_adapter_key = ':'.join([name, adapter_key])
adapter_module.load_state_dict(state_dict[state_adapter_key], strict)
module.set_enabled_adapters(enabled=True)
def setup_optimizer_param_groups(self):
"""
ModelPT override. Optimizer will get self._optimizer_param_groups.
        The frozen base model is kept frozen while the adapter modules are
        selectively unfrozen; only the adapter parameters are collected into a
        single optimizer param group, which uses the learning rate set by the
        user. Gradients can still be passed through the frozen layers, which is
        needed for pipeline parallel models.
"""
self.frozen_model.freeze() # Freeze the entire model
opt_params = []
for _, module in self.frozen_model.named_modules():
if isinstance(module, adapter_mixins.AdapterModuleMixin) and module.is_adapter_available():
module.set_enabled_adapters(enabled=True)
module.unfreeze_enabled_adapters() # selectively unfreeze the adapter modules.
opt_params += [p for p in module.parameters()]
self._optimizer_param_groups = [{'params': opt_params}]
logging.info(f'Optimizer groups set:\n{self.frozen_model.summarize()}')
def get_forward_output_and_loss_func(self):
def fwd_output_and_loss_func(dataloader_iter, model):
batch = next(dataloader_iter)
batch = [x.cuda(non_blocking=True) for x in batch]
input_ids, labels, loss_mask, position_ids, attention_mask, taskname_ids = batch
output_tensor = model(input_ids, position_ids, attention_mask, taskname_ids, labels, inference=False)
def loss_func(output_tensor):
loss = self.frozen_model.loss_func(loss_mask, output_tensor)
reduced_loss = average_losses_across_data_parallel_group([loss])
return loss, {'avg': reduced_loss}
return output_tensor, loss_func
return fwd_output_and_loss_func
def training_step(self, dataloader_iter, batch_idx):
# we zero grads here because we also call backward in the megatron-core fwd/bwd functions
self._optimizer.zero_grad()
batch = next(dataloader_iter)
loss_mean = self.fwd_bwd_step(itertools.chain([batch]), batch_idx, forward_only=False)
self.allreduce_gradients()
## logging
# we can only log on one rank if it is rank zero so we broadcast from last rank
# we can avoid this broadcast by updating the PTL log function to accept specific ranks
torch.distributed.broadcast(loss_mean, get_last_rank())
if self.torch_dtype == torch.float16:
loss_scale = self.trainer.precision_plugin.scaler._scale
if loss_scale is not None:
self.log('loss_scale', loss_scale, batch_size=1)
self.log('reduced_train_loss', loss_mean, prog_bar=True, rank_zero_only=True, batch_size=1)
lr = self._optimizer.param_groups[0]['lr']
self.log('lr', lr, rank_zero_only=True, batch_size=1)
self.log('global_step', self.trainer.global_step, prog_bar=True, rank_zero_only=True, batch_size=1)
# Need to make sure the frozen model param learning rate stays 0.0
        # so forcing lr to be 0.0 for gpt layers before param update
return loss_mean
class MegatronGPTAdapterLearningModel(MegatronGPTBaseAdapterModel):
"""
    MegatronGPTAdapterLearningModel is a model that combines a base model (GPTModel) with adapters.
    This class only supports the canonical Adapter training described in Houlsby et al. (https://arxiv.org/pdf/1902.00751.pdf)
    Two adapters are inserted into each Transformer layer in the base GPT Model.
    It is assumed that this set of adapters will then be trained for a specific task.
Once trained, the adapter weights will be saved and can be re-loaded
and infused into the same GPT Model for inference.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer)
assert cfg.adapter_tuning.get('adapter_dim', 0) > 0, "adapter_dim has not been set."
assert (
cfg.adapter_tuning.adapter_dim % cfg.tensor_model_parallel_size == 0
), "The adapter dim should be divisible by tensor_model_parallel_size."
assert cfg.adapter_tuning.type in [
'linear_adapter',
'parallel_adapter',
], "Adapter type should be 'linear_adapter' or 'parallel_adapter'"
self.adapter_name_keys = [AdapterName.PRE_ATTN_ADAPTER, AdapterName.POST_ATTN_ADAPTER]
for _, layer in self.frozen_model.named_modules():
if hasattr(layer, 'activations_checkpoint_method'):
layer.activations_checkpoint_method = (
None # (@adithyare) adapter learning does not support activations checkpointing atm.
)
logging.info(f'Before adding adapters:\n{self.frozen_model.summarize()}')
if cfg.adapter_tuning.type == "parallel_adapter":
adapter_cfg = ParallelLinearAdapterConfig(
in_features=self.frozen_model_cfg.hidden_size,
out_features=self.frozen_model_cfg.hidden_size,
dim=cfg.adapter_tuning.adapter_dim,
norm_position=cfg.adapter_tuning.get('norm_position', 'pre'),
norm_type=cfg.adapter_tuning.get('norm_type', 'mixedfusedlayernorm'),
column_init_method=cfg.adapter_tuning.get('column_init_method', 'xavier'),
row_init_method=cfg.adapter_tuning.get('row_init_method', 'zero'),
dropout=cfg.adapter_tuning.adapter_dropout,
)
else:
adapter_cfg = LinearAdapterConfig(
in_features=self.frozen_model_cfg.hidden_size,
dim=cfg.adapter_tuning.adapter_dim,
norm_position=cfg.adapter_tuning.get('norm_position', 'pre'),
dropout=cfg.adapter_tuning.adapter_dropout,
)
self.frozen_model.freeze()
for _, module in self.frozen_model.named_modules():
if isinstance(module, adapter_mixins.AdapterModuleMixin):
for adapter_key in self.adapter_name_keys:
if model_utils.import_class_by_path(adapter_cfg._target_) in module.get_accepted_adapter_types():
module.add_adapter(
name=adapter_key, cfg=adapter_cfg,
)
logging.info(f'After adding adapters:\n{self.frozen_model.summarize()}')
@classmethod
def list_available_models(cls):
pass
class MegatronGPTInfusedAdapterModel(MegatronGPTBaseAdapterModel):
"""
    MegatronGPTInfusedAdapterModel is a model that combines a base model (GPTModel) with an "Infused Adapter by Inhibiting and Amplifying Inner Activations", known as IA3.
    This class supports the addition of IA3 into a transformer based LM as described in Liu et al. (https://arxiv.org/pdf/2205.05638.pdf)
    Three adapters are inserted into each Transformer layer in the base GPT Model. Each adapter is basically a vector that simply scales the key, value or ffn hidden representations.
    It is assumed that this set of adapters will then be trained for a specific task.
Once trained, the adapter weights will be saved and can be re-loaded
and infused into the same GPT Model for inference.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer)
self.adapter_name_keys = [AdapterName.KEY_INFUSED, AdapterName.VALUE_INFUSED, AdapterName.MLP_INFUSED]
for _, layer in self.frozen_model.named_modules():
if hasattr(layer, 'activations_checkpoint_method'):
layer.activations_checkpoint_method = (
None # (@adithyare) adapter learning does not support activations checkpointing atm.
)
logging.info(f'Before adding adapters:\n{self.frozen_model.summarize()}')
self.frozen_model.freeze()
for _, module in self.frozen_model.named_modules():
if isinstance(module, adapter_mixins.AdapterModuleMixin):
for adapter_key in self.adapter_name_keys:
if adapter_key == AdapterName.MLP_INFUSED:
cfg = MLPInfusedAdapterConfig(
in_features=self.frozen_model_cfg.ffn_hidden_size
// self.frozen_model_cfg.tensor_model_parallel_size
)
elif adapter_key in [AdapterName.KEY_INFUSED, AdapterName.VALUE_INFUSED]:
cfg = InfusedAdapterConfig(
in_features=self.frozen_model_cfg.hidden_size
// self.frozen_model_cfg.tensor_model_parallel_size
)
else:
raise ValueError(f"Adapter Key {adapter_key} is unknown.")
if model_utils.import_class_by_path(cfg._target_) in module.get_accepted_adapter_types():
module.add_adapter(name=adapter_key, cfg=cfg)
logging.info(f'After adding adapters:\n{self.frozen_model.summarize()}')
@classmethod
def list_available_models(cls):
pass
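# --- Hedged configuration sketch (illustrative only, not used by the classes above) ---
# Rebuilds, in isolation, an adapter config equivalent to the 'parallel_adapter' branch of
# MegatronGPTAdapterLearningModel.__init__ above. The hidden size and adapter dim defaults
# below are hypothetical example values.
def _example_parallel_adapter_cfg(hidden_size: int = 4096, adapter_dim: int = 32) -> ParallelLinearAdapterConfig:
    return ParallelLinearAdapterConfig(
        in_features=hidden_size,
        out_features=hidden_size,
        dim=adapter_dim,
        norm_position='pre',
        norm_type='mixedfusedlayernorm',
        column_init_method='xavier',
        row_init_method='zero',
        dropout=0.0,
    )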
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_gpt_adapter_model.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_sft_model import MegatronGPTSFTModel
from nemo.collections.nlp.modules.common.megatron.adapters.mcore_mixins import (
MCoreGPTEmbeddingMixin,
MCoreSelfAttentionMixin,
MCoreTransformerLayerMixin,
swap_mcore_mixin,
)
from nemo.collections.nlp.modules.common.megatron.adapters.parallel_adapters import (
AdapterName,
InfusedAdapterConfig,
LoraKQVAdapterConfig,
LoraKQVAdapterWeightTyingConfig,
MLPInfusedAdapterConfig,
ParallelLinearAdapterConfig,
ParallelLinearAdapterWeightTyingConfig,
PromptEncoderAdapterConfig,
)
from nemo.core.classes.mixins import adapter_mixins
from nemo.utils import logging, model_utils
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
class MegatronGPTPEFTModel(MegatronGPTSFTModel):
"""
base class for all mixin based adapter models
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer)
self.setup_complete = False
self.base_keys = self.get_all_keys()
self.freeze()
self.init_peft_modules()
self.adapter_keys = self.get_all_keys() - self.base_keys
def first_stage_of_pipeline(self):
if hasattr(self, "model") and hasattr(self.model, "pre_process"):
return self.model.pre_process
elif hasattr(self, "model") and hasattr(self.model, "module") and hasattr(self.model.module, "pre_process"):
# (guyueh1): this if condition is used to handle amp O2
# when amp_O2 is on, self.model will be wrapped by the Float16Module class
return self.model.module.pre_process
logging.warning("no attribute named model or no model.pre_process found. Can not detect stage of pipeline...")
return False
def init_peft_modules(self):
"""
Randomly initialize the peft params and add them to the appropriate modules.
"""
assert len(self.peft_name_keys) > 0, "peft_name_keys have not been set no PEFT modules will be added"
assert not self.mcore_gpt or hasattr(
self, 'name_key_to_mcore_mixins'
), f"{self.__class__.__name__} is not supported in megatron core mode yet."
assert len(self.name_key_to_cfg) > 0, "name_key_to_cfg has not been set no PEFT modules will be added"
logging.info(f"Before adding PEFT params:\n{self.summarize()}")
for name, module in self.named_modules():
if self.mcore_gpt:
for peft_key in self.peft_name_keys:
for mcore_target, mcore_mixin in self.name_key_to_mcore_mixins[peft_key]:
if name in [
f'model.{mcore_target}',
f'model.module.{mcore_target}',
]: # simple string match for now
swap_mcore_mixin(module, mcore_mixin)
peft_cfg = self.name_key_to_cfg[peft_key]
if (
model_utils.import_class_by_path(peft_cfg._target_)
in module.get_accepted_adapter_types()
):
module.add_adapter(
name=peft_key,
cfg=peft_cfg,
base_model_cfg=self.cfg,
model_parallel_config=self.model_parallel_config,
)
else:
if isinstance(module, adapter_mixins.AdapterModuleMixin):
for peft_key in self.peft_name_keys:
peft_cfg = self.name_key_to_cfg[peft_key]
if model_utils.import_class_by_path(peft_cfg._target_) in module.get_accepted_adapter_types():
module.add_adapter(
name=peft_key,
cfg=peft_cfg,
base_model_cfg=self.cfg,
model_parallel_config=self.model_parallel_config,
)
logging.info(f"After adding PEFT params:\n{self.summarize()}")
return True
def setup(self, stage=None):
super().setup(stage)
self.setup_complete = True
def get_all_keys(self,):
"""
Returns all the keys in the model
"""
k = [n for n, p in self.named_parameters()]
b = [n for n, p in self.named_buffers() if n.replace("model.module.", "model.", 1) in self.state_dict().keys()]
# we include buffers because ptuning representations are cached in a buffer and saved to state_dict for inference time use.
return set(k + b)
def get_peft_state_dict(self,):
"""
Gets the keys associated with the adapters only.
"""
state_dict = self.model.state_dict(prefix="model.module." if self.cfg.megatron_amp_O2 else "model.")
peft_state_dict = {}
for k in self.adapter_keys:
# state_dict keys needs to be in non-O2 format and will be corrected in PEFTSaveRestoreConnector if O2=True
new_k = k.replace("model.module.", "model.", 1)
peft_state_dict[new_k] = state_dict[k]
return peft_state_dict
def state_dict(self, destination=None, prefix=None, keep_vars=False):
if self.setup_complete:
            # Once setup is complete we no longer need to track the frozen part of the model. Only the adapter state dict keeps changing, so state_dict only tracks these.
return self.get_peft_state_dict()
else:
# we want all the params with the same keys as calling self.state_dict()
# but we can't call self.state_dict() here as it would be a recursive call.
# so we call self.model.state_dict(prefix="model.") which will return all the keys and params same as calling self.state_dict()
return self.model.state_dict(prefix="model.")
def sharded_state_dict(self, prefix: str = ''):
if self.setup_complete:
return None
else:
return self.model.sharded_state_dict(prefix="model.")
def load_state_dict(self, state_dict, strict: bool = True):
if len(state_dict) == 0:
return # checkpoint is loaded in on_load_checkpoint()
if self.setup_complete:
# at this stage only PEFT params will appear in the state_dict arg
# so we only update those while the rest of the model is frozen.
# setting strict=False will ignore the missing keys (which are not being updated anyway)
# explicitly check if state_dict.keys matches all the expected self.adapter_keys since we don't have the
# safety in strict=True anymore.
assert set(state_dict.keys()) == self.adapter_keys
super().load_state_dict(state_dict, strict=False)
else:
super().load_state_dict(state_dict, strict=True)
def on_load_checkpoint(self, checkpoint) -> None:
"""LightningModule hook:
https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html#on-load-checkpoint
"""
if self.setup_complete:
# same as super().on_load_checkpoint() but strict=False and only check unexpected keys
# mcore uses distributed checkpointing
print('enter peft loading')
if self.mcore_gpt:
for index, module in enumerate(self.get_gpt_module_list()):
if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None:
checkpoint_state_dict = checkpoint['state_dict'][f'model_{index}']
else:
checkpoint_state_dict = checkpoint['state_dict']
# checkpoint_state_dict has "model." but module does not so we need to remove it when loading
checkpoint_state_dict = {
key.replace('model.', ''): checkpoint_state_dict.pop(key)
for key in list(checkpoint_state_dict.keys())
}
missing_keys, unexpected_keys = module.load_state_dict(checkpoint_state_dict, strict=False)
assert len(unexpected_keys) == 0, 'Unexpected key(s) in state_dict: {}. '.format(
', '.join('"{}"'.format(k) for k in unexpected_keys)
)
# legacy checkpointing for interleaved
else:
if isinstance(self.model, list):
for i in range(len(self.model)):
parallel_state.set_virtual_pipeline_model_parallel_rank(i)
self.model[i].module.load_state_dict(checkpoint[f'model{i}'], strict=True)
parallel_state.set_virtual_pipeline_model_parallel_rank(0)
else:
super().on_load_checkpoint(checkpoint)
def setup_optimizer_param_groups(self):
"""
ModelPT override. Optimizer will get self._optimizer_param_groups.
        The base model is frozen, while any available adapter modules are
        selectively unfrozen; only their trainable parameters are placed in a
        single optimizer param group, which uses the learning rate set by the
        user. Gradients can still be passed through the frozen layers, which is
        needed for pipeline parallel models.
"""
self.freeze() # Freeze the entire model
opt_params = []
for _, module in self.named_modules():
if isinstance(module, adapter_mixins.AdapterModuleMixin) and module.is_adapter_available():
module.set_enabled_adapters(enabled=True)
module.unfreeze_enabled_adapters() # selectively unfreeze the adapter modules.
opt_params += [p for p in module.parameters() if p.requires_grad]
self._optimizer_param_groups = ({"params": opt_params},)
logging.info(f"Optimizer groups set:\n{self.summarize()}")
class MegatronGPTLayerwisePEFTModel(MegatronGPTPEFTModel):
def __init__(
self, cfg: DictConfig, trainer: Trainer,
):
super().__init__(cfg, trainer)
def init_peft_modules(self):
"""
Randomly initialize the peft params and add them to the appropriate modules.
"""
assert len(self.peft_name_keys) > 0, "peft_name_keys have not been set no PEFT modules will be added"
assert not self.mcore_gpt or hasattr(
self, 'name_key_to_mcore_mixins'
), f"{self.__class__.__name__} is not supported in megatron core mode yet."
assert len(self.name_key_to_cfg) > 0, "name_key_to_cfg has not been set no PEFT modules will be added"
logging.info(f"Before adding PEFT params:\n{self.summarize()}")
if self.mcore_gpt:
if self.cfg.megatron_amp_O2:
layers = self.model.module.decoder.layers
else:
layers = self.model.decoder.layers
else:
if self.cfg.megatron_amp_O2:
layers = self.model.module.language_model.encoder.layers
else:
layers = self.model.language_model.encoder.layers
for layer in layers:
if layer.layer_number in self.layer_selection:
for name, module in layer.named_modules():
if self.mcore_gpt:
for peft_key in self.peft_name_keys:
for mcore_target, mcore_mixin in self.name_key_to_mcore_mixins[peft_key]:
if name == mcore_target:
swap_mcore_mixin(module, mcore_mixin)
peft_cfg = self.name_key_to_cfg[peft_key]
if (
model_utils.import_class_by_path(peft_cfg._target_)
in module.get_accepted_adapter_types()
):
module.add_adapter(
name=peft_key,
cfg=peft_cfg,
model_parallel_config=self.model_parallel_config,
)
else:
if isinstance(module, adapter_mixins.AdapterModuleMixin):
for peft_key in self.peft_name_keys:
peft_cfg = self.name_key_to_cfg[peft_key]
if (
model_utils.import_class_by_path(peft_cfg._target_)
in module.get_accepted_adapter_types()
):
module.add_adapter(
name=peft_key,
cfg=peft_cfg,
base_model_cfg=self.cfg,
model_parallel_config=self.model_parallel_config,
)
logging.info(f"After adding PEFT params:\n{self.summarize()}")
return True
class MegatronGPTAdapterModel(MegatronGPTLayerwisePEFTModel):
"""
    MegatronGPTAdapterModel is a model that combines a base model (GPTSFTModel) with adapters.
    This class only supports the canonical Adapter training described in Houlsby et al. (https://arxiv.org/pdf/1902.00751.pdf)
    Two adapters are inserted into each Transformer layer in the base GPT Model.
    It is assumed that this set of adapters will then be trained for a specific task.
Once trained, the adapter weights will be saved and can be re-loaded
and infused into the same GPT Model for inference.
"""
def __init__(
self, cfg: DictConfig, trainer: Trainer,
):
self.peft_name_keys = [
AdapterName.PRE_ATTN_ADAPTER,
AdapterName.POST_ATTN_ADAPTER,
]
adapter_tuning_cfg = cfg.peft.adapter_tuning
adapter_cfg = ParallelLinearAdapterConfig(
in_features=cfg.hidden_size,
out_features=cfg.hidden_size,
dim=adapter_tuning_cfg.adapter_dim,
norm_position=adapter_tuning_cfg.get("norm_position", "pre"),
norm_type=adapter_tuning_cfg.get("norm_type", "mixedfusedlayernorm"),
column_init_method=adapter_tuning_cfg.get("column_init_method", "xavier"),
row_init_method=adapter_tuning_cfg.get("row_init_method", "zero"),
dropout=adapter_tuning_cfg.adapter_dropout,
)
self.name_key_to_cfg = {}
self.name_key_to_mcore_mixins = {}
for k in self.peft_name_keys:
self.name_key_to_cfg[k] = adapter_cfg
self.name_key_to_mcore_mixins[k] = [("", MCoreTransformerLayerMixin)]
self.layer_selection = adapter_tuning_cfg.get("layer_selection", None)
if self.layer_selection is None:
self.layer_selection = list(range(1, cfg.num_layers + 1))
super().__init__(cfg, trainer)
class MegatronGPTAdapterModelWeightTying(MegatronGPTLayerwisePEFTModel):
"""
TODO
"""
def __init__(
self, cfg: DictConfig, trainer: Trainer,
):
self.peft_name_keys = [
AdapterName.PRE_ATTN_ADAPTER,
AdapterName.POST_ATTN_ADAPTER,
]
adapter_tuning_cfg = cfg.peft.adapter_tuning
adapter_cfg = ParallelLinearAdapterWeightTyingConfig(
in_features=cfg.hidden_size,
out_features=cfg.hidden_size,
dim=adapter_tuning_cfg.adapter_dim,
norm_position=adapter_tuning_cfg.get("norm_position", "pre"),
norm_type=adapter_tuning_cfg.get("norm_type", "mixedfusedlayernorm"),
column_init_method=adapter_tuning_cfg.get("column_init_method", "xavier"),
row_init_method=adapter_tuning_cfg.get("row_init_method", "zero"),
dropout=adapter_tuning_cfg.adapter_dropout,
num_position_embeddings=cfg.num_layers * 2,
dim_position_embeddings=cfg.hidden_size,
position_embedding_strategy=adapter_tuning_cfg.get("position_embedding_strategy", None),
)
self.name_key_to_cfg = {}
self.name_key_to_mcore_mixins = {}
for k in self.peft_name_keys:
self.name_key_to_cfg[k] = adapter_cfg
self.name_key_to_mcore_mixins[k] = [("", MCoreTransformerLayerMixin)]
self.layer_selection = adapter_tuning_cfg.get("layer_selection", None)
if self.layer_selection is None:
self.layer_selection = list(range(1, cfg.num_layers + 1))
super().__init__(cfg, trainer)
self.tie_weights()
def tie_weights(self,):
pos_idx = 0
if self.mcore_gpt:
if self.cfg.megatron_amp_O2:
layers = self.model.module.decoder.layers
else:
layers = self.model.decoder.layers
else:
if self.cfg.megatron_amp_O2:
layers = self.model.module.language_model.encoder.layers
else:
layers = self.model.language_model.encoder.layers
layer0 = layers[0]
for adapter_name in layer0.adapter_layer:
adapter = layer0.get_adapter_module(adapter_name)
print(adapter_name, pos_idx)
adapter.set_position(pos_idx)
pos_idx += 1
for layer in layers[1:]:
for adapter_name in layer.adapter_layer:
print(adapter_name, pos_idx)
adapter_l = layer.get_adapter_module(adapter_name)
adapter_0 = layer0.get_adapter_module(adapter_name)
if hasattr(adapter_0, "layer_norm"):
lnorm = adapter_0.layer_norm
else:
lnorm = None
adapter_l.tie_weights(pos_idx, adapter_0)
pos_idx += 1
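# --- Hedged sketch of the position bookkeeping used by tie_weights() above (illustrative only) ---
# Adapters are visited layer by layer and each instance receives the next integer position index;
# layer 0 keeps its own weights while every later layer ties to the matching layer-0 adapter and
# only retains a distinct position-embedding index. The helper below just reproduces the index
# assignment order with hypothetical arguments and is never called at runtime.
def _example_tied_position_indices(num_layers, adapters_per_layer):
    positions, pos_idx = [], 0
    for layer_idx in range(num_layers):
        for adapter_idx in range(adapters_per_layer):
            positions.append((layer_idx, adapter_idx, pos_idx))
            pos_idx += 1
    return positions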
class MegatronGPTIA3Model(MegatronGPTLayerwisePEFTModel):
"""
    MegatronGPTIA3Model is a model that combines a base model (GPTSFTModel) with an "Infused Adapter by Inhibiting and Amplifying Inner Activations", known as IA3.
    This class supports the addition of IA3 into a transformer based LM as described in Liu et al. (https://arxiv.org/pdf/2205.05638.pdf)
    Three adapters are inserted into each Transformer layer in the base GPT Model. Each adapter is basically a vector that simply scales the key, value or ffn hidden representations.
    It is assumed that this set of adapters will then be trained for a specific task.
Once trained, the adapter weights will be saved and can be re-loaded
and infused into the same GPT Model for inference.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
self.peft_name_keys = [AdapterName.KEY_INFUSED, AdapterName.VALUE_INFUSED, AdapterName.MLP_INFUSED]
mlp_infused_adapter_cfg = MLPInfusedAdapterConfig(
in_features=cfg.ffn_hidden_size // cfg.tensor_model_parallel_size
)
infused_adapter_cfg = InfusedAdapterConfig(in_features=cfg.hidden_size // cfg.tensor_model_parallel_size)
self.name_key_to_cfg = {}
for k in self.peft_name_keys:
if k == AdapterName.MLP_INFUSED:
self.name_key_to_cfg[k] = mlp_infused_adapter_cfg
elif k in [
AdapterName.KEY_INFUSED,
AdapterName.VALUE_INFUSED,
]:
self.name_key_to_cfg[k] = infused_adapter_cfg
else:
raise ValueError(f"PEFT Key {k} is unknown.")
self.layer_selection = cfg.peft.ia3_tuning.get("layer_selection", None)
if self.layer_selection is None:
self.layer_selection = list(range(1, cfg.num_layers + 1))
super().__init__(cfg, trainer)
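# --- Hedged sketch of the per-tensor-parallel IA3 sizing used above (illustrative only) ---
# Each IA3 adapter is a learned scaling vector whose length is the per-partition width,
# i.e. the hidden/ffn size divided by the tensor-parallel size, mirroring
# MegatronGPTIA3Model.__init__. The arguments are hypothetical example values and this
# helper is never called at runtime.
def _example_ia3_adapter_cfgs(hidden_size, ffn_hidden_size, tensor_model_parallel_size):
    return {
        AdapterName.KEY_INFUSED: InfusedAdapterConfig(in_features=hidden_size // tensor_model_parallel_size),
        AdapterName.VALUE_INFUSED: InfusedAdapterConfig(in_features=hidden_size // tensor_model_parallel_size),
        AdapterName.MLP_INFUSED: MLPInfusedAdapterConfig(in_features=ffn_hidden_size // tensor_model_parallel_size),
    }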
class MegatronGPTPTuningModel(MegatronGPTPEFTModel):
"""
MegatronGPTPTuningModel is a model that combines a base model (GPTSFTModel) with a p-tuning prefix in the
    input word embedding representations using a prompt-encoder as described in Liu et al. https://arxiv.org/pdf/2103.10385.pdf
The mixin framework adds the output of prompt-encoder (i.e. the virtual embeddings) inside
nemo/collections/nlp/modules/common/megatron/language_model.py
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
self.peft_name_keys = [AdapterName.PTUNING_ADAPTER]
adapter_cfg = PromptEncoderAdapterConfig(
cfg.peft.p_tuning.virtual_tokens,
cfg.peft.p_tuning.bottleneck_dim,
cfg.peft.p_tuning.embedding_dim,
cfg.peft.p_tuning.init_std,
cfg.hidden_size,
)
self.name_key_to_cfg = {AdapterName.PTUNING_ADAPTER: adapter_cfg}
self.name_key_to_mcore_mixins = {AdapterName.PTUNING_ADAPTER: [('embedding', MCoreGPTEmbeddingMixin)]}
super().__init__(cfg, trainer)
self.virtual_tokens = cfg.peft.p_tuning.virtual_tokens
def init_peft_modules(self,):
"""
Initialize the p-tuning prompt encoder in the mixin.
This should only happen in the first stage of the pipeline unlike other PEFT methods like Lora or Adapters
because p-tuning only adds params at input to the encoder layer.
"""
if not self.first_stage_of_pipeline():
            # There are no params to add if we are not in the first stage of the pipeline
return True
super().init_peft_modules()
return True
def state_dict(self, destination=None, prefix=None, keep_vars=False):
"""
Reimplement state_dict for ptuning because we also need to check the stage of the pipeline.
The check is required to make pp>1 to work.
"""
if self.setup_complete:
if self.first_stage_of_pipeline():
return self.get_peft_state_dict()
            # if we are not in the first stage of the pipeline after setup is done
            # there should be no params in the state_dict
return {}
else:
return self.model.state_dict(prefix="model.")
def load_state_dict(self, state_dict, strict: bool = True):
"""
Reimplement load_state_dict for ptuning because we also need to check the stage of the pipeline.
The check is required to make pp>1 to work.
"""
if len(state_dict) == 0:
return # checkpoint is loaded in on_load_checkpoint()
if self.setup_complete:
if self.first_stage_of_pipeline():
                # after setup is complete, only the first stage of the pipeline has p-tuning params to load;
                # later stages receive an empty state_dict and have nothing to load
assert set(state_dict.keys()) == self.adapter_keys
super().load_state_dict(state_dict, strict=False)
else:
super().load_state_dict(state_dict, strict=True)
def on_load_checkpoint(self, checkpoint) -> None:
"""LightningModule hook:
https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html#on-load-checkpoint
"""
if self.setup_complete:
if self.first_stage_of_pipeline():
super().on_load_checkpoint(checkpoint)
else:
super().on_load_checkpoint(checkpoint)
def setup_optimizer_param_groups(self):
if self.first_stage_of_pipeline():
super().setup_optimizer_param_groups()
else:
self.freeze() # Freeze the entire model
self._optimizer_param_groups = ({"params": []},)
logging.info(f"Optimizer groups set:\n{self.summarize()}")
class MegatronGPTAdapterPTuningModel(MegatronGPTPEFTModel):
"""
Want to combine adapters and p-tuning? Why not? they are orthogonal methods.
This class includes both sets of params.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
self.peft_name_keys = [
AdapterName.PRE_ATTN_ADAPTER,
AdapterName.POST_ATTN_ADAPTER,
AdapterName.PTUNING_ADAPTER,
]
ptuning_cfg = PromptEncoderAdapterConfig(
cfg.peft.p_tuning.virtual_tokens,
cfg.peft.p_tuning.bottleneck_dim,
cfg.peft.p_tuning.embedding_dim,
cfg.peft.p_tuning.init_std,
cfg.hidden_size,
)
adapter_tuning_cfg = cfg.peft.adapter_tuning
adapter_cfg = ParallelLinearAdapterConfig(
in_features=cfg.hidden_size,
out_features=cfg.hidden_size,
dim=adapter_tuning_cfg.adapter_dim,
norm_position=adapter_tuning_cfg.get("norm_position", "pre"),
norm_type=adapter_tuning_cfg.get("norm_type", "mixedfusedlayernorm"),
column_init_method=adapter_tuning_cfg.get("column_init_method", "xavier"),
row_init_method=adapter_tuning_cfg.get("row_init_method", "zero"),
dropout=adapter_tuning_cfg.adapter_dropout,
)
self.name_key_to_cfg = {
AdapterName.PRE_ATTN_ADAPTER: adapter_cfg,
AdapterName.POST_ATTN_ADAPTER: adapter_cfg,
AdapterName.PTUNING_ADAPTER: ptuning_cfg,
}
logging.warning("AdapterPTuning doesn't support mcore for now. need to use regex to match target.")
self.name_key_to_mcore_mixins = {
AdapterName.PRE_ATTN_ADAPTER: [('', MCoreTransformerLayerMixin)],
AdapterName.POST_ATTN_ADAPTER: [('', MCoreTransformerLayerMixin)],
AdapterName.PTUNING_ADAPTER: [('embedding', MCoreGPTEmbeddingMixin)],
}
super().__init__(cfg, trainer)
self.virtual_tokens = cfg.peft.p_tuning.virtual_tokens
class MegatronGPTLoRAModel(MegatronGPTLayerwisePEFTModel):
"""
    MegatronGPTLoRAModel is a model that combines a base model (GPTSFTModel) with low-rank adapters.
    The LoRA adapters are added in `nemo/collections/nlp/modules/common/megatron/attention.py`,
    following the low-rank adaptation approach of Hu et al.
    A single low-rank feedforward layer is used in parallel with the KQV projection layer.
    TODO: Add support to also include an option to add a low-rank adapter in the output projection layer.
"""
def __init__(
self, cfg: DictConfig, trainer: Trainer,
):
self.peft_name_keys = [
AdapterName.LORA_KQV_ADAPTER,
]
lora_cfg = cfg.peft.lora_tuning
if cfg.get("kv_channels", None) is None:
assert (
cfg.hidden_size % cfg.num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = cfg.hidden_size // cfg.num_attention_heads
else:
kv_channels = cfg.kv_channels
projection_size = kv_channels * cfg.num_attention_heads
num_query_groups = cfg.get("num_query_groups", None)
if num_query_groups is None:
num_query_groups = cfg.num_attention_heads
qkv_projection_size = projection_size + 2 * kv_channels * num_query_groups
adapter_cfg = LoraKQVAdapterConfig(
in_features=cfg.hidden_size,
out_features=qkv_projection_size,
dim=lora_cfg.adapter_dim,
norm_position=None,
norm_type=None,
activation="identity",
column_init_method=lora_cfg.get("column_init_method", "normal"),
row_init_method=lora_cfg.get("row_init_method", "zero"),
gather_output=False,
dropout=lora_cfg.adapter_dropout,
)
self.name_key_to_cfg = {}
self.name_key_to_mcore_mixins = {} # maps peft_key to a list of tuples (mcore_target, mcore_mixin)
for k in self.peft_name_keys:
self.name_key_to_cfg[k] = adapter_cfg
self.name_key_to_mcore_mixins[k] = [("self_attention", MCoreSelfAttentionMixin)]
self.layer_selection = lora_cfg.get("layer_selection", None)
if self.layer_selection is None:
self.layer_selection = list(range(1, cfg.num_layers + 1))
super().__init__(cfg, trainer)
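# --- Hedged sketch of the fused QKV output sizing used by MegatronGPTLoRAModel above ---
# (illustrative only, never called at runtime). With grouped-query attention the fused QKV
# projection produces one query per attention head plus one key and one value per query
# group, so its width is heads * kv_channels + 2 * groups * kv_channels. The arguments are
# hypothetical example values.
def _example_qkv_projection_size(hidden_size, num_attention_heads, num_query_groups=None, kv_channels=None):
    if kv_channels is None:
        assert hidden_size % num_attention_heads == 0
        kv_channels = hidden_size // num_attention_heads
    if num_query_groups is None:
        num_query_groups = num_attention_heads
    projection_size = kv_channels * num_attention_heads
    return projection_size + 2 * kv_channels * num_query_groups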
class MegatronGPTLoRAModelWeightTying(MegatronGPTLayerwisePEFTModel):
"""
TODO
"""
def __init__(
self, cfg: DictConfig, trainer: Trainer,
):
self.peft_name_keys = [
AdapterName.LORA_KQV_ADAPTER,
]
lora_cfg = cfg.peft.lora_tuning
if cfg.get("kv_channels", None) is None:
assert (
cfg.hidden_size % cfg.num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = cfg.hidden_size // cfg.num_attention_heads
else:
kv_channels = cfg.kv_channels
projection_size = kv_channels * cfg.num_attention_heads
num_query_groups = cfg.get("num_query_groups", None)
if num_query_groups is None:
num_query_groups = cfg.num_attention_heads
qkv_projection_size = projection_size + 2 * kv_channels * num_query_groups
position_embedding_strategy = lora_cfg.get("position_embedding_strategy", None)
if position_embedding_strategy is None:
dim_position_embeddings = 0
elif position_embedding_strategy == "add":
dim_position_embeddings = cfg.hidden_size
elif position_embedding_strategy == "biasadd":
dim_position_embeddings = 3 * projection_size
elif position_embedding_strategy == "concat":
dim_position_embeddings = lora_cfg.adapter_dim
elif position_embedding_strategy == "mlpconcat":
dim_position_embeddings = lora_cfg.adapter_dim
else:
raise RuntimeError(f"Unknown position embedding strategy {position_embedding_strategy} for tied weights")
adapter_cfg = LoraKQVAdapterWeightTyingConfig(
in_features=cfg.hidden_size,
out_features=qkv_projection_size,
dim=lora_cfg.adapter_dim,
norm_position=None,
norm_type=None,
activation="identity",
column_init_method=lora_cfg.get("column_init_method", "normal"),
row_init_method=lora_cfg.get("row_init_method", "zero"),
gather_output=False,
dropout=lora_cfg.adapter_dropout,
num_position_embeddings=cfg.num_layers,
dim_position_embeddings=dim_position_embeddings,
position_embedding_strategy=position_embedding_strategy,
)
self.name_key_to_cfg = {}
self.name_key_to_mcore_mixins = {}
for k in self.peft_name_keys:
self.name_key_to_cfg[k] = adapter_cfg
self.name_key_to_mcore_mixins[k] = [("self_attention", MCoreSelfAttentionMixin)]
self.layer_selection = lora_cfg.get("layer_selection", None)
if self.layer_selection is None:
self.layer_selection = list(range(1, cfg.num_layers + 1))
super().__init__(cfg, trainer)
self.tie_weights()
def tie_weights(self,):
pos_idx = 0
if self.mcore_gpt:
if self.cfg.megatron_amp_O2:
layers = self.model.module.decoder.layers
else:
layers = self.model.decoder.layers
else:
if self.cfg.megatron_amp_O2:
layers = self.model.module.language_model.encoder.layers
else:
layers = self.model.language_model.encoder.layers
layer0 = layers[0]
for adapter_name in layer0.self_attention.adapter_layer:
adapter = layer0.self_attention.get_adapter_module(adapter_name)
print(adapter_name, pos_idx)
adapter.set_position(pos_idx)
pos_idx += 1
for layer in layers[1:]:
for adapter_name in layer.self_attention.adapter_layer:
print(adapter_name, pos_idx)
adapter_l = layer.self_attention.get_adapter_module(adapter_name)
adapter_0 = layer0.self_attention.get_adapter_module(adapter_name)
position_embeddings_0 = None
if adapter_0.position_embedding_strategy:
position_embeddings_0 = adapter_0.position_embeddings
adapter_l.tie_weights(pos_idx, adapter_0)
pos_idx += 1
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_gpt_peft_models.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import re
from collections import OrderedDict
from typing import Any, Optional
import torch
from omegaconf.dictconfig import DictConfig
from omegaconf.omegaconf import open_dict
from pytorch_lightning.trainer.trainer import Trainer
from torch import Tensor
from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer
from nemo.collections.nlp.metrics.prompt_learning_metrics import AccuracyScore, BLEUScore, ROUGEScores
from nemo.collections.nlp.models.language_modeling.megatron_base_model import MegatronBaseModel
from nemo.collections.nlp.modules.common import (
PromptEncoder,
PromptEncoderType,
VirtualPromptPlaceholderToken,
VirtualPromptSource,
VirtualPromptStyle,
)
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults
from nemo.collections.nlp.modules.common.transformer.text_generation import TextGeneration
from nemo.collections.nlp.parts.nlp_overrides import GradScaler
from nemo.utils import AppState, logging
try:
from apex.transformer.pipeline_parallel.utils import _reconfigure_microbatch_calculator
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import ModelParallelConfig, parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
__all__ = ['MegatronBasePromptLearningModel']
class MegatronBasePromptLearningModel(MegatronBaseModel, TextGeneration):
"""
Model class for prompt-tuning or p-tuning a pretrained Megatron model.
    Prompt Tuning initializes virtual prompt embeddings directly from a copy of
    certain token embeddings from the pretrained model's vocabulary
    and directly tunes these embedding weights. The token embeddings used in
    initialization are specified by the user in the config file. The model can
    be prompt-tuned for multiple tasks at once. Virtual prompts are stored in a
    prompt table and can be added or deleted without disrupting virtual prompts
    for other tasks.
    P-tuning initializes an LSTM encoder model that generates virtual prompt
    embeddings for every task. Each task shares the same encoder. After p-tuning
    is complete, the learned virtual prompts can be saved to the prompt table
    using add_ptuned_prompts_to_prompt_table(). Thus, if a user wants to add a
    new virtual prompt via p-tuning, they do not need to retrain on all previous
    tasks. This gives p-tuning the same task flexibility as prompt-tuning.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer)
self.config: ModelParallelConfig = self.model_parallel_config
self.load_frozen_model(cfg, trainer)
self.prompt_encoder = None
self.tokenizer = self.frozen_model.tokenizer
if hasattr(self.frozen_model.cfg, "encoder") and hasattr(self.frozen_model.cfg, "decoder"):
self.hidden_size = (
self.frozen_model.cfg.encoder.hidden_size
) # Encoder and decoder need to have the same hidden size and we check for this in the frozen enc-dec model.
self.config.hidden_size = self.hidden_size
else:
self.hidden_size = self.frozen_model.cfg.hidden_size
self.config.hidden_size = self.hidden_size
self.existing_tasks = list(self.cfg.get('existing_tasks', []))
self.new_tasks = list(self.cfg.get('new_tasks', []))
self.virtual_prompt_style = VirtualPromptStyle(cfg.virtual_prompt_style)
# Load templates for assigning virtual prompt token positions
self.load_task_templates(self.cfg.task_templates)
if self.first_stage_of_pipeline() and self.virtual_prompt_style in [
VirtualPromptStyle.P_TUNING,
]:
# TODO: Handle this when moving GPT prompt learning to the base class.
self.word_embeddings = self.frozen_model.enc_dec_model.encoder_embedding.word_embeddings
# P-Tuning uses an LSTM Encoder to produce virtual token embeddings
if self.virtual_prompt_style == VirtualPromptStyle.P_TUNING:
self.virtual_prompt_source = VirtualPromptSource.PROMPT_ENCODER
elif self.virtual_prompt_style == VirtualPromptStyle.NO_PROMPT:
self.virtual_prompt_source = VirtualPromptSource.NO_PROMPT
else:
raise ValueError(f"\nvirtual prompt style '{cfg.virtual_prompt_style}'")
self._reduced_loss_buffer = []
self._inference_config = None
# Prepare pseudo token ids for virtual/virtual prompt tokens
self.pseudo_tokens = get_pseudo_tokens(self.max_virtual_tokens)
if isinstance(self.tokenizer, SentencePieceTokenizer):
self.tokenizer.add_special_tokens(self.pseudo_tokens)
else:
self.tokenizer.add_special_tokens({'additional_special_tokens': self.pseudo_tokens})
self.pseudo_token_ids = self.tokenizer.tokens_to_ids(self.pseudo_tokens)
self.pseudo_token_ids_start = self.pseudo_token_ids[0] if self.pseudo_token_ids else None
self.pad_token_id = self.tokenizer.pad_id if self.tokenizer.pad_id is not None else self.tokenizer.unk_id
self.decoder_seq_length = cfg.get('decoder_seq_length', 40)
# make sure the default pytorch lightning gradient clipping in the basemodel
self.grad_clip_pl_default = True
self.lowest_val_loss = None
self.prompt_encoder = None
self.enable_autocast = (
True if (not self.megatron_amp_o2) and (self.autocast_dtype in [torch.float16, torch.bfloat16]) else False
)
# define validation metric
if self.cfg.get('report_validation_metric', False):
validation_metric = self.cfg.get('validation_metric', 'accuracy')
if validation_metric == 'accuracy':
self.validation_metric = AccuracyScore()
elif validation_metric == 'bleu':
self.validation_metric = BLEUScore()
elif validation_metric == 'rouge':
self.validation_metric = ROUGEScores()
def load_task_templates(self, task_templates):
"""
Takes in the task template portion of the config and turns
it into a table where each task's prompt template and
the number of virtual tokens to insert in a given part of
the prompt template are specified.
"""
self.task_templates = {}
self.task_id_num_to_name = {}
self.max_virtual_tokens = 0
task_id_num = 0
for task in task_templates:
self.task_templates[task.taskname] = {
"prompt_template": task.prompt_template,
"prompt_template_fields": re.findall("\{(.*?)\}", task.prompt_template),
"answer_only_loss": task.get("answer_only_loss", False),
"answer_field": task.get("answer_field", None),
"truncate_field": task.truncate_field,
"total_virtual_tokens": task.total_virtual_tokens,
"virtual_token_splits": task.virtual_token_splits,
"task_id_num": task_id_num,
}
self.max_virtual_tokens = max(self.max_virtual_tokens, task.total_virtual_tokens)
self.task_id_num_to_name[task_id_num] = task.taskname
task_id_num += 1
# Check that all new tasks have the same total num virtual tokens
# Num virtual tokens for new tasks don't need to match num used for previously tuned tasks
if self.new_tasks:
new_task_name = self.new_tasks[0]
self.total_new_task_virtual_tokens = self.task_templates[new_task_name]["total_virtual_tokens"]
assert all(
self.task_templates[taskname]["total_virtual_tokens"] == self.total_new_task_virtual_tokens
for taskname in self.new_tasks
), "Total virtual tokens for each task tuned simultaneously must match. If you want to use a different number of virtual tokens for different tasks, tune them separately."
def init_prompt_encoder(self):
"""
Init the prompt encoder needed for p-tuning on a new task
"""
# Total virtual tokens should be the same across all new tasks, so just need one
new_task = self.new_tasks[0]
total_virtual_tokens = self.task_templates[new_task]["total_virtual_tokens"]
encoder_type = PromptEncoderType(self.cfg.p_tuning.get("encoder_type", "tpmlp").lower())
self.prompt_encoder = PromptEncoder(
config=self.model_parallel_config,
encoder_type=encoder_type,
total_virtual_tokens=total_virtual_tokens,
token_dim=self.hidden_size,
hidden_size=self.cfg.p_tuning.get("encoder_hidden", self.hidden_size // 2),
lstm_dropout=self.cfg.p_tuning.get("dropout", 0.0),
num_layers=self.cfg.p_tuning.get("num_layers", 2),
init_std=self.cfg.p_tuning.get("init_std", 0.023),
taskname=new_task,
)
def freeze_existing_word_embeddings(self):
"""Freeze params of existing virtual prompts that should not be tuned further
"""
# Make sure word embeddings are frozen
for params in self.word_embeddings.parameters():
params.requires_grad = False
def state_dict(self):
"""
Custom state dict that only contains prompt table and prompt encoder parameters.
No frozen model parameters are stored in the state dict. Prompt encoder parameters
are only in state dict for intermediate checkpoints saved during training. Final
nemo checkpoints at the end of training will contain prompt table parameters only.
"""
state_dict_ = {}
if self.first_stage_of_pipeline():
if self.virtual_prompt_source == VirtualPromptSource.PROMPT_ENCODER:
state_dict_ = self.prompt_encoder.state_dict()
else:
raise ValueError("invalid virtual prompt source")
return state_dict_
def load_state_dict(self, state_dict, strict: bool = True):
"""
Custom load state dict method that only loads prompt table and prompt encoder
parameters. Matching load method for this class' custom state dict method.
"""
if self.first_stage_of_pipeline():
if self.virtual_prompt_source == VirtualPromptSource.PROMPT_ENCODER:
if self.prompt_encoder is None:
self.init_prompt_encoder()
self.prompt_encoder.load_state_dict(state_dict, strict)
else:
raise ValueError("invalid virtual prompt source")
def setup_optimizer_param_groups(self):
"""
ModelPT override. Optimizer will get self._optimizer_param_groups.
Only want virtual prompt params to be passed to the optimizer.
"""
## Freeze frozen model
for param in self.frozen_model.parameters():
param.requires_grad = False
virtual_prompt_params = {'params': []}
if self.first_stage_of_pipeline():
if self.virtual_prompt_source == VirtualPromptSource.PROMPT_ENCODER:
virtual_prompt_params['params'].extend([param for param in self.prompt_encoder.parameters()])
else:
raise ValueError("Optimizer only supports Prompt Encoder.")
self._optimizer_param_groups = (virtual_prompt_params,)
def embed_input(self, input_ids: Tensor, taskname_ids: Tensor, use_cached_reps: bool):
"""
Replaces the virtual tokens in the input_ids with embeddings
calculated from either the 'prompt_table' or 'prompt_encoder'.
The virtual token placeholders have token_ids listed in
`self.pseudo_token_ids`.
params:
input_ids: the input token ids
taskname_ids: the NLP task tag token ids
returns:
the token embedding for the LM model.
"""
# Replace virtual token ids with padding for forward pass through vocab embeddings
discrete_token_ids = input_ids.clone()
discrete_token_ids[(input_ids >= self.pseudo_token_ids_start)] = self.pad_token_id
discrete_token_embeds = self.word_embeddings(discrete_token_ids).clone()
# Find the indices where virtual tokens should be inserted
virtual_token_locations = input_ids >= self.pseudo_token_ids_start
# If there are no virtual tokens, just return discrete token embeds
if not virtual_token_locations.any():
return discrete_token_embeds
if self.virtual_prompt_source == VirtualPromptSource.PROMPT_ENCODER:
# taskname_embeddings = self.word_embeddings(taskname_ids)
batch_size, _ = taskname_ids.size()
virtual_token_embeds = self.prompt_encoder(batch_size=batch_size, use_cached_reps=use_cached_reps)
else:
raise ValueError("invalid VirtualPromptSource.")
# Create index template specifying where virtual token embeddings should be placed
batch_size, _, embedding_size = discrete_token_embeds.shape
virtual_token_index = virtual_token_locations.nonzero().reshape((batch_size, -1, 2))[:, :, 1][:, :, None]
virtual_token_index = virtual_token_index.expand(
batch_size, self.total_new_task_virtual_tokens, embedding_size
)
# Make sure discrete_token_embeds and virtual_token_embeds share the same dtype
discrete_token_embeds = discrete_token_embeds.type(virtual_token_embeds.dtype)
# Insert virtual token embeddings where they belong among the discrete token embeddings
discrete_token_embeds.scatter_(1, virtual_token_index, virtual_token_embeds)
input_embeds = discrete_token_embeds
return input_embeds
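# Shape sketch (descriptive comment, not part of the original code): with batch size B,
# sequence length S, hidden size H and T total virtual tokens per example,
#   discrete_token_embeds: [B, S, H]
#   virtual_token_index:   [B, T, H]  (placeholder positions, expanded along H)
#   virtual_token_embeds:  [B, T, H]  (prompt encoder output)
# so scatter_ overwrites only the T placeholder rows of each example with the
# prompt-encoder outputs and leaves all discrete token embeddings untouched.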
def on_train_end(self):
# Save p-tuned prompts to prompt table for inference or future task training
self.save_to(save_path=self.cfg.nemo_path)
def setup(self, stage=None):
if stage == 'predict' and self.first_stage_of_pipeline():
self.freeze_existing_word_embeddings()
return
self.setup_test_data()
if stage == 'test':
return
if self.first_stage_of_pipeline():
if self.virtual_prompt_style == VirtualPromptStyle.P_TUNING:
if self.prompt_encoder is None:
self.init_prompt_encoder()
self.freeze_existing_word_embeddings()
self.setup_training_data()
self.setup_validation_data()
def setup_training_data(self, training_data_config=None):
if self.cfg.data.get('train_ds', None):
self._train_ds, self._train_dl = self.build_virtual_prompt_dataset(
dataset_paths=self.cfg.data.train_ds,
batch_size=self.cfg.global_batch_size,
for_train=True,
drop_last=True,
shuffle=True,
num_workers=self.cfg.data.num_workers,
pin_memory=True,
)
def setup_validation_data(self, validation_data_config=None):
if self.cfg.data.get('validation_ds', None):
self._validation_ds, self._validation_dl = self.build_virtual_prompt_dataset(
dataset_paths=self.cfg.data.validation_ds,
batch_size=self.cfg.get("validation_global_batch_size", self.cfg.global_batch_size),
for_train=True,
drop_last=self.cfg.get("validation_drop_last", True),
shuffle=False,
num_workers=self.cfg.data.num_workers,
pin_memory=True,
)
def setup_test_data(self, test_data_config=None):
if self.cfg.data.get('test_ds', None):
self._test_ds, self._test_dl = self.build_virtual_prompt_dataset(
dataset_paths=self.cfg.data.test_ds,
batch_size=self.cfg.get("validation_global_batch_size", self.cfg.global_batch_size),
for_train=False,
drop_last=False,
shuffle=False,
num_workers=self.cfg.data.num_workers,
pin_memory=True,
)
def _reconfigure_and_process_inference_batch(self, global_batch_size_per_gpu, gbs):
# This should happen only on the last batch of the dataset.
if global_batch_size_per_gpu != gbs // parallel_state.get_data_parallel_world_size():
# NOTE: This is reconfiguring to make sure there is no grad-acc for validation batches.
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=global_batch_size_per_gpu * parallel_state.get_data_parallel_world_size(),
micro_batch_size=global_batch_size_per_gpu,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
def _reconfigure_batch_sizes(self, gbs: int, mbs: int):
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=gbs,
micro_batch_size=mbs,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
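# Worked example (illustrative values): with gbs=128, mbs=4 and a data parallel size of 8,
# the microbatch calculator is reconfigured to 128 / (4 * 8) = 4 microbatches per step.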
def set_inference_config(self, inference_config):
self._inference_config = inference_config
def get_inference_config(self):
return self._inference_config
def set_input_tensor(self, input_tensor):
pass
def first_stage_of_pipeline(self):
pass
@classmethod
def list_available_models(cls):
pass
def load_frozen_model(self, cfg, trainer):
pass
def get_pseudo_tokens(num_virtual_tokens):
"""
Takes in an integer and returns a list of strings where each string
is a numbered virtual token placeholder. If
num_virtual_tokens = 3, then this function returns:
["<prompt_0>", "<prompt_1>", "<prompt_2>"]
Args:
num_virtual_tokens: (int) Number of virtual token strings you want to make
Returns: a list of virtual token placeholder strings.
"""
pseudo_tokens = [
VirtualPromptPlaceholderToken.BASE.value + str(i) + VirtualPromptPlaceholderToken.END.value
for i in range(num_virtual_tokens)
]
return pseudo_tokens
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_base_prompt_learning_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import itertools
import os
import re
from dataclasses import fields
from typing import Any, Dict, Optional, Union
import omegaconf
import torch
from omegaconf import OmegaConf, open_dict
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.plugins.precision import MixedPrecisionPlugin
from pytorch_lightning.trainer.connectors.logger_connector.fx_validator import _FxValidator
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common.megatron.attention import HAVE_FLASH_ATTENTION
from nemo.collections.nlp.modules.common.megatron.clip_grads import (
clip_grad_norm_distributed_optimizer,
clip_grad_norm_fp32,
)
from nemo.collections.nlp.modules.common.megatron.megatron_init import initialize_model_parallel_for_nemo
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.collections.nlp.parts import utils_funcs
from nemo.collections.nlp.parts.nlp_overrides import NEMO_MEGATRON_MODEL_PARALLEL_APPSTATE_OVERRIDE, GradScaler
from nemo.core.optim import MainParamsOptimizerWrapper, prepare_lr_scheduler
from nemo.utils import AppState, logging
from nemo.utils.get_rank import is_global_rank_zero
try:
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import ModelParallelConfig, parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
__all__ = ["MegatronBaseModel"]
class MegatronBaseModel(NLPModel):
"""
Megatron base class. All NeMo Megatron models inherit from this class.
- Initialize the model parallel world for nemo.
- Turn on all of the nvidia optimizations.
- If `cfg.tokenizer` is available, it loads the tokenizer and pads the vocab to the
correct size for tensor model parallelism.
- If using distributed optimizer, configure to be compatible
with O2 level optimizations and/or model parallelism.
- Perform gradient clipping: `grad_clip_pl_default` triggers
the PyTorch Lightning default implementation, `with_distributed_adam` triggers
the distributed optimizer's implementation, `megatron_amp_o2` triggers gradient clipping on the main grads,
and otherwise gradient clipping is performed on the model grads.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer, no_lm_init=True):
if not HAVE_MEGATRON_CORE:
raise ImportError(
"megatron-core was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
if trainer is None:
raise ValueError(f"Trainer cannot be None for Megatron-based models. Please provide a PTL trainer object.")
if cfg.get('use_flash_attention', False) and not HAVE_FLASH_ATTENTION:
raise ImportError(
"flash_attn was not found. Please see the installation instructions: https://github.com/HazyResearch/flash-attention."
"If you use flash_attn with triton. Please install triton==2.0.0.dev20221202."
)
# this prevents base constructor from initializing tokenizer
self.tokenizer = None
with open_dict(cfg):
if cfg.get('precision', None) is None and trainer is not None:
cfg.precision = trainer.precision
super().__init__(cfg, trainer=trainer, no_lm_init=no_lm_init)
# TODO: @maanug-nv consolidate into one attribute (requires lots of changes in subclasses)
self.torch_dtype = utils_funcs.torch_dtype_from_precision(self.cfg.precision) # Mixed precision datatype
self.autocast_dtype = self.torch_dtype # Mixed precision datatype
# instantiate weights in mixed precision datatype if using megatron amp O2
self.params_dtype = (
self.torch_dtype
if self.torch_dtype in [torch.bfloat16, torch.float16] and self.cfg.get('megatron_amp_O2', False)
else torch.float32
)
# set the megatron core model parallel config
self.model_parallel_config: ModelParallelConfig = self.build_model_parallel_config()
self.with_distributed_adam = cfg.optim.get('name') == 'distributed_fused_adam'
# used in NVIDIA NGC PyTorch containers
self._enable_nvidia_optimizations()
if self._cfg.get('use_cpu_initialization', False) is False:
torch.cuda.set_device(trainer.local_rank)
# buffer used during train_step for logging average loss over gradient accumulation steps
self._reduced_loss_buffer = []
# Overrides used when converting checkpoints
if os.environ.get(NEMO_MEGATRON_MODEL_PARALLEL_APPSTATE_OVERRIDE, "false").lower() == "true":
app_state = AppState()
init_world_size = app_state.tensor_model_parallel_size * app_state.pipeline_model_parallel_size
init_global_rank = app_state.global_rank
init_local_rank = app_state.local_rank
else:
init_world_size = trainer.world_size
init_global_rank = trainer.global_rank
init_local_rank = trainer.local_rank
# Set virtual pipeline size to None if it is 1 and
# confirm that the number of model chunks is the same across all pipeline stages.
vp_size = self.cfg.get('virtual_pipeline_model_parallel_size', None)
if vp_size is not None:
if vp_size == 1:
vp_size = None
else:
assert (
self.cfg.num_layers // self.cfg.pipeline_model_parallel_size
) % vp_size == 0, 'Make sure the number of model chunks is the same across all pipeline stages.'
initialize_model_parallel_for_nemo(
world_size=init_world_size,
global_rank=init_global_rank,
local_rank=init_local_rank,
tensor_model_parallel_size=cfg.get('tensor_model_parallel_size', 1),
pipeline_model_parallel_size=cfg.get('pipeline_model_parallel_size', 1),
virtual_pipeline_model_parallel_size=vp_size,
pipeline_model_parallel_split_rank=cfg.get('pipeline_model_parallel_split_rank', 0),
micro_batch_size=cfg.get('micro_batch_size'),
global_batch_size=cfg.get('global_batch_size'),
rampup_batch_size=cfg.get('rampup_batch_size', None),
use_fp8=cfg.get('fp8', False),
init_mpi_proc_group=cfg.get('ub_tp_comm_overlap', False),
seed=self.cfg.get('seed', 1234),
apex_transformer_log_level=self.cfg.get('apex_transformer_log_level', 30),
)
# This must be called after initialize model parallel since it needs to know the data parallel size
self._validate_and_override_config()
# set the megatron core model parallel config
self.model_parallel_config: ModelParallelConfig = self.build_model_parallel_config()
self.grad_clip_pl_default = False # use pytorch default for gradient clipping. Default False
if hasattr(self._cfg, "tokenizer") or (
hasattr(self._cfg, "encoder_tokenizer") and hasattr(self._cfg, "decoder_tokenizer")
):
# build tokenizer (defaults to nemo supported tokenizers)
self._build_tokenizer()
# manipulate vocabulary (e.g., pad vocabulary for better efficiency)
self._build_vocab()
# TODO: remove this when PTL 1.7.3 is released
_FxValidator.functions["configure_gradient_clipping"] = {
"allowed_on_step": (False, True),
"allowed_on_epoch": (False, True),
"default_on_step": True,
"default_on_epoch": False,
}
self.gc_interval = cfg.get('gc_interval', 0)
assert self.gc_interval >= 0, "gc_interval should be an integer value larger than or equal to 0."
# If gc_interval > 0, memory garbage collection is manually controlled.
# The automatic garbage collector should be disabled before training starts.
if self.gc_interval > 0:
gc.disable()
self.validation_global_step = 1
def _reconfigure_val_batches(self):
"""
Reconfigure trainer.limit_val_batches for pretraining
"""
# Override limit_val_batches to be a multiple of num microbatches and so there are limit_val_batches//num_micro_batches num of global batches
self.trainer.limit_val_batches *= get_num_microbatches()
# Override num sanity steps to be a multiple of num of microbatches
self.trainer.num_sanity_val_steps *= get_num_microbatches()
def _enable_nvidia_optimizations(self):
"These optimizations are present in NVIDIA NGC PyTorch Containers"
# NVIDIA container version check
nvidia_torch_version = os.getenv('NVIDIA_PYTORCH_VERSION', None)
if nvidia_torch_version is not None:
try:
NVIDIA_TORCH_MAJOR = int(nvidia_torch_version.split('.')[0])
except Exception:
NVIDIA_TORCH_MAJOR = 0
try:
NVIDIA_TORCH_MINOR = int(nvidia_torch_version.split('.')[1])
except Exception:
NVIDIA_TORCH_MINOR = 0
# Apex Persistent layer norm is supported from Nvidia PyTorch container v21.11
# This only depends on Apex version?
if NVIDIA_TORCH_MAJOR < 21 or (NVIDIA_TORCH_MAJOR == 21 and NVIDIA_TORCH_MINOR < 11):
self.cfg.persist_layer_norm = False
# NVFUSER available starting with 21.11
if NVIDIA_TORCH_MAJOR >= 21 or (NVIDIA_TORCH_MAJOR == 21 and NVIDIA_TORCH_MINOR >= 11):
# NVFUSER
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(True)
torch._C._debug_set_autodiff_subgraph_inlining(False)
else:
# Not an NVIDIA container. NVFUSER dependency check is left to the user
pass
def _build_tokenizer(self):
"""
Default tokenizer is based on available nemo tokenizers.
Override this method to use an external tokenizer.
All tokenizers are expected to provide compatible interface.
Override default Encoder-decoder tokenizer to use legacy=True for sentencepiece.
"""
if hasattr(self._cfg.tokenizer, "sentencepiece_legacy"):
legacy = self._cfg.tokenizer.sentencepiece_legacy
else:
legacy = True if self._cfg.tokenizer.library == 'sentencepiece' else False
self.tokenizer = get_nmt_tokenizer(
library=self._cfg.tokenizer.library,
model_name=self._cfg.tokenizer.type,
tokenizer_model=self.register_artifact("tokenizer.model", self._cfg.tokenizer.get('model', None)),
vocab_file=self.register_artifact("tokenizer.vocab_file", self._cfg.tokenizer.get('vocab_file', None)),
merges_file=self.register_artifact("tokenizer.merge_file", self._cfg.tokenizer.get('merge_file', None)),
use_fast=self.cfg.tokenizer.get('use_fast', False),
delimiter=self.cfg.tokenizer.get('delimiter', None),
special_tokens=self.cfg.tokenizer.get('special_tokens', None),
legacy=legacy,
)
if self._cfg.tokenizer.get('additional_special_tokens', None) is not None:
tokens_list = omegaconf.OmegaConf.to_object(self._cfg.tokenizer.additional_special_tokens)
self.tokenizer.add_special_tokens({'additional_special_tokens': tokens_list})
def on_train_start(self) -> None:
super().on_train_start()
self.init_global_step = self.trainer.global_step
def on_validation_start(self) -> None:
super().on_validation_start()
if self.gc_interval > 0:
gc.collect()
def on_validation_end(self) -> None:
super().on_validation_end()
if self.gc_interval > 0:
gc.collect()
def _build_vocab(self):
"""
Manipulate vocabulary (e.g., pad vocabulary for increased performance).
"""
# TODO: add config to allow to disable it?
self.padded_vocab_size = self._vocab_size_with_padding(
orig_vocab_size=self.tokenizer.vocab_size,
make_vocab_size_divisible_by=self._cfg.get('make_vocab_size_divisible_by', 128),
tensor_model_parallel_size=self._cfg.get('tensor_model_parallel_size', 1),
)
def _vocab_size_with_padding(self, orig_vocab_size, make_vocab_size_divisible_by, tensor_model_parallel_size):
"""Pad vocab size so it is divisible by model parallel size and
still having GPU friendly size."""
after = orig_vocab_size
multiple = make_vocab_size_divisible_by * tensor_model_parallel_size
while (after % multiple) != 0:
after += 1
logging.info(
f'Padded vocab_size: {after}, original vocab_size: {orig_vocab_size}, dummy tokens: {after - orig_vocab_size}.'
)
return after
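# Worked example (illustrative values): with orig_vocab_size=50257,
# make_vocab_size_divisible_by=128 and tensor_model_parallel_size=2 the multiple is 256,
# so the vocab is padded from 50257 up to 50432, i.e. 175 dummy tokens are added.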
def get_parameters_with_grad(self):
"""
Get all parameters with grad from optimizer param groups
"""
params = []
for param_group in self._optimizer_param_groups:
for param in param_group['params']:
if (
param.grad is not None
): # (@adithyare) adapter training with pp>1 can result in params with no grads
params.append(param)
return params
def configure_gradient_clipping(self, *args, **kwargs):
"""PTL hook to configure gradients.
We use gradient clipping implementation from megatron-lm.
"""
clip_val = self.trainer.gradient_clip_val
if clip_val is None:
return
clip_val = float(clip_val)
if clip_val <= 0:
return
if self.grad_clip_pl_default:
# use the default behavior
return super().configure_gradient_clipping(*args, **kwargs)
if self.with_distributed_adam:
grad_norm = clip_grad_norm_distributed_optimizer(self._optimizer, clip_val)
else:
if self.megatron_amp_o2:
# gather fp32 master parameters for gradient clipping
parameters = self._optimizer.get_parameters_with_grad()
else:
parameters = self.get_parameters_with_grad()
grad_norm = clip_grad_norm_fp32(parameters=parameters, max_norm=clip_val)
self.log('grad_norm', grad_norm, rank_zero_only=True, batch_size=1)
def allreduce_gradients(self):
"""Reduce gradients across data parallel ranks.
Modified from megatron-lm: https://github.com/NVIDIA/Megatron-LM/blob/d41696840ed0a7edb7e0499eb82a48ae112d9bb3/megatron/model/distributed.py#L188
"""
# Bucketize and all-reduce
buckets = {}
for param in self.parameters():
if param.requires_grad and param.grad is not None:
tp = param.data.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
# param.main_grad = param.grad
# For each bucket, all-reduce and copy all-reduced grads.
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = torch._utils._flatten_dense_tensors(grads)
coalesced /= parallel_state.get_data_parallel_world_size()
torch.distributed.all_reduce(coalesced, group=parallel_state.get_data_parallel_group())
for buf, synced in zip(grads, torch._utils._unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
def reduce_overlap_gradients(self, params=None):
"""Reduce grads if overlapped grad sync is enabled
Used for pipeline parallelism with the distributed Adam
optimizer. In the first pipeline stage, the grad sync is
overlapped with the final backward pass. In other pipeline
stages, the grad sync is deferred until the bubble overhead.
"""
if self.with_distributed_adam and self._optimizer.overlap_grad_sync:
if params is None:
params = self._optimizer.parameters()
self._optimizer.try_grad_sync(params)
def sync_overlap_parameters(self, params=None):
if self.with_distributed_adam:
self._optimizer._try_start_bucket_param_sync(params)
def on_train_batch_end(self, outputs, dataloader_iter: Any, batch_idx: int, unused: Optional[int] = 0) -> None:
super().on_train_batch_end(outputs, dataloader_iter, batch_idx)
# TODO: Replace with newer override for scheduler.step() instead of
# searching for plugins for the fp16 GradScaler
if self.trainer.precision_plugin is not None and isinstance(
self.trainer.precision_plugin, MixedPrecisionPlugin
):
precision_plugin = self.trainer.precision_plugin
if (
hasattr(precision_plugin, 'scaler')
and precision_plugin.scaler is not None
and isinstance(precision_plugin.scaler, GradScaler)
):
grad_scaler = precision_plugin.scaler
# If the grad scaler skipped its optimizer step due to infs/nans,
# decrement the step of all schedulers.
if grad_scaler.optimizer_update_skipped is not None and grad_scaler.optimizer_update_skipped is True:
scheduler_cfgs = self.trainer.lr_scheduler_configs
if not scheduler_cfgs or not self.trainer.lightning_module.automatic_optimization:
return
for scheduler_cfg in scheduler_cfgs:
# Decrement the counter by 2, then perform a scheduler.step() to perform a no-op
# as well as update the optimizer lr in all param groups
scheduler_cfg.scheduler.last_epoch -= 2
scheduler_cfg.scheduler.step()
# Removing the line below because it messes up train_valid_test_num_samples calculation.
# self.trainer.fit_loop.max_steps = self.trainer.fit_loop.max_steps + 1
# Reset the optimizer update skipped to `None` - this is to prevent scheduler no-ops during
# accumulated gradient updates.
grad_scaler.optimizer_update_skipped = None
if self.gc_interval > 0 and (self.trainer.global_step % self.gc_interval == 0):
gc.collect()
def on_validation_batch_end(self, outputs, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> None:
super().on_validation_batch_end(outputs, batch, batch_idx, dataloader_idx)
if self.gc_interval > 0:
if self.validation_global_step % self.gc_interval == 0:
gc.collect()
self.validation_global_step += 1
def setup_optimization(
self, optim_config: Optional[Union[DictConfig, Dict]] = None, optim_kwargs: Optional[Dict[str, Any]] = None,
):
optim_kwargs = {} if optim_kwargs is None else optim_kwargs.copy()
if self.with_distributed_adam:
# Allocate contiguous buffer to avoid extra copies
optim_kwargs['contiguous_grad_buffer'] = True
# Make sure optimizer state is in FP32
optim_dtype = torch.float32
optim_kwargs['dtype'] = optim_dtype
# Make sure embedding grad reductions are in FP32
for name, param in self.named_parameters():
if 'word_embedding' in name or 'position_embedding' in name or 'output_layer' in name:
param._with_fp32_optimizer = True
# Match param allgather with model dtype
model_dtype = torch.float32
if self.megatron_amp_o2 and hasattr(self, 'autocast_dtype'):
model_dtype = self.autocast_dtype
optim_kwargs['param_sync_dtype'] = model_dtype
# Determine whether to store master params in optimizer
if optim_dtype == model_dtype:
optim_kwargs['store_params'] = False
elif optim_dtype == torch.float32 and model_dtype == torch.bfloat16:
optim_kwargs['store_params'] = False
optim_kwargs['store_param_remainders'] = True
else:
optim_kwargs['store_params'] = True
return super().setup_optimization(optim_config=optim_config, optim_kwargs=optim_kwargs)
def configure_optimizers(self):
self.setup_optimization()
# Wrap the baseline optimizer with the optimizer class with master parameters
if self.megatron_amp_o2 and not self.with_distributed_adam and self._optimizer is not None:
if self.torch_dtype == torch.bfloat16:
fp32_grad_accum = True
contiguous_grad_bucket = True
elif self.torch_dtype == torch.float16:
fp32_grad_accum = False
# TODO: contiguous grad bucket for fp16 is also planned to be supported
contiguous_grad_bucket = False
raise ValueError(
"fp16 training is not yet supported with O2. Please set megatron_amp_O2 to False in the model config."
)
# if using tensor parallel only, we automatically use async grad all-reduce
# if using pipeline parallel or sequence parallel or gradient accumulation fusion, then we disable it
if self.cfg.get('pipeline_model_parallel_size', 1) == 1 and not (
self.cfg.get('sequence_parallel', False) or self.cfg.get('gradient_accumulation_fusion', False)
):
async_grad_allreduce = True
else:
async_grad_allreduce = False
if async_grad_allreduce:
# we need this to be configurable until make_nccl_premul_sum is in public PyTorch.
# currently cannot be imported in PyTorch 1.12.0
grad_div_ar_fusion = self.cfg.get('grad_div_ar_fusion', False)
else:
grad_div_ar_fusion = False
self._optimizer = MainParamsOptimizerWrapper(
self._optimizer,
fp32_grad_accum=fp32_grad_accum,
contiguous_grad_bucket=contiguous_grad_bucket,
async_grad_allreduce=async_grad_allreduce,
grad_div_ar_fusion=grad_div_ar_fusion,
grad_allreduce_chunk_size_mb=self.cfg.get('grad_allreduce_chunk_size_mb', 125),
)
assert self._trainer.max_steps is not None, "'max_steps' is missing in trainer config."
if hasattr(self._cfg.optim, 'sched'):
sched_config = self._cfg.optim.sched
sched_config['max_steps'] = self._trainer.max_steps
self._scheduler = prepare_lr_scheduler(
optimizer=self._optimizer, scheduler_config=sched_config, train_dataloader=self._train_dl
)
# Configure distributed optimizer
if self.with_distributed_adam:
# Initialize param buckets if explicitly provided
if hasattr(self, 'distributed_adam_buckets'):
for bucket in self.distributed_adam_buckets:
self._optimizer.init_params_bucket(bucket)
del self.distributed_adam_buckets
# Make sure all params are initialized so main grads are
# available
# Note: Consolidate grads without overlap
overlap_params = []
no_overlap_params = []
for p in self.parameters():
if getattr(p, '_disable_overlap_grad_sync', False):
no_overlap_params.append(p)
else:
overlap_params.append(p)
self._optimizer.init_params(reversed(overlap_params))
self._optimizer.init_params(reversed(no_overlap_params))
# Initialize contiguous parameter buffer
if self._optimizer.contiguous_param_buffer:
self._optimizer.init_param_buffer()
if self._scheduler is None:
return self._optimizer
else:
return [self._optimizer], [self._scheduler]
def compute_consumed_samples(self, steps_since_resume=0):
app_state = AppState()
if self.cfg.get('rampup_batch_size', None):
from apex.transformer.pipeline_parallel.utils import _GLOBAL_NUM_MICROBATCHES_CALCULATOR
current_global_batch_size = getattr(_GLOBAL_NUM_MICROBATCHES_CALCULATOR, 'current_global_batch_size', 1)
consumed_samples = self.prev_consumed_samples + self.if_first_step * current_global_batch_size
else:
consumed_samples = (
self.init_consumed_samples
+ steps_since_resume
* app_state.data_parallel_size
* self.cfg.micro_batch_size
* get_num_microbatches()
)
return int(consumed_samples)
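# Worked example (illustrative values): with data_parallel_size=4, micro_batch_size=2 and
# 8 microbatches per step, each step consumes 4 * 2 * 8 = 64 samples, so 100 steps after
# resuming gives consumed_samples = init_consumed_samples + 6400.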
def _compute_consumed_samples_after_training_step(self):
# Add +1 to account for the current batch, which is not counted yet in `trainer.global_step`.
return self.compute_consumed_samples(self.trainer.global_step + 1 - self.init_global_step)
def _extract_consumed_samples_from_ckpt(self, ckpt_path):
try:
init_consumed_samples = int(float(re.findall(r"consumed_samples\=([0-9]+.[0-9]+)", ckpt_path)[0]))
except (ValueError, TypeError, IndexError):
logging.warning("Cannot parse the checkpoint file to get the consumed samples. assume it is zero.")
init_consumed_samples = 0
return init_consumed_samples
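# Illustrative example (hypothetical file name): a checkpoint path containing
# "consumed_samples=64000.0" yields init_consumed_samples = 64000, while any path
# without that pattern falls back to 0 with the warning above.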
def _validate_and_override_config(self):
""" Certain configurations might be incompatible or discouraged.
We can check for them here and override if necessary.
"""
app_state = AppState()
if self.cfg.get('sequence_parallel', False) and self.cfg.get('tensor_model_parallel_size', 1) == 1:
logging.info(
"Sequence parallel should only be used with tensor parallel size > 1. Setting sequence parallel to False"
)
with open_dict(self.cfg):
self.cfg.sequence_parallel = False
# Gradient accumulation fusion does not work with our baseline implementation of
# async grad allreduce. This should be fixed!
# For now we must disable it whenever using the baseline implementation.
# The distributed adam from apex does work with gradient accumulation fusion.
distributed_fused_adam = self.cfg.optim.get('name', 'fused_adam') == 'distributed_fused_adam'
pipeline_model_parallel_size = self.cfg.get('pipeline_model_parallel_size', 1)
data_parallel_size = app_state.data_parallel_size
if self.cfg.get('gradient_accumulation_fusion', False):
if data_parallel_size > 1 and pipeline_model_parallel_size == 1 and not distributed_fused_adam:
logging.info(
"When not using pipeline model parallel, gradient accumulation fusion can only be used with distributed_fused_adam."
)
with open_dict(self.cfg):
self.cfg.gradient_accumulation_fusion = False
if self.cfg.get('gradient_accumulation_fusion', False) and not self.cfg.get('megatron_amp_O2', False):
logging.info("Gradient accumulation fusion can only be used with megatron amp O2 mixed precision.")
with open_dict(self.cfg):
self.cfg.gradient_accumulation_fusion = False
if self.cfg.get('use_emha', False):
raise ValueError('use_emha is not yet supported please set to False')
vp_size = self.cfg.get('virtual_pipeline_model_parallel_size', None)
if vp_size is not None:
if vp_size == 1:
self.cfg['virtual_pipeline_model_parallel_size'] = None
else:
assert (
self.cfg.num_layers // self.cfg.pipeline_model_parallel_size
) % vp_size == 0, 'Make sure the number of model chunks is the same across all pipeline stages.'
if self.cfg.get('ub_tp_comm_overlap', False):
if not self.cfg.get('transformer_engine', False) or not self.cfg.get('sequence_parallel', False):
logging.info(
"Userbuffer tensor-parallel communication overlap is available with both Transformer Engine and sequence-parallelism."
)
with open_dict(self.cfg):
self.cfg.ub_tp_comm_overlap = False
def is_data_parallel_rank_zero(self):
if is_global_rank_zero():
return True
else:
try:
data_parallel_rank = parallel_state.get_data_parallel_rank()
except:
data_parallel_rank = None
if data_parallel_rank is not None and data_parallel_rank == 0:
return True
else:
return False
def _get_total_params_across_model_parallel_groups_gpt_bert(self, model):
"""Returns the total number of parameters across all model parallel groups."""
# log number of parameters
if isinstance(model, list):
num_parameters_on_device = sum(
[sum([p.nelement() for p in model_module.parameters()]) for model_module in model]
)
if (
parallel_state.get_pipeline_model_parallel_world_size() > 1
and parallel_state.is_pipeline_last_stage(ignore_virtual=True)
and self.cfg.get('share_embeddings_and_output_weights', True)
):
word_embeddings_weight = (
model[-1].module.shared_embedding_or_output_weight()
if getattr(self, 'mcore_gpt', False)
else model[-1].word_embeddings_weight()
)
# subtract the embedding weights on the last virtual stage
num_word_embedding_parameters = sum([p.nelement() for p in word_embeddings_weight])
num_parameters_on_device -= num_word_embedding_parameters
else:
num_parameters_on_device = sum([p.nelement() for p in model.parameters()])
if (
parallel_state.get_pipeline_model_parallel_world_size() > 1
and parallel_state.is_pipeline_last_stage(ignore_virtual=True)
and self.cfg.get('share_embeddings_and_output_weights', True)
):
word_embeddings_weight = (
model.module.shared_embedding_or_output_weight()
if getattr(self, 'mcore_gpt', False)
else model.word_embeddings_weight()
)
# subtract the embedding weights on the last stage
num_word_embedding_parameters = sum([p.nelement() for p in word_embeddings_weight])
num_parameters_on_device -= num_word_embedding_parameters
# to be summed across data parallel group
total_num_parameters = torch.tensor(num_parameters_on_device).cuda()
torch.distributed.all_reduce(total_num_parameters, group=parallel_state.get_model_parallel_group())
return num_parameters_on_device, total_num_parameters
def _get_total_params_across_model_parallel_groups_enc_dec(self, model):
"""Returns the total number of parameters across all model parallel groups."""
# log number of parameters
# TODO: If/when we add interleaved model parallelism, we will need to add another if/else here.
num_parameters_on_device = sum([p.nelement() for p in model.parameters()])
if parallel_state.get_pipeline_model_parallel_world_size() > 1 and (
parallel_state.get_pipeline_model_parallel_rank() == self.cfg.get('pipeline_model_parallel_split_rank', 0)
or parallel_state.is_pipeline_last_stage()
):
# If the current rank is in the decoder first stage (decoder emb) or the last rank (output layer), subtract those weights since they are already accounted for in the encoder first stage.
# TODO: If we support embedding untying with PP > 1, we will need to update this.
num_word_embedding_parameters = sum([p.nelement() for p in model.word_embeddings_weight()])
num_parameters_on_device -= num_word_embedding_parameters
# Subtract decoder position embedding params that are shared with encoder.
if (
parallel_state.is_pipeline_stage_at_split()
and self.cfg.encoder.get("position_embedding_type", "learned_absolute") == "learned_absolute"
):
num_position_embedding_parameters = sum([p.nelement() for p in model.position_embeddings_weight()])
num_parameters_on_device -= num_position_embedding_parameters
# Check and remove RPE embeddings from the encoder that are replicated.
if (
parallel_state.get_pipeline_model_parallel_world_size() > 1
and parallel_state.is_pipeline_stage_before_split()
and not parallel_state.is_pipeline_first_stage()
and self.cfg.encoder.get("position_embedding_type", "learned_absolute") == "relative"
):
# subtract the RPE params on intermediate pipeline stages.
num_rpe_params = sum([p.nelement() for p in model.encoder_relative_position_embeddings_weight()])
num_parameters_on_device -= num_rpe_params
# Check and remove RPE embeddings from the decoder that are replicated.
if (
parallel_state.get_pipeline_model_parallel_world_size() > 1
and parallel_state.is_pipeline_stage_after_split()
and not parallel_state.is_pipeline_stage_at_split()
and self.cfg.encoder.get("position_embedding_type", "learned_absolute") == "relative"
):
# subtract the RPE params on intermediate pipeline stages.
num_rpe_params = sum([p.nelement() for p in model.decoder_relative_position_embeddings_weight()])
num_parameters_on_device -= num_rpe_params
# to be summed across data parallel group
total_num_parameters = torch.tensor(num_parameters_on_device).cuda()
torch.distributed.all_reduce(total_num_parameters, group=parallel_state.get_model_parallel_group())
return num_parameters_on_device, total_num_parameters
def build_model_parallel_config(self) -> ModelParallelConfig:
""" For attributes in the nemo model config that are the same as the
megatron core ModelParallelConfig we will use the value from the nemo config.
For attributes in ModelParallelConfig that are not in the nemo model config, we add custom logic.
"""
cfg = OmegaConf.to_container(self.cfg, resolve=True)
# map precision related configs
precision = cfg.get('precision', 32) # PTL trainer precision
megatron_amp_O2 = cfg.get('megatron_amp_O2', False)
# dtype used in p2p communication
pipeline_dtype = self.torch_dtype
# maps NeMo model configs to ModelParallelConfig from megatron core
config_mapping = {
"perform_initialization": True, # initailize weights when constructing the module
"fp16": self.torch_dtype == torch.float16
and megatron_amp_O2,  # NeMo does not currently support fp16 training with megatron amp O2; eval and inference are supported
"bf16": self.torch_dtype == torch.bfloat16 and megatron_amp_O2,
"params_dtype": self.params_dtype,
"timers": None, # NeMo does not currently support megatron core timers
"async_tensor_model_parallel_allreduce": self.cfg.get('tensor_model_parallel_world_size', 1) > 1
and not self.cfg.get('sequence_parallel', False),
"pipeline_dtype": pipeline_dtype,
"grad_scale_func": self.trainer.precision_plugin.scaler.scale
if self.torch_dtype == torch.float16
else None,
"enable_autocast": not megatron_amp_O2 and self.torch_dtype in [torch.bfloat16, torch.float16],
"autocast_dtype": self.autocast_dtype,
"variable_seq_lengths": False, # set dynamically during training
"num_microbatches_with_partial_activation_checkpoints": self.cfg.get(
'num_micro_batches_with_partial_activation_checkpoints', None
),
"batch_p2p_sync": True, # call torch.cuda.synchronize() after batch isend/rcv
"use_ring_exchange_p2p": False, # not supported in NeMo
"deallocate_pipeline_outputs": False, # not supported in NeMo
"no_sync_func": None, # set dynamically during training
"grad_sync_func": None, # set dynamically during training
"param_sync_func": None, # set dynamically during training
}
# instantiate ModelParallelConfig from this dict
mp_config_dict = {}
for field in fields(ModelParallelConfig):
# model config has priority
if field.name in cfg:
mp_config_dict[field.name] = cfg[field.name]
# then config_mapping
elif field.name in config_mapping:
mp_config_dict[field.name] = config_mapping[field.name]
else:
logging.warning(
f"The model: {self} does not have field.name: {field.name} in its cfg. "
f"Add this key to cfg or config_mapping to make to make it configurable."
)
model_parallel_config = ModelParallelConfig(**mp_config_dict)
try:
# hidden size is needed for pipeline schedules but is not currently in ModelParallelConfig
setattr(model_parallel_config, 'hidden_size', self.cfg.hidden_size)
except AttributeError:
logging.warning(
f'hidden_size not found in {self.cfg}. Set this in model_parallel_config if using pipeline parallelism.'
)
return model_parallel_config
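# Precedence sketch (summary of the mapping above): for each ModelParallelConfig field,
# an explicit value in the NeMo model cfg wins, then the config_mapping defaults, and a
# field covered by neither is left to megatron-core's own default (after a warning).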
def _val_iterator_done(self, iterator):
"""
Check if the iterator is exhausted; if so, return a flag so the caller can exit validation_step. The peeked element is chained back onto the iterator.
"""
try:
element = next(iterator)
except StopIteration:
return iterator, True
# reinsert the element back to the iterator
return itertools.chain([element], iterator), False
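# Usage sketch (illustrative, not from the original file): subclasses typically call
#   dataloader_iter, done = self._val_iterator_done(dataloader_iter)
#   if done:
#       return
# at the top of validation_step; the peeked element is chained back in, so nothing is lost.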
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_base_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.data.glue_benchmark.glue_benchmark_dataset import (
TextToTextGLUEDataset,
TextToTextXNLIDataset,
)
from nemo.collections.nlp.models.language_modeling.megatron_finetune_model import MegatronT5FinetuneModel
from nemo.utils import logging
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
__all__ = ['MegatronT5GLUEModel']
class MegatronT5GLUEModel(MegatronT5FinetuneModel):
"""GLUE Model that Inherits from MegatronT5FinetuneModel and overrides the dataset building."""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer=trainer)
def _build_dataset(self, data_cfg, check_implict_grad_acc=False):
if (
check_implict_grad_acc
and data_cfg.global_batch_size > data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size()
):
raise ValueError(
f'You are trying to use "implicit gradient accumulation" of {data_cfg.global_batch_size // (data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size())} in your validation/test datasets. This is not supported. Please set global_batch_size equal to micro_batch_size * data_parallel_world_size.'
)
if data_cfg.task_name == 'xnli':
dataset = TextToTextXNLIDataset(
data_cfg.file_path,
task_name=data_cfg.task_name,
tokenizer=self.tokenizer,
max_seq_length=data_cfg.max_seq_length,
lang_list=self.cfg.eval_languages,
)
else:
dataset = TextToTextGLUEDataset(
data_cfg.file_path,
task_name=data_cfg.task_name,
tokenizer=self.tokenizer,
max_seq_length=data_cfg.max_seq_length,
)
return dataset
def build_train_valid_test_datasets(self, stage):
logging.info('Building GLUE/XNLI datasets.')
if stage != 'test':
# Wrap this in a list since the general finetuning parent class supports multi-validation.
self._validation_ds = [self._build_dataset(self.cfg.data.validation_ds, check_implict_grad_acc=True)]
logging.info(f'Length of val dataset: {len(self._validation_ds[0])}')
if stage != 'validate':
if hasattr(self.cfg.data, 'test_ds'):
# Wrap this in a list since the general finetuning parent class supports multi-validation.
self._test_ds = [self._build_dataset(self.cfg.data.test_ds, check_implict_grad_acc=True)]
logging.info(f'Length of test dataset: {len(self._test_ds[0])}')
if stage == 'validate' or stage == 'test':
return
self._train_ds = self._build_dataset(self.cfg.data.train_ds, check_implict_grad_acc=False)
logging.info(f'Length of train dataset: {len(self._train_ds)}')
logging.info(f'Finished building GLUE/XNLI datasets.')
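# Illustrative sketch (field names inferred from the accesses above): each of
# cfg.data.train_ds / validation_ds / test_ds is expected to provide at least
# file_path, task_name and max_seq_length; validation/test configs also need
# global_batch_size and micro_batch_size for the implicit grad-acc check, and the
# XNLI task additionally relies on the model-level cfg.eval_languages list.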
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_glue_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import inspect
from typing import Any, Dict, List, Optional
import torch
from omegaconf import OmegaConf, open_dict
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.accelerators import CPUAccelerator
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.data.language_modeling.megatron.data_samplers import (
MegatronPretrainingRandomSampler,
MegatronPretrainingSampler,
)
from nemo.collections.nlp.models.language_modeling.megatron_base_model import MegatronBaseModel
from nemo.collections.nlp.modules.common.megatron.build_model import build_model
from nemo.collections.nlp.modules.common.megatron.module import Float16Module
from nemo.collections.nlp.modules.common.megatron.token_level_encoder_decoder import (
MegatronTokenLevelEncoderDecoderModule,
)
from nemo.collections.nlp.modules.common.megatron.utils import (
ApexGuardDefaults,
average_losses_across_data_parallel_group,
get_params_for_weight_decay_optimization,
)
from nemo.collections.nlp.modules.common.text_generation_utils import (
compute_beam_search_len_penalty,
get_sampling_token_fn,
)
from nemo.collections.nlp.parts.utils_funcs import get_last_rank
from nemo.utils import AppState, logging
try:
from apex.transformer.pipeline_parallel.utils import (
_reconfigure_microbatch_calculator,
get_micro_batch_size,
get_num_microbatches,
)
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import parallel_state, tensor_parallel
from megatron.core.enums import ModelType
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
__all__ = ["MegatronLMEncoderDecoderModel"]
class MegatronLMEncoderDecoderModel(MegatronBaseModel):
"""
Megatron encoder-decoder base class
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer=trainer)
if cfg.get('pipeline_model_parallel_size', 1) > 1:
if cfg.get('pipeline_model_parallel_split_rank', 0) <= 0:
raise ValueError(
f"pipeline_model_parallel_split_rank must be > 0 when using pipeline_model_parallel_size > 1"
)
if cfg.get('pipeline_model_parallel_size', 1) > 1:
if not cfg.get('share_token_embeddings', True) or not cfg.get(
'share_decoder_tokens_head_embeddings', True
):
raise ValueError(
"when pipeline_model_parallel_size > 1 we require share_token_embeddings=True and share_decoder_tokens_head_embeddings=True"
)
# Make sure trainer.accumulate_grad_batches is 1.
self._validate_trainer()
# TODO: Currently does not support interleaved pipeline parallelism.
# This means we can only use pipeline parallelism without the interleaved schedule.
if isinstance(self.trainer.accelerator, CPUAccelerator):
logging.warning("Using CPUAccelerator, model will be built on CPU.")
self.enc_dec_model = build_model(
model_provider_func=self.model_provider_func,
wrap_with_ddp=False,
on_cpu=True,
model_type=ModelType.encoder_and_decoder,
)[0]
else:
self.enc_dec_model = build_model(
model_provider_func=self.model_provider_func,
wrap_with_ddp=False,
model_type=ModelType.encoder_and_decoder,
)[0]
# We don't need to call it explicitly? Since it is a pytorch lightning hook function
# self.setup_optimizer_param_groups()
self.megatron_amp_o2 = cfg.get('megatron_amp_O2', False)
if self.megatron_amp_o2:
if not self.with_distributed_adam:
# Pre-allocate the model on GPU to have master parameters allocated on the same device with matching data type
self.enc_dec_model.cuda(torch.cuda.current_device())
# Model wrapper to convert both model and inputs to half precision
self.enc_dec_model = Float16Module(
config=self.model_parallel_config, module=self.enc_dec_model, precision=self.cfg.precision
)
self.enable_autocast = (
True if (not self.megatron_amp_o2) and (self.autocast_dtype in [torch.float16, torch.bfloat16]) else False
)
self.enc_dec_model.model_type = ModelType.encoder_and_decoder
def setup_optimizer_param_groups(self):
"""ModelPT override. Optimizer will get self._optimizer_param_groups"""
self._optimizer_param_groups = get_params_for_weight_decay_optimization([self.enc_dec_model])
def configure_optimizers(self):
if self.with_distributed_adam:
# Identify params that require grad reductions between
# pipeline stages
# See: allreduce_word_and_position_embeddings
model_parallel_params = []
if parallel_state.get_pipeline_model_parallel_world_size() > 1 and (
parallel_state.is_rank_in_embedding_group()
):
if self.cfg.get('share_token_embeddings', True) and self.cfg.get(
'share_decoder_tokens_head_embeddings', True
):
model_parallel_params.append(self.enc_dec_model.word_embeddings_weight())
if (
parallel_state.is_rank_in_position_embedding_group()
and parallel_state.get_pipeline_model_parallel_world_size() > 1
and parallel_state.get_pipeline_model_parallel_split_rank() is not None
and self.cfg.encoder.get('position_embedding_type') == 'learned_absolute'
and self.cfg.decoder.get('position_embedding_type') == 'learned_absolute'
):
if self.cfg.get('share_token_embeddings', True):
model_parallel_params.append(self.enc_dec_model.position_embeddings_weight())
if (
parallel_state.get_pipeline_model_parallel_world_size() > 2
and parallel_state.get_pipeline_model_parallel_split_rank() is not None
):
if (
self.cfg.encoder.get('position_embedding_type') == 'relative'
and parallel_state.is_rank_in_encoder_relative_position_embedding_group()
and parallel_state.get_pipeline_model_parallel_split_rank() > 1
):
model_parallel_params.append(self.enc_dec_model.encoder_relative_position_embeddings_weight())
if (
self.cfg.decoder.get('position_embedding_type') == 'relative'
and parallel_state.is_rank_in_decoder_relative_position_embedding_group()
):
model_parallel_params.append(self.enc_dec_model.decoder_relative_position_embeddings_weight())
if not self.cfg.decoder.get('relative_position_bias_self_attention_only', True):
model_parallel_params.append(
self.enc_dec_model.decoder_cross_attention_relative_position_embeddings_weight()
)
# Disable async grad reductions for params that are
# synchronized for pipeline parallelism
for param in model_parallel_params:
param._disable_greedy_grad_copy = not self.megatron_amp_o2
param._disable_overlap_grad_sync = True
return super().configure_optimizers()
def _handle_bias_activation_fusion_args(self, cfg):
# For the oldest models, we don't have the option to turn bias activation fusion on/off. It is always on.
if not hasattr(cfg, 'bias_gelu_fusion') and not hasattr(cfg, 'bias_activation_fusion'):
# Handle the case where the model can have bias=False
if cfg.get('bias', True):
cfg.bias_activation_fusion = True
else:
cfg.bias_activation_fusion = False
# For in-between models, Re-map bias_gelu_fusion to bias_activation_fusion
elif hasattr(cfg, 'bias_gelu_fusion'):
logging.warning('bias_gelu_fusion is deprecated. Please use bias_activation_fusion instead.')
cfg.bias_activation_fusion = cfg.bias_gelu_fusion
def _populate_encoder_decoder_configs_for_backward_compatibility(self, cfg):
"""
Populate encoder and decoder configs for backward compatibility with a checkpoint that has a common enc/dec config.
"""
# TODO: This will not remove redundant args that are already present in the new yaml file's config.model
encoder_cfg = copy.deepcopy(cfg)
decoder_cfg = copy.deepcopy(cfg)
OmegaConf.set_struct(encoder_cfg, True)
OmegaConf.set_struct(decoder_cfg, True)
OmegaConf.set_struct(cfg, True)
with open_dict(encoder_cfg), open_dict(decoder_cfg), open_dict(cfg):
encoder_cfg.arch = cfg.get('encoder_arch', 'transformer')
decoder_cfg.arch = cfg.get('decoder_arch', 'transformer')
self._handle_bias_activation_fusion_args(encoder_cfg)
self._handle_bias_activation_fusion_args(decoder_cfg)
cfg.encoder = encoder_cfg
cfg.decoder = decoder_cfg
# NOTE: For old models there are two scenarios:
# 1. If we share decoder embeddings with the output layer, we would always set tokens_head_bias=True
# 2. If we do not share decoder embeddings with the output layer, we would always set tokens_head_bias=False
cfg.tokens_head_bias = (
True if cfg.get('share_decoder_tokens_head_embeddings', True) else False
) # For models before separate encoder/decoder configs, tokens_head_bias was always True.
def model_provider_func(self, pre_process, post_process, add_encoder, add_decoder):
if not hasattr(self.cfg, 'encoder') or not hasattr(self.cfg, 'decoder'):
logging.warning(
'Could not find encoder or decoder in config. This is probably because of restoring an old checkpoint. Copying shared model configs to encoder and decoder configs.'
)
# After the call below, self.cfg.encoder and self.cfg.decoder will be populated with the cfg.model configs from old checkpoints.
self._populate_encoder_decoder_configs_for_backward_compatibility(self.cfg)
if parallel_state.get_pipeline_model_parallel_world_size() > 1 and self.cfg.encoder.arch == 'perceiver':
raise ValueError(f"Perceivers with pipeline parallel > 1 is not supported yet.")
if not hasattr(self.cfg, 'embedding_init_method_std'):
embedding_init_method_std = self.cfg.encoder.init_method_std
else:
embedding_init_method_std = self.cfg.embedding_init_method_std
if not hasattr(self.cfg, 'embedding_dropout'):
embedding_dropout = self.cfg.encoder.hidden_dropout
else:
embedding_dropout = self.cfg.embedding_dropout
model = MegatronTokenLevelEncoderDecoderModule(
config=self.model_parallel_config,
encoder_cfg=self.cfg.encoder,
decoder_cfg=self.cfg.decoder,
vocab_size=self.padded_vocab_size,
max_position_embeddings=self.cfg.max_position_embeddings,
num_tokentypes=0,
parallel_output=True,
pre_process=pre_process,
post_process=post_process,
fp16_cross_entropy=self.cfg.get('fp16_lm_cross_entropy', False),
megatron_amp_O2=self.cfg.get('megatron_amp_O2', False),
precision=self.cfg.get('precision', 16),
embedding_init_method_std=embedding_init_method_std,
embedding_dropout=embedding_dropout,
label_smoothing=self.cfg.get('label_smoothing', 0.0),
add_encoder=add_encoder,
add_decoder=add_decoder,
share_token_embeddings=self.cfg.get('share_token_embeddings', True),
share_decoder_tokens_head_embeddings=self.cfg.get('share_decoder_tokens_head_embeddings', True),
tokens_head_bias=self.cfg.get('tokens_head_bias', True),
hiddens_cfg=self.cfg.get('hiddens', None),
)
return model
def forward(
self,
encoder_input_ids,
decoder_input_ids,
encoder_attn_mask,
decoder_attn_mask,
token_type_ids=None,
lm_labels=None,
enc_output=None,
enc_output_attn_mask=None,
output_enc_hidden_only=False,
enc_input=None,
):
output_tensor = self.enc_dec_model(
enc_input_ids=encoder_input_ids,
dec_input_ids=decoder_input_ids,
enc_attn_mask=encoder_attn_mask,
dec_attn_mask=decoder_attn_mask,
token_type_ids=token_type_ids,
labels=lm_labels,
enc_output=enc_output,
enc_output_attn_mask=enc_output_attn_mask,
output_enc_hidden_only=output_enc_hidden_only,
enc_input=enc_input,
)
return output_tensor
def _execute_fwd_bwd_function(self, data_iterator, forward_only, tensor_shape, decoder_seq_length):
"""
An auxiliary function that executes the fwd_bwd_step function and parses the returned values.
"""
fwd_bwd_function = get_forward_backward_func()
seq_length = tensor_shape[0]
losses_reduced_per_micro_batch = fwd_bwd_function(
forward_step_func=self.get_forward_output_and_loss_func(),
data_iterator=data_iterator,
model=[self.enc_dec_model],
num_microbatches=get_num_microbatches(),
forward_only=forward_only,
seq_length=seq_length,
micro_batch_size=get_micro_batch_size(),
decoder_seq_length=decoder_seq_length,
)
# only the last stages of the pipeline return losses
if losses_reduced_per_micro_batch:
mean_loss_dict = {}
for k in losses_reduced_per_micro_batch[0].keys():
# average loss across micro batches
mean_loss_dict[k] = torch.stack(
[loss_reduced[k] for loss_reduced in losses_reduced_per_micro_batch]
).mean()
else:
loss_mean = torch.tensor(0.0).cuda()
mean_loss_dict = {"loss": loss_mean}
return mean_loss_dict
def fwd_bwd_step(self, dataloader_iter, batch_idx, forward_only):
"""
Dataloader produces a global batch which is turned into a list of microbatches.
The list of microbatches is then piped through the pipeline using megatron-core fwd/bwd functions.
"""
# Get seq length of batch
tensor_shape = [self.max_encoder_seq_length, self.cfg.micro_batch_size, self.cfg.encoder.hidden_size]
return self._execute_fwd_bwd_function(
data_iterator=dataloader_iter,
forward_only=forward_only,
tensor_shape=tensor_shape,
decoder_seq_length=self.max_decoder_seq_length,
)
def training_step(self, dataloader_iter, batch_idx):
"""
Our dataloaders produce a micro-batch and then we fetch
a number of microbatches depending on the global batch size and model parallel size
from the dataloader to produce a list of microbatches.
Batch should be a list of microbatches and those microbatches should be on CPU.
Microbatches are then moved to GPU during the pipeline.
The list of microbatches is then piped through the pipeline using megatron-core fwd/bwd functions.
"""
# we zero grads here because we also call backward in the megatron fwd/bwd functions
self._optimizer.zero_grad()
loss_dict = self.fwd_bwd_step(dataloader_iter, batch_idx, False)
if self.with_distributed_adam:
# synchronize asynchronous grad reductions
# note: not necessary, but reduces performance degradation
# from multiple simultaneous NCCL calls
self._optimizer._finish_bucket_grad_sync()
elif self.megatron_amp_o2:
# when using pipeline parallelism grads must be reduced after the pipeline (not asynchronously)
if self.cfg.get('pipeline_model_parallel_size', 1) > 1:
# main grads are stored in the MainParamsOptimizer wrapper
self._optimizer.allreduce_main_grads()
else:
# async grad allreduce is not currently implemented for O1/autocasting mixed precision training
# so we allreduce gradients after the pipeline
self.allreduce_gradients() # @sangkug we think this is causing memory to blow up (hurts perf)
if self.cfg.get('pipeline_model_parallel_size', 1) > 1:
            # when using pipeline parallelism, we need to keep the word and position embeddings in sync
self.allreduce_word_and_position_embeddings()
## logging
# we can only log on one rank if it is rank zero so we broadcast from last rank
# we can avoid this broadcast by updating the PTL log function to accept specific ranks
for k, v in loss_dict.items():
torch.distributed.broadcast(v, get_last_rank())
n = f'reduced_train_{k}'
self.log(n, v, prog_bar=n.endswith("_loss"), rank_zero_only=True, batch_size=1)
if self.torch_dtype == torch.float16:
loss_scale = self.trainer.precision_plugin.scaler._scale
if loss_scale is not None:
self.log('loss_scale', loss_scale, batch_size=1)
lr = self._optimizer.param_groups[0]['lr']
self.log('lr', lr, rank_zero_only=True, batch_size=1)
self.log(
'global_step', self.trainer.global_step, prog_bar=True, rank_zero_only=True, batch_size=1,
)
# TODO: make sure compute_consumed_samples works for pipeline parallelism
self.log(
'consumed_samples',
self._compute_consumed_samples_after_training_step(),
prog_bar=True,
rank_zero_only=True,
batch_size=1,
)
return loss_dict
@property
def max_decoder_seq_length(self) -> int:
seq_len = self._cfg.data.get('seq_length_dec', None)
if seq_len is None:
seq_len = self.cfg.seq_length
return seq_len
@property
def max_encoder_seq_length(self) -> int:
return self.cfg.seq_length
def backward(self, *args, **kwargs):
""" LightningModule hook to do backward.
We want this to do nothing since we run backward in the fwd/bwd functions from megatron-core.
No need to call it here.
"""
return
def optimizer_zero_grad(self, *args, **kwargs):
""" LightningModule hook to zero grad.
We want this to do nothing as we are zeroing grads during the training_step.
"""
return
def allreduce_gradients(self):
"""Reduce gradients across data parallel ranks.
Modified from megatron-lm: https://github.com/NVIDIA/Megatron-LM/blob/d41696840ed0a7edb7e0499eb82a48ae112d9bb3/megatron/model/distributed.py#L188
"""
# Bucketize and all-reduce
buckets = {}
# Pack the buckets.
for param in self.parameters():
if param.requires_grad and param.grad is not None:
tp = param.data.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
# param.main_grad = param.grad
# For each bucket, all-reduce and copy all-reduced grads.
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = torch._utils._flatten_dense_tensors(grads)
coalesced /= parallel_state.get_data_parallel_world_size()
torch.distributed.all_reduce(coalesced, group=parallel_state.get_data_parallel_group())
for buf, synced in zip(grads, torch._utils._unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
def allreduce_word_and_position_embeddings(self):
# Modified from megatron-lm: https://github.com/NVIDIA/Megatron-LM/blob/d41696840ed0a7edb7e0499eb82a48ae112d9bb3/megatron/training.py#L407
# All-reduce word_embeddings' grad across first, last stages to ensure that word_embeddings parameters stay in sync.
if parallel_state.get_pipeline_model_parallel_world_size() > 1 and (
parallel_state.is_rank_in_embedding_group()
):
if self.cfg.get('share_token_embeddings', True) and self.cfg.get(
'share_decoder_tokens_head_embeddings', True
):
word_embeddings_weight = self.enc_dec_model.word_embeddings_weight()
if self.megatron_amp_o2:
# O2 recipe stores a "main" copy of weights and grads
grad = word_embeddings_weight.main_grad
else:
grad = word_embeddings_weight.grad
torch.distributed.all_reduce(grad, group=parallel_state.get_embedding_group())
else:
raise ValueError(
f"Attempting to allreduce word_embeddings for pipeline parallel size > 1, but found untied word embeddings or token head embeddings. This is not supported yet."
)
# All-reduce position embeddings for T5.
if (
parallel_state.is_rank_in_position_embedding_group()
and parallel_state.get_pipeline_model_parallel_world_size() > 1
and parallel_state.get_pipeline_model_parallel_split_rank() is not None
and self.cfg.encoder.get('position_embedding_type') == 'learned_absolute'
and self.cfg.decoder.get('position_embedding_type') == 'learned_absolute'
):
if self.cfg.get('share_token_embeddings', True):
position_embeddings_weight = self.enc_dec_model.position_embeddings_weight()
if self.megatron_amp_o2:
grad = position_embeddings_weight.main_grad
else:
grad = position_embeddings_weight.grad
torch.distributed.all_reduce(grad, group=parallel_state.get_position_embedding_group())
# All-reduce relative position embeddings for T5.
if (
parallel_state.get_pipeline_model_parallel_world_size()
> 2 # This > 2 and not > 1 since with PP=2 encoder RPE can live only on one rank.
and parallel_state.get_pipeline_model_parallel_split_rank() is not None
):
# For split rank = 1, we have only one encoder rank and so we don't need to allreduce.
if (
self.cfg.encoder.get('position_embedding_type') == 'relative'
and parallel_state.is_rank_in_encoder_relative_position_embedding_group()
and parallel_state.get_pipeline_model_parallel_split_rank() > 1
):
position_embeddings_weight = self.enc_dec_model.encoder_relative_position_embeddings_weight()
if self.megatron_amp_o2:
grad = position_embeddings_weight.main_grad
else:
grad = position_embeddings_weight.grad
torch.distributed.all_reduce(
grad, group=parallel_state.get_encoder_relative_position_embedding_group()
)
# For split rank == pipeline_world_size - 1, we have only one decoder rank and so we don't need to allreduce.
if (
self.cfg.decoder.get('position_embedding_type') == 'relative'
and parallel_state.is_rank_in_decoder_relative_position_embedding_group()
):
position_embeddings_weight = self.enc_dec_model.decoder_relative_position_embeddings_weight()
if self.megatron_amp_o2:
grad = position_embeddings_weight.main_grad
else:
grad = position_embeddings_weight.grad
torch.distributed.all_reduce(
grad, group=parallel_state.get_decoder_relative_position_embedding_group()
)
# If the model also has separate RPE weights for decoder cross-attention, allreduce those as well.
if not self.cfg.decoder.get('relative_position_bias_self_attention_only', True):
position_embeddings_weight = (
self.enc_dec_model.decoder_cross_attention_relative_position_embeddings_weight()
)
if self.megatron_amp_o2:
grad = position_embeddings_weight.main_grad
else:
grad = position_embeddings_weight.grad
torch.distributed.all_reduce(
grad, group=parallel_state.get_decoder_relative_position_embedding_group()
)
def _process_batch(self, global_batch: Dict[str, torch.Tensor]) -> List[torch.Tensor]:
# If the decoder input starts with <pad> instead of <bos>, which is the case for huggingface T5 models, we don't want to mask the first token.
# For NeMo-Megatron, the sequence starts with <bos>, which is never masked so we can always set index 0 to be unmasked.
global_batch['dec_mask'][:, 0] = 1
return [
global_batch["text_enc"],
global_batch["text_dec"],
global_batch["loss_mask"],
global_batch["labels"],
global_batch["enc_mask"],
global_batch["dec_mask"],
global_batch.get('data', None),
]
def get_forward_output_and_loss_func(self):
def fwd_output_and_loss_func(dataloader_iter, model):
batch = next(dataloader_iter)
            if isinstance(batch, dict):
                # convert to list if not already converted.
batch = self._process_batch(batch)
batch = [x.cuda(non_blocking=True) if torch.is_tensor(x) else x for x in batch]
(
encoder_input_ids,
decoder_input_ids,
loss_mask,
lm_labels,
encoder_attn_mask,
decoder_attn_mask,
batch_data,
) = batch
output = model(
encoder_input_ids, # enc_input_ids
encoder_attn_mask, # enc_attn_mask
decoder_input_ids, # dec_input_ids
decoder_attn_mask, # dec_attn_mask
None, # token_type_ids
lm_labels, # labels
batch_data, # batch_data
)
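            # loss_func is invoked by the fwd/bwd schedule on each microbatch's output to compute and reduce the loss.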
def loss_func(output_tensor):
if isinstance(output_tensor, dict):
# handle loss of hidden transformations
loss_dict = output_tensor
output_tensor = loss_dict.pop("output")
# compute reconstruction (tokens) only loss from per-token reconstruction loss
tokens_loss = self.loss_func(loss_mask, output_tensor)
loss_dict["tokens_loss"] = tokens_loss
tokens_loss_weight = loss_dict.get("tokens_loss_weight", 1.0)
# compute total loss
loss = loss_dict["loss"] = loss_dict["hiddens_loss"] + tokens_loss_weight * tokens_loss
# average losses across data parallel group
loss_dict = {
k: average_losses_across_data_parallel_group([v.mean()]) for k, v in loss_dict.items()
}
else:
# compute reconstruction (tokens) only loss from per-token reconstruction loss
loss = self.loss_func(loss_mask, output_tensor)
# average losses across data parallel group
reduced_loss = average_losses_across_data_parallel_group([loss])
loss_dict = {'loss': reduced_loss}
return loss, loss_dict
return output, loss_func
return fwd_output_and_loss_func
@functools.lru_cache(maxsize=None)
def _kwargs_to_arg_idx(self):
"""
Returns a dict {kwarg name: arg index} to be used when mapping
kwargs into a list of args.
Computed on first call, and then cached.
"""
# build mapping of kwargs to arg index at first run
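        # with megatron_amp_O2 the model is wrapped (e.g. in a Float16Module), so inspect the inner module's forward signature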
module = self.enc_dec_model.forward if not self.megatron_amp_o2 else self.enc_dec_model.module.forward
args_name = inspect.getfullargspec(module)[0][1:]
kwargs_to_arg_idx = {k: v for k, v in zip(args_name, range(len(args_name)))}
return kwargs_to_arg_idx
def _build_forward_args_from_kwargs(self, args_name, args, **kwargs):
"""
A helper method that converts arguments into positional arguments (by name)
args - a list of arguments to pass to self.enc_dec_model (tensors from batch)
        args_name - a list of argument names (to be matched against allowed kwargs)
kwargs - a dict {arg name: arg value} (used for non-tensor values)
"""
# sanity checks
if len(args) != len(args_name):
raise ValueError(f"Mismatch between length in args_name ({len(args_name)}) and args ({len(args)})")
if any([n in kwargs for n in args_name]):
raise ValueError(f"args_name = {args_name} cannot overlap kwargs = {list(kwargs.keys())}")
# get mapping of kwarg names to arg index
kwargs_to_arg_idx = self._kwargs_to_arg_idx()
# collect all arguments
all_args_name = args_name[:]
all_args = args[:]
for k, v in kwargs.items():
all_args_name.append(k)
all_args.append(v)
args_idx = [kwargs_to_arg_idx[n] for n in all_args_name]
# print(f"all_args_name = {all_args_name} args_idx = {args_idx}")
# construct args ordered by name (with None as place-holder)
forward_args = [None] * (max(args_idx) + 1)
for i, v in zip(args_idx, all_args):
forward_args[i] = v
return forward_args
def _get_forward_output_only_func(self, arg_names, output_name, **kwargs):
"""
        arg_names - names of the forward arguments corresponding (in order) to the tensors in the batch
        output_name - name of the output ("hiddens" for encode, "logits" for decode)
        kwargs - shared non-tensor arguments passed to every forward call
"""
def fwd_output_only_func(dataloader_iter, model):
batch = next(dataloader_iter)
batch = [x.cuda(non_blocking=True) if torch.is_tensor(x) else x for x in batch]
# map batch and shared args into forward args
args = self._build_forward_args_from_kwargs(args_name=arg_names, args=batch, **kwargs)
output = model(*args).contiguous()
def id_func(output_tensor):
if isinstance(output_tensor, dict):
# handle loss of hidden transformations ("output" is the default output)
output_tensor = output_tensor["output"]
return output_tensor, {output_name: output_tensor}
return output, id_func
return fwd_output_only_func
##########
def _test_validation_step(self, step_outputs, dataloader_iter, batch_idx, dataloader_idx=0):
"""
Shared code for validation and test step
"""
# Check if iterator is exhausted
dataloader_iter, done = self._val_iterator_done(dataloader_iter)
if done:
return
loss_dict = self.fwd_bwd_step(dataloader_iter, batch_idx, True)
step_outputs.append(loss_dict)
return loss_dict
def validation_step(self, dataloader_iter, batch_idx, dataloader_idx=0):
"""
        Run a forward-only fwd/bwd step on a validation batch and append the resulting loss dict to validation_step_outputs.
"""
if type(self.trainer.val_dataloaders) == list and len(self.trainer.val_dataloaders) > 1:
step_outputs = self.validation_step_outputs[dataloader_idx]
else:
step_outputs = self.validation_step_outputs
return self._test_validation_step(
step_outputs=step_outputs,
dataloader_iter=dataloader_iter,
batch_idx=batch_idx,
dataloader_idx=dataloader_idx,
)
def test_step(self, dataloader_iter, batch_idx, dataloader_idx=0):
        if type(self.trainer.test_dataloaders) == list and len(self.trainer.test_dataloaders) > 1:
step_outputs = self.test_step_outputs[dataloader_idx]
else:
step_outputs = self.test_step_outputs
return self._test_validation_step(
step_outputs=step_outputs,
dataloader_iter=dataloader_iter,
batch_idx=batch_idx,
dataloader_idx=dataloader_idx,
)
def _test_validation_epoch_end(self, step_outputs, prefix):
"""
Shared logging for validation and test
"""
# NOTE: we need to make sure outputs is not empty (this is a workaround for a bug in pytorch lightning (?))
if not step_outputs:
logging.warning(f"{prefix} epoch end: outputs is empty")
return None
# only the last pipeline parallel stages return loss
if parallel_state.is_pipeline_last_stage() and len(step_outputs):
averaged_loss = {k: torch.stack([x[k] for x in step_outputs]).mean() for k in step_outputs[0].keys()}
else:
            # if we are here we assume that only loss is available and hidden transforms are disabled (since they are not supported in pipeline parallel)
averaged_loss = {'loss': torch.tensor(0.0).cuda()}
# we can only log on one rank if it is rank zero so we broadcast from last rank
for k, v in averaged_loss.items():
torch.distributed.broadcast(v, get_last_rank())
averaged_loss[k] = v
n = f'{prefix}_{k}'
# log only '*_loss' values in progress bar
self.log(n, v, prog_bar=(n.endswith("_loss")), rank_zero_only=True, batch_size=1)
# free memory
step_outputs.clear()
return averaged_loss
def on_validation_epoch_end(self):
# FIXME: do we need this? 'global_step' is logged in training_step
self.log('global_step', self.trainer.global_step, prog_bar=True, rank_zero_only=True, batch_size=1)
return self._test_validation_epoch_end(step_outputs=self.validation_step_outputs, prefix="val",)
def on_test_epoch_end(self):
return self._test_validation_epoch_end(step_outputs=self.test_step_outputs, prefix="test",)
def loss_func(self, loss_mask, tokens_loss):
"""
This function takes as input per-token loss and masks non-required values.
"""
losses = tokens_loss.view(-1).float()
loss_mask = loss_mask.view(-1).float()
# TODO: add nemo version here
loss = torch.sum(losses * loss_mask) / loss_mask.sum() # sequence level nll
return loss
def process_micro_batch(self, micro_batch):
""" Micro batch returned by MegatronT5 dataloader"""
data_b = micro_batch
# Unpack.
tokens_enc = data_b['text_enc'].long()
tokens_dec = data_b['text_dec'].long()
labels = data_b['labels'].long()
loss_mask = data_b['loss_mask'].float()
enc_mask = data_b['enc_mask']
dec_mask = data_b['dec_mask']
return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask
def _process_global_batch_without_megatron_batch_sampler(self, global_batch, tokenizer=None):
""" Prepares the global batch for megatron-core fwd/bwd functions.
Global batch is a list of micro batches.
"""
tokenizer = self.tokenizer if tokenizer is None else tokenizer
text_enc_list = []
text_dec_list = []
labels_list = []
loss_mask_list = []
enc_mask_list = []
dec_mask_list = []
# Determine the maximum encoder and decoder sequence lengths amongst microbatches and pad each microbatch to the max seq length.
# NOTE: This should only happen for model finetuning where we pad dynamically. Training uses fixed training shapes.
        max_enc_seq_length = max([micro_batch['text_enc'].shape[1] for micro_batch in global_batch])
        max_dec_seq_length = max([micro_batch['text_dec'].shape[1] for micro_batch in global_batch])
        for micro_batch in global_batch:
            text_enc, text_dec, loss_mask, labels, enc_mask, dec_mask = self.process_micro_batch(micro_batch)
            # Check if encoder sequence length < max encoder sequence length of the global batch and pad.
            if text_enc.shape[1] < max_enc_seq_length:
                text_enc = torch.nn.functional.pad(
                    text_enc, (0, max_enc_seq_length - text_enc.shape[1], 0, 0), 'constant', tokenizer.pad_id
                )
                enc_mask = torch.nn.functional.pad(
                    enc_mask, (0, max_enc_seq_length - enc_mask.shape[1], 0, 0), 'constant', 0
                )
            if text_dec.shape[1] < max_dec_seq_length:
                text_dec = torch.nn.functional.pad(
                    text_dec, (0, max_dec_seq_length - text_dec.shape[1], 0, 0), 'constant', tokenizer.pad_id
                )
                dec_mask = torch.nn.functional.pad(
                    dec_mask, (0, max_dec_seq_length - dec_mask.shape[1], 0, 0), 'constant', 0
                )
                labels = torch.nn.functional.pad(
                    labels, (0, max_dec_seq_length - labels.shape[1], 0, 0), 'constant', tokenizer.pad_id
                )
                loss_mask = torch.nn.functional.pad(
                    loss_mask, (0, max_dec_seq_length - loss_mask.shape[1], 0, 0), 'constant', 0
                )
text_enc_list.append(text_enc)
text_dec_list.append(text_dec)
labels_list.append(labels)
loss_mask_list.append(loss_mask)
enc_mask_list.append(enc_mask)
dec_mask_list.append(dec_mask)
# Concatenate to (num_microbatches x micro_batch_size x seq_len)
tokens_enc_tensor = torch.concat(text_enc_list, dim=0)
tokens_dec_tensor = torch.concat(text_dec_list, dim=0)
labels_tensor = torch.concat(labels_list, dim=0)
loss_mask_tensor = torch.concat(loss_mask_list, dim=0)
enc_mask_tensor = torch.concat(enc_mask_list, dim=0)
dec_mask_tensor = torch.concat(dec_mask_list, dim=0)
return {
'text_enc': tokens_enc_tensor,
'text_dec': tokens_dec_tensor,
'loss_mask': loss_mask_tensor,
'labels': labels_tensor,
'enc_mask': enc_mask_tensor,
'dec_mask': dec_mask_tensor,
}
def build_train_valid_test_datasets(self):
raise NotImplementedError("Please implement this method in child-class")
def build_pretraining_data_loader(self, dataset, consumed_samples, num_workers):
"""Buld dataloader given an input dataset."""
if dataset is None:
return None
logging.info(f'Building dataloader with consumed samples: {consumed_samples}')
# Megatron sampler
if hasattr(self._cfg.data, 'dataloader_type') and self._cfg.data.dataloader_type is not None:
if self._cfg.data.dataloader_type == 'single':
batch_sampler = MegatronPretrainingSampler(
total_samples=len(dataset),
consumed_samples=consumed_samples,
micro_batch_size=self._cfg.micro_batch_size,
global_batch_size=self._cfg.global_batch_size,
data_parallel_rank=parallel_state.get_data_parallel_rank(),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
drop_last=self._cfg.get('drop_last', True),
)
elif self._cfg.data.dataloader_type == 'cyclic':
batch_sampler = MegatronPretrainingRandomSampler(
total_samples=len(dataset),
consumed_samples=consumed_samples,
micro_batch_size=self._cfg.micro_batch_size,
global_batch_size=self._cfg.global_batch_size,
data_parallel_rank=parallel_state.get_data_parallel_rank(),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
drop_last=self._cfg.get('drop_last', True),
)
else:
                raise Exception(f'{self._cfg.data.dataloader_type} dataloader type is not supported.')
else:
raise ValueError('cfg.data.dataloader_type not found. Must be "single" or "cyclic"')
# Torch dataloader.
return torch.utils.data.DataLoader(
dataset,
batch_sampler=batch_sampler,
num_workers=num_workers,
pin_memory=True,
persistent_workers=True if num_workers > 0 else False,
)
def setup(self, stage=None):
""" PTL hook that is executed after DDP spawns.
We setup datasets here as megatron datasets require DDP to instantiate.
See https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#setup for more information.
Args:
stage (str, optional): Can be 'fit', 'validate', 'test' or 'predict'. Defaults to None.
"""
num_parameters_on_device, total_num_parameters = self._get_total_params_across_model_parallel_groups_enc_dec(
self.enc_dec_model
)
logging.info(
f'Pipeline model parallel rank: {parallel_state.get_pipeline_model_parallel_rank()}\n'
f'Tensor model parallel rank: {parallel_state.get_tensor_model_parallel_rank()}\n'
f'Number of model parameters on device: {num_parameters_on_device:.2e}\n'
f'Total number of model parameters: {total_num_parameters:.2e}\n'
)
resume_checkpoint_path = self.trainer.ckpt_path
if resume_checkpoint_path:
init_consumed_samples = self._extract_consumed_samples_from_ckpt(resume_checkpoint_path)
else:
init_consumed_samples = 0
self.init_consumed_samples = init_consumed_samples
"""A PTL method to setup the training, validation and test datasets."""
if stage == 'predict':
return
if self._train_dl is not None and self._validation_dl is not None:
return
self.build_train_valid_test_datasets()
self.setup_training_data(self._cfg.data)
self.setup_validation_data(self._cfg.data)
self.setup_test_data(self._cfg.data)
        # when using pipeline model parallelism, the final stage needs to initialize word embeddings
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
assert (
self.cfg.share_token_embeddings
), "share_word_embedding must be True when using pipeline model parallel > 1"
assert (
self.cfg.share_decoder_tokens_head_embeddings
), "share_decoder_tokens_head_embeddings must be True when using pipeline model parallel > 1"
self.enc_dec_model.sync_initial_word_embeddings()
if (
self.cfg.encoder.get('position_embedding_type') == 'learned_absolute'
and self.cfg.decoder.get('position_embedding_type') == 'learned_absolute'
):
self.enc_dec_model.sync_initial_position_embeddings()
# Synchronize RPE embeddings across pipeline parallel ranks.
else:
if self.cfg.encoder.get('position_embedding_type', 'learned_absolute') == 'relative':
self.enc_dec_model.sync_initial_encoder_relative_position_embeddings()
if self.cfg.decoder.get('position_embedding_type', 'learned_absolute') == 'relative':
self.enc_dec_model.sync_initial_decoder_relative_position_embeddings()
if self.cfg.decoder.get(
'position_embedding_type', 'learned_absolute'
) == 'relative' and not self.cfg.decoder.get('relative_position_bias_self_attention_only', True):
self.enc_dec_model.sync_initial_decoder_cross_attention_relative_position_embeddings()
def setup_training_data(self, cfg):
if hasattr(self, '_train_ds'):
consumed_samples = self.compute_consumed_samples(0)
self._train_dl = self.build_pretraining_data_loader(
self._train_ds, consumed_samples, num_workers=self._cfg.data.num_workers
)
def setup_validation_data(self, cfg):
if hasattr(self, '_validation_ds'):
consumed_samples = 0
self._validation_dl = self.build_pretraining_data_loader(
self._validation_ds, consumed_samples, num_workers=self._cfg.data.num_workers
)
def setup_test_data(self, cfg):
if hasattr(self, '_test_ds'):
consumed_samples = 0
self._test_dl = self.build_pretraining_data_loader(self._test_ds, consumed_samples, num_workers=0)
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
request = batch
response = self.complete(request)
logging.info(f"response: {response}")
return response
def encode(self, tokens_enc, enc_mask, encoder_input=None, batch_data=None, reconfigure_microbatch=True):
"""
tokens_enc - encoder input tokens
enc_mask - corresponding mask
encoder_input - encoder input (bypass tokens), if given tokens_enc can be None.
batch_data - passed directly to all hidden transformations and losses.
Can be used to pass additional data like class label.
Format is not defined and should match the expected format of the used hiddens modules.
"""
# Check whether the DDP is initialized. This is needed when running inference outside of training loop.
if parallel_state.is_unitialized():
def dummy():
return
if self.trainer.strategy.launcher is not None:
self.trainer.strategy.launcher.launch(dummy, trainer=self.trainer)
self.trainer.strategy.setup_environment()
# Reconfigure microbatch sizes here because on model restore, this will contain the micro/global batch configuration used while training.
if reconfigure_microbatch:
_reconfigure_microbatch_calculator(
rank=0, # This doesn't matter since it is only used for logging
rampup_batch_size=None,
global_batch_size=1,
micro_batch_size=1, # Make sure that there is no "grad acc" while decoding.
                data_parallel_size=1,  # We check above to make sure that data parallel size is always 1 at inference.
)
        # NOTE: subclasses may use a different tokenizer, but encode() itself does not depend on the tokenizer.
app_state = AppState()
if tokens_enc is not None:
global_batch_per_gpu = tokens_enc.size(0)
encoder_seq_length = tokens_enc.size(1)
else:
global_batch_per_gpu = encoder_input.size(1)
encoder_seq_length = encoder_input.size(0)
num_micro_batches_before_decode = get_num_microbatches()
        # Reconfigure the microbatch calculator here to set num microbatches to 1 while encoding, since it's not clear how to run inference with "grad acc".
        # It is reconfigured back to the previous settings after the encode step below.
if reconfigure_microbatch:
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=global_batch_per_gpu * parallel_state.get_data_parallel_world_size(),
micro_batch_size=global_batch_per_gpu, # Make sure that there is no "grad acc" while decoding.
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
tensor_shape = [encoder_seq_length, global_batch_per_gpu, self.cfg.encoder.hidden_size]
# build input arguments description
if tokens_enc is not None:
batch_for_pipeline = [tokens_enc, enc_mask, batch_data]
arg_names = ['enc_input_ids', 'enc_attn_mask', 'batch_data']
else:
if encoder_input is None:
raise ValueError("At least one of tokens_enc and encoder_input must be provided with not None value")
batch_for_pipeline = [enc_mask]
arg_names = ['enc_attn_mask']
if encoder_input is not None:
batch_for_pipeline.append(encoder_input)
arg_names.append('enc_input')
forward_step_func = self._get_forward_output_only_func(
arg_names=arg_names, output_name="hiddens", output_enc_hidden_only=True
)
fwd_bwd_func = get_forward_backward_func()
        # Counterintuitively, we need to set decoder_sequence_length=encoder_seq_length
# because while running `.encode()`, the last hidden states from encoder are passed through
# as identity through the pipeline.
# Setting it to anything else will cause hanging due to tensor shape mismatches.
output_tensor = fwd_bwd_func(
forward_step_func=forward_step_func,
data_iterator=iter([batch_for_pipeline,]),
model=[self.enc_dec_model],
forward_only=True,
num_microbatches=1,
seq_length=encoder_seq_length,
decoder_seq_length=encoder_seq_length,
micro_batch_size=get_micro_batch_size(),
)
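        # Only the last pipeline stage returns the encoder hiddens; other stages fall back to a zero placeholder of the right shape.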
if output_tensor:
output_tensor = output_tensor[0]['hiddens']
else:
output_tensor = torch.zeros(tensor_shape, dtype=self.autocast_dtype).cuda()
if self.cfg.get('pipeline_model_parallel_size', 1) > 1:
# Broadcast from the last pipeline stage to all other model-parallel ranks.
torch.distributed.broadcast(
output_tensor,
parallel_state.get_pipeline_model_parallel_last_rank(),
group=parallel_state.get_pipeline_model_parallel_group(),
)
# Reset microbatch calculator to what it was before decoding.
if reconfigure_microbatch:
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=global_batch_per_gpu * parallel_state.get_data_parallel_world_size(),
micro_batch_size=global_batch_per_gpu // num_micro_batches_before_decode,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
# Return the output tensor of encoder and transpose from [seq_len, batch, hidden] to [batch, seq_len, hidden]
return output_tensor.transpose(1, 0)
def decode(
self,
tokens_enc,
enc_mask,
num_tokens_to_generate,
encoder_input=None,
tokenizer=None,
enc_output=None,
enc_output_attn_mask=None,
ignore_ids=[],
bos_id=None, # If bos=None, will use tokenizer.bos_id unless explicitly set to something else.
predicted_tokens_dec=None,
batch_data=None,
sampling_method: str = "greedy-search",
sampling_kwargs: dict = {},
):
"""
tokens_enc - a tensor of shape [batch_size, seq_len] that contains the input tokens.
enc_mask - a tensor of shape [batch_size, seq_len] that contains the input tokens mask (1 for active, 0 for inactive).
num_tokens_to_generate - the max number of tokens to generate.
encoder_input - a tensor of shape [batch_size, seq_len, hidden_size] that contains the encoder hidden states (replaces tokens_enc if given).
tokenizer - a tokenizer object.
enc_output - a tensor of shape [batch_size, seq_len, hidden_size] that contains the encoder hidden states (replaces tokens_enc and encoder_input if given).
enc_output_attn_mask - a tensor of shape [batch_size, seq_len] that contains the encoder attention mask (replaces enc_mask if given).
ignore_ids - a list of token ids to ignore when sampling.
bos_id - the id of the beginning of sentence token. If None, will use tokenizer.bos_id unless explicitly set to something else.
predicted_tokens_dec - a tensor of shape [batch_size, seq_len] that contains the tokens that have already been decoded.
sampling_method - a sampling method to use in the decoding iterations. Currently supported methods are
"beam-search"/"greedy-search"/"topkp-sampling". The argument specifies the sampling function
that takes in a tensor of logits [batch_size, vocab_size] and returns a tuple
(tensor of log_probs [batch_size], tensor of sampled tokens_ids from logits [batch_size]).
If the beam search is enabled, the sampling function returns tensors [batch_size, beam_size]
sampling_kwargs - dict with arguments to be passed to the sampling function. Please refer to the method
get_sampling_token_fn to see which arguments are required for a chosen sampling_method.
return:
tuple of tensors [batch_size, seq_len +1], [batch_size, seq_len] for predicted tokens and their log probs.
        If sampling_method == 'beam-search' and keep_only_best_tokens is False, the shapes of the tensors are
[batch_size, beam_size, seq_len + 1], [batch_size, beam_size, seq_len]
"""
# Setting up the sampling strategy
sample_token_fn, sampling_kwargs = get_sampling_token_fn(sampling_method, sampling_kwargs)
beam_search = sampling_method == 'beam-search'
if beam_search:
beam_size = sampling_kwargs['beam_size']
beam_alpha = sampling_kwargs['beam_alpha']
keep_only_best_tokens = sampling_kwargs['keep_only_best_tokens']
return_scores = sampling_kwargs['return_scores']
logging.info(f'Decoding using the beam search method with beam size={beam_size}...')
assert beam_size >= 1 and beam_alpha >= 0, 'Beam-search related parameters are misspecified'
else:
logging.info(f'Decoding using the {sampling_method} method...')
# Check whether the DDP is initialized. This is needed when running inference outside of training loop.
if not parallel_state.model_parallel_is_initialized():
def dummy():
return
if self.trainer.strategy.launcher is not None:
self.trainer.strategy.launcher.launch(dummy, trainer=self.trainer)
self.trainer.strategy.setup_environment()
# Reconfigure microbatch sizes here because on model restore, this will contain the micro/global batch configuration used while training.
_reconfigure_microbatch_calculator(
rank=0, # This doesn't matter since it is only used for logging
rampup_batch_size=None,
global_batch_size=1,
micro_batch_size=1, # Make sure that there is no "grad acc" while decoding.
            data_parallel_size=1,  # We check above to make sure that data parallel size is always 1 at inference.
)
        # Classes that inherit from this class may use a different tokenizer, so allow overriding it here.
tokenizer = self.tokenizer if tokenizer is None else tokenizer
app_state = AppState()
if tokens_enc is not None:
global_batch_per_gpu = tokens_enc.size(0)
device = tokens_enc.device
encoder_seq_length = tokens_enc.size(1)
else:
global_batch_per_gpu = enc_output.size(0)
device = enc_output.device
encoder_seq_length = enc_output.size(1)
num_micro_batches_before_decode = get_num_microbatches()
        # Reconfigure the microbatch calculator here to set num microbatches to 1 while decoding, since it's not clear how to decode with "grad acc".
        # It is reconfigured back to the previous settings after decoding finishes.
# TODO: Check if the user is trying to do gradient acc and maybe throw error
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=global_batch_per_gpu * parallel_state.get_data_parallel_world_size(),
micro_batch_size=global_batch_per_gpu, # Make sure that there is no "grad acc" while decoding.
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
# TODO: Figure out how to handle bos being either <bos> for NeMo-Megatron and <pad> for Huggingface/Google.
bos_id = tokenizer.bos_id if bos_id is None else bos_id
# initial prompt can be given
if predicted_tokens_dec is None:
predicted_tokens_dec = torch.LongTensor([bos_id] * global_batch_per_gpu).unsqueeze(1).to(device)
# collect log probs that were used in the sampling
predicted_log_probs = torch.zeros((global_batch_per_gpu, 0), dtype=self.autocast_dtype).to(device)
tensor_shape = [encoder_seq_length, global_batch_per_gpu, self.cfg.encoder.hidden_size]
assert predicted_tokens_dec.size(0) == global_batch_per_gpu
# get encoder hiddens (output)
if enc_output is None:
            # Encode returns a tensor of shape [batch, seq_len, hidden]
# All ranks will call `.encode()`, but only the last rank will have a non-empty output tensor.
enc_output = self.encode(
tokens_enc=tokens_enc, enc_mask=enc_mask, encoder_input=encoder_input, reconfigure_microbatch=False
)
if enc_output_attn_mask is None:
enc_output_attn_mask = enc_mask
for i in range(num_tokens_to_generate):
# No microbatches in decoding. Just the global batch.
decoder_seq_length = predicted_tokens_dec.size(1)
dec_mask = predicted_tokens_dec != tokenizer.pad_id
dec_mask[:, 0] = 1 # Make sure you never mask the first token even if it is <pad>.
batch_for_pipeline = [enc_output, enc_output_attn_mask, predicted_tokens_dec, dec_mask, batch_data]
arg_names = ['enc_output', 'enc_output_attn_mask', 'dec_input_ids', 'dec_attn_mask', 'batch_data']
forward_step_func = self._get_forward_output_only_func(arg_names=arg_names, output_name="logits")
fwd_bwd_func = get_forward_backward_func()
output_tensor = fwd_bwd_func(
forward_step_func=forward_step_func,
data_iterator=iter([batch_for_pipeline,]),
model=[self.enc_dec_model],
forward_only=True,
num_microbatches=1,
seq_length=encoder_seq_length,
decoder_seq_length=encoder_seq_length,
micro_batch_size=get_micro_batch_size(),
)
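            # As in encode(), only the last pipeline stage has real logits; other ranks build placeholder tensors and receive the result via broadcast below.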
# get output tensor
if parallel_state.is_pipeline_last_stage():
output_tensor = output_tensor[0]['logits']
output_tensor = tensor_parallel.gather_from_tensor_model_parallel_region(output_tensor)
# make sure it won't sample outside the vocab_size range
output_tensor[:, :, tokenizer.vocab_size :] = -float('Inf')
# ignore selected indices
if ignore_ids:
output_tensor = output_tensor.index_fill(
dim=-1, index=torch.tensor(ignore_ids, device=output_tensor.device), value=-float('Inf')
)
log_probs, token_ids = sample_token_fn(logits=output_tensor[:, -1, :])
# enforce valid range of token ids
token_ids = torch.clamp(token_ids, max=tokenizer.vocab_size - 1)
if beam_search:
# beam search: beam creation in the first iteration
if i == 0:
# resizing decoder inputs to match tensors augmented with beams
log_probs, token_ids = log_probs.view(-1), token_ids.view(-1)
scores = log_probs.unsqueeze(1).clone()
batch_size, src_length, hidden_size = enc_output.size()
enc_output_attn_mask = enc_output_attn_mask.repeat(1, beam_size).view(-1, src_length)
enc_output = enc_output.repeat(1, beam_size, 1).view(-1, src_length, hidden_size)
# resize tensors that collect predicted tokens and logits per iteration to
# match shape of tensors augmented with the beam size
predicted_tokens_dec = predicted_tokens_dec.repeat(beam_size, 1)
predicted_log_probs = predicted_log_probs.repeat(beam_size, 0)
pad_profile = torch.zeros_like(scores).long()
decoder_seq_lengths = torch.zeros_like(scores).fill_(predicted_tokens_dec.size(1) + 1)
# reconfigure batch size for apex since the tensor have been augmented with beam size
global_batch_per_gpu = token_ids.shape[0]
tensor_shape[1] = global_batch_per_gpu
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=global_batch_per_gpu * parallel_state.get_data_parallel_world_size(),
micro_batch_size=global_batch_per_gpu,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
# collect all predicted tokens and log_probs
predicted_tokens_dec = torch.cat(
[predicted_tokens_dec.to(token_ids.device), token_ids.unsqueeze(1)], dim=1
)
predicted_log_probs = torch.cat(
[predicted_log_probs.to(log_probs.device), log_probs.unsqueeze(1)], dim=1
)
# beam search: beam selection in the second iteration and on
else:
# mask all finished hypotheses to exclude them from beam
pad_mask = pad_profile.repeat(1, beam_size)
# for all prefixes ending with <eos> or <pad> replace generated
# continuations with <pad>
token_ids = tokenizer.pad_id * pad_mask + token_ids * (1 - pad_mask)
# force all hypotheses but one generated from already finished
# hypotheses to have extremely low score, so they will not be
# considered during beam re-ranking
pad_mask[:, 1:] = pad_mask[:, 1:] * -10000.0
scores = scores + log_probs * (1 - pad_mask).to(scores.dtype)
# choose top-k hypotheses with length penalty applied
len_penalties = compute_beam_search_len_penalty(decoder_seq_lengths, beam_alpha)
scores = scores / len_penalties
scores, indices = sample_token_fn(scores.view(-1, beam_size ** 2), dim=1, log_softmax=False)
scores = scores.view(-1, 1) * len_penalties
# select predicted sequences which correspond to the chosen hypotheses
predicted_tokens_dec = predicted_tokens_dec.unsqueeze(1).repeat(1, beam_size, 1)
predicted_tokens_dec = torch.cat((predicted_tokens_dec, token_ids.unsqueeze(2)), dim=2)
predicted_tokens_dec = predicted_tokens_dec.view(batch_size, beam_size ** 2, -1)
p_len = predicted_tokens_dec.size(2)
predicted_tokens_dec_ids = indices.unsqueeze(2).repeat(1, 1, p_len)
predicted_tokens_dec = predicted_tokens_dec.gather(1, predicted_tokens_dec_ids).view(-1, p_len)
# select logits which correspond to the chosen hypotheses
predicted_log_probs = predicted_log_probs.unsqueeze(1).repeat(1, beam_size, 1)
predicted_log_probs = torch.cat((predicted_log_probs, log_probs.unsqueeze(2)), dim=2)
predicted_log_probs = predicted_log_probs.view(batch_size, beam_size ** 2, -1)
predicted_log_probs = predicted_log_probs.gather(1, predicted_tokens_dec_ids[:, :, 1:]).view(
-1, p_len - 1
)
# update decoder_seq_length and pad_profile
not_eos_pad = predicted_tokens_dec.ne(tokenizer.eos_id) & predicted_tokens_dec.ne(
tokenizer.pad_id
)
decoder_seq_lengths = 1 + not_eos_pad.sum(dim=1, keepdim=True).to(scores.dtype)
pad_profile = (~not_eos_pad[:, -1:]).long()
else:
# collect all predicted tokens and log_probs
predicted_tokens_dec = torch.cat(
[predicted_tokens_dec.to(token_ids.device), token_ids.unsqueeze(1)], dim=1
)
predicted_log_probs = torch.cat(
[predicted_log_probs.to(log_probs.device), log_probs.unsqueeze(1)], dim=1
)
else:
predicted_tokens_dec = torch.zeros(
(predicted_tokens_dec.shape[0], predicted_tokens_dec.shape[1] + 1),
dtype=predicted_tokens_dec.dtype,
).cuda()
predicted_log_probs = torch.zeros(
(predicted_log_probs.shape[0], predicted_log_probs.shape[1] + 1), dtype=self.autocast_dtype
).cuda()
if self.cfg.get('pipeline_model_parallel_size', 1) > 1:
# Broadcast from the last pipeline stage to all other model-parallel ranks.
torch.distributed.broadcast(
predicted_tokens_dec,
parallel_state.get_pipeline_model_parallel_last_rank(),
group=parallel_state.get_pipeline_model_parallel_group(),
)
torch.distributed.broadcast(
predicted_log_probs,
parallel_state.get_pipeline_model_parallel_last_rank(),
group=parallel_state.get_pipeline_model_parallel_group(),
)
# Reset microbatch calculator to what it was before decoding.
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=global_batch_per_gpu * parallel_state.get_data_parallel_world_size(),
micro_batch_size=global_batch_per_gpu // num_micro_batches_before_decode,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
if beam_search and beam_size > 1:
if keep_only_best_tokens:
len_penalties = compute_beam_search_len_penalty(decoder_seq_lengths, 0)
scores = scores / len_penalties
scores = scores.view(-1, beam_size)
best_ids = torch.argmax(scores, dim=1, keepdim=True)
scores = scores * len_penalties.view(-1, beam_size)
scores = scores.gather(1, best_ids)
best_tokens = best_ids.repeat(1, predicted_tokens_dec.size(1)).unsqueeze(1)
predicted_tokens_dec = (
predicted_tokens_dec.view(batch_size, beam_size, -1).gather(1, best_tokens).squeeze(1)
)
predicted_log_probs = (
predicted_log_probs.view(batch_size, beam_size, -1).gather(1, best_tokens[:, :, 1:]).squeeze(1)
)
else:
predicted_tokens_dec = predicted_tokens_dec.view(batch_size, beam_size, -1)
predicted_log_probs = predicted_log_probs.view(batch_size, beam_size, -1)
scores = scores.view(-1, beam_size)
if beam_search:
if return_scores:
return predicted_tokens_dec, predicted_log_probs, scores
return predicted_tokens_dec, predicted_log_probs
def complete(self, request: Dict):
"""
Autoregressively invokes language model in the inference mode
Args:
request: Dictionary with the following fields
* prompt: a string which text the model should complete.
* tokens_to_generate: how many tokens to generate while doing prompt completion.
Returns:
response: A python dictionary with the following fields
* prompt: original text of the prompt
* tokenized_prompt: list of (str) tokens from prompt
* completion: a python dictionary with the following subfields:
* tokens: a list of triples (token, token_id, log_prob) comprising completion
* text: completion text (as a single string)
"""
app_state = AppState()
        # The complete method only works with global batch size = micro batch size = data parallel size = 1.
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=1,
micro_batch_size=1,
data_parallel_size=1,
)
app_state = AppState()
response = {}
self.freeze()
# naive greedy slow loop
# TODO: add option for BeamSearchDecoder
response['prompt'] = request['prompt'][0]
response['completion'] = {}
bos_id = request['bos_id']
tokens_enc = request['masked_sample']
response['masked_input'] = ' '.join(self.tokenizer.ids_to_tokens(tokens_enc[0].cpu().numpy().tolist()))
enc_mask = tokens_enc != self.tokenizer.pad_id
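        # Greedy decoding of up to tokens_to_generate tokens (the default sampling method of decode()).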
predicted_tokens_ids, log_probs = self.decode(
tokens_enc, enc_mask, int(request['tokens_to_generate']), bos_id=bos_id
)
predicted_tokens_ids = predicted_tokens_ids.cpu().numpy()[0].tolist()
log_probs = log_probs.cpu().numpy()[0].tolist()
if self.tokenizer.eos_id in predicted_tokens_ids:
idx = predicted_tokens_ids.index(self.tokenizer.eos_id)
predicted_tokens_ids = predicted_tokens_ids[:idx]
else:
predicted_tokens_ids = [id for id in predicted_tokens_ids if id != self.tokenizer.pad_id]
if self.tokenizer.eos_id in predicted_tokens_ids:
idx = predicted_tokens_ids.index(self.tokenizer.eos_id)
predicted_tokens_ids = predicted_tokens_ids[:idx]
# Legacy sentencepiece detokenization still preserves special tokens which messes up exact string match.
if hasattr(self.tokenizer, 'special_token_to_id'):
predicted_tokens_ids = [
id for id in predicted_tokens_ids if id not in self.tokenizer.special_token_to_id.values()
]
predicted_tokens_dec = self.tokenizer.ids_to_tokens(predicted_tokens_ids)
response['completion']['text'] = self.tokenizer.tokens_to_text(predicted_tokens_dec)
response['completion']['tokens'] = list(zip(predicted_tokens_ids, predicted_tokens_dec, log_probs))
self.unfreeze()
return response
def transfer_batch_to_device(self, batch: Any, device: torch.device, dataloader_idx: int) -> Any:
""" PTL hook: https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#transfer-batch-to-device
When using pipeline parallelism, we need the global batch to remain on the CPU,
since the memory overhead will be too high when using a large number of microbatches.
Microbatches are transferred from CPU to GPU inside the pipeline.
"""
return batch
def _validate_trainer(self):
""" Certain trainer configurations can break training.
Here we try to catch them and raise an error.
"""
if self.trainer.accumulate_grad_batches > 1:
raise ValueError(
f'Gradient accumulation is done within training_step. trainer.accumulate_grad_batches must equal 1'
)
def list_available_models(self):
pass
def build_model_parallel_config(self):
""" Hidden size needs to be set from the cfg.encoder for the pipeline schedule.
"""
model_parallel_config = super().build_model_parallel_config()
try:
# hidden size is needed for pipeline schedules but is not currently in ModelParallelConfig
setattr(model_parallel_config, 'hidden_size', self.cfg.encoder.hidden_size)
except AttributeError:
logging.warning(
f'encoder.hidden_size not found in {self.cfg}. Set this in model_parallel_config if using pipeline parallelism.'
)
return model_parallel_config
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron_lm_encoder_decoder_model.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT model."""
import torch
from nemo.collections.nlp.modules.common.megatron.language_model import get_language_model
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.transformer import get_layer_norm
from nemo.collections.nlp.modules.common.megatron.utils import (
ApexGuardDefaults,
build_position_ids,
erf_gelu,
get_linear_layer,
init_method_normal,
openai_gelu,
parallel_lm_logits,
scaled_init_method_normal,
)
try:
from apex.transformer.enums import AttnMaskType
from apex.transformer.tensor_parallel.layers import set_tensor_model_parallel_attributes
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
AttnMaskType = ApexGuardDefaults()
try:
from megatron.core import ModelParallelConfig, parallel_state, tensor_parallel
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
def bert_extended_attention_mask(attention_mask):
# We create a 3D attention mask from a 2D tensor mask.
# [b, 1, s]
attention_mask_b1s = attention_mask.unsqueeze(1)
# [b, s, 1]
attention_mask_bs1 = attention_mask.unsqueeze(2)
# [b, s, s]
attention_mask_bss = attention_mask_b1s * attention_mask_bs1
# [b, 1, s, s]
extended_attention_mask = attention_mask_bss.unsqueeze(1)
# Convert attention mask to binary:
extended_attention_mask = extended_attention_mask < 0.5
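    # With this convention, True marks positions that must NOT be attended to (mask == True means "masked out").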
return extended_attention_mask
class BertLMHead(MegatronModule):
"""Masked LM head for Bert
Arguments:
mpu_vocab_size: model parallel size of vocabulary.
hidden_size: hidden size
init_method: init method for weight initialization
layernorm_epsilon: tolerance for layer norm divisions
        parallel_output: whether the output logits remain split across tensor-parallel ranks (True) or are gathered (False).
"""
def __init__(
self,
config: ModelParallelConfig,
mpu_vocab_size,
hidden_size,
init_method,
layernorm_epsilon,
parallel_output,
use_openai_gelu,
onnx_safe,
):
super(BertLMHead, self).__init__(config=config)
self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))
set_tensor_model_parallel_attributes(self.bias, True, 0, 1)
self.parallel_output = parallel_output
self.sequence_parallel = config.sequence_parallel
self.dense = get_linear_layer(hidden_size, hidden_size, init_method)
self.layernorm = get_layer_norm(hidden_size, eps=layernorm_epsilon)
self.gelu = torch.nn.functional.gelu
if use_openai_gelu:
self.gelu = openai_gelu
elif onnx_safe:
self.gelu = erf_gelu
def forward(self, hidden_states, word_embeddings_weight):
hidden_states = self.dense(hidden_states)
hidden_states = self.gelu(hidden_states)
hidden_states = self.layernorm(hidden_states)
async_tensor_model_parallel_allreduce = self.config.async_tensor_model_parallel_allreduce
output = parallel_lm_logits(
hidden_states,
word_embeddings_weight,
self.parallel_output,
sequence_parallel=self.sequence_parallel,
bias=self.bias,
async_tensor_model_parallel_allreduce=async_tensor_model_parallel_allreduce,
)
return output
def post_language_model_processing(
lm_output, pooled_output, lm_head, binary_head, lm_labels, logit_weights, fp16_lm_cross_entropy,
):
# lm_logits: [s, b, vocab_size]
lm_logits = lm_head(lm_output, logit_weights)
binary_logits = None
if binary_head is not None:
# binary_logits: [s, b, 2] or [s, b, vocab_size] if binary_head is Identity
binary_logits = binary_head(pooled_output)
if lm_labels is None:
return lm_logits, binary_logits
else:
# match shape of labels to lm_logits
# lm_labels: [b, s] -> [s, b]
lm_labels = lm_labels.transpose(0, 1).contiguous()
if fp16_lm_cross_entropy:
assert lm_logits.dtype == torch.half
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits, lm_labels)
else:
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits.float(), lm_labels)
# lm_loss: [s, b]
return lm_loss, binary_logits
class BertModel(MegatronModule):
"""
Bert Language model.
Model returns [seq, batch, hidden] shape
"""
def __init__(
self,
config: ModelParallelConfig,
vocab_size,
hidden_size,
max_position_embeddings,
num_layers,
num_attention_heads,
ffn_hidden_size,
apply_query_key_layer_scaling=True,
kv_channels=None,
num_tokentypes=0,
parallel_output=True,
pre_process=True,
post_process=True,
init_method_std=0.02,
fp16_lm_cross_entropy=False,
megatron_amp_O2=False,
hidden_dropout=0.1,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_granularity=None,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=1,
activations_checkpoint_layers_per_pipeline=None,
layernorm_epsilon=1e-5,
masked_softmax_fusion=False,
bias_gelu_fusion=True,
bias_dropout_add_fusion=True,
openai_gelu=False,
onnx_safe=False,
add_binary_head=True,
megatron_legacy=False,
sequence_parallel=False,
position_embedding_type='learned_absolute',
):
super(BertModel, self).__init__(config=config)
self.fp16_lm_cross_entropy = fp16_lm_cross_entropy
self.add_binary_head = add_binary_head
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
self.sequence_parallel = sequence_parallel
init_method = init_method_normal(init_method_std)
scaled_init_method = scaled_init_method_normal(init_method_std, num_layers)
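        # Output projections use Megatron-style scaled initialization (std / sqrt(2 * num_layers)).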
self.language_model, self._language_model_key = get_language_model(
config=config,
vocab_size=vocab_size,
hidden_size=hidden_size,
hidden_dropout=hidden_dropout,
num_tokentypes=num_tokentypes,
max_position_embeddings=max_position_embeddings,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
ffn_hidden_size=ffn_hidden_size,
add_pooler=self.add_binary_head,
encoder_attn_mask_type=AttnMaskType.padding,
init_method=init_method,
scaled_init_method=scaled_init_method,
pre_process=self.pre_process,
post_process=self.post_process,
init_method_std=init_method_std,
megatron_amp_O2=megatron_amp_O2,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_granularity=activations_checkpoint_granularity,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
activations_checkpoint_layers_per_pipeline=activations_checkpoint_layers_per_pipeline,
layernorm_epsilon=layernorm_epsilon,
masked_softmax_fusion=masked_softmax_fusion,
bias_activation_fusion=bias_gelu_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
megatron_legacy=megatron_legacy,
position_embedding_type=position_embedding_type,
)
self.initialize_word_embeddings(
init_method=init_method_normal(init_method_std), vocab_size=vocab_size, hidden_size=hidden_size
)
if self.post_process:
self.lm_head = BertLMHead(
config,
self.word_embeddings_weight().size(0),
hidden_size,
init_method,
layernorm_epsilon,
parallel_output,
openai_gelu,
onnx_safe,
)
self._lm_head_key = 'lm_head'
self.binary_head = None
if self.add_binary_head:
self.binary_head = get_linear_layer(hidden_size, 2, init_method)
self._binary_head_key = 'binary_head'
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.language_model.set_input_tensor(input_tensor)
def forward(
self,
bert_model_input,
attention_mask,
token_type_ids=None,
lm_labels=None,
checkpoint_activations_all_layers=None,
):
extended_attention_mask = bert_extended_attention_mask(attention_mask)
if parallel_state.is_pipeline_first_stage():
input_ids = bert_model_input
position_ids = build_position_ids(input_ids)
else:
position_ids = None
input_ids = None
lm_output = self.language_model(
input_ids,
position_ids,
extended_attention_mask,
token_type_ids=token_type_ids,
checkpoint_activations_all_layers=checkpoint_activations_all_layers,
)
if self.post_process and self.add_binary_head:
lm_output, pooled_output = lm_output
else:
pooled_output = None
if self.post_process:
return post_language_model_processing(
lm_output,
pooled_output,
self.lm_head,
self.binary_head,
lm_labels,
self.word_embeddings_weight(),
self.fp16_lm_cross_entropy,
)
else:
return lm_output
def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._language_model_key] = self.language_model.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
if self.post_process:
state_dict_[self._lm_head_key] = self.lm_head.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
if self.post_process and self.add_binary_head:
state_dict_[self._binary_head_key] = self.binary_head.state_dict(destination, prefix, keep_vars)
# Save word_embeddings.
if self.post_process and not self.pre_process:
state_dict_[self._word_embeddings_for_head_key] = self.word_embeddings.state_dict(
destination, prefix, keep_vars
)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
self.language_model.load_state_dict(state_dict[self._language_model_key], strict=strict)
if self.post_process:
self.lm_head.load_state_dict(state_dict[self._lm_head_key], strict=strict)
if self.post_process and self.add_binary_head:
self.binary_head.load_state_dict(state_dict[self._binary_head_key], strict=strict)
# Load word_embeddings.
if self.post_process and not self.pre_process:
self.word_embeddings.load_state_dict(state_dict[self._word_embeddings_for_head_key], strict=strict)
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron/bert_model.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from nemo.collections.nlp.models.language_modeling.megatron.bert_model import BertModel
try:
from nemo.collections.nlp.models.language_modeling.megatron.gpt_model import GPTModel
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
# from nemo.collections.nlp.models.language_modeling.megatron.t5_model import T5Model
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPT-2 model."""
import torch
from nemo.collections.nlp.modules.common.megatron.language_model import get_language_model
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.utils import (
ApexGuardDefaults,
init_method_normal,
parallel_lm_logits,
scaled_init_method_normal,
)
from nemo.collections.nlp.parts import utils_funcs
try:
from apex.transformer.enums import AttnMaskType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
# fake missing classes with None attributes
AttnMaskType = ApexGuardDefaults()
HAVE_APEX = False
try:
from megatron.core import ModelParallelConfig, parallel_state, tensor_parallel
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
def post_language_model_processing(
lm_output,
labels,
logit_weights,
get_key_value,
parallel_output,
forward_method_parallel_output,
fp16_lm_cross_entropy,
return_logits=False,
sequence_parallel=False,
gradient_accumulation_fusion=False,
):
if get_key_value:
lm_output, presents = lm_output
# Output. Format is [s b h]
if forward_method_parallel_output is not None:
parallel_output = forward_method_parallel_output
async_tensor_model_parallel_allreduce = (
parallel_state.get_tensor_model_parallel_world_size() > 1 and not sequence_parallel
)
output = parallel_lm_logits(
lm_output,
logit_weights,
parallel_output,
sequence_parallel=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
async_tensor_model_parallel_allreduce=async_tensor_model_parallel_allreduce,
)
if get_key_value:
output = [output, presents]
if labels is None:
# [s b h] -> [b s h]
return output.transpose(0, 1).contiguous()
else:
# [b s] -> [s b]
labels = labels.transpose(0, 1).contiguous()
if fp16_lm_cross_entropy:
assert output.dtype == torch.half
loss = tensor_parallel.vocab_parallel_cross_entropy(output, labels)
else:
loss = tensor_parallel.vocab_parallel_cross_entropy(output.float(), labels)
        # [s b] -> [b s]
loss = loss.transpose(0, 1).contiguous()
if return_logits:
return loss, output
else:
return loss
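# Layout note: the language model emits activations in [s, b, h] (sequence, batch, hidden) order,
# while labels arrive as [b, s]. Labels are therefore transposed to [s, b] before the
# vocab-parallel cross entropy, and the per-token loss is transposed back to [b, s] on return.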
class GPTModel(MegatronModule):
"""GPT-2 Language model."""
def __init__(
self,
config: ModelParallelConfig,
vocab_size,
hidden_size,
max_position_embeddings,
num_layers,
num_attention_heads,
ffn_hidden_size,
apply_query_key_layer_scaling=False,
kv_channels=None,
num_tokentypes=0,
parallel_output=True,
pre_process=True,
post_process=True,
init_method_std=0.02,
use_scaled_init_method=True,
fp16_lm_cross_entropy=False,
megatron_amp_O2=False,
hidden_dropout=0.1,
attention_dropout=0.1,
ffn_dropout=0.0,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_granularity=None,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=1,
activations_checkpoint_layers_per_pipeline=None,
normalization='layernorm',
layernorm_epsilon=1e-5,
bias=True,
bias_activation_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
activation='gelu',
headscale=False,
transformer_block_type='pre_ln',
normalize_attention_scores=True,
position_embedding_type='learned_absolute',
rotary_percentage=1.0,
attention_type='multihead',
share_embeddings_and_output_weights=True,
persist_layer_norm=False,
openai_gelu=False,
megatron_legacy=False,
onnx_safe=False,
transformer_engine=False,
fp8=False,
fp8_e4m3=False,
fp8_hybrid=False,
fp8_margin=0,
fp8_interval=1,
fp8_amax_history_len=1,
fp8_amax_compute_algo='most_recent',
reduce_amax=True,
use_emha=False,
ub_tp_comm_overlap=False,
use_flash_attention=False,
seq_len_interpolation_factor=None,
):
super(GPTModel, self).__init__(config=config, share_token_embeddings=share_embeddings_and_output_weights)
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
self.fp16_lm_cross_entropy = fp16_lm_cross_entropy
self.sequence_parallel = self.config.sequence_parallel
self.share_embeddings_and_output_weights = share_embeddings_and_output_weights
self.dtype = utils_funcs.torch_dtype_from_precision(precision, megatron_amp_O2)
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
scaled_init_method = (
scaled_init_method_normal(init_method_std, num_layers)
if use_scaled_init_method
else init_method_normal(init_method_std)
)
self.language_model, self._language_model_key = get_language_model(
config=config,
vocab_size=vocab_size,
hidden_size=hidden_size,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
num_tokentypes=num_tokentypes,
max_position_embeddings=max_position_embeddings,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
ffn_hidden_size=ffn_hidden_size,
add_pooler=False,
encoder_attn_mask_type=AttnMaskType.causal,
init_method=init_method_normal(init_method_std),
scaled_init_method=scaled_init_method,
pre_process=self.pre_process,
post_process=self.post_process,
init_method_std=init_method_std,
megatron_amp_O2=megatron_amp_O2,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_granularity=activations_checkpoint_granularity,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
activations_checkpoint_layers_per_pipeline=activations_checkpoint_layers_per_pipeline,
normalization=normalization,
layernorm_epsilon=layernorm_epsilon,
rotary_percentage=rotary_percentage,
share_embeddings_and_output_weights=share_embeddings_and_output_weights,
bias=bias,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
activation=activation,
headscale=headscale,
transformer_block_type=transformer_block_type,
normalize_attention_scores=normalize_attention_scores,
position_embedding_type=position_embedding_type,
attention_type=attention_type,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
megatron_legacy=megatron_legacy,
transformer_engine=transformer_engine,
fp8=fp8,
fp8_e4m3=fp8_e4m3,
fp8_hybrid=fp8_hybrid,
fp8_margin=fp8_margin,
fp8_interval=fp8_interval,
fp8_amax_history_len=fp8_amax_history_len,
fp8_amax_compute_algo=fp8_amax_compute_algo,
reduce_amax=reduce_amax,
use_emha=use_emha,
ub_tp_comm_overlap=ub_tp_comm_overlap,
use_flash_attention=use_flash_attention,
seq_len_interpolation_factor=seq_len_interpolation_factor,
)
if self.share_embeddings_and_output_weights:
self.initialize_word_embeddings(
init_method=init_method_normal(init_method_std),
vocab_size=vocab_size,
hidden_size=hidden_size,
param_dtype=self.dtype,
)
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.language_model.set_input_tensor(input_tensor)
def forward(
self,
input_ids,
position_ids,
attention_mask,
loss_mask=None,
labels=None,
token_type_ids=None,
layer_past=None,
get_key_value=False,
forward_method_parallel_output=None,
encoder_input=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
checkpoint_activations_all_layers=None,
):
# input_ids: [b, s]
# position_ids: [b, s]
# attention_mask: [1, 1, s, s]
lm_output = self.language_model(
input_ids,
position_ids,
attention_mask,
layer_past=layer_past,
get_key_value=get_key_value,
encoder_input=encoder_input,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
checkpoint_activations_all_layers=checkpoint_activations_all_layers,
)
if self.post_process:
if loss_mask is not None:
loss_lm_output = lm_output.transpose(0, 1)[loss_mask == 1].unsqueeze(1)
loss_labels = labels[loss_mask == 1].unsqueeze(0)
else:
loss_lm_output = lm_output
loss_labels = labels
post_process_result = post_language_model_processing(
loss_lm_output,
loss_labels,
self.language_model.output_layer.weight
if not self.share_embeddings_and_output_weights
else self.word_embeddings_weight(),
get_key_value,
self.parallel_output,
forward_method_parallel_output,
self.fp16_lm_cross_entropy,
return_logits=encoder_input is not None,
sequence_parallel=self.sequence_parallel,
gradient_accumulation_fusion=self.config.gradient_accumulation_fusion,
)
if loss_mask is not None:
if isinstance(post_process_result, tuple):
loss, logits = post_process_result
else:
loss, logits = post_process_result, None
res = torch.zeros_like(labels).type_as(loss)
res[loss_mask == 1] = loss
return res if logits is None else (res, logits)
else:
return post_process_result
else:
return lm_output
def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
state_dict_ = {}
state_dict_[self._language_model_key] = self.language_model.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
# Save word_embeddings.
if self.post_process and not self.pre_process:
state_dict_[self._word_embeddings_for_head_key] = self.word_embeddings.state_dict(
destination, prefix, keep_vars
)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
# Load word_embeddings.
if self.post_process and not self.pre_process:
self.word_embeddings.load_state_dict(state_dict[self._word_embeddings_for_head_key], strict=strict)
if self._language_model_key in state_dict:
state_dict = state_dict[self._language_model_key]
self.language_model.load_state_dict(state_dict, strict=strict)
| NeMo-main | nemo/collections/nlp/models/language_modeling/megatron/gpt_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['list2str', 'tensor2list', 'plot_confusion_matrix', 'get_classification_report']
import os
import time
from typing import Callable, Dict, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from matplotlib import pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
from torch import Tensor
from nemo.collections.nlp.modules.common.megatron.utils import erf_gelu
from nemo.collections.nlp.modules.common.megatron.utils import openai_gelu as openai_gelu_func
from nemo.collections.nlp.modules.common.megatron.utils import squared_relu
from nemo.utils import logging
def torch_dtype_from_precision(precision: Union[int, str], megatron_amp_O2: Optional[bool] = None) -> torch.dtype:
""" Mapping from PTL precision types to corresponding PyTorch parameter datatype."""
if megatron_amp_O2 is not None and megatron_amp_O2 is False:
return torch.float32
if precision in ['bf16', 'bf16-mixed']:
return torch.bfloat16
elif precision in [16, '16', '16-mixed']:
return torch.float16
elif precision in [32, '32', '32-true']:
return torch.float32
else:
raise ValueError(f"Could not parse the precision of `{precision}` to a valid torch.dtype")
def list2str(l: List[int]) -> str:
""" Converts list to a string"""
return ' '.join([str(x) for x in l])
def tensor2list(tensor: Tensor) -> List[Union[int, float]]:
""" Converts tensor to a list """
return tensor.detach().cpu().tolist()
def plot_confusion_matrix(
labels: List[int],
preds: List[int],
graph_fold: str,
label_ids: Dict[str, int] = None,
normalize: bool = False,
prefix: str = '',
):
'''
Plot confusion matrix.
Args:
labels: ground truth labels
preds: model predictions
graph_fold: path to a folder to store confusion matrix plot
label_ids: str label to id map, for example: {'O': 0, 'LOC': 1}
normalize: whether to normalize confusion matrix
prefix: prefix for the plot name
'''
if label_ids is None:
_plot_confusion_matrix(labels, preds, graph_fold)
else:
# remove labels from label_ids that don't appear in the dev set
used_labels = set(labels) | set(preds)
label_ids = {k: label_ids[k] for k, v in label_ids.items() if v in used_labels}
ids_to_labels = {label_ids[k]: k for k in label_ids}
classes = [ids_to_labels[id] for id in sorted(label_ids.values())]
title = 'Confusion_matrix'
cm = confusion_matrix(labels, preds)
if normalize:
sums = cm.sum(axis=1)[:, np.newaxis]
sums = np.where(sums == 0, 1, sums)
cm = cm.astype('float') / sums
title = 'Normalized_' + title
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
ax.set_xticks(np.arange(-1, len(classes)))
ax.set_yticks(np.arange(-1, len(classes)))
ax.set_xticklabels([''] + classes, rotation=90)
ax.set_yticklabels([''] + classes)
ax.set_ylabel('True')
ax.set_xlabel('Predicted')
os.makedirs(graph_fold, exist_ok=True)
fig.colorbar(cax)
title = (prefix + title).strip()
fig_name = os.path.join(graph_fold, title + '_' + time.strftime('%Y%m%d-%H%M%S'))
plt.savefig(fig_name)
logging.info(f'Confusion matrix saved to {fig_name}')
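# Usage sketch (hypothetical label map and output folder, shown for illustration only):
#   plot_confusion_matrix(
#       labels=[0, 1, 1, 0], preds=[0, 1, 0, 0], graph_fold='./cm_plots',
#       label_ids={'negative': 0, 'positive': 1}, normalize=True, prefix='dev_',
#   )
# This writes a time-stamped 'dev_Normalized_Confusion_matrix_*' image under ./cm_plots.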
def _plot_confusion_matrix(labels: List[int], preds: List[int], graph_fold: str):
"""
Plot confusion matrix
Args:
labels: ground truth labels
preds: model predictions
graph_fold: path to a folder to store confusion matrix plot
"""
cm = confusion_matrix(labels, preds)
logging.info(f'Confusion matrix:\n{cm}')
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion matrix of the classifier')
fig.colorbar(cax)
plt.xlabel('Predicted')
plt.ylabel('True')
os.makedirs(graph_fold, exist_ok=True)
plt.savefig(os.path.join(graph_fold, time.strftime('%Y%m%d-%H%M%S')))
def get_classification_report(labels, preds, label_ids, output_dict=False):
"""
Returns classification report
"""
# remove labels from label_ids that don't appear in predictions or ground truths
used_labels = set(labels) | set(preds)
labels_names = [
k + ' (label id: ' + str(v) + ')'
for k, v in sorted(label_ids.items(), key=lambda item: item[1])
if v in used_labels
]
return classification_report(labels, preds, target_names=labels_names, digits=4, output_dict=output_dict)
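# Usage sketch (hypothetical labels, for illustration): returns the usual sklearn report,
# with class names rendered as "<label> (label id: <id>)".
#   report = get_classification_report(labels=[0, 1, 1], preds=[0, 1, 0], label_ids={'neg': 0, 'pos': 1})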
def is_last_rank():
return torch.distributed.get_rank() == (torch.distributed.get_world_size() - 1)
def get_last_rank():
return torch.distributed.get_world_size() - 1
def activation_to_func(activation: str, openai_gelu: bool = False, onnx_safe: bool = False) -> Callable:
""" Converts an activation function represented as a string to a function.
Args:
        activation (str): string representation of an activation function, typically taken from the model config.
openai_gelu (bool): whether to use the OpenAI GELU implementation. Used with HF compatibility.
onnx_safe (bool): whether to use the ONNX-compatible implementation of GELU.
Returns:
Callable: the activation function.
"""
supported_activations = [
'gelu',
'geglu',
'reglu',
'swiglu',
'squared-relu',
'fast-geglu',
'fast-swiglu',
'fast-reglu',
]
if activation not in supported_activations:
raise ValueError(f"Unsupported activation {activation}. Supported activations: {supported_activations} ")
# Give openai_gelu precedence over other activations if set, for HF compatibility.
# Normally this is off and shouldn't affect regular model training.
if openai_gelu:
activation_func = openai_gelu_func
elif activation in ["gelu", "geglu", "fast-geglu"]:
activation_func = F.gelu
elif onnx_safe:
activation_func = erf_gelu
elif activation in ["reglu", "fast-reglu"]:
activation_func = F.relu
elif activation in ["swiglu", "fast-swiglu"]:
# SiLU or sigmoid linear unit is the same as swish with beta = 1 (which is what https://arxiv.org/pdf/2002.05202.pdf uses.)
activation_func = F.silu
elif activation == 'squared-relu':
activation_func = squared_relu
return activation_func
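# Illustrative resolution of config strings to callables (based on the branches above):
#   activation_to_func('gelu')          -> torch.nn.functional.gelu
#   activation_to_func('fast-swiglu')   -> torch.nn.functional.silu
#   activation_to_func('squared-relu')  -> squared_relu (from megatron utils)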
| NeMo-main | nemo/collections/nlp/parts/utils_funcs.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelSummary
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from nemo.collections.nlp.parts.nlp_overrides import (
CustomProgressBar,
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
PipelineMixedPrecisionPlugin,
)
class MegatronTrainerBuilder:
"""
Builder type to hide complex configuration of PTL Trainers for Megatron LLM models.
Can be extended to change behavior for a specific model.
"""
def __init__(self, cfg: DictConfig) -> None:
self.cfg = cfg
def _training_strategy(self) -> NLPDDPStrategy:
"""
Returns a ddp strategy passed to Trainer.strategy.
"""
return NLPDDPStrategy(
no_ddp_communication_hook=True,
gradient_as_bucket_view=self.cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
def _grad_scaler(self) -> GradScaler:
"""
Returns a scaler for precision plugins.
"""
return GradScaler(
init_scale=self.cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=self.cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=self.cfg.model.get('hysteresis', 2),
)
def _plugins(self) -> list:
"""
Returns:
plugins: list of plugins passed to Trainer.plugins including precision plugins.
"""
megatron_amp_o2 = self.cfg.model.get('megatron_amp_O2', False)
with_distributed_adam = self.cfg.model.optim.get('name') == 'distributed_fused_adam'
plugins = []
if self.cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if self.cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = self._grad_scaler()
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2 and not with_distributed_adam:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if self.cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
return plugins
def create_trainer(self) -> Trainer:
strategy = self._training_strategy()
plugins = self._plugins()
return Trainer(plugins=plugins, strategy=strategy, **self.cfg.trainer, callbacks=[CustomProgressBar()])
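# Minimal usage sketch (assumes a Hydra/OmegaConf `cfg` with `trainer`, `model` and optional
# `cluster_type` sections, as used by the NeMo megatron training scripts):
#   trainer = MegatronTrainerBuilder(cfg).create_trainer()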
class MegatronBertTrainerBuilder(MegatronTrainerBuilder):
"""Builder for BERT model Trainer with overrides."""
def _grad_scaler(self) -> GradScaler:
return GradScaler(
init_scale=self.cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=self.cfg.model.get('native_amp_growth_interval', 1000),
)
class MegatronT5TrainerBuilder(MegatronTrainerBuilder):
"""Builder for T5 model Trainer with overrides."""
def create_trainer(self) -> Trainer:
strategy = self._training_strategy()
plugins = self._plugins()
return Trainer(
plugins=plugins,
strategy=strategy,
**self.cfg.trainer,
callbacks=[ModelSummary(max_depth=3), CustomProgressBar()]
)
| NeMo-main | nemo/collections/nlp/parts/megatron_trainer_builder.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import shutil
import tempfile
from collections import OrderedDict, defaultdict
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Callable, Dict, Generator, Iterator, List, Literal, Mapping, Optional, Sized, Union
import pytorch_lightning as pl
import torch
from lightning_fabric.utilities.cloud_io import get_filesystem
from omegaconf import OmegaConf
from pytorch_lightning.callbacks.progress import TQDMProgressBar
from pytorch_lightning.callbacks.progress.tqdm_progress import _update_n
from pytorch_lightning.loops.fetchers import _DataFetcher
from pytorch_lightning.overrides.base import _LightningModuleWrapperBase
from pytorch_lightning.plugins import ClusterEnvironment
from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
from pytorch_lightning.plugins.precision import MixedPrecisionPlugin
from pytorch_lightning.strategies.ddp import DDPStrategy
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.trainer.trainer import Trainer
from torch.distributed.algorithms.ddp_comm_hooks.debugging_hooks import noop_hook
from torch.nn.parallel import DistributedDataParallel
from nemo.collections.nlp.modules.common.megatron.module import Float16Module
from nemo.core.connectors.save_restore_connector import SaveRestoreConnector
from nemo.core.optim import MainParamsOptimizerWrapper
from nemo.utils import AppState, logging
from nemo.utils.get_rank import is_global_rank_zero
from nemo.utils.model_utils import ckpt_to_dir, inject_model_parallel_rank, uninject_model_parallel_rank
try:
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import dist_checkpointing, parallel_state
from megatron.core.dist_checkpointing.dict_utils import dict_list_map_outplace
from megatron.core.dist_checkpointing.optimizer import (
get_param_id_to_sharded_param_map,
make_sharded_optimizer_tensor,
optim_state_to_sharding_state,
)
from megatron.core.transformer.module import Float16Module as MCoreFloat16Module
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
NEMO_MEGATRON_MODEL_PARALLEL_APPSTATE_OVERRIDE = "NEMO_MEGATRON_MODEL_PARALLEL_APPSTATE_OVERRIDE"
class NLPDDPStrategy(DDPStrategy):
""" DDP plugin for Pytorch Lightning. Needed to customize DDP for model parallel models.
Args:
no_ddp_communication_hook: Disable DDP communication hook when using AMP-O2
with FP32 gradient accumulation.
"""
def __init__(
self,
parallel_devices: Optional[List[torch.device]] = None,
cluster_environment: ClusterEnvironment = None,
checkpoint_io: Optional[CheckpointIO] = None,
no_ddp_communication_hook: bool = False,
**kwargs: Union[Any, Dict[str, Any]],
) -> None:
if not HAVE_APEX:
raise ImportError(
"Apex was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
if not HAVE_MEGATRON_CORE:
raise ImportError(
"megatron-core was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
super().__init__(parallel_devices, cluster_environment, checkpoint_io, **kwargs)
self.no_ddp_communication_hook = no_ddp_communication_hook
def setup(self, trainer: "pl.Trainer") -> None:
"""
Override setup() of DDPStrategy to avoid _sync_module_states(self.model) during eval as it can cause PP > 1 to hang
due to assumption in DDPStrategy class that the same model is replicated across GPUs
"""
trainer_fn = trainer.state.fn
if trainer_fn == TrainerFn.FITTING:
super().setup(trainer)
else:
assert self.accelerator is not None
self.accelerator.setup(trainer)
# move the model to the correct device
self.model_to_device()
self.setup_precision_plugin()
assert self.model is not None
def setup_distributed(self, global_rank: int = None, world_size: int = None) -> None:
# call PTL init ddp
super().setup_distributed()
# init model parallel if needed
if not parallel_state.model_parallel_is_initialized():
app_state = AppState()
if app_state.model_parallel_size is not None:
self.init_model_parallel(app_state.global_rank, app_state.world_size)
def configure_ddp(self):
""" Override LightningModule ddp if using model parallel.
Sets find_unused_parameters to False to use activation-checkpoint-recomputation.
"""
if (hasattr(self.model, 'megatron_amp_o2') and self.model.megatron_amp_o2) or (
hasattr(self.model, 'with_distributed_adam') and self.model.with_distributed_adam
):
# do not use DDP if using megatron amp O2 or distributed optimizer
self._model = _LightningModuleWrapperBase(self.model)
else:
app_state = AppState()
if app_state.model_parallel_size is not None:
logging.info(f"Configuring DDP for model parallelism.")
# With model parallelism, multiple GPUs form a large "logical GPU"
# this means that data parallel groups span multiple GPUs
# and are non-trivial
# TODO: for megatron-lm self.model is a list
# Removing self.pre_configure_ddp() as DDP's 'find_unused_parameters' now defaults
# to False in PTL 2.0 and hence pre_configure_ddp() is removed in ddp.py
# self.pre_configure_ddp()
# device_ids = self.determine_ddp_device_ids()
self._model = DistributedDataParallel(
_LightningModuleWrapperBase(self.model),
process_group=parallel_state.get_data_parallel_group(),
**self._ddp_kwargs,
)
if self.no_ddp_communication_hook:
# When using custom gradient accumulation and allreduce, disable
# DDP communication hook that works on the gradient bucket.
# Instead, use the custom gradient function and communication hook,
# which is defined in the master optimizer wrapper.
self._model.require_backward_grad_sync = False
self._model.register_comm_hook(None, noop_hook)
else:
super().configure_ddp()
def init_model_parallel(self, global_rank: int, world_size: int) -> None:
""" Initializes Megatron-LM model parallel if using model parallelism.
Args:
global_rank (int): the global process index.
            world_size (int): the total number of GPUs, num_nodes * num_devices.
"""
app_state = AppState()
# we initialize megatron-lm model parallel and data parallel groups
# after initializing DDP with PTL.
if app_state.model_parallel_size is not None:
# destroy groups in case they have already been created
# this happens with multiple calls to trainer.test for example
parallel_state.destroy_model_parallel()
if torch.distributed.is_initialized():
parallel_state.initialize_model_parallel(
tensor_model_parallel_size=app_state.tensor_model_parallel_size,
pipeline_model_parallel_size=app_state.pipeline_model_parallel_size,
virtual_pipeline_model_parallel_size=app_state.virtual_pipeline_model_parallel_size,
pipeline_model_parallel_split_rank=app_state.pipeline_model_parallel_split_rank,
use_fp8=app_state.use_fp8,
)
# assert that fake tp and pp rank match after model parallel init
assert app_state.tensor_model_parallel_rank == parallel_state.get_tensor_model_parallel_rank()
assert app_state.pipeline_model_parallel_rank == parallel_state.get_pipeline_model_parallel_rank()
app_state.tensor_model_parallel_group = parallel_state.get_tensor_model_parallel_group()
app_state.data_parallel_group = parallel_state.get_data_parallel_group()
app_state.data_parallel_rank = parallel_state.get_data_parallel_rank()
app_state.data_parallel_size = parallel_state.get_data_parallel_world_size()
app_state.pipeline_model_parallel_group = parallel_state.get_pipeline_model_parallel_group()
# create MPI process group for UCX-based communication APIs
if app_state.init_mpi_proc_group:
torch.distributed.new_group(backend='mpi')
def optimizer_sharded_state_dict(self):
"""
Sharded state dictionary for an MainParamsOptimizerWrapper.
Used to save and load the optimizer state when training with distributed_checkpoint.
Returns:
dict: The sharded state dictionary for the optimizer
Raises:
ValueError: If a parameter ID does not match any model sharded parameter.
"""
optimizer = self.lightning_module.optimizers(use_pl_optimizer=False) # MainParamsOptimizerWrapper
model_sharded_state_dict = self.lightning_module.sharded_state_dict()
# remove _extra_state
model_sharded_state_dict = {
key: value for key, value in model_sharded_state_dict.items() if not key.endswith('_extra_state')
}
if not isinstance(optimizer, MainParamsOptimizerWrapper):
return optimizer.sharded_state_dict(model_sharded_state_dict)
optimizer_state_dict = optimizer.state_dict()
id_to_sharded_param_map = get_param_id_to_sharded_param_map(
model_sharded_state_dict=model_sharded_state_dict,
optim_params_iter=itertools.chain.from_iterable(g for g in optimizer.float16_groups),
)
# Convert fp32_from_fp16_params
assert len(optimizer_state_dict['fp32_from_fp16_params']) == len(
optimizer_state_dict['optimizer']['param_groups']
)
def get_safe(param_id):
try:
return id_to_sharded_param_map[param_id]
except KeyError as e:
raise ValueError(f'Param id {param_id} does not match any model sharded param') from e
optimizer_state_dict['fp32_from_fp16_params'] = [
[
make_sharded_optimizer_tensor(get_safe(param_id), fp32_param, prefix=f'optimizer.state.fp32_param')
for param_id, fp32_param in zip(state_group['params'], fp32_group)
]
for fp32_group, state_group in zip(
optimizer_state_dict['fp32_from_fp16_params'], optimizer_state_dict['optimizer']['param_groups']
)
]
# Convert state
optim_state_to_sharding_state(optimizer_state_dict['optimizer'], id_to_sharded_param_map)
return optimizer_state_dict
def save_checkpoint(
self, checkpoint: Dict[str, Any], filepath: Union[str, Path], storage_options: Optional[Any] = None
) -> None:
        """ PTL method which we override to accommodate distributed checkpoints and
        the legacy model parallel checkpoints.

        When using megatron core, the distributed checkpointing library expects save functions to be
        called on every rank and internally does the rank checking.
        """
        app_state = AppState()
# check if using distributed checkpointing
if (
hasattr(self.lightning_module, 'sharded_state_dict')
and self.lightning_module.sharded_state_dict() is not None
):
# converts the optimizer states to their sharded equivalents
checkpoint['optimizer_states'] = [self.optimizer_sharded_state_dict()]
# dist_checkpointing expects a directory so we will name the directory
# using the path with the file extension removed
checkpoint_dir = ckpt_to_dir(filepath)
fs = get_filesystem(checkpoint_dir)
if is_global_rank_zero():
fs.makedirs(checkpoint_dir, exist_ok=True)
# remove device state_dict
checkpoint['state_dict'] = OrderedDict([])
dist_checkpointing.save(sharded_state_dict=checkpoint, checkpoint_dir=checkpoint_dir)
else:
            # PTL override to accommodate model parallel checkpoints
filepath = inject_model_parallel_rank(filepath)
if self.is_global_zero or app_state.data_parallel_rank == 0:
self.checkpoint_io.save_checkpoint(checkpoint, filepath, storage_options=storage_options)
def load_model_state_dict(self, checkpoint: Mapping[str, Any]) -> None:
# if using distributed checkpointing, the state dict logic is at the model level
if (
hasattr(self.lightning_module, 'sharded_state_dict')
and self.lightning_module.sharded_state_dict() is not None
):
return
# legacy state dict logic, does not use megatron core
else:
# Release strict state dict matching when using Megatron AMP-O2 to skip matching
# half-precision module wrapper module.
# TODO: Refactor this to be more generic.
model_key = None
model_attr = None
if hasattr(self.lightning_module, 'model'):
model_key = 'model'
model_attr = self.lightning_module.model
elif hasattr(self.lightning_module, 'enc_dec_model'):
model_key = 'enc_dec_model'
model_attr = self.lightning_module.enc_dec_model
if model_key is not None:
if isinstance(model_attr, Float16Module) or isinstance(model_attr, MCoreFloat16Module):
new_state_dict = {}
for key in checkpoint['state_dict'].keys():
new_key = key.replace(f'{model_key}.', f'{model_key}.module.', 1)
new_state_dict[new_key] = checkpoint['state_dict'][key]
checkpoint['state_dict'] = new_state_dict
self.lightning_module.load_state_dict(checkpoint["state_dict"])
def _fix_tensors_device(self, ckpt: Dict) -> Dict:
""" Ensure checkpoint tensors are on the correct device."""
assert torch.cuda.is_initialized(), (torch.cuda.is_available(), torch.cuda.is_initialized())
cur_dev = torch.device("cuda", index=torch.cuda.current_device())
def _fix_device(t):
if isinstance(t, torch.Tensor) and t.is_cuda and t.device != cur_dev:
t = t.to(cur_dev)
return t
return dict_list_map_outplace(_fix_device, ckpt)
def load_checkpoint(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:
""" PTL method which we override to integrate distributed checkpoints for model parallel models.
In order to load distributed checkpoints we need to provide the sharded_state_dict to
the distributed load function. We get the sharded_state_dict from self.lightning_module
which makes it convenient to have the loading logic happen at the strategy level.
"""
fs = get_filesystem(checkpoint_path)
# Check if using distributed checkpointing
if (
hasattr(self.lightning_module, 'sharded_state_dict')
and self.lightning_module.sharded_state_dict() is not None
):
# Distributed checkpoints must be directories.
if not fs.isdir(checkpoint_path):
raise ValueError(f'Distributed checkpoints should be a directory. Found: {checkpoint_path}.')
sharded_state_dict = self.lightning_module.sharded_state_dict()
checkpoint = {}
# after dist_checkpointing.load, sharded tensors will be replaced with tensors
checkpoint['state_dict'] = sharded_state_dict
checkpoint['optimizer_states'] = [self.optimizer_sharded_state_dict()]
checkpoint = dist_checkpointing.load(sharded_state_dict=checkpoint, checkpoint_dir=checkpoint_path)
checkpoint = self._fix_tensors_device(checkpoint)
return checkpoint
# Legacy model parallel checkpointing logic, does not use megatron core
else:
            # Try to read the checkpoint at `checkpoint_path`. If it does not exist, raise an error.
checkpoint_path = inject_model_parallel_rank(checkpoint_path)
if not fs.exists(checkpoint_path):
raise FileNotFoundError(f"Checkpoint at {checkpoint_path} not found. Aborting training.")
torch.cuda.empty_cache()
return self.checkpoint_io.load_checkpoint(checkpoint_path)
def remove_checkpoint(self, filepath: Union[str, Path]) -> None:
# check if filepath is a distributed checkpoint
if (
hasattr(self.lightning_module, 'sharded_state_dict')
and self.lightning_module.sharded_state_dict() is not None
):
if self.is_global_zero:
shutil.rmtree(ckpt_to_dir(filepath))
# legacy checkpoint logic, does not use megatron core
else:
app_state = AppState()
            # PTL override to accommodate model parallel checkpoints
filepath = inject_model_parallel_rank(filepath)
if self.is_global_zero or app_state.data_parallel_rank == 0:
logging.info(f'Removing checkpoint: {filepath}')
self.checkpoint_io.remove_checkpoint(filepath)
@property
def distributed_sampler_kwargs(self):
app_state = AppState()
if app_state.model_parallel_size is not None:
# When using model parallel, data parallel groups are non-trivial and they
# correspond to the logical GPUs. This means that the GPUs that form a
# single logical GPU all need to get the same batch of data.
distributed_sampler_kwargs = dict(
num_replicas=app_state.data_parallel_size, rank=app_state.data_parallel_rank
)
return distributed_sampler_kwargs
else:
return super(NLPDDPStrategy, self).distributed_sampler_kwargs
@property
def restore_checkpoint_after_setup(self) -> bool:
""" This needs to be True for distributed checkpointing because
we require the model to have configured the optimizer before
deserializing the checkpoint.
"""
return True
class NLPSaveRestoreConnector(SaveRestoreConnector):
def __init__(self) -> None:
if not HAVE_APEX:
logging.warning(
"Apex was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/apex\n"
"Megatron-based models require Apex to function correctly."
)
# raise ImportError(
# "Apex was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
# )
if not HAVE_MEGATRON_CORE:
logging.warning(
"megatron-core was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
super().__init__()
def save_to(self, model, save_path: str):
app_state = AppState()
# Check if using distributed checkpointing
dist_ckpt = hasattr(model, 'sharded_state_dict') and model.sharded_state_dict() is not None
dist_ckpt_dir = None
if (app_state.model_parallel_size is not None and app_state.model_parallel_size > 1) or dist_ckpt:
dir_name = os.path.dirname(save_path)
# dist ckpt calls save on every rank
if dist_ckpt:
                # model weights are stored in a directory
dist_ckpt_dir = ckpt_to_dir(os.path.join(dir_name, self.model_weights_ckpt))
fs = get_filesystem(dist_ckpt_dir)
if is_global_rank_zero():
fs.makedirs(dist_ckpt_dir, exist_ok=True)
sharded_state_dict = model.sharded_state_dict()
# dist checkpoint needs torch.distributed to save the checkpoint
if parallel_state.is_unitialized():
def dummy():
return
if model.trainer.strategy.launcher is not None:
model.trainer.strategy.launcher.launch(dummy, trainer=model.trainer)
model.trainer.strategy.setup_environment()
dist_checkpointing.save(sharded_state_dict=sharded_state_dict, checkpoint_dir=dist_ckpt_dir)
else:
# first we save the weights for each model parallel rank
if app_state.data_parallel_rank == 0:
if app_state.pipeline_model_parallel_size == 1:
mp_model_weights = os.path.join(
dir_name, f'mp_rank_{app_state.tensor_model_parallel_rank:02d}_' + self.model_weights_ckpt
)
else:
mp_model_weights = os.path.join(
dir_name,
f'tp_rank_{app_state.tensor_model_parallel_rank:02d}_pp_rank_{app_state.pipeline_model_parallel_rank:03d}_'
+ self.model_weights_ckpt,
)
self._save_state_dict_to_disk(model.state_dict(), mp_model_weights)
if torch.distributed.is_initialized():
torch.distributed.barrier()
# create nemo file from folder with all mp_ranks checkpoints
if (
app_state.pipeline_model_parallel_rank == 0
and app_state.tensor_model_parallel_rank == 0
and app_state.data_parallel_rank == 0
):
with tempfile.TemporaryDirectory() as tmpdir:
if dist_ckpt:
shutil.move(str(dist_ckpt_dir), tmpdir)
elif app_state.pipeline_model_parallel_size == 1:
# move weights to the tmpdir
for tp_rank in range(app_state.tensor_model_parallel_size):
os.makedirs(os.path.join(tmpdir, f'mp_rank_{tp_rank:02d}'))
mp_model_weights = os.path.join(
dir_name, f'mp_rank_{tp_rank:02d}_' + self.model_weights_ckpt
)
shutil.move(
mp_model_weights,
os.path.join(tmpdir, f'mp_rank_{tp_rank:02d}', self.model_weights_ckpt),
)
else:
# move weights to the tmpdir
for tp_rank, pp_rank in itertools.product(
range(app_state.tensor_model_parallel_size), range(app_state.pipeline_model_parallel_size),
):
os.makedirs(os.path.join(tmpdir, f'tp_rank_{tp_rank:02d}_pp_rank_{pp_rank:03d}'))
mp_model_weights = os.path.join(
dir_name, f'tp_rank_{tp_rank:02d}_pp_rank_{pp_rank:03d}_' + self.model_weights_ckpt
)
shutil.move(
mp_model_weights,
os.path.join(
tmpdir, f'tp_rank_{tp_rank:02d}_pp_rank_{pp_rank:03d}', self.model_weights_ckpt
),
)
# create config and artifacts in tmpdir
config_yaml = os.path.join(tmpdir, self.model_config_yaml)
model.to_config_file(path2yaml_file=config_yaml)
if hasattr(model, 'artifacts') and model.artifacts is not None:
self._handle_artifacts(model, nemo_file_folder=tmpdir)
self._update_artifact_paths(model, path2yaml_file=config_yaml)
# create tar file
self._make_nemo_file_from_folder(save_path, tmpdir)
else:
return super().save_to(model, save_path)
def modify_state_dict(self, conf, state_dict):
if conf.get('megatron_legacy', False):
new_state_dict = {}
for key in state_dict.keys():
new_key = key.replace('bert_model.language_model', 'bert_model.model.language_model')
new_key = new_key.replace('transformer', 'encoder')
new_key = new_key.replace('.attention.', '.self_attention.')
new_state_dict[new_key] = state_dict[key]
state_dict = new_state_dict
if conf.get('megatron_amp_O2', False):
new_state_dict = {}
for key in state_dict.keys():
new_key = key.replace('model.', 'model.module.', 1)
new_state_dict[new_key] = state_dict[key]
state_dict = new_state_dict
return state_dict
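    # Example of the key rewriting above (illustrative key names):
    #   megatron_legacy:  'bert_model.language_model.transformer...'  -> 'bert_model.model.language_model.encoder...'
    #   megatron_amp_O2:  'model.decoder.layers...'                   -> 'model.module.decoder.layers...'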
def _load_state_dict_from_disk(self, model_weights, map_location=None):
# if model_weights with the extension removed is a directory, we assume it is a distributed checkpoint
# we need to defer loading the state dict so we return None
uninject_model_weights = uninject_model_parallel_rank(model_weights)
# legacy model_weights will have mp rank injected
if os.path.isfile(model_weights):
return super()._load_state_dict_from_disk(model_weights, map_location)
# dist checkpoint will be a dir
elif os.path.isdir(os.path.splitext(uninject_model_weights)[0]):
return None
else:
raise ValueError(f'Expected {model_weights} to be a file or directory.')
def restore_from(
self,
calling_cls,
restore_path: str,
override_config_path: Optional[Union[OmegaConf, str]] = None,
map_location: Optional[torch.device] = None,
strict: bool = True,
return_config: bool = False,
trainer: Trainer = None,
):
"""
        Restores model instance (weights and configuration) from a .nemo file.
Args:
restore_path: path to .nemo file from which model should be instantiated
override_config_path: path to a yaml config that will override the internal
config file or an OmegaConf / DictConfig object representing the model config.
map_location: Optional torch.device() to map the instantiated model to a device.
By default (None), it will select a GPU if available, falling back to CPU otherwise.
strict: Passed to load_state_dict. By default True
return_config: If set to true, will return just the underlying config of the restored
model as an OmegaConf DictConfig object without instantiating the model.
Example:
```
model = nemo.collections.nlp.models.TextClassification.restore_from('asr.nemo')
assert isinstance(model, nemo.collections.nlp.models.TextClassification)
```
Returns:
An instance of type cls or its underlying config (if return_config is set).
"""
# Get path where the command is executed - the artifacts will be "retrieved" there
# (original .nemo behavior)
loaded_params = super().load_config_and_state_dict(
calling_cls, restore_path, override_config_path, map_location, strict, return_config, trainer,
)
if not isinstance(loaded_params, tuple) or return_config is True:
return loaded_params
conf, instance, state_dict = loaded_params
# if we're using dist checkpointing then state_dict will be None
if state_dict is None:
# dist checkpointing needs torch.distributed to load the checkpoint
if parallel_state.is_unitialized():
def dummy():
return
if trainer.strategy.launcher is not None:
trainer.strategy.launcher.launch(dummy, trainer=trainer)
trainer.strategy.setup_environment()
with tempfile.TemporaryDirectory() as tmpdir:
# Check if self.model_extracted_dir is set, and is a valid path
if self.model_extracted_dir is not None and os.path.isdir(self.model_extracted_dir):
# Log that NeMo will use the provided `model_extracted_dir`
logging.info(
f"Restoration will occur within pre-extracted directory : " f"`{self.model_extracted_dir}`."
)
# Override `tmpdir` above with the pre-extracted `model_extracted_dir`
tmpdir = self.model_extracted_dir
else:
# Extract the nemo file into the temporary directory
self._unpack_nemo_file(
path2file=restore_path, out_folder=tmpdir, extract_config_only=return_config is True
)
checkpoint = {}
sharded_state_dict = instance.sharded_state_dict()
checkpoint['state_dict'] = sharded_state_dict
# remove model weights extension
tmp_model_weights_ckpt = os.path.join(tmpdir, self.model_weights_ckpt)
tmp_model_weights_dir = os.path.splitext(tmp_model_weights_ckpt)[0]
assert os.path.isdir(tmp_model_weights_dir), f'Expected {tmp_model_weights_dir} to be a directory.'
checkpoint = dist_checkpointing.load(
sharded_state_dict=checkpoint, checkpoint_dir=tmp_model_weights_dir
)
instance.on_load_checkpoint(checkpoint)
if hasattr(instance, 'setup_transformer_engine_tp_groups'):
instance.setup_transformer_engine_tp_groups()
else:
state_dict = self.modify_state_dict(conf, state_dict)
super().load_instance_with_state_dict(instance, state_dict, strict)
logging.info(f'Model {instance.__class__.__name__} was successfully restored from {restore_path}.')
return instance
class PEFTSaveRestoreConnector(NLPSaveRestoreConnector):
"""
PEFT models require the ability to load/save a small subset of the full model (once PEFT params have been infused into the base model.)
The PEFTSaveRestoreConnector is used to allow loading and saving only the PEFT params while not saving the entire model.
Args:
peft_model_nemo_path: Used to provide the .nemo file corresponding to a PEFT model (which will only contain a small set of params)
peft_model_ckpt_path: Used to provide the path to .ckpt files of a PEFT model. This is required when no .nemo is available (yet) such as during resumed training.
peft_model_ckpt_name: The filename of the ckpt file inside the peft_model_ckpt_path folder
If both are provided the peft_model_ckpt_path takes precedence.
If neither are provided, PEFT params are initialized at random (not loaded from any external source).
"""
def __init__(
self,
peft_model_nemo_path: Optional[str] = None,
peft_model_ckpt_path: Optional[str] = None,
peft_model_ckpt_name: Optional[str] = "model_weights.ckpt",
) -> None:
super().__init__()
self.peft_model_ckpt_name = peft_model_ckpt_name
if peft_model_ckpt_path:
            # First we will try to load an adapter ckpt path;
            # this is given priority over loading from a nemo path to make resumption of training possible.
ckpt_name = os.path.basename(peft_model_ckpt_path)
if not ckpt_name.strip() == '':
# update the weights file name inside the ckpt path rank folders
self.peft_model_ckpt_name = ckpt_name
self.peft_model_ckpt_dir = os.path.dirname(peft_model_ckpt_path)
assert os.path.isdir(self.peft_model_ckpt_dir)
self.peft_model_nemo_path = None
elif peft_model_nemo_path:
            # If resumption is not possible we will try to load an adapter nemo path.
self.peft_model_nemo_path = peft_model_nemo_path
assert os.path.exists(self.peft_model_nemo_path)
self.peft_model_ckpt_dir = None
else:
            # We are not resuming training from a nemo file or a ckpt;
            # the adapter params are trained from random initialization.
self.peft_model_nemo_path = None
self.peft_model_ckpt_dir = None
def _load_state_dict_from_disk(self, model_weights, map_location=None):
"""
Infuse the state_dict of the base model with PEFT params from either a peft_model_nemo_path or peft_model_ckpt_path
"""
        # first load the base model weights
base_model_state_dict = super()._load_state_dict_from_disk(model_weights, map_location)
# Next, We want to load PEFT model's weights
if self.peft_model_nemo_path:
# if the PEFT weights are provided in a .nemo file
# we need to untar the .nemo if its still tarred
with tempfile.TemporaryDirectory() as tmpdir:
self._unpack_nemo_file(self.peft_model_nemo_path, tmpdir)
model_weights_path = self._inject_model_parallel_rank_for_ckpt(tmpdir, self.peft_model_ckpt_name)
peft_state_dict = torch.load(model_weights_path, map_location)
elif self.peft_model_ckpt_dir:
# if the PEFT weights are provided in a ckpt path file
# we don't need to untar
model_weights_path = self._inject_model_parallel_rank_for_ckpt(
self.peft_model_ckpt_dir, self.peft_model_ckpt_name
)
peft_state_dict = torch.load(model_weights_path, map_location)['state_dict']
else:
peft_state_dict = {}
base_model_state_dict.update(peft_state_dict) # add the PEFT state_dict into the base model's state_dict
return base_model_state_dict
def restore_from(
self,
calling_cls,
restore_path: str,
override_config_path: Optional[Union[OmegaConf, str]] = None,
map_location: Optional[torch.device] = None,
strict: bool = True,
return_config: bool = False,
trainer: Trainer = None,
):
"""
Extends the restore_from method of the `NLPSaveRestoreConnector` so that PEFT params are inserted into the state_dict which is required when training a PEFT model from scratch.
"""
# Get path where the command is executed - the artifacts will be "retrieved" there
# (original .nemo behavior)
loaded_params = super().load_config_and_state_dict(
calling_cls, restore_path, override_config_path, map_location, strict, return_config, trainer,
)
if not isinstance(loaded_params, tuple) or return_config is True:
return loaded_params
conf, instance, state_dict = loaded_params
if (
self.peft_model_nemo_path is None and self.peft_model_ckpt_dir is None
): # we have this check only for training PEFT from scratch
peft_state_dict = instance.get_peft_state_dict()
state_dict.update(peft_state_dict)
state_dict = self.modify_state_dict(conf, state_dict)
self.load_instance_with_state_dict(instance, state_dict, strict)
logging.info(f'Model {instance.__class__.__name__} was successfully restored from {restore_path}.')
return instance
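# Usage sketch (paths and the model class are placeholders; any PEFT-capable NeMo model applies):
#   connector = PEFTSaveRestoreConnector(peft_model_nemo_path='adapter_weights.nemo')
#   model = SomePEFTCapableModel.restore_from(
#       restore_path='base_model.nemo', trainer=trainer, save_restore_connector=connector,
#   )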
class PipelineMixedPrecisionPlugin(MixedPrecisionPlugin):
""" Overrides PTL autocasting to not wrap training/val/test_step.
We do this because we have the megatron-core fwd/bwd functions in training_step.
This means .backward is being called in training_step so we do not want the whole
step wrapped in autocast.
We instead wrap the fwd_output_and_loss_func that is passed to the megatron-core fwd/bwd functions.
"""
def __init__(
self,
precision: Literal["16-mixed", "bf16-mixed"],
device: str,
scaler: Optional[torch.cuda.amp.GradScaler] = None,
) -> None:
super().__init__(precision, device, scaler=scaler)
dtype = None
# MixedPrecisionPlugin class in PTL >= 2.0 takes only "16-mixed" or "bf16-mixed" for precision arg
if precision == '16-mixed':
dtype = torch.float16
elif precision == 'bf16-mixed':
dtype = torch.bfloat16
torch.set_autocast_gpu_dtype(dtype)
@contextmanager
def forward_context(self) -> Generator[None, None, None]:
"""Have the PTL context manager do nothing."""
yield
class GradScaler(torch.cuda.amp.GradScaler):
"""
    Gradient scaler for model-parallel inf checks. Infs in the gradients are checked across tensor-parallel
    ranks when (1) executing the optimizer step and (2) updating the gradient scaler.
"""
def __init__(
self,
init_scale=2.0 ** 16,
growth_factor=2.0,
backoff_factor=0.5,
growth_interval=2000,
enabled=True,
hysteresis=1,
):
super().__init__(
init_scale=init_scale,
growth_factor=growth_factor,
backoff_factor=backoff_factor,
growth_interval=growth_interval,
enabled=enabled,
)
self.optimizer_update_skipped: Optional[bool] = None
self.hysteresis = hysteresis
self._hysteresis_tracker = self.hysteresis
def _unscale_grads_(self, optimizer, *args):
if getattr(optimizer, "_custom_amp_unscale_grads", False):
return optimizer.unscale_grads(*args)
else:
return super()._unscale_grads_(optimizer, *args)
def _maybe_opt_step(self, optimizer, optimizer_state, *args, **kwargs):
retval = None
found_inf = torch.cuda.FloatTensor([sum(v.item() for v in optimizer_state["found_inf_per_device"].values())])
# Update across all model parallel instances.
torch.distributed.all_reduce(
found_inf, op=torch.distributed.ReduceOp.MAX, group=parallel_state.get_model_parallel_group()
)
if found_inf.item() == 0:
retval = optimizer.step(*args, **kwargs)
self.optimizer_update_skipped = False
else:
self.optimizer_update_skipped = True
return retval
def update(self, new_scale=None):
"""
Updates to native grad scaler update function.
1. Check inf across model-parallel ranks.
2. Update hysteresis tracker.
3. Apply hysteresis to grad scale update.
"""
if not self._enabled:
return
_scale, _growth_tracker = self._check_scale_growth_tracker("update")
if new_scale is not None:
# Accept a new user-defined scale.
if isinstance(new_scale, float):
self._scale.fill_(new_scale) # type: ignore[union-attr]
else:
reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False."
assert isinstance(new_scale, torch.cuda.FloatTensor), reason # type: ignore[attr-defined]
assert new_scale.numel() == 1, reason
assert new_scale.requires_grad is False, reason
self._scale.copy_(new_scale) # type: ignore[union-attr]
else:
# Consume shared inf/nan data collected from optimizers to update the scale.
# If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
found_infs = [
found_inf.to(device=_scale.device, non_blocking=True)
for state in self._per_optimizer_states.values()
for found_inf in state["found_inf_per_device"].values()
]
assert len(found_infs) > 0, "No inf checks were recorded prior to update."
found_inf_combined = found_infs[0]
# Update across all model parallel instances.
torch.distributed.all_reduce(
found_inf_combined, op=torch.distributed.ReduceOp.MAX, group=parallel_state.get_model_parallel_group()
)
if len(found_infs) > 1:
for i in range(1, len(found_infs)):
found_inf = found_infs[i]
# Update across all model parallel instances.
torch.distributed.all_reduce(
found_inf, op=torch.distributed.ReduceOp.MAX, group=parallel_state.get_model_parallel_group()
)
found_inf_combined += found_inf
if found_inf_combined > 0:
self._hysteresis_tracker -= 1
if self._hysteresis_tracker <= 0:
# When hysteresis becomes zero, follow the native grad scale update rule.
# Increase scale and reset growth tracker
torch._amp_update_scale_(
_scale,
_growth_tracker,
found_inf_combined,
self._growth_factor,
self._backoff_factor,
self._growth_interval,
)
else:
# Only reset the growth tracker when hysteresis is larger than zero
_growth_tracker.fill_(0.0)
else:
# When no inf found, follow the native grad scale update rule.
# Increment growth_tracker, update scale when growth tracker reaches the interval, and
# reset the hysteresis tracker.
torch._amp_update_scale_(
_scale,
_growth_tracker,
found_inf_combined,
self._growth_factor,
self._backoff_factor,
self._growth_interval,
)
self._hysteresis_tracker = self.hysteresis
# To prepare for next iteration, clear the data collected from optimizers this iteration.
self._per_optimizer_states = defaultdict(torch.cuda.amp.grad_scaler._refresh_per_optimizer_state)
def state_dict(self):
"""
        Add hysteresis_tracker to the native function's state_dict.
"""
return (
{
"scale": self.get_scale(),
"growth_factor": self._growth_factor,
"backoff_factor": self._backoff_factor,
"growth_interval": self._growth_interval,
"_growth_tracker": self._get_growth_tracker(),
"_hysteresis_tracker": self._hysteresis_tracker,
}
if self._enabled
else {}
)
def load_state_dict(self, state_dict):
"""
Load hysteresis_tracker in addition to the state dict of the native function
"""
if not self._enabled:
return
if len(state_dict) == 0:
raise RuntimeError(
"The source state dict is empty, possibly because it was saved "
"from a disabled instance of GradScaler."
)
self._init_scale = state_dict["scale"]
if self._scale is not None:
self._scale.fill_(state_dict["scale"])
self._growth_factor = state_dict["growth_factor"]
self._backoff_factor = state_dict["backoff_factor"]
self._growth_interval = state_dict["growth_interval"]
self._init_growth_tracker = state_dict["_growth_tracker"]
if self._growth_tracker is not None:
self._growth_tracker.fill_(state_dict["_growth_tracker"])
if "_hysterisis_tracker" in state_dict:
self._hysteresis_tracker = state_dict["_hysterisis_tracker"]
else:
self._hysteresis_tracker = 1
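# Construction sketch (mirrors MegatronTrainerBuilder._grad_scaler earlier in this collection):
#   scaler = GradScaler(init_scale=2 ** 32, growth_interval=1000, hysteresis=2)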
class MegatronHalfPrecisionPlugin(MixedPrecisionPlugin):
"""
Plugin for Half (FP16 and BF16) precision training.
This plugin assumes the use of the optimizer with master parameters (fp32).
This plugin uses half-precision at all operators in the model so need of input precision
at each layer operator.
Args:
precision: Whether to use ``torch.float16`` (``16``) or ``torch.bfloat16`` (``'bf16'``).
device: The device for ``torch.autocast``.
scaler: An optional :class:`torch.cuda.amp.GradScaler` to use.
"""
def __init__(
self, precision: Union[str, int], device: str, scaler: Optional[torch.cuda.amp.GradScaler] = None
) -> None:
super().__init__(precision, device, scaler)
dtype = None
# MixedPrecisionPlugin class in PTL >= 2.0 takes only "16-mixed" or "bf16-mixed" for precision arg
if precision == "16-mixed":
dtype = torch.float16
elif precision == "bf16-mixed":
dtype = torch.bfloat16
torch.set_autocast_gpu_dtype(dtype)
def optimizer_step(
self,
optimizer: torch.optim.Optimizer,
model: Union["pl.LightningModule", torch.nn.Module],
closure: Callable[[], Any],
**kwargs: Any,
) -> None:
assert isinstance(
optimizer, MainParamsOptimizerWrapper
), "MegatronHalfPrecisionPlugin supports only the optimizer with master parameters"
if self.scaler is None:
assert optimizer.fp32_grad_accumulation, "BF16 uses FP32 grad accumulation"
_ = closure()
self._after_closure(model, optimizer)
return optimizer.step(**kwargs)
assert not optimizer.fp32_grad_accumulation, "FP16 uses FP16 grad accumulation"
closure_result = closure()
# TODO: Add an option for merged all-reduce
# cast fp16 grads to fp32 and copy to main grads, which are used for unscale and param update
optimizer.copy_model_grads_to_main_grads()
# `unscale` after the closure is executed but before the `on_before_optimizer_step` hook.
# unscale main (fp32) gradients
self.scaler.unscale_(optimizer)
self._after_closure(model, optimizer)
skipped_backward = closure_result is None
# in manual optimization, the closure does not return a value
if not isinstance(model, pl.LightningModule) or not model.automatic_optimization or not skipped_backward:
# note: the scaler will skip the `optimizer.step` if nonfinite gradients are found
self.scaler.step(optimizer, **kwargs)
self.scaler.update()
@contextmanager
    def forward_context(self) -> Generator[None, None, None]:
        """No explicit precision casting. Inputs are expected to be manually cast by the caller."""
try:
yield
finally:
pass
class GlobalBatchDataFetcher(_DataFetcher):
""" Overrides PTL DataFetcher. Used to fetch global batches."""
def __init__(self, prefetch_batches: int = 0, store_on_device: bool = False) -> None:
if not HAVE_APEX:
logging.warning("Apex was not found. Using model parallel or megatron models will error out.")
if not HAVE_MEGATRON_CORE:
            logging.warning("Megatron-core was not found. Using model parallel or megatron models will error out.")
super().__init__(prefetch_batches=prefetch_batches, store_on_device=store_on_device)
def _fetch_next_batch(self, iterator: Iterator) -> None:
start_output = self.on_fetch_start()
batch = [next(iterator) for _ in range(get_num_microbatches())]
self.fetched += 1
if not self.prefetch_batches and self._has_len:
# when we don't prefetch but the dataloader is sized, we use the length for `done`
dataloader = self.dataloader
assert isinstance(dataloader, Sized) # `_has_len` is True
self.done = self.fetched >= len(dataloader)
self.on_fetch_end(batch, start_output)
class CustomProgressBar(TQDMProgressBar):
"""
    Custom progress bar that removes the 's/it' suffix and displays progress per global step
    instead of per microbatch for megatron models
"""
def init_train_tqdm(self):
"""
Override bar_format to not have 's/it'
"""
self.bar = super().init_train_tqdm()
self.bar.bar_format = "{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}{postfix}]"
return self.bar
def on_train_batch_end(self, trainer, pl_module, *_, **__):
"""
Override parent class on_train_batch_end to update progress bar per global_step instead of per microbatch
"""
n = trainer.global_step
if self._should_update(n, self.train_progress_bar.total):
_update_n(self.train_progress_bar, n)
self.train_progress_bar.set_postfix(self.get_metrics(trainer, pl_module))
| NeMo-main | nemo/collections/nlp/parts/nlp_overrides.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.parts.megatron_lr_schedulers import CosineAnnealingExp
from nemo.collections.nlp.parts.utils_funcs import list2str, tensor2list
| NeMo-main | nemo/collections/nlp/parts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.core.optim.lr_scheduler import AVAILABLE_SCHEDULERS, CosineAnnealing
class CosineAnnealingExp(CosineAnnealing):
"""
    Setting max_steps_for_lr_sched for this scheduler in the config is experimental and
    not recommended. The scheduler can take max_steps automatically from
    trainer.max_steps.
"""
def __init__(self, optimizer, *, max_steps, min_lr=0, last_epoch=-1, max_steps_for_lr_sched=None, **kwargs):
super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)
if max_steps_for_lr_sched:
self.max_steps = max_steps_for_lr_sched
self.decay_steps = self.max_steps - (self.constant_steps + self.warmup_steps)
AVAILABLE_SCHEDULERS['CosineAnnealingExp'] = CosineAnnealingExp
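# Illustrative sketch (not part of the original module): because the class is registered in
# AVAILABLE_SCHEDULERS, it can be selected by name from an optimizer config. The field names
# below follow typical NeMo optim configs and are shown here as an assumption, not a reference:
#
#     optim:
#       name: fused_adam
#       lr: 2e-4
#       sched:
#         name: CosineAnnealingExp
#         warmup_steps: 500
#         min_lr: 2e-5
#         max_steps_for_lr_sched: 100000  # experimental override of trainer.max_steps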
| NeMo-main | nemo/collections/nlp/parts/megatron_lr_schedulers.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.modules.common import (
AlbertEncoder,
BertEncoder,
BertModule,
CamembertEncoder,
DistilBertEncoder,
PromptEncoder,
RobertaEncoder,
SequenceClassifier,
SequenceRegression,
SequenceTokenClassifier,
get_lm_model,
get_pretrained_lm_models_list,
get_tokenizer,
get_tokenizer_list,
)
from nemo.collections.nlp.modules.dialogue_state_tracking.sgd_decoder import SGDDecoder
from nemo.collections.nlp.modules.dialogue_state_tracking.sgd_encoder import SGDEncoder
| NeMo-main | nemo/collections/nlp/modules/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for generating text."""
import pickle
from collections.abc import Iterable
from functools import partial
from typing import Callable, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from nemo.collections.common.tokenizers.tabular_tokenizer import TabularTokenizer
from nemo.collections.nlp.modules.common.megatron.utils import get_ltor_masks_and_position_ids
from nemo.collections.nlp.modules.common.text_generation_strategy import model_inference_strategy_dispatcher
from nemo.collections.nlp.modules.common.transformer.text_generation import LengthParam, OutputType, SamplingParam
from nemo.utils import AppState
try:
from apex.transformer.pipeline_parallel.utils import _reconfigure_microbatch_calculator
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import parallel_state, tensor_parallel
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
__all__ = [
"get_default_sampling_params",
"get_default_length_params",
"megatron_gpt_generate",
"get_computeprob_response",
"generate",
"sample_token_greedy",
"sample_token_topk",
]
def get_default_sampling_params():
    # default to greedy sampling
sampling_params: SamplingParam = {
"use_greedy": True,
"temperature": 1.0,
"top_k": 0,
"top_p": 1.0,
"repetition_penalty": 1.0,
"add_BOS": True,
"all_probs": False,
"compute_logprob": False,
"end_strings": ["<|endoftext|>", "<extra_id_1>"],
}
return sampling_params
def get_default_length_params():
    # default length params
length_params: LengthParam = {"min_length": 0, "max_length": 30}
return length_params
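# Illustrative sketch (not part of the original module): how callers can combine the defaults
# above with their own overrides before calling megatron_gpt_generate(). The override values
# are arbitrary examples.
def _example_override_default_params():
    sampling_params = get_default_sampling_params()
    sampling_params["use_greedy"] = False  # switch from greedy decoding to sampling
    sampling_params["top_k"] = 50
    length_params = get_default_length_params()
    length_params["max_length"] = 64  # generate up to 64 new tokens
    return length_params, sampling_params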
def megatron_gpt_generate(model, inputs, tokenizer, length_params, sampling_params, **strategy_args):
# reproduce the old compute_prob method
# a very special case
if sampling_params['compute_logprob']:
        # need to overwrite some configuration; copy the dicts so the caller's params are not mutated
sampling_params = sampling_params.copy()
length_params = length_params.copy()
length_params['max_length'] = 1
sampling_params['all_probs'] = True
sampling_params["add_BOS"] = False
        sampling_params['use_greedy'] = True
response = generate(
model,
inputs=inputs,
tokens_to_generate=length_params['max_length'],
all_probs=sampling_params['all_probs'],
compute_logprob=sampling_params['compute_logprob'],
temperature=sampling_params['temperature'],
add_BOS=sampling_params['add_BOS'],
top_k=sampling_params['top_k'],
top_p=sampling_params['top_p'],
greedy=sampling_params['use_greedy'],
repetition_penalty=sampling_params['repetition_penalty'],
end_strings=sampling_params['end_strings'],
min_tokens_to_generate=length_params['min_length'],
compute_attention_mask=sampling_params.get("compute_attention_mask", True),
**strategy_args,
)
compute_prob_response = get_computeprob_response(tokenizer, response, inputs)
return compute_prob_response
if isinstance(inputs, (list, tuple)):
if isinstance(inputs[0], (str, torch.Tensor)):
output = generate(
model,
inputs=inputs,
tokens_to_generate=length_params['max_length'],
all_probs=sampling_params['all_probs'],
compute_logprob=sampling_params['compute_logprob'],
temperature=sampling_params['temperature'],
add_BOS=sampling_params['add_BOS'],
top_k=sampling_params['top_k'],
top_p=sampling_params['top_p'],
greedy=sampling_params['use_greedy'],
repetition_penalty=sampling_params['repetition_penalty'],
end_strings=sampling_params['end_strings'],
min_tokens_to_generate=length_params['min_length'],
**strategy_args,
)
return output
elif isinstance(inputs[0], dict):
raise NotImplementedError("json object not implemented")
else:
raise NotImplementedError("unknown type is not implemented")
else:
raise NotImplementedError("unknown type is not implemented")
def get_computeprob_response(tokenizer, response, inputs):
if parallel_state.is_pipeline_first_stage() or parallel_state.is_pipeline_last_stage():
# we only have a response on the first and last pipeline stages
compute_prob_response = {}
new_token_ids = []
new_tokens = []
new_texts = []
log_probs = []
full_logprobs = []
offsets = []
for batch_id in range(len(response['tokens'])):
if isinstance(inputs, (list, tuple)):
if isinstance(inputs[0], str):
new_token_id = tokenizer.text_to_ids(inputs[batch_id])
new_text = inputs[batch_id]
token_len = len(new_token_id)
elif isinstance(inputs[0], torch.Tensor):
token_len = int(inputs[1][batch_id].item())
new_token_id = inputs[0][batch_id][:token_len].tolist()
new_text = tokenizer.ids_to_text(new_token_id)
else:
raise TypeError(
f"Unsupported type of `inputs[0]`: {type(inputs[0])}. Supported types: `str`, `torch.Tensor`."
)
else:
raise TypeError(
f"Unsupported type of parameter `inputs`: {type(inputs)}. Supported types: `list` and `tuple`"
)
new_token_ids.append(new_token_id)
new_tokens.append(response['tokens'][batch_id][:token_len])
new_texts.append(new_text)
log_probs.append(response['logprob'][batch_id][:token_len])
full_logprobs.append(response['full_logprob'][batch_id][:token_len])
offsets.append(response['offsets'][batch_id][:-1])
compute_prob_response['sentences'] = new_texts
compute_prob_response['tokens'] = new_tokens
compute_prob_response['token_ids'] = new_token_ids
compute_prob_response['logprob'] = log_probs
compute_prob_response['full_logprob'] = full_logprobs
compute_prob_response['offsets'] = offsets
return compute_prob_response
else:
# intermediate stages
return None
def get_batch(model, tokenizer, context_tokens):
"""Generate batch from context tokens."""
# Move to GPU.
tokens = context_tokens.contiguous().cuda()
# Get the attention mask and postition ids.
attention_mask, _, position_ids = get_ltor_masks_and_position_ids(
tokens,
tokenizer.eos_id,
model.cfg.get('reset_position_ids', False),
model.cfg.get('reset_attention_mask', False),
model.cfg.get('eod_mask_loss', False),
)
return tokens, attention_mask, position_ids
def tab_logits(logits, min_id, max_id, filter_value=-float('Inf')):
logits[:, :min_id] = filter_value
logits[:, max_id:] = filter_value
return logits
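# Illustrative sketch (not part of the original module): tab_logits() restricts sampling to a
# contiguous id range [min_id, max_id) by pushing every other logit to -inf.
def _example_tab_logits():
    logits = torch.zeros(1, 6)
    masked = tab_logits(logits.clone(), min_id=2, max_id=4)
    # masked[0] == [-inf, -inf, 0., 0., -inf, -inf]; only ids 2 and 3 remain sampleable
    return masked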
def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float('Inf'), started=None):
    """
    This function has been mostly taken from huggingface conversational
    ai code at
    https://medium.com/huggingface/how-to-build-a-state-of-the-art-
    conversational-ai-with-transfer-learning-2d818ac26313
    @param logits: logits tensor
    @param top_k: keep only top k tokens with highest probability
    @param top_p: keep only the top tokens with cumulative probability >= top_p (nucleus filtering)
    @param filter_value: value to set filtered tokens to
    @param started: a tensor of bools indicating whether the text generation starts for the batch
    returns the filtered logits
    """
if top_k > 0:
# Remove all tokens with a probability less than the
# last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
if started is not None:
for i in np.arange(indices_to_remove.size(0))[started.cpu().numpy()]:
logits[i, indices_to_remove[i]] = filter_value
else:
logits[indices_to_remove] = filter_value
if top_p > 0.0:
        # Sort the logits in descending order to compute cumulative probabilities
sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token
# above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
if started is not None:
for i in np.arange(sorted_indices.size(0))[started.cpu().numpy()]:
indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
logits[i, indices_to_remove] = filter_value
else:
for i in range(sorted_indices.size(0)):
indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
logits[i, indices_to_remove] = filter_value
return logits
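# Illustrative sketch (not part of the original module): top-k keeps the k largest logits and
# top-p (nucleus) keeps the smallest prefix of probability-sorted tokens whose cumulative
# probability exceeds top_p; everything else is set to filter_value.
def _example_top_k_top_p_filtering():
    logits = torch.tensor([[4.0, 3.0, 2.0, 1.0]])
    top2 = top_k_logits(logits.clone(), top_k=2)
    # top2[0] == [4., 3., -inf, -inf]
    nucleus = top_k_logits(logits.clone(), top_p=0.9)
    # only the lowest-probability token falls outside the 0.9 nucleus here, so
    # nucleus[0] == [4., 3., 2., -inf]
    return top2, nucleus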
def repetition_penalty(logits, repetition_penalty, used_tokens):
    """ Implement the repetition penalty; see the CTRL paper
    https://arxiv.org/pdf/1909.05858.pdf
    """
if used_tokens is not None and repetition_penalty != 1.0:
logits_update = torch.gather(logits, 1, used_tokens)
logits = torch.scatter(logits, 1, used_tokens, logits_update / repetition_penalty)
return logits
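# Illustrative sketch (not part of the original module): a penalty of 1.2 divides the logits of
# token ids that were already generated, making them less likely to be repeated.
def _example_repetition_penalty():
    logits = torch.tensor([[2.4, 1.0, 0.5]])
    used_tokens = torch.tensor([[0]])  # token id 0 was generated earlier
    penalized = repetition_penalty(logits.clone(), 1.2, used_tokens)
    # penalized[0] == [2.0, 1.0, 0.5]
    return penalized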
def get_model_parallel_src_rank():
"""Calculate the global rank corresponding to the first local rank
in the model parallel group."""
world_size = torch.distributed.get_world_size()
all_ranks = np.arange(world_size)
tp_size = parallel_state.get_tensor_model_parallel_world_size()
pp_size = parallel_state.get_pipeline_model_parallel_world_size()
# [pipeline dim, data parallel, tensor dim]
all_ranks = all_ranks.reshape(pp_size, -1, tp_size)
dp_rank = parallel_state.get_data_parallel_rank()
return all_ranks[:, dp_rank, :].min()
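# Illustrative sketch (not part of the original module): the rank-grid arithmetic behind
# get_model_parallel_src_rank(), with made-up parallel sizes and no distributed setup.
def _example_model_parallel_src_rank_layout():
    # world_size=16 with pipeline=2 and tensor=2 implies data parallel size 4
    all_ranks = np.arange(16).reshape(2, 4, 2)  # [pipeline dim, data parallel, tensor dim]
    # the source rank for data parallel rank 1 is the smallest global rank in its slice
    return all_ranks[:, 1, :].min()  # -> 2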
def send_generate_info(
context_tokens_tensor,
context_length_tensor,
tokens_to_generate,
all_probs,
compute_logprob,
temperature,
top_k,
top_p,
greedy,
repetition_penalty,
min_tokens_to_generate,
end_strings,
):
"""
Needs to be synced up with receive_generate_info
"""
model_parallel_group = parallel_state.get_model_parallel_group()
src = get_model_parallel_src_rank()
# Send the sizes of the tensors
input_info = [
context_tokens_tensor.size(0), # batch_size
context_tokens_tensor.size(1), # seq_len
tokens_to_generate,
all_probs,
compute_logprob, # whether to compute log probabilities matrix
temperature,
top_k,
top_p,
greedy,
repetition_penalty,
min_tokens_to_generate,
]
input_info_tensor = torch.cuda.FloatTensor(input_info)
torch.distributed.broadcast(input_info_tensor, src, model_parallel_group)
# Send variables to all ranks
torch.distributed.broadcast(context_length_tensor, src, model_parallel_group)
torch.distributed.broadcast(context_tokens_tensor, src, model_parallel_group)
# send end strings
string_tensor = torch.as_tensor(
np.frombuffer(pickle.dumps(end_strings), dtype=np.int8), device=torch.cuda.current_device()
)
size = torch.as_tensor([string_tensor.size(0)], device=torch.cuda.current_device(), dtype=torch.int64)
torch.distributed.broadcast(size, src, model_parallel_group)
torch.distributed.broadcast(string_tensor, src, model_parallel_group)
def receive_generate_info():
"""
Needs to be synced up with send_generate_info
"""
model_parallel_group = parallel_state.get_model_parallel_group()
src = get_model_parallel_src_rank()
input_info_tensor = torch.empty(11, dtype=torch.float32, device=torch.cuda.current_device())
torch.distributed.broadcast(input_info_tensor, src, model_parallel_group)
batch_size = int(input_info_tensor[0].item())
seq_len = int(input_info_tensor[1].item())
tokens_to_generate = int(input_info_tensor[2].item())
all_probs = bool(input_info_tensor[3].item())
compute_logprob = bool(input_info_tensor[4].item()) # whether to compute log probabilities matrix
temperature = float(input_info_tensor[5].item())
top_k = int(input_info_tensor[6].item())
top_p = float(input_info_tensor[7].item())
greedy = bool(input_info_tensor[8].item())
repetition_penalty = float(input_info_tensor[9].item())
min_tokens_to_generate = int(input_info_tensor[10].item())
context_length_tensor = torch.empty(batch_size, dtype=torch.int64, device=torch.cuda.current_device())
context_tokens_tensor = torch.empty(batch_size, seq_len, dtype=torch.int64, device=torch.cuda.current_device())
# Send variables to all ranks
torch.distributed.broadcast(context_length_tensor, src, model_parallel_group)
torch.distributed.broadcast(context_tokens_tensor, src, model_parallel_group)
array_size = torch.empty(1, dtype=torch.int64, device=torch.cuda.current_device())
torch.distributed.broadcast(array_size, src, model_parallel_group)
string_tensor = torch.empty(array_size[0], dtype=torch.int8, device=torch.cuda.current_device())
torch.distributed.broadcast(string_tensor, src, model_parallel_group)
bytes = string_tensor.cpu().numpy().tobytes()
end_strings = pickle.loads(bytes)
return (
context_length_tensor,
context_tokens_tensor,
tokens_to_generate,
all_probs,
compute_logprob,
temperature,
top_k,
top_p,
greedy,
repetition_penalty,
min_tokens_to_generate,
end_strings,
)
def synced_generate(
model,
inference_strategy,
context_tokens_tensor,
context_length_tensor,
tokens_to_generate,
all_probs,
temperature,
top_k=0,
top_p=0.0,
greedy=False,
compute_attention_mask=True,
compute_logprob=False,
repetition_penalty=1.2,
end_strings=[],
min_tokens_to_generate=0,
):
context_length = context_length_tensor.min().item()
tokenizer = model.tokenizer
if isinstance(tokenizer, TabularTokenizer):
batch_token_iterator = tab_sample_sequence_batch(
model,
inference_strategy,
context_tokens_tensor,
context_length_tensor,
tokens_to_generate,
all_probs,
compute_attention_mask=compute_attention_mask,
temperature=temperature,
)
else:
batch_token_iterator = sample_sequence_batch(
model,
inference_strategy,
context_tokens_tensor,
context_length_tensor,
tokens_to_generate,
all_probs,
compute_attention_mask=compute_attention_mask,
compute_logprob=compute_logprob,
temperature=temperature,
end_strings=end_strings,
extra={
"top_p": top_p,
"top_k": top_k,
"greedy": greedy,
"repetition_penalty": repetition_penalty,
"min_tokens_to_generate": min_tokens_to_generate,
},
)
for tokens, lengths, output_logits, full_logits in batch_token_iterator:
context_length += 1
if parallel_state.is_pipeline_last_stage():
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
if compute_logprob:
torch.distributed.broadcast(output_logits, src, group)
if all_probs:
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
torch.distributed.broadcast(full_logits, src, group)
else:
if parallel_state.is_pipeline_first_stage():
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
if compute_logprob:
precision = model._trainer.precision
if precision in [16, "16"]:
dtype = torch.float16
elif precision == "bf16":
dtype = torch.bfloat16
else:
dtype = torch.float32
output_logits = torch.empty(
tokens.size(0), context_length - 1, dtype=dtype, device=torch.device("cuda")
)
torch.distributed.broadcast(output_logits, src, group)
if all_probs:
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
full_logits = torch.empty(
tokens.size(0),
context_length - 1,
model.padded_vocab_size,
dtype=dtype,
device=torch.device("cuda"),
)
torch.distributed.broadcast(full_logits, src, group)
if tokens is not None:
return tokens[:, :context_length], output_logits, full_logits
def generate(
model,
inputs=None,
tokens_to_generate=0,
all_probs=False,
temperature=1.0,
add_BOS=False,
top_k=0,
top_p=0.0,
greedy=False,
compute_attention_mask=True,
compute_logprob=False,
repetition_penalty=1.0,
end_strings=['<|endoftext|>'],
min_tokens_to_generate=0,
**strategy_args,
) -> OutputType:
"""
Args:
model (NLPModel): text generative model
        inputs (Union[tuple, List[str]]): if it is a tuple, it is assumed to be (context_tokens_tensor, context_length_tensor). Otherwise it is a list of prompt text strings
tokens_to_generate (int): The maximum length of the tokens to be generated.
all_probs (bool): Return the log prob for all the tokens
temperature (float): sampling temperature
        add_BOS (bool): add the bos token at the beginning of the prompt
        top_k (int): The number of highest probability vocabulary tokens to keep for top-k-filtering.
        top_p (float): If set to float < 1, only the most probable tokens with probabilities that add up to top_p or higher are kept for generation.
        greedy (bool): Whether to use greedy decoding instead of sampling
        repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty
        min_tokens_to_generate (int): The minimum length of the tokens to be generated
        strategy_args: the extra keyword arguments are treated as inference strategy arguments
        end_strings (List[str]): a list of strings that stop generation when they are encountered in the output
Returns:
OutputType: It generates the output in a dictionary type. It has the following keys:
sentences: List[str], output sentences
            tokens: List[List[str]], output sentences broken into tokens
logprob: List[Tensor], log prob of generated tokens
full_logprob: List[Tensor], log prob of all the tokens in the vocab
token_ids: List[Tensor], output sentence token ids
offsets: List[List[int]] # list of tokens start positions in text
"""
if 'strategy' in strategy_args:
inference_strategy = strategy_args['strategy']
else:
inference_strategy = model_inference_strategy_dispatcher(model, **strategy_args)
tokenizer = model.tokenizer
if torch.distributed.get_rank() == get_model_parallel_src_rank():
if isinstance(inputs, tuple):
context_tokens_tensor, context_length_tensor = inputs
else:
context_tokens_tensor, context_length_tensor = inference_strategy.tokenize_batch(
inputs, tokens_to_generate, add_BOS
)
send_generate_info(
context_tokens_tensor,
context_length_tensor,
tokens_to_generate,
all_probs,
compute_logprob,
temperature,
top_k,
top_p,
greedy,
repetition_penalty,
min_tokens_to_generate,
end_strings,
)
else:
(
context_length_tensor,
context_tokens_tensor,
tokens_to_generate,
all_probs,
compute_logprob,
temperature,
top_k,
top_p,
greedy,
repetition_penalty,
min_tokens_to_generate,
end_strings,
) = receive_generate_info()
output = synced_generate(
model,
inference_strategy,
context_tokens_tensor,
context_length_tensor,
tokens_to_generate,
all_probs,
temperature,
compute_attention_mask=compute_attention_mask,
compute_logprob=compute_logprob,
top_k=top_k,
top_p=top_p,
greedy=greedy,
repetition_penalty=repetition_penalty,
end_strings=end_strings,
min_tokens_to_generate=min_tokens_to_generate,
)
special_tokens = set()
if hasattr(tokenizer, 'pad_token') and tokenizer.pad_token is not None:
special_tokens.add(tokenizer.pad_token)
if hasattr(tokenizer, 'eos_token') and tokenizer.eos_token is not None:
special_tokens.add(tokenizer.eos_token)
if hasattr(tokenizer, 'bos_token') and tokenizer.bos_token is not None:
special_tokens.add(tokenizer.bos_token)
if hasattr(tokenizer, 'cls_token') and tokenizer.cls_token is not None:
special_tokens.add(tokenizer.cls_token)
if hasattr(tokenizer, 'unk_token') and tokenizer.unk_token is not None:
special_tokens.add(tokenizer.unk_token)
if hasattr(tokenizer, 'sep_token') and tokenizer.sep_token is not None:
special_tokens.add(tokenizer.sep_token)
if hasattr(tokenizer, 'mask_token') and tokenizer.mask_token is not None:
special_tokens.add(tokenizer.mask_token)
if output is not None:
decode_tokens, output_logits, full_logits = output
resp_sentences = []
resp_sentences_seg = []
decode_tokens = decode_tokens.cpu().numpy().tolist()
for decode_token in decode_tokens:
sentence = tokenizer.ids_to_text(decode_token)
resp_sentences.append(sentence)
if not isinstance(tokenizer, TabularTokenizer):
words = []
for token in decode_token:
if not isinstance(token, Iterable):
token = [token]
word = tokenizer.ids_to_tokens(token)
if isinstance(word, Iterable):
word = word[0]
if hasattr(tokenizer.tokenizer, 'byte_decoder'):
word = bytearray([tokenizer.tokenizer.byte_decoder[c] for c in word]).decode(
'utf-8', errors='replace'
)
words.append(word)
resp_sentences_seg.append(words)
else:
words = tokenizer.text_to_tokens(sentence)
resp_sentences_seg.append(words)
# offsets calculation
all_offsets = []
for item in resp_sentences_seg:
offsets = [0]
for index, token in enumerate(item):
if index != len(item) - 1:
if token in special_tokens:
offsets.append(offsets[-1])
else:
offsets.append(len(token) + offsets[-1])
all_offsets.append(offsets)
output = {}
output['sentences'] = resp_sentences
output['tokens'] = resp_sentences_seg
output['logprob'] = output_logits
output['full_logprob'] = full_logits
output['token_ids'] = decode_tokens
output['offsets'] = all_offsets
output = inference_strategy.post_generation_process(output)
return output
def switch(val1, val2, boolean):
boolean = boolean.type_as(val1)
return (1 - boolean) * val1 + boolean * val2
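# Illustrative sketch (not part of the original module): switch() is an elementwise select, used
# above to keep context tokens and only overwrite the positions that are actually being generated.
def _example_switch():
    val1 = torch.tensor([10, 20, 30])
    val2 = torch.tensor([1, 2, 3])
    boolean = torch.tensor([False, True, False])
    return switch(val1, val2, boolean)  # tensor([10, 2, 30])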
def sample_sequence_batch(
model,
inference_strategy,
context_tokens,
context_lengths,
tokens_to_generate,
all_probs=False,
compute_attention_mask=True,
compute_logprob=False,
type_ids=None,
temperature=None,
end_strings=['<|endoftext|>'],
extra={},
):
# Importing here to avoid circular import errors
app_state = AppState()
micro_batch_size = context_tokens.shape[0]
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=micro_batch_size,
micro_batch_size=micro_batch_size,
data_parallel_size=1,
)
assert (
model.cfg.get('sequence_parallel', False) == False
), 'sequence_parallel should be False during inference. Disable it in the model config if restoring from nemo or in hparams.yaml if restoring from PTL checkpoint'
assert (
model.cfg.get('activations_checkpoint_granularity', None) is None
), 'activations_checkpoint_granularity should be None during inference. Disable it in the model config if restoring from nemo or in hparams.yaml if restoring from PTL checkpoint'
assert (
model.cfg.get('activations_checkpoint_method', None) is None
), 'activations_checkpoint_method should be None during inference. Disable it in the model config if restoring from nemo or in hparams.yaml if restoring from PTL checkpoint'
tokenizer = model.tokenizer
# initialize the batch
with torch.no_grad():
context_length = context_lengths.min().item()
inference_strategy.init_batch(context_tokens, context_length, compute_attention_mask)
# added eos_id to support the function generate_samples_eval that passes
        # eos_id as an argument and needs termination when that id is found.
eod_id = tokenizer.eos_id
counter = 0
batch_size = context_tokens.size(0)
is_done = torch.zeros([batch_size]).byte().cuda()
tokens = context_tokens
output_logits = None
all_generated_indices = None # used to track all generated indices
# Generate enough tokens for the longest sequence
maxlen = tokens_to_generate + context_lengths.max().item()
maxlen = inference_strategy.clip_max_len(maxlen)
lengths = torch.ones([batch_size]).long().cuda() * maxlen
while context_length < maxlen:
batch, tensor_shape = inference_strategy.prepare_batch_at_step(
tokens, maxlen, micro_batch_size, counter, context_length, compute_attention_mask
)
output = inference_strategy.forward_step(batch, tensor_shape)
if parallel_state.is_pipeline_last_stage():
if compute_logprob:
output = output[0]['logits']
output = tensor_parallel.gather_from_tensor_model_parallel_region(output)
assert output is not None
logits = output[:, -1].view(batch_size, -1).contiguous()
else:
logits = output[0]['logits'][:, -1].contiguous()
logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits)
assert logits is not None
logits = logits.view(batch_size, -1)
# make sure it will generate at least min_length
min_length = extra.get('min_tokens_to_generate', 0)
if min_length > 0:
within_min_length = (context_length - context_lengths) < min_length
logits[within_min_length, eod_id] = -float('Inf')
# make sure it won't sample outside the vocab_size range
logits[:, tokenizer.vocab_size :] = -float('Inf')
# started indicates whether the current token step passes the context_length, so we make sure not to overwrite the context tokens
started = context_lengths <= context_length
if extra.get('greedy', False):
prev = torch.argmax(logits, dim=-1).view(-1)
else:
logits = logits.float()
logits /= temperature
                    # handle repetition penalty
logits = repetition_penalty(logits, extra.get('repetition_penalty', 1.2), all_generated_indices)
logits = top_k_logits(
logits, top_k=extra.get('top_k', 0), top_p=extra.get('top_p', 0.9), started=started
)
probs = F.softmax(logits, dim=-1)
prev = torch.multinomial(probs, num_samples=1).view(-1)
# Clamp the predicted out of vocabulary tokens
prev = torch.clamp(prev, max=tokenizer.vocab_size - 1)
new_tokens = switch(tokens[:, context_length].view(-1), prev, started)
# Replace sampled tokens w/ done token if EOD has already been sampled
new_tokens = switch(new_tokens, eod_id, is_done)
# post process the inference tokens based on the strategy
inference_strategy.post_process(tokens, new_tokens, context_length)
# Insert either new predicted or next prompt token
tokens[:, context_length] = new_tokens
if compute_logprob:
if output_logits is None:
output = F.log_softmax(output[:, :context_length, :], 2)
indices = torch.unsqueeze(tokens[:, 1 : context_length + 1], 2)
output_logits = torch.gather(output, 2, indices).squeeze(2)
all_generated_indices = indices[:, :, 0]
if all_probs:
full_logits = output
else:
output = F.log_softmax(output, 2)
indices = torch.unsqueeze(new_tokens, 1).unsqueeze(2)
new_output_logits = torch.gather(output, 2, indices).squeeze(2)
# TODO(rprenger) we're copying output_logits every time. Should pre-allocate
output_logits = torch.cat([output_logits, new_output_logits], 1)
all_generated_indices = torch.cat([all_generated_indices, indices[:, :, 0]], 1)
if all_probs:
full_logits = torch.cat([full_logits, output], 1)
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
torch.distributed.broadcast(new_tokens, src, group)
# done_token = (prev == eod_id).byte() & started.byte()
done_token = inference_strategy.end_of_generation_condition(
tokens[:, : context_length + 1], prev, eod_id, end_strings
)
done_token = done_token.byte() & started.byte()
just_finished = (done_token & ~is_done).bool()
lengths[just_finished.view(-1)] = context_length
is_done = is_done | done_token
done = torch.all(is_done)
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_pipeline_model_parallel_group()
torch.distributed.broadcast(done, src, group)
if compute_logprob:
if all_probs:
yield tokens, lengths, output_logits, full_logits
else:
yield tokens, lengths, output_logits, None
else:
yield tokens, lengths, None, None
else:
if parallel_state.is_pipeline_first_stage():
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
new_tokens = torch.empty_like(tokens[:, context_length])
torch.distributed.broadcast(new_tokens, src, group)
tokens[:, context_length] = new_tokens
yield tokens, None, None, None
else:
yield None, None, None, None
done = torch.cuda.ByteTensor([0])
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_pipeline_model_parallel_group()
torch.distributed.broadcast(done, src, group)
context_length += 1
counter += 1
if done:
break
def tab_sample_sequence_batch(
model,
inference_strategy,
context_tokens,
context_lengths,
tokens_to_generate,
all_probs=True,
compute_attention_mask=True,
type_ids=None,
temperature=None,
):
app_state = AppState()
micro_batch_size = context_tokens.shape[0]
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=micro_batch_size,
micro_batch_size=micro_batch_size,
data_parallel_size=1,
)
tokenizer = model.tokenizer
sizes = tokenizer.code_column.sizes
tokens_per_row = sum(sizes) + 1
columns = tokenizer.code_column.columns
num_columns = len(columns)
tokenid_range = []
for i in range(num_columns):
tokenid_range.extend(tokenizer.code_column.get_range(i))
# initialize the batch
with torch.no_grad():
context_length = context_lengths.min().item()
inference_strategy.init_batch(context_tokens, context_length, compute_attention_mask)
context = context_tokens[:, :context_length]
# the context may start in the middle of the row,
# calculate the offset according to the position of '\n' or '<|endoftext|>'
positions = torch.where(context == tokenizer.eor)[1]
if len(positions) == 0:
positions = torch.where(context == tokenizer.eod)[1]
if len(positions) != 0:
max_position = positions.max().item()
            # TODO: need to make sure contexts of different batches have the same offset lengths
# otherwise, need to calculate offset per batch_id
offset = (context_length - max_position - 1) % tokens_per_row
else:
offset = 0
eod_id = tokenizer.eos_id
counter = 0
batch_size = context_tokens.size(0)
is_done = torch.zeros([batch_size]).byte().cuda()
tokens = context_tokens
output_logits = None
# Generate enough tokens for the longest sequence
maxlen = tokens_to_generate + context_lengths.max().item()
if maxlen > model.cfg.encoder_seq_length:
maxlen = model.cfg.encoder_seq_length
lengths = torch.ones([batch_size]).long().cuda() * maxlen
while context_length < maxlen:
batch, tensor_shape = inference_strategy.prepare_batch_at_step(
tokens, maxlen, micro_batch_size, counter, context_length, compute_attention_mask
)
output = inference_strategy.forward_step(batch, tensor_shape)
if parallel_state.is_pipeline_last_stage():
output = output[0]['logits'].float()
output = tensor_parallel.gather_from_tensor_model_parallel_region(output)
assert output is not None
output = output.float()
logits = output[:, -1].view(batch_size, -1).contiguous()
token_in_row = (counter + offset) % tokens_per_row
logits = logits.float()
logits /= temperature
if token_in_row == tokens_per_row - 1:
# line break
eor_id = tokenizer.eor
eod_id = tokenizer.eos_id
min_id = min(eor_id, eod_id)
max_id = max(eor_id, eod_id) + 1
logits = tab_logits(logits, min_id, max_id)
else:
# limit the range
min_id, max_id = tokenid_range[token_in_row]
logits = tab_logits(logits, min_id, max_id)
probs = F.softmax(logits, dim=-1)
prev = torch.multinomial(probs, num_samples=1).view(-1)
started = context_lengths <= context_length
# Clamp the out of vocabulary tokens.
prev = torch.clamp(prev, max=tokenizer.vocab_size - 1)
new_tokens = switch(tokens[:, context_length].view(-1), prev, started)
# post process the inference tokens based on the strategy
inference_strategy.post_process(tokens, new_tokens, context_length)
tokens[:, context_length] = new_tokens
if output_logits is None:
output_context = F.log_softmax(output[:, :context_length, :], 2)
indices = torch.unsqueeze(tokens[:, 1 : context_length + 1], 2)
output_logits = torch.gather(output_context, 2, indices).squeeze(2)
if all_probs:
full_logits = output_context
else:
output_context = F.log_softmax(output, 2)
indices = torch.unsqueeze(new_tokens, 1).unsqueeze(2)
new_output_logits = torch.gather(output_context, 2, indices).squeeze(2)
# TODO(rprenger) we're copying output_logits every time. Should pre-allocate
output_logits = torch.cat([output_logits, new_output_logits], 1)
if all_probs:
full_logits = torch.cat([full_logits, output_context], 1)
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
torch.distributed.broadcast(new_tokens, src, group)
done_token = (prev == eod_id).byte() & started.byte()
just_finished = (done_token & ~is_done).bool()
lengths[just_finished.view(-1)] = context_length
is_done = is_done | done_token
done = torch.all(is_done)
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_pipeline_model_parallel_group()
torch.distributed.broadcast(done, src, group)
if all_probs:
yield tokens, lengths, output_logits, full_logits
else:
yield tokens, lengths, output_logits, None
else:
if parallel_state.is_pipeline_first_stage():
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
new_tokens = torch.empty_like(tokens[:, context_length])
torch.distributed.broadcast(new_tokens, src, group)
tokens[:, context_length] = new_tokens
yield tokens, None, None, None
else:
yield None, None, None, None
done = torch.cuda.ByteTensor([0])
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_pipeline_model_parallel_group()
torch.distributed.broadcast(done, src, group)
context_length += 1
counter += 1
if done:
break
def sample_token_greedy(logits):
"""
Greedy sampling. Returns the token with the highest probability, and corresponding log_prob.
Args:
logits: [batch_size, vocab_size] - unnormalized log probabilities of the next token
Returns:
log_probs: [batch_size] - log probabilities of the sampled tokens
token_ids: [batch_size] - sampled token ids
"""
log_probs, token_ids = torch.max(torch.nn.functional.log_softmax(logits, dim=-1), dim=-1)
return log_probs, token_ids
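# Illustrative sketch (not part of the original module): greedy sampling takes the argmax of each
# row and reports the log-softmax value of that token.
def _example_sample_token_greedy():
    logits = torch.tensor([[0.1, 2.0, 0.3], [1.5, 0.2, 0.2]])
    log_probs, token_ids = sample_token_greedy(logits)
    # token_ids == tensor([1, 0]); log_probs holds the log probabilities of those two tokens
    return log_probs, token_ids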
def sample_token_topk(logits, top_k=0, top_p=0.0, temperature=1.0, filter_value=-float('Inf')):
"""
    Top-k / top-p (nucleus) sampling with temperature. Returns the sampled token and its corresponding log_prob.
    Args:
        logits: [batch_size, vocab_size] - unnormalized log probabilities of the next token
        top_k: int - if > 0: only sample from top k tokens with highest probability
        top_p: float - if > 0.0: only sample from the smallest set of tokens whose cumulative probability exceeds top_p
temperature: float - temperature for sampling
filter_value: float - value to set filtered tokens to
Returns:
log_probs: [batch_size] - log probabilities of the sampled tokens
token_ids: [batch_size] - sampled token ids
"""
logits = logits.float()
logits /= temperature
logits = top_k_logits(logits, top_k=top_k, top_p=top_p, filter_value=filter_value)
log_probs = torch.nn.functional.log_softmax(logits, dim=-1)
token_ids = torch.multinomial(log_probs.exp(), num_samples=1).view(-1)
log_probs = log_probs.gather(1, token_ids.unsqueeze(1)).squeeze(1)
return log_probs, token_ids
def sample_token_topk_beam_search(logits: torch.Tensor, beam_size: int = 1, dim: int = -1, log_softmax: bool = True):
"""
    Beam search selection of top K predictions per target (dim). Returns the beam_size token ids with the highest
    probability and the corresponding log_prob per target
    Args:
        logits: [batch_size, vocab_size] - unnormalized log probabilities of the next token
        beam_size: int >= 1 - number of tokens to return with the highest probability per target
        dim: int - dim of log_softmax and topk selection
        log_softmax: bool - whether to apply log softmax to the logits to get log probabilities
Returns:
log_probs: [batch_size, beam_size] - log probabilities of the sampled tokens
token_ids: [batch_size, beam_size] - sampled token ids
"""
if log_softmax:
log_probs = torch.nn.functional.log_softmax(logits, dim=dim)
else:
log_probs = logits
# get top candidates for each item in batch
log_probs, token_ids = torch.topk(log_probs, beam_size, dim=dim)
return log_probs, token_ids
def compute_beam_search_len_penalty(lengths: torch.Tensor, alpha: int) -> torch.Tensor:
"""
Length penalty used in the beam search
Args:
lengths: lengths of decoded sequences
        alpha: parameter controlling the strength of the penalty
Returns:
tensor with the penalty value
"""
return ((5 + lengths) / 6).pow(alpha)
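# Illustrative sketch (not part of the original module): the penalty equals 1.0 for length 1 and
# grows with length when alpha > 0, so dividing beam scores by it avoids unfairly penalizing
# longer hypotheses.
def _example_len_penalty():
    lengths = torch.tensor([1.0, 7.0])
    # -> tensor([1.0000, 2 ** 0.6]) ~= tensor([1.0000, 1.5157])
    return compute_beam_search_len_penalty(lengths, alpha=0.6)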
def get_sampling_token_fn(sampling_method: str, sampling_kwargs: dict) -> Tuple[Callable, dict]:
"""
Specifies the sampling function that takes in a tensor of logits [batch_size, vocab_size] and returns a tuple
    (tensor of log_probs [batch_size], tensor of token ids sampled from the logits [batch_size]).
If the beam search is enabled, the sampling function returns tensors [batch_size, beam_size]
Args:
        sampling_method: the sampling method to use in the decode steps. Currently supported methods are
            "greedy-search", "topkp-sampling" and "beam-search"
sampling_kwargs: dict with arguments to be passed to the sampling function.
For sampling method 'beam-search', the following kwargs are supported:
beam_size - int, number of the best sequences at each decode iteration to be left per target
beam_alpha - int, the parameter of length penalty applied to predicted sequences
                keep_only_best_tokens - used in the beam search, boolean flag indicating whether to output only the
                    best sequence of predicted tokens (True) or beam_size predictions per target
                return_scores - used in the beam search, boolean flag indicating whether to return scores on top of
                    predictions and logits
Returns:
sample_token_fn: the sampling function
default_sampling_kwargs: sampling_kwargs augmented with default sampling kwargs
"""
all_default_sampling_kwargs = {
'greedy-search': {},
'topkp-sampling': {'top_k': 0, 'top_p': 0.0, 'temperature': 1.0},
'beam-search': {'beam_size': 1, 'beam_alpha': 0.0, 'keep_only_best_tokens': False, 'return_scores': False},
}
# update default sampling kwargs with user provided kwargs
default_sampling_kwargs = all_default_sampling_kwargs[sampling_method].copy()
default_sampling_kwargs.update(sampling_kwargs)
# sampling_kwargs = default_sampling_kwargs
if sampling_method == 'greedy-search':
sampling_token_fn = sample_token_greedy
elif sampling_method == "topkp-sampling":
top_k = default_sampling_kwargs['top_k']
top_p = default_sampling_kwargs['top_p']
temperature = default_sampling_kwargs['temperature']
sampling_token_fn = partial(sample_token_topk, top_k=top_k, top_p=top_p, temperature=temperature)
elif sampling_method == "beam-search":
beam_size = default_sampling_kwargs['beam_size']
sampling_token_fn = partial(sample_token_topk_beam_search, beam_size=beam_size)
else:
raise ValueError(
f'Invalid sampling method {sampling_method}. '
f'Supported sampling methods are {all_default_sampling_kwargs.keys()}'
)
return sampling_token_fn, default_sampling_kwargs
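# Illustrative sketch (not part of the original module): resolving a sampling function by name
# and applying it to a batch of logits; the kwarg values are arbitrary examples.
def _example_get_sampling_token_fn():
    sample_fn, kwargs = get_sampling_token_fn('topkp-sampling', {'top_k': 5, 'temperature': 0.8})
    logits = torch.randn(2, 32)  # [batch_size, vocab_size]
    log_probs, token_ids = sample_fn(logits)  # both have shape [2]
    return log_probs, token_ids, kwargs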
| NeMo-main | nemo/collections/nlp/modules/common/text_generation_utils.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
from nemo.collections.common.parts import MultiLayerPerceptron
from nemo.collections.nlp.modules.common.classifier import Classifier
from nemo.core.classes import typecheck
from nemo.core.neural_types import LogitsType, LogprobsType, NeuralType
__all__ = ['SequenceClassifier']
class SequenceClassifier(Classifier):
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
if not self.log_softmax:
return {"logits": NeuralType(('B', 'D'), LogitsType())}
else:
return {"log_probs": NeuralType(('B', 'D'), LogprobsType())}
def __init__(
self,
hidden_size: int,
num_classes: int,
num_layers: int = 2,
activation: str = 'relu',
log_softmax: bool = True,
dropout: float = 0.0,
use_transformer_init: bool = True,
idx_conditioned_on: int = 0,
):
"""
Initializes the SequenceClassifier module.
Args:
hidden_size: the hidden size of the mlp head on the top of the encoder
num_classes: number of the classes to predict
num_layers: number of the linear layers of the mlp head on the top of the encoder
activation: type of activations between layers of the mlp head
log_softmax: applies the log softmax on the output
dropout: the dropout used for the mlp head
use_transformer_init: initializes the weights with the same approach used in Transformer
idx_conditioned_on: index of the token to use as the sequence representation for the classification task, default is the first token
"""
super().__init__(hidden_size=hidden_size, dropout=dropout)
self.log_softmax = log_softmax
self._idx_conditioned_on = idx_conditioned_on
self.mlp = MultiLayerPerceptron(
hidden_size=hidden_size,
num_classes=num_classes,
num_layers=num_layers,
activation=activation,
log_softmax=log_softmax,
)
self.post_init(use_transformer_init=use_transformer_init)
@typecheck()
def forward(self, hidden_states):
hidden_states = self.dropout(hidden_states)
logits = self.mlp(hidden_states[:, self._idx_conditioned_on])
return logits
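# Illustrative sketch (not part of the original module): classifying a batch of sequences from
# encoder hidden states, conditioning on the first ([CLS]-style) token. Sizes are arbitrary.
def _example_sequence_classifier():
    import torch
    classifier = SequenceClassifier(hidden_size=768, num_classes=4, log_softmax=False)
    hidden_states = torch.randn(8, 128, 768)  # [batch, seq_len, hidden]
    logits = classifier(hidden_states=hidden_states)  # [8, 4] class logits
    return logits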
| NeMo-main | nemo/collections/nlp/modules/common/sequence_classifier.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
from torch import Tensor
from nemo.collections.common.parts import MultiLayerPerceptron
from nemo.collections.nlp.modules.common.classifier import Classifier
from nemo.core.classes import typecheck
from nemo.core.neural_types import NeuralType, RegressionValuesType
__all__ = ['SequenceRegression']
class SequenceRegression(Classifier):
"""
Args:
hidden_size: the hidden size of the mlp head on the top of the encoder
num_layers: number of the linear layers of the mlp head on the top of the encoder
activation: type of activations between layers of the mlp head
dropout: the dropout used for the mlp head
use_transformer_init: initializes the weights with the same approach used in Transformer
idx_conditioned_on: index of the token to use as the sequence representation for the classification task,
default is the first token
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"preds": NeuralType(tuple('B'), RegressionValuesType())}
def __init__(
self,
hidden_size: int,
num_layers: int = 2,
activation: str = 'relu',
dropout: float = 0.0,
use_transformer_init: bool = True,
idx_conditioned_on: int = 0,
):
""" Initializes the SequenceRegression module. """
super().__init__(hidden_size=hidden_size, dropout=dropout)
self._idx_conditioned_on = idx_conditioned_on
self.mlp = MultiLayerPerceptron(
hidden_size, num_classes=1, num_layers=num_layers, activation=activation, log_softmax=False,
)
self.post_init(use_transformer_init=use_transformer_init)
@typecheck()
def forward(self, hidden_states: Tensor) -> Tensor:
""" Forward pass through the module.
Args:
hidden_states: hidden states for each token in a sequence, for example, BERT module output
"""
hidden_states = self.dropout(hidden_states)
preds = self.mlp(hidden_states[:, self._idx_conditioned_on])
return preds.view(-1)
| NeMo-main | nemo/collections/nlp/modules/common/sequence_regression.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
import torch
from torch import nn as nn
from nemo.collections.common.parts import transformer_weights_init
from nemo.core.classes import Exportable, NeuralModule
from nemo.core.neural_types import ChannelType, NeuralType
__all__ = ['Classifier']
class Classifier(NeuralModule, Exportable):
"""
    A base class for modules that perform various classification tasks.
"""
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
"""
Returns definitions of module input ports.
We implement it here since all NLP classifiers have the same inputs
"""
return {"hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}
def __init__(self, hidden_size: int, dropout: float = 0.0,) -> None:
"""
Initializes the Classifier base module.
Args:
hidden_size: the size of the hidden dimension
dropout: dropout to apply to the input hidden states
"""
super().__init__()
self._hidden_size = hidden_size
self.dropout = nn.Dropout(dropout)
def post_init(self, use_transformer_init: bool):
"""
Common post-processing to be called at the end of concrete Classifiers init methods
Args:
use_transformer_init : whether or not to apply transformer_weights_init
"""
if use_transformer_init:
self.apply(lambda module: transformer_weights_init(module, xavier=False))
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
sample = next(self.parameters())
example = torch.randn(max_batch, max_dim, self._hidden_size).to(sample.device).to(sample.dtype)
return tuple([example])
def save_to(self, save_path: str):
"""
Saves the module to the specified path.
Args:
save_path: Path to where to save the module.
"""
pass
@classmethod
def restore_from(cls, restore_path: str):
"""
Restores the module from the specified path.
Args:
restore_path: Path to restore the module from.
"""
pass
| NeMo-main | nemo/collections/nlp/modules/common/classifier.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import warnings
from typing import List, Set, Tuple
import torch
from nemo.collections.nlp.modules.common.lm_utils import pad_batch
from nemo.collections.nlp.modules.common.megatron.utils import get_ltor_masks_and_position_ids
try:
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
# the text representation of eos_id, it applies for all tokenizers
END_OF_SEQ = '<|endoftext|>'
class TextGenerationStrategy:
"""
Base class for TextGeneration Strategy
"""
def __init__(self, model):
self.model = model
if self.model.training:
# TODO in the future this should raise an exception
warnings.warn(
"Generation started while the model is in training mode, switching to eval mode "
"(this situation may raise an exception in future versions, please call `eval()` before generation)"
)
self.model.eval()
self._end_of_generation_cache = None
def forward_step(self, batch, tensor_shape):
fwd_bwd_function = get_forward_backward_func()
output_tensor = fwd_bwd_function(
forward_step_func=self.model.get_forward_output_only_func(),
data_iterator=iter([batch,]),
model=[self.forward_model],
num_microbatches=get_num_microbatches(),
forward_only=True,
seq_length=tensor_shape[0],
micro_batch_size=tensor_shape[1],
)
return output_tensor
def tokenize_batch(self, sentences, max_len, add_BOS):
"""
        Convert the sentences into lists of token ids, pad them to the same length, and add BOS tokens if needed.
Args:
sentences (List[str]): list of input sentences in str format.
max_len (int): max number of tokens to generate.
add_BOS (bool): whether to add the BOS token at the beginning
Returns:
Tuple[torch.Tensor], the tokenized and padded torch tensor and the token context length tensor.
"""
tokenizer = self.model.tokenizer
if add_BOS:
context_tokens = [[tokenizer.bos_id] + tokenizer.text_to_ids(s) for s in sentences]
else:
context_tokens = [tokenizer.text_to_ids(s) for s in sentences]
context_tokens, context_lengths = pad_batch(context_tokens, tokenizer.eos_id, max_len)
context_tokens_tensor = torch.cuda.LongTensor(context_tokens)
context_length_tensor = torch.cuda.LongTensor(context_lengths)
return context_tokens_tensor, context_length_tensor
@abc.abstractclassmethod
def clip_max_len(self, maxlen: int) -> int:
""" clip the max len based on the LM model max sequence length
Args:
maxlen (int): the max len computed from the context and number of tokens to generate
returns (int):
            the max length clipped by the LM model's max sequence length
"""
pass
@abc.abstractclassmethod
    def init_batch(self, context_tokens: torch.Tensor, context_length: int, compute_attention_mask: bool):
        """initialize the batch data before the inference steps.
        It will save the intermediate results as object attributes
        Args:
            context_tokens (torch.Tensor): The padded context tokens including the space for tokens to be generated
            context_length (int): the context token length
            compute_attention_mask (bool): set to True to compute attention mask (not needed for FA)
        """
pass
@abc.abstractclassmethod
def prepare_batch_at_step(
self, tokens: torch.Tensor, maxlen: int, micro_batch_size: int, step: int, context_length: int
) -> Tuple[List[torch.Tensor], List[int]]:
"""
generate the batch used in inference for each of the steps
Args:
tokens (torch.Tensor): the context tokens
maxlen (int): the maximum length in the context tokens
micro_batch_size (int): text generation batch size
step (int): the inference step count
context_length (int): the new token position in the tokens
returns:
a tuple of list of tensor arguments for the model and a list of tensor shape required by forward method
"""
pass
@abc.abstractclassmethod
def post_process(self, tokens: torch.Tensor, new_tokens: torch.Tensor, context_length: int):
"""
At the end of the single step inference, post process the inference results
Args:
tokens (torch.Tensor): the context tokens
            new_tokens (torch.Tensor): sampled new token ids
context_length (int): the new token position in the tokens
"""
pass
def end_of_generation_condition(
self, tokens: torch.Tensor, prev: torch.Tensor, eod_id: int, end_strings: List[str]
) -> torch.Tensor:
"""
return whether the generation should stop based on the previous token
Args:
tokens (torch.Tensor): the generated tokens so far
prev (torch.Tensor): the previous token
eod_id (int): the end of document token id
end_strings (List[str]): the list of end of generation strings
returns:
a boolean tensor indicating whether the generation should stop
"""
if (len(end_strings) == 1 and end_strings[0] == END_OF_SEQ) or not end_strings:
# Simple scenario: only finish on end of document token.
return prev == eod_id
end_tokens, end_strings_to_check = self._get_end_of_generation_tokens_and_strings(eod_id, end_strings)
assert end_tokens
is_end = torch.isin(prev, torch.tensor(list(end_tokens), dtype=prev.dtype, device=prev.device))
if end_strings_to_check:
# The loop below is inefficient (see warning in `_get_end_of_generation_tokens_and_strings()`)
# TODO In addition, we will not stop if the model generates an end string followed by extra characters,
# e.g., if `end_string` is "Done" and there exists a "Done!" token it could generate tokens
# [..., ".", "Done!"]
# which would fail the `endswith("Done")` check. However, stopping when "Done!" is generated would not
# work either, since we would need to post-process the generated string to truncate the extra "!".
# ==> this is left for future work if there is a compelling use case requiring this feature.
for idx, token_seq in enumerate(tokens):
text = self.model.tokenizer.ids_to_text(token_seq.tolist())
is_end[idx] |= any(text.endswith(end_string) for end_string in end_strings_to_check)
return is_end
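# Illustrative sketch of the stop condition (token ids are hypothetical, not from the
# original code). With only the default end string the check reduces to `prev == eod_id`;
# with additional end strings, a row also stops when the previous token maps to one of those
# strings or when the decoded text ends with one of them:
#
#   prev = torch.tensor([eod_id, 42])        # last sampled token of each batch row
#   stop = strategy.end_of_generation_condition(
#       tokens=generated_tokens,             # [batch, current_length]
#       prev=prev,
#       eod_id=eod_id,
#       end_strings=["<|endoftext|>"],
#   )
#   # stop -> tensor([True, False]) in this example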
def post_generation_process(self, output):
"""
At the end of the text generation, post process the results
Args:
output (dict): the text generation output dictionary
"""
return output
def _get_end_of_generation_tokens_and_strings(
self, eod_id: int, end_strings: List[str]
) -> Tuple[Set[int], List[str]]:
"""
return the tokens and strings indicating the end of generation
Args:
eod_id (int): the end of document token id
end_strings (List[str]): the list of end of generation strings
Returns:
a pair `(tokens, strings)` where `tokens` is a set of tokens (int) and `strings` is a list of strings,
which must all be used to identify the end of generation (`tokens` always contains `eod_id`, while
`strings` may be empty if all end strings are associated to unique tokens)
"""
tokenizer = self.model.tokenizer
# A cache is used to remember which end strings are associated to unique tokens vs. which ones
# require an actual string comparison.
if self._end_of_generation_cache is None or self._end_of_generation_cache["tokenizer"] is not tokenizer:
# Invalidate the cache.
self._end_of_generation_cache = {
"tokenizer": tokenizer,
"end_string_to_token": {END_OF_SEQ: eod_id},
"end_strings_to_check": set(),
}
end_string_to_token = self._end_of_generation_cache["end_string_to_token"]
end_tokens = {eod_id} # always include `eod_id`, even if `END_OF_SEQ` is not within `end_strings`
end_strings_to_check = [] # will contain end strings that have no associated special token
for end_string in end_strings:
try:
end_tokens.add(end_string_to_token[end_string])
continue
except KeyError:
if end_string in self._end_of_generation_cache["end_strings_to_check"]:
end_strings_to_check.append(end_string)
continue
# `end_string` does not exist in the cache yet: check if `end_string` is a special token for
# the tokenizer. Ideally, we would simply use `tokenizer.text_to_ids(end_string)`, but some
# tokenizers (e.g., SentencePiece) may prefix the special token with another token associated
# to an empty string. The code below is thus meant to extract the special token associated to
# `end_string` (if it exists). Note that we use "<extra_id_1>" as prefix string to have a low
# risk of the tokenizer merging it with `end_string`, but this is somewhat arbitrary.
ids_ref = tokenizer.text_to_ids("<extra_id_1>")
ids_with_end_string = tokenizer.text_to_ids(f"<extra_id_1>{end_string}")
if len(ids_with_end_string) == len(ids_ref) + 1 and ids_with_end_string[:-1] == ids_ref:
# We can assume that the extra token is the one corresponding to `end_string`.
end_string_to_token[end_string] = ids_with_end_string[-1]
end_tokens.add(ids_with_end_string[-1])
else:
# No special token.
warnings.warn(
f"The end string '{end_string}' has no associated special token: this may slow down "
"generation (consider using a different tokenizer or modifying `end_strings`)"
)
self._end_of_generation_cache["end_strings_to_check"].add(end_string)
end_strings_to_check.append(end_string)
return end_tokens, end_strings_to_check
class GPTModelTextGenerationStrategy(TextGenerationStrategy):
def __init__(self, model):
super().__init__(model)
self.forward_model = self.model.model
def clip_max_len(self, maxlen: int) -> int:
""" clip the max len based on the LM model max sequence length"""
# for positional embedding types that allow length extrapolation, don't clip the max length
if self.model.cfg.get("position_embedding_type", "learned_absolute") == "learned_absolute":
if maxlen > self.model.cfg.encoder_seq_length + 1:
maxlen = self.model.cfg.encoder_seq_length + 1
return maxlen
def init_batch(self, context_tokens: torch.Tensor, context_length: int, compute_attention_mask: bool):
"""initialize the batch data before the inference steps."""
# Move to GPU.
tokenizer = self.model.tokenizer
tokens = context_tokens.contiguous().cuda()
# Get the attention mask and position ids.
self.attention_mask, _, self.position_ids = get_ltor_masks_and_position_ids(
tokens,
tokenizer.eos_id,
self.model.cfg.get('reset_position_ids', False),
self.model.cfg.get('reset_attention_mask', False),
self.model.cfg.get('eod_mask_loss', False),
compute_attention_mask=compute_attention_mask,
)
def prepare_batch_at_step(
self,
tokens: torch.Tensor,
maxlen: int,
micro_batch_size: int,
step: int,
context_length: int,
compute_attention_mask: bool = True,
) -> Tuple[List[torch.Tensor], List[int]]:
"""
generate the batch used in inference for each of the steps
"""
# types2use = None
if step == 0:
# Allocate memory for the entire context.
set_inference_key_value_memory = True
tokens2use = tokens[:, :context_length]
positions2use = self.position_ids[:, :context_length]
# not using type2use. uncomment it if it is used
# if type_ids is not None:
# types2use = type_ids[:, :context_length]
else:
# Set this to false so the memory is not reallocated.
set_inference_key_value_memory = False
tokens2use = tokens[:, context_length - 1].view(micro_batch_size, -1)
positions2use = self.position_ids[:, context_length - 1].view(micro_batch_size, -1)
# not using type2use. uncomment it if it is used
# if type_ids is not None:
# types2use = type_ids[:, context_length - 1].view(batch_size, -1)
"""Prepare batch for each of the inference steps"""
attention_mask_repeat = None
if compute_attention_mask:
attention_mask_repeat = torch.concat([self.attention_mask for _ in range(micro_batch_size)])
setkey_value_array = torch.tensor(
[set_inference_key_value_memory] * micro_batch_size, device=torch.cuda.current_device()
)
len_array = torch.tensor([maxlen] * micro_batch_size, device=torch.cuda.current_device())
batch = [tokens2use, attention_mask_repeat, positions2use, setkey_value_array, len_array]
tensor_shape = [tokens2use.shape[1], micro_batch_size, self.model.cfg.hidden_size]
return batch, tensor_shape
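# Shape sketch for the two phases of prepare_batch_at_step above (illustrative, with micro
# batch size B and hidden size H taken from the model config):
#
#   step == 0 : tokens2use is the whole context      -> tensor_shape == [context_length, B, H]
#   step  > 0 : tokens2use is only the newest token  -> tensor_shape == [1, B, H]
#
# setkey_value_array is True only on the first step so the model (re)allocates its inference
# key/value memory once, and len_array carries maxlen so that memory can be sized for the
# full generation.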
class PromptLearningModelTextGenerationStrategy(TextGenerationStrategy):
def __init__(self, model, task_ids):
super().__init__(model)
self.task_ids = task_ids
self.forward_model = self.model
def init_batch(self, context_tokens: torch.Tensor, context_length: int, compute_attention_mask: bool):
"""initialize the batch data before the inference steps."""
# Move to GPU.
tokenizer = self.model.tokenizer
tokens = context_tokens.contiguous().cuda()
# Get the attention mask and position ids.
self.attention_mask, _, self.position_ids = get_ltor_masks_and_position_ids(
tokens,
tokenizer.eos_id,
self.model.cfg.get('reset_position_ids', False),
self.model.cfg.get('reset_attention_mask', False),
self.model.cfg.get('eod_mask_loss', False),
compute_attention_mask=compute_attention_mask,
)
def clip_max_len(self, maxlen: int) -> int:
""" clip the max len based on the LM model max sequence length"""
if maxlen > self.model.frozen_model.cfg.encoder_seq_length + 1:
maxlen = self.model.frozen_model.cfg.encoder_seq_length + 1
return maxlen
def prepare_batch_at_step(
self,
tokens: torch.Tensor,
maxlen: int,
micro_batch_size: int,
step: int,
context_length: int,
compute_attention_mask: bool,
) -> Tuple[List[torch.Tensor], List[int]]:
# types2use = None
if step == 0:
# Allocate memory for the entire context.
set_inference_key_value_memory = True
tokens2use = tokens[:, :context_length]
positions2use = self.position_ids[:, :context_length]
# not using type2use. uncomment it if it is used
# if type_ids is not None:
# types2use = type_ids[:, :context_length]
else:
# Set this to false so the memory is not reallocated.
set_inference_key_value_memory = False
tokens2use = tokens[:, context_length - 1].view(micro_batch_size, -1)
positions2use = self.position_ids[:, context_length - 1].view(micro_batch_size, -1)
# not using type2use. uncomment it if it is used
# if type_ids is not None:
# types2use = type_ids[:, context_length - 1].view(batch_size, -1)
"""Prepare batch for each of the inference steps"""
attention_mask_repeat = None
if compute_attention_mask:
attention_mask_repeat = torch.concat([self.attention_mask for _ in range(micro_batch_size)])
setkey_value_array = torch.tensor(
[set_inference_key_value_memory] * micro_batch_size, device=torch.cuda.current_device()
)
len_array = torch.tensor([maxlen] * micro_batch_size, device=torch.cuda.current_device())
batch = [tokens2use, attention_mask_repeat, positions2use, self.task_ids, setkey_value_array, len_array]
tensor_shape = [tokens2use.shape[1], micro_batch_size, self.model.frozen_model.cfg.hidden_size]
return batch, tensor_shape
def post_process(self, tokens: torch.Tensor, new_tokens: torch.Tensor, context_length: int):
"""
At the end of the inference, post process the inference results
"""
# Replace special soft prompt token ids with unk token ids
if (
self.model.pseudo_token_ids_start is not None
): # TODO: (@adithyare) prompt learning logic can be greatly simplified by removing data preparation logic from model logic.
tokenizer = self.model.tokenizer
pseudo_token_ids_start = self.model.pseudo_token_ids_start
new_tokens[(new_tokens >= pseudo_token_ids_start)] = tokenizer.unk_id
tokens[:, :context_length][(tokens[:, :context_length] >= pseudo_token_ids_start)] = tokenizer.unk_id
def model_inference_strategy_dispatcher(model, **args):
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import (
MegatronGPTPromptLearningModel,
)
from nemo.collections.nlp.models.language_modeling.megatron_retrieval_model import MegatronRetrievalModel
from nemo.collections.nlp.modules.common.retro_inference_strategies import (
RetroFileQAModelTextGenerationStrategy,
RetroModelTextGenerationStrategy,
RetroQAModelTextGenerationStrategy,
)
if isinstance(model, MegatronGPTPromptLearningModel):
return PromptLearningModelTextGenerationStrategy(model, **args)
elif isinstance(model, MegatronGPTModel):
return GPTModelTextGenerationStrategy(model)
elif isinstance(model, MegatronRetrievalModel):
strategy_name = args['strategy']
del args['strategy']
megatron_lm_compatible = model.model.megatron_lm_compatible
args['megatron_lm_compatible'] = megatron_lm_compatible
if strategy_name == 'RetroModelTextGenerationStrategy':
return RetroModelTextGenerationStrategy(model, **args)
elif strategy_name == 'RetroQAModelTextGenerationStrategy':
return RetroQAModelTextGenerationStrategy(model, **args)
elif strategy_name == 'RetroFileQAModelTextGenerationStrategy':
return RetroFileQAModelTextGenerationStrategy(model, **args)
else:
raise ValueError(f'{strategy_name} is not supported for inference')
else:
raise ValueError(f'{model} is not supported for inference')
# Should call GPTModel or Megatron Retrieval Model's forward method
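# A hedged usage sketch of the dispatcher (the model objects are assumed to be already-loaded
# NeMo checkpoints; the RETRO keyword arguments shown are examples only):
#
#   strategy = model_inference_strategy_dispatcher(gpt_model)  # -> GPTModelTextGenerationStrategy
#   strategy = model_inference_strategy_dispatcher(prompt_model, task_ids=task_ids)
#   strategy = model_inference_strategy_dispatcher(
#       retro_model,
#       strategy='RetroModelTextGenerationStrategy',
#       frequent_query=False, pad_tokens=True, store_retrieved=False, neighbors=2,
#       combo_service={'service_ip': '0.0.0.0', 'service_port': 17181},
#   )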
| NeMo-main | nemo/collections/nlp/modules/common/text_generation_strategy.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adapted from https://github.com/gradio-app/gradio/blob/main/gradio/components.py
Fix a markdown render problem.
"""
from __future__ import annotations
import warnings
from markdown2 import Markdown
try:
from typing import Any, Callable, Dict, List, Literal, Tuple
from gradio.components import (
Changeable,
Component,
Enum,
EventListenerMethod,
IOComponent,
JSONSerializable,
Selectable,
document,
processing_utils,
)
GRADIO_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
GRADIO_AVAILABLE = False
class _Keywords(Enum):
NO_VALUE = "NO_VALUE" # Used as a sentinel to determine if nothing is provided as a argument for `value` in `Component.update()`
FINISHED_ITERATING = (
"FINISHED_ITERATING" # Used to skip processing of a component's value (needed for generators + state)
)
@document("style")
class Chatbot(Changeable, Selectable, IOComponent, JSONSerializable):
"""
Displays a chatbot output showing both user submitted messages and responses. Supports a subset of Markdown including bold, italics, code, and images.
Preprocessing: this component does *not* accept input.
Postprocessing: expects function to return a {List[Tuple[str | None | Tuple, str | None | Tuple]]}, a list of tuples with user message and response messages. Messages should be strings, tuples, or Nones. If the message is a string, it can include Markdown. If it is a tuple, it should consist of (string filepath to image/video/audio, [optional string alt text]). Messages that are `None` are not displayed.
Demos: chatbot_simple, chatbot_multimodal
"""
def __init__(
self,
value: List[Tuple[str | None, str | None]] | Callable | None = None,
color_map: Dict[str, str] | None = None, # Parameter moved to Chatbot.style()
*,
label: str | None = None,
every: float | None = None,
show_label: bool = True,
visible: bool = True,
elem_id: str | None = None,
elem_classes: List[str] | str | None = None,
**kwargs,
):
"""
Parameters:
value: Default value to show in chatbot. If callable, the function will be called whenever the app loads to set the initial value of the component.
label: component name in interface.
every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
show_label: if True, will display label.
visible: If False, component will be hidden.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
"""
if color_map is not None:
warnings.warn("The 'color_map' parameter has been deprecated.",)
# self.md = utils.get_markdown_parser()
self.md = Markdown(extras=["fenced-code-blocks", "tables", "break-on-newline"])
self.select: EventListenerMethod
"""
Event listener for when the user selects a message from Chatbot.
Uses event data gradio.SelectData to carry `value` referring to text of selected message, and `index` tuple to refer to [message, participant] index.
See EventData documentation on how to use this event data.
"""
IOComponent.__init__(
self,
label=label,
every=every,
show_label=show_label,
visible=visible,
elem_id=elem_id,
elem_classes=elem_classes,
value=value,
**kwargs,
)
def get_config(self):
return {
"value": self.value,
"selectable": self.selectable,
**IOComponent.get_config(self),
}
@staticmethod
def update(
value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
label: str | None = None,
show_label: bool | None = None,
visible: bool | None = None,
):
updated_config = {
"label": label,
"show_label": show_label,
"visible": visible,
"value": value,
"__type__": "update",
}
return updated_config
def _process_chat_messages(self, chat_message: str | Tuple | List | Dict | None) -> str | Dict | None:
if chat_message is None:
return None
elif isinstance(chat_message, (tuple, list)):
mime_type = processing_utils.get_mimetype(chat_message[0])
return {
"name": chat_message[0],
"mime_type": mime_type,
"alt_text": chat_message[1] if len(chat_message) > 1 else None,
"data": None, # These last two fields are filled in by the frontend
"is_file": True,
}
elif isinstance(chat_message, dict): # This happens for previously processed messages
return chat_message
elif isinstance(chat_message, str):
# return self.md.render(chat_message)
return str(self.md.convert(chat_message))
else:
raise ValueError(f"Invalid message for Chatbot component: {chat_message}")
def postprocess(
self, y: List[Tuple[str | Tuple | List | Dict | None, str | Tuple | List | Dict | None]],
) -> List[Tuple[str | Dict | None, str | Dict | None]]:
"""
Parameters:
y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
Returns:
List of tuples representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information.
"""
if y is None:
return []
processed_messages = []
for message_pair in y:
assert isinstance(
message_pair, (tuple, list)
), f"Expected a list of lists or list of tuples. Received: {message_pair}"
assert (
len(message_pair) == 2
), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}"
processed_messages.append(
(
# '<pre style="font-family: var(--font)">' +
# message_pair[0] + "</pre>",
message_pair[0],
self._process_chat_messages(message_pair[1]),
)
)
return processed_messages
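# Illustrative example of what postprocess produces (the converted HTML and mime type are
# approximate; only the response half of each pair is run through _process_chat_messages):
#
#   chatbot.postprocess([("**Hi**", "Hello!"), (None, ("cat.png", "a cat"))])
#   # -> [("**Hi**", "<p>Hello!</p>\n"),
#   #     (None, {"name": "cat.png", "mime_type": "image/png", "alt_text": "a cat",
#   #             "data": None, "is_file": True})]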
def style(self, height: int | None = None, **kwargs):
"""
This method can be used to change the appearance of the Chatbot component.
"""
if height is not None:
self._style["height"] = height
if kwargs.get("color_map") is not None:
warnings.warn("The 'color_map' parameter has been deprecated.")
Component.style(
self, **kwargs,
)
return self
| NeMo-main | nemo/collections/nlp/modules/common/chatbot_component.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from typing import Dict, Optional
import torch
from nemo.core.classes import NeuralModule
from nemo.core.classes.exportable import Exportable
from nemo.core.neural_types import ChannelType, MaskType, NeuralType
from nemo.utils import logging
__all__ = ['BertModule']
class BertModule(NeuralModule, Exportable):
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"attention_mask": NeuralType(('B', 'T'), MaskType(), optional=True),
"token_type_ids": NeuralType(('B', 'T'), ChannelType(), optional=True),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"last_hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}
def restore_weights(self, restore_path: str):
"""Restores module/model's weights"""
logging.info(f"Restoring weights from {restore_path}")
if not os.path.exists(restore_path):
logging.warning(f'Path {restore_path} not found')
return
pretrained_dict = torch.load(restore_path)
# backward compatibility with NeMo0.11
if "state_dict" in pretrained_dict.keys():
pretrained_dict = pretrained_dict["state_dict"]
# remove prefix from pretrained dict
m = re.match(r"^bert.*?\.", list(pretrained_dict.keys())[0])
if m:
prefix = m.group(0)
pretrained_dict = {k[len(prefix) :]: v for k, v in pretrained_dict.items()}
model_dict = self.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# starting with transformers 3.1.0, embeddings.position_ids is added to the model's state dict and could be
# missing in checkpoints trained with older transformers version
if 'embeddings.position_ids' in model_dict and 'embeddings.position_ids' not in pretrained_dict:
pretrained_dict['embeddings.position_ids'] = model_dict['embeddings.position_ids']
assert len(pretrained_dict) == len(model_dict)
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
logging.info(f"Weights for {type(self).__name__} restored from {restore_path}")
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
sample = next(self.parameters())
sz = (max_batch, max_dim)
input_ids = torch.randint(low=0, high=max_dim - 1, size=sz, device=sample.device)
token_type_ids = torch.randint(low=0, high=1, size=sz, device=sample.device)
attention_mask = torch.randint(low=0, high=1, size=sz, device=sample.device)
input_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
return tuple([input_dict])
| NeMo-main | nemo/collections/nlp/modules/common/bert_module.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from dataclasses import MISSING, dataclass
from typing import Dict, List, Optional
import nemo
from nemo.collections.common.tokenizers.bytelevel_tokenizers import ByteLevelTokenizer
from nemo.collections.common.tokenizers.char_tokenizer import CharTokenizer
from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer
from nemo.collections.common.tokenizers.regex_tokenizer import RegExTokenizer
from nemo.collections.common.tokenizers.tabular_tokenizer import TabularTokenizer
from nemo.collections.common.tokenizers.word_tokenizer import WordTokenizer
from nemo.collections.common.tokenizers.youtokentome_tokenizer import YouTokenToMeTokenizer
from nemo.collections.nlp.modules.common.huggingface.huggingface_utils import get_huggingface_pretrained_lm_models_list
from nemo.collections.nlp.modules.common.lm_utils import get_pretrained_lm_models_list
from nemo.collections.nlp.parts.nlp_overrides import HAVE_MEGATRON_CORE
from nemo.utils import logging
try:
from nemo.collections.nlp.modules.common.megatron.megatron_utils import get_megatron_tokenizer
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
__all__ = ['get_tokenizer', 'get_tokenizer_list']
megatron_tokenizer_model_map = {
'BertWordPieceLowerCase': 'megatron-bert-345m-uncased',
'BertWordPieceCase': 'megatron-bert-345m-cased',
'GPT2BPETokenizer': 'megatron-gpt-345m',
}
def get_tokenizer_list() -> List[str]:
"""
Returns all supported tokenizer names
"""
s = set(get_pretrained_lm_models_list())
s.update(set(get_huggingface_pretrained_lm_models_list(include_external=True)))
return ["sentencepiece", "char", "word"] + list(s)
@dataclass
class TokenizerConfig:
library: str = MISSING
tokenizer_model: Optional[str] = None
vocab_size: Optional[int] = None
vocab_file: Optional[str] = None
special_tokens: Optional[Dict[str, str]] = None
bpe_dropout: Optional[float] = 0.0
coverage: Optional[float] = 0.999
training_sample_size: Optional[int] = None
r2l: Optional[bool] = False
sentencepiece_legacy: Optional[bool] = False
def get_tokenizer(
tokenizer_name: str,
tokenizer_model: Optional[str] = None,
vocab_file: Optional[str] = None,
merges_file: Optional[str] = None,
special_tokens: Optional[Dict[str, str]] = None,
use_fast: Optional[bool] = False,
bpe_dropout: Optional[float] = 0.0,
):
"""
Args:
tokenizer_name: sentencepiece or pretrained model from the hugging face list,
for example: bert-base-cased
To see the list of all HuggingFace pretrained models, use:
nemo_nlp.modules.common.get_huggingface_pretrained_lm_models_list()
tokenizer_model: tokenizer model file of sentencepiece or youtokentome
special_tokens: dict of special tokens
vocab_file: path to vocab file
use_fast: (only for HuggingFace AutoTokenizer) set to True to use fast HuggingFace tokenizer
bpe_dropout: (only supported by YTTM tokenizer) BPE dropout tries to corrupt the standard segmentation
procedure of BPE to help the model better learn word compositionality and become robust to
segmentation errors. It has empirically been shown to improve inference-time BLEU scores.
"""
if special_tokens is None:
special_tokens_dict = {}
else:
special_tokens_dict = special_tokens
if 'megatron' in tokenizer_name:
if not HAVE_MEGATRON_CORE:
raise ImportError(
"Megatron-core was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
if vocab_file is None:
vocab_file = nemo.collections.nlp.modules.common.megatron.megatron_utils.get_megatron_vocab_file(
tokenizer_name
)
merges_file = nemo.collections.nlp.modules.common.megatron.megatron_utils.get_megatron_merges_file(
tokenizer_name
)
tokenizer_name = get_megatron_tokenizer(tokenizer_name)
if tokenizer_name == 'sentencepiece':
return nemo.collections.common.tokenizers.sentencepiece_tokenizer.SentencePieceTokenizer(
model_path=tokenizer_model, special_tokens=special_tokens, legacy=True
)
elif tokenizer_name == 'yttm':
return YouTokenToMeTokenizer(model_path=tokenizer_model, bpe_dropout=bpe_dropout)
elif tokenizer_name == 'word':
return WordTokenizer(vocab_file=vocab_file, **special_tokens_dict)
elif tokenizer_name == 'char':
return CharTokenizer(vocab_file=vocab_file, **special_tokens_dict)
elif tokenizer_name == 'regex':
return RegExTokenizer().load_tokenizer(regex_file=tokenizer_model, vocab_file=vocab_file)
logging.info(
f"Getting HuggingFace AutoTokenizer with pretrained_model_name: {tokenizer_name}, vocab_file: {vocab_file}, merges_files: {merges_file}, "
f"special_tokens_dict: {special_tokens_dict}, and use_fast: {use_fast}"
)
return AutoTokenizer(
pretrained_model_name=tokenizer_name,
vocab_file=vocab_file,
merges_file=merges_file,
**special_tokens_dict,
use_fast=use_fast,
)
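# Hedged usage examples (the model name and file path are placeholders, not guaranteed to be
# available in a given environment):
#
#   tok = get_tokenizer(tokenizer_name='bert-base-cased')            # HuggingFace AutoTokenizer
#   tok = get_tokenizer(tokenizer_name='sentencepiece',
#                       tokenizer_model='/path/to/tokenizer.model')  # SentencePiece model file
#   ids = tok.text_to_ids('hello world')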
def get_nmt_tokenizer(
library: str = 'yttm',
model_name: Optional[str] = None,
tokenizer_model: Optional[str] = None,
vocab_file: Optional[str] = None,
merges_file: Optional[str] = None,
special_tokens: Optional[Dict[str, str]] = None,
use_fast: Optional[bool] = False,
bpe_dropout: Optional[float] = 0.0,
r2l: Optional[bool] = False,
legacy: Optional[bool] = False,
delimiter: Optional[str] = None,
):
"""
Args:
model_name: if using a pretrained model from NeMo, HuggingFace, or Megatron
tokenizer_model: tokenizer model file of sentencepiece or youtokentome
special_tokens: dict of special tokens
vocab_file: path to vocab file
use_fast: (only for HuggingFace AutoTokenizer) set to True to use fast HuggingFace tokenizer
bpe_dropout: (only supported by YTTM tokenizer) BPE dropout tries to corrupt the standard segmentation procedure
of BPE to help the model better learn word compositionality and become robust to segmentation errors.
It has empirically been shown to improve inference time BLEU scores.
r2l: Whether to return subword IDs from right to left
"""
if special_tokens is None:
special_tokens_dict = {}
else:
special_tokens_dict = special_tokens
if (library != 'byte-level') and (
model_name is None and (tokenizer_model is None or not os.path.isfile(tokenizer_model))
):
raise ValueError("No Tokenizer path provided or file does not exist!")
if library == 'yttm':
logging.info(f'Getting YouTokenToMeTokenizer with model: {tokenizer_model} with r2l: {r2l}.')
return YouTokenToMeTokenizer(model_path=tokenizer_model, bpe_dropout=bpe_dropout, r2l=r2l)
elif library == 'huggingface':
logging.info(f'Getting HuggingFace AutoTokenizer with pretrained_model_name: {model_name}')
return AutoTokenizer(
pretrained_model_name=model_name,
vocab_file=vocab_file,
merges_file=merges_file,
**special_tokens_dict,
use_fast=use_fast,
)
elif library == 'sentencepiece':
logging.info(f'Getting SentencePiece with model: {tokenizer_model}')
return nemo.collections.common.tokenizers.sentencepiece_tokenizer.SentencePieceTokenizer(
model_path=tokenizer_model, legacy=legacy
)
elif library == 'byte-level':
logging.info(f'Using byte-level tokenization')
return ByteLevelTokenizer(special_tokens_dict)
elif library == 'regex':
logging.info(f'Using regex tokenization')
return RegExTokenizer().load_tokenizer(regex_file=tokenizer_model, vocab_file=vocab_file)
elif library == 'megatron':
if model_name in megatron_tokenizer_model_map:
model_name = megatron_tokenizer_model_map[model_name]
logging.info(
f'Getting Megatron tokenizer for pretrained model name: {model_name}, custom vocab file: {vocab_file}, and merges file: {merges_file}'
)
return get_tokenizer(tokenizer_name=model_name, vocab_file=vocab_file, merges_file=merges_file)
elif library == 'tabular':
return TabularTokenizer(vocab_file, delimiter=delimiter)
else:
raise NotImplementedError(
'Currently we only support "yttm", "huggingface", "sentencepiece", "megatron", "byte-level", '
'"regex", and "tabular" tokenizer libraries.'
)
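# Hedged usage examples for get_nmt_tokenizer (paths and names are placeholders):
#
#   tok = get_nmt_tokenizer(library='sentencepiece', tokenizer_model='/path/to/spm.model')
#   tok = get_nmt_tokenizer(library='byte-level')     # no model file required
#   tok = get_nmt_tokenizer(library='huggingface', model_name='bert-base-cased')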
| NeMo-main | nemo/collections/nlp/modules/common/tokenizer_utils.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from typing import Any, Dict, Optional
from nemo.core.classes import NeuralModule
from nemo.core.neural_types import ChannelType, EncodedRepresentation, MaskType, NeuralType
__all__ = ['DecoderModule']
class DecoderModule(NeuralModule, ABC):
""" Base class for decoder neural module to be used in NLP models. """
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"decoder_mask": NeuralType(('B', 'T'), MaskType(), optional=True),
"encoder_embeddings": NeuralType(('B', 'T', 'D'), ChannelType(), optional=True),
"encoder_mask": NeuralType(('B', 'T'), MaskType(), optional=True),
"decoder_mems": NeuralType(('B', 'D', 'T', 'D'), EncodedRepresentation(), optional=True),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"last_hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}
@property
def hidden_size(self) -> Optional[int]:
raise NotImplementedError
@property
def vocab_size(self) -> Optional[int]:
raise NotImplementedError
@property
def embedding(self) -> Optional[Any]:
raise NotImplementedError
@property
def decoder(self) -> Optional[Any]:
raise NotImplementedError
@property
def max_sequence_length(self) -> Optional[int]:
raise NotImplementedError
| NeMo-main | nemo/collections/nlp/modules/common/decoder_module.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.modules.common.bert_module import BertModule
from nemo.collections.nlp.modules.common.huggingface import (
AlbertEncoder,
BertEncoder,
CamembertEncoder,
DistilBertEncoder,
RobertaEncoder,
)
from nemo.collections.nlp.modules.common.lm_utils import get_lm_model, get_pretrained_lm_models_list
from nemo.collections.nlp.modules.common.prompt_encoder import PromptEncoder, PromptEncoderType
from nemo.collections.nlp.modules.common.prompt_table import (
VirtualPromptPlaceholderToken,
VirtualPromptSource,
VirtualPromptStyle,
)
from nemo.collections.nlp.modules.common.sequence_classifier import SequenceClassifier
from nemo.collections.nlp.modules.common.sequence_regression import SequenceRegression
from nemo.collections.nlp.modules.common.sequence_token_classifier import SequenceTokenClassifier
from nemo.collections.nlp.modules.common.token_classifier import BertPretrainingTokenClassifier, TokenClassifier
from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer, get_tokenizer_list
| NeMo-main | nemo/collections/nlp/modules/common/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for generating text."""
import json
import threading
import torch
from flask import Flask, jsonify, request
from flask_restful import Api, Resource
from nemo.collections.nlp.modules.common.retro_inference_strategies import (
RetroModelTextGenerationStrategy,
RetroQAModelTextGenerationStrategy,
)
from nemo.collections.nlp.modules.common.text_generation_utils import generate
from nemo.utils import logging
GENERATE_NUM = 0
lock = threading.Lock()
API_ALLOWED_KEYS = set(
[
'all_probs',
'sentences',
"task_ids",
"tokens_to_generate",
"temperature",
"add_BOS",
"greedy",
"top_k",
"top_p",
"neighbors",
"repetition_penalty",
"min_tokens_to_generate",
"end_strings",
]
)
class MegatronGenerate(Resource):
def __init__(self, model, inference_strategy=None):
self.model = model
self.inference_strategy = inference_strategy
@staticmethod
def send_do_generate():
choice = torch.cuda.LongTensor([GENERATE_NUM])
torch.distributed.broadcast(choice, 0)
def put(self):
logging.info("request IP: " + str(request.remote_addr))
logging.info(json.dumps(request.get_json()))
# check keys
for key in request.get_json().keys():
if key not in API_ALLOWED_KEYS:
logging.error(f"The request key {key} is not allowed")
sentences = request.get_json()["sentences"]
if isinstance(sentences, tuple): # Input can be text or tensor
if len(sentences[0]) != len(sentences[1]) or len(sentences[0]) > 128:
return "Maximum number of sentences is 128", 400
elif len(sentences) > 128:
return "Maximum number of sentences is 128", 400
task_ids = None # Used for ptuned/prompt tuned models only
if "task_ids" in request.get_json():
task_ids = request.get_json()["task_ids"]
if not isinstance(sentences, tuple):
return "Input at 'sentences' must by a tuple of two tensors like:\
(context_tokens_tensor, context_length_tensor) if task ids are given"
if len(task_ids) != len(sentences[0]):
return "Each sentence must have a corresponding task id for p-tuned/prompt-tuned models"
tokens_to_generate = 64 # Choosing hopefully sane default. Full sequence is slow
if "tokens_to_generate" in request.get_json():
tokens_to_generate = request.get_json()["tokens_to_generate"]
if not isinstance(tokens_to_generate, int):
return "tokens_to_generate must be an integer greater than 0"
if tokens_to_generate < 1:
return "tokens_to_generate must be an integer greater than 0"
all_probs = False
if "all_probs" in request.get_json():
all_probs = request.get_json()["all_probs"]
if not isinstance(all_probs, bool):
return "all_probs must be a boolean value"
temperature = 1.0
if "temperature" in request.get_json():
temperature = request.get_json()["temperature"]
if not (type(temperature) == int or type(temperature) == float):
return "temperature must be a positive number less than or equal to 100.0"
if not (0.0 < temperature <= 100.0):
return "temperature must be a positive number less than or equal to 100.0"
add_BOS = False
if "add_BOS" in request.get_json():
add_BOS = request.get_json()["add_BOS"]
if not isinstance(add_BOS, bool):
return "add_BOS must be a boolean value"
greedy = False
if "greedy" in request.get_json():
greedy = request.get_json()["greedy"]
if not isinstance(greedy, bool):
return "greedy must be a boolean value"
top_k = 0
if "top_k" in request.get_json():
top_k = request.get_json()["top_k"]
if not (type(top_k) == int or type(top_k) == float):
return "top_k must be a positive integer number"
if not (0 <= top_k):
return "top_k must be a positive integer number"
top_p = 0.9
if "top_p" in request.get_json():
top_p = request.get_json()["top_p"]
if not (type(top_p) == int or type(top_p) == float):
return "top_p must be a positive number less than or equal to 1.0"
if not (0.0 <= top_p <= 1.0):
return "top_p must be a positive number less than or equal to 1.0"
repetition_penalty = 1.2
if "repetition_penalty" in request.get_json():
repetition_penalty = request.get_json()["repetition_penalty"]
if not (type(repetition_penalty) == int or type(repetition_penalty) == float):
return "repetition_penalty must be a positive number no less than 1.0"
if not (1.0 <= repetition_penalty):
return "repetition_penalty must be a positive number no less than 1.0"
end_strings = ['<|endoftext|>']
if 'end_strings' in request.get_json():
end_strings = request.get_json()['end_strings']
if not isinstance(end_strings, list):
return "expect end_strings to be a list of strings"
if not all([isinstance(s, str) for s in end_strings]):
return "expect end_strings to be a list of strings"
min_tokens_to_generate = 0
if "min_tokens_to_generate" in request.get_json():
min_tokens_to_generate = request.get_json()["min_tokens_to_generate"]
if not isinstance(min_tokens_to_generate, int):
return "min_tokens_to_generate must be an integer no less than 0"
if min_tokens_to_generate < 0:
return "min_tokens_to_generate must be an integer no less than 0"
neighbors = None
if "neighbors" in request.get_json():
neighbors = request.get_json()["neighbors"]
if not isinstance(neighbors, int):
return "num of neighbors must be an integer no less than 0"
if neighbors < 0:
return "num of neighbors must be an integer no less than 0"
with lock: # Need to get lock to keep multiple threads from hitting code
MegatronGenerate.send_do_generate() # Tell other ranks we're doing generate
extra = {}
if task_ids is not None:
extra['task_ids'] = task_ids
if self.inference_strategy is not None:
extra['strategy'] = self.inference_strategy
# RETRO specific arguments
if isinstance(
self.inference_strategy, (RetroModelTextGenerationStrategy, RetroQAModelTextGenerationStrategy)
):
if neighbors is not None:
self.inference_strategy.update_neighbors(neighbors)
output = generate(
self.model,
sentences,
tokens_to_generate,
all_probs,
temperature,
add_BOS,
top_k,
top_p,
greedy,
repetition_penalty,
end_strings=end_strings,
min_tokens_to_generate=min_tokens_to_generate,
**extra,
)
for k in output:
if isinstance(output[k], torch.Tensor):
output[k] = output[k].tolist()
if not all_probs:
del output['full_logprob']
if self.inference_strategy is not None:
if isinstance(
self.inference_strategy, (RetroModelTextGenerationStrategy, RetroQAModelTextGenerationStrategy)
):
retrieved_doc = self.inference_strategy.retrieved_text
output['retrieved'] = retrieved_doc
return jsonify(output)
class MegatronServer(object):
def __init__(self, model, inference_strategy=None):
self.app = Flask(__name__, static_url_path='')
api = Api(self.app)
api.add_resource(MegatronGenerate, '/generate', resource_class_args=[model, inference_strategy])
def run(self, url, port=5000):
self.app.run(url, threaded=True, port=port, debug=False)
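# Hedged usage sketch (model is an already-loaded megatron model object; the JSON payload
# mirrors the keys validated in MegatronGenerate.put above):
#
#   server = MegatronServer(model)
#   server.run("0.0.0.0", port=5555)
#
#   # client side, e.g. with the `requests` library:
#   #   requests.put("http://localhost:5555/generate",
#   #                json={"sentences": ["Deep learning is"],
#   #                      "tokens_to_generate": 32,
#   #                      "temperature": 1.0, "top_k": 0, "top_p": 0.9,
#   #                      "greedy": False, "repetition_penalty": 1.2,
#   #                      "end_strings": ["<|endoftext|>"]})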
| NeMo-main | nemo/collections/nlp/modules/common/text_generation_server.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from typing import Dict, Optional
from nemo.core.classes import NeuralModule
from nemo.core.neural_types import ChannelType, MaskType, NeuralType
__all__ = ['EncoderModule']
class EncoderModule(NeuralModule, ABC):
""" Base class for encoder neural module to be used in NLP models. """
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"encoder_mask": NeuralType(('B', 'T'), MaskType()),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"last_hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}
@property
def hidden_size(self) -> Optional[int]:
raise NotImplementedError
| NeMo-main | nemo/collections/nlp/modules/common/encoder_module.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pickle
from typing import List, Tuple
import numpy as np
import torch
import torch.distributed as dist
from nemo.collections.nlp.modules.common.lm_utils import pad_batch
from nemo.collections.nlp.modules.common.megatron.retrieval_services.retrieval_service import ComboRetrievalService
from nemo.collections.nlp.modules.common.text_generation_strategy import TextGenerationStrategy
class RetroModelTextGenerationStrategy(TextGenerationStrategy):
def __init__(self, model, **args):
super().__init__(model)
self.forward_model = self.model.model
self.frequent_query = args['frequent_query']
self.pad_token_for_retrieval = args['pad_tokens']
self.store_retrieved = args['store_retrieved']
self.store = dist.FileStore('/tmp/filestore_eval', -1)
self.store.set('neighbors', str(args['neighbors']))
self.megatron_lm_compatible = args['megatron_lm_compatible']
combo_cfg = args['combo_service']
self.service = ComboRetrievalService(
tokenizer=self.model.tokenizer, service_ip=combo_cfg['service_ip'], service_port=combo_cfg['service_port']
)
self.retrieved = []
self.retrieved_text = []
self.chunk_size = self.model.cfg.chunk_size
def update_neighbors(self, neighbors):
# dynamically change the number of neighbors during the query
self.store.set('neighbors', str(neighbors))
@property
def neighbors(self):
return int(self.store.get('neighbors'))
def tokenize_batch(self, sentences, max_len, add_BOS):
"""
convert the sentences into lists of tokens, pad them to the same length, and add BOS tokens if needed
Args:
sentences (List[str]): list of input sentences in str format.
max_len (int): max number of tokens to generate.
add_BOS (bool): whether to add the BOS token at the beginning
Returns:
Tuple[torch.Tensor], the tokenized and padded torch tensor and the token context length tensor.
"""
tokenizer = self.model.tokenizer
if add_BOS:
context_tokens = [[tokenizer.bos_id] + tokenizer.text_to_ids(s) for s in sentences]
else:
context_tokens = [tokenizer.text_to_ids(s) for s in sentences]
if self.pad_token_for_retrieval:
padded = []
for line in context_tokens:
if len(line) < self.chunk_size:
pad_len = self.chunk_size - len(line)
if self.megatron_lm_compatible:
# megatron lm uses eos to pad
padded.append([tokenizer.eos_id] * pad_len + line)
else:
padded.append([tokenizer.pad_id] * pad_len + line)
else:
padded.append(line)
context_tokens = padded
context_tokens, context_lengths = pad_batch(context_tokens, tokenizer.eos_id, max_len)
context_tokens_tensor = torch.cuda.LongTensor(context_tokens)
context_length_tensor = torch.cuda.LongTensor(context_lengths)
return context_tokens_tensor, context_length_tensor
def tokenize_batch_with_context_and_completion(self, sentences, max_len, add_BOS):
"""
convert the sentences into lists of tokens, pad them to the same length, and add BOS tokens if needed
Args:
sentences (List[str]): list of input sentences in str format.
max_len (int): max number of tokens to generate.
add_BOS (bool): whether to add the BOS token at the beginning
Returns:
Tuple[torch.Tensor], the tokenized and padded torch tensor and the token context length tensor.
"""
tokenizer = self.model.tokenizer
if add_BOS:
context_tokens = [
[[tokenizer.bos_id] + tokenizer.text_to_ids(s[0]), tokenizer.text_to_ids(s[1])] for s in sentences
]
else:
context_tokens = [[tokenizer.text_to_ids(s[0]), tokenizer.text_to_ids(s[1])] for s in sentences]
if self.pad_token_for_retrieval:
padded = []
for line in context_tokens:
if len(line[0]) < self.chunk_size:
pad_len = self.chunk_size - len(line[0])
if self.megatron_lm_compatible:
# megatron lm uses eos to pad
padded.append([tokenizer.eos_id] * pad_len + line[0] + line[1])
else:
padded.append([tokenizer.pad_id] * pad_len + line[0] + line[1])
else:
padded.append(line[0] + line[1])
context_tokens = padded
context_tokens, context_lengths = pad_batch(context_tokens, tokenizer.eos_id, max_len)
context_tokens_tensor = torch.cuda.LongTensor(context_tokens)
context_length_tensor = torch.cuda.LongTensor(context_lengths)
return context_tokens_tensor, context_length_tensor
def clip_max_len(self, maxlen: int) -> int:
""" clip the max len based on the LM model max sequence length"""
if maxlen > self.model.cfg.encoder_seq_length + 1:
maxlen = self.model.cfg.encoder_seq_length + 1
return maxlen
def _store_retrieved(self, tokens, neighbors):
tokenizer = self.model.tokenizer
for batch_id in range(len(tokens)):
item = {}
query_text = tokenizer.ids_to_text(tokens[batch_id])
item['query'] = query_text
item['neighbors'] = []
for context_id in range(len(neighbors[batch_id])):
neighbor_text = tokenizer.ids_to_text(neighbors[batch_id][context_id])
item['neighbors'].append(neighbor_text)
self.retrieved_text.append(item)
def init_batch(self, context_tokens: torch.Tensor, context_length: int):
"""initialize the batch data before the inference steps."""
self.retrieved = []
self.retrieved_text = []
# Move to GPU.
tokenizer = self.model.tokenizer
tokens = context_tokens.contiguous().cuda()
micro_batch_size, seq_length = tokens.size()
position_ids = torch.arange(seq_length, dtype=torch.long, device=tokens.device)
self.position_ids = position_ids.unsqueeze(0).repeat(micro_batch_size, 1)
if self.megatron_lm_compatible:
# all TRUE for megatron lm, there is no attention mask
self.attention_mask = torch.ones_like(tokens, dtype=torch.bool)
else:
self.attention_mask = tokens != tokenizer.pad_id
for i in range(0, context_length, 64):
if i > 0:
tokens = context_tokens[:, i - 64 : i]
chunks = self.service.get_knn(tokens, self.neighbors)
if self.store_retrieved:
self._store_retrieved(tokens, chunks)
self.retrieved.append(chunks)
def prepare_batch_at_step(
self, tokens: torch.Tensor, maxlen: int, micro_batch_size: int, step: int, context_length: int
) -> Tuple[List[torch.Tensor], List[int]]:
tokenizer = self.model.tokenizer
if context_length % 64 == 0:
# added a new retrieval context
token_context = tokens[:, context_length - 64 : context_length]
chunks = self.service.get_knn(token_context, self.neighbors)
if self.store_retrieved:
self._store_retrieved(token_context, chunks)
self.retrieved.append(chunks)
elif self.frequent_query and len(self.retrieved) > 0:
token_context = tokens[:, context_length - 64 : context_length]
chunks = self.service.get_knn(token_context, self.neighbors)
if self.store_retrieved:
self._store_retrieved(token_context, chunks)
self.retrieved[-1] = chunks
# types2use = None
if step == 0:
# Allocate memory for the entire context.
set_inference_key_value_memory = True
tokens2use = tokens[:, :context_length]
positions2use = self.position_ids[:, :context_length]
# not using type2use. uncomment it if it is used
# if type_ids is not None:
# types2use = type_ids[:, :context_length]
else:
# Set this to false so the memory is not reallocated.
set_inference_key_value_memory = False
tokens2use = tokens[:, context_length - 1].view(micro_batch_size, -1)
positions2use = self.position_ids[:, context_length - 1].view(micro_batch_size, -1)
# not using type2use. uncomment it if it is used
# if type_ids is not None:
# types2use = type_ids[:, context_length - 1].view(batch_size, -1)
retrieved = torch.tensor(np.array(self.retrieved), device=torch.cuda.current_device())
if retrieved.numel() != 0:
retrieved = retrieved.transpose(0, 1).contiguous()
if self.megatron_lm_compatible:
# all TRUE for megatron lm, there is no attention mask
retrieved_mask = torch.ones_like(retrieved, dtype=torch.bool)
else:
retrieved_mask = retrieved != tokenizer.pad_id
if retrieved.numel() == 0:
# add empty retrieved
retrieved = (
torch.tensor(self.service.get_knn(['a'], 0), device=torch.cuda.current_device())
.unsqueeze(0)
.repeat(1, len(self.retrieved), 1, 1)
)
retrieved_mask = retrieved != tokenizer.pad_id
# retrieved = torch.tensor([-1] * micro_batch_size)
# retrieved_mask = torch.tensor([-1] * micro_batch_size)
"""Prepare batch for each of the inference steps"""
# attention_mask_repeat = torch.concat([self.attention_mask for _ in range(micro_batch_size)])
setkey_value_array = torch.tensor(
[set_inference_key_value_memory] * micro_batch_size, device=torch.cuda.current_device()
)
len_array = torch.tensor([maxlen] * micro_batch_size, device=torch.cuda.current_device())
if self.neighbors == 0:
# no retrieval, use 1 padding
neighbors_array = torch.tensor([1] * micro_batch_size, device=torch.cuda.current_device())
else:
neighbors_array = torch.tensor([self.neighbors] * micro_batch_size, device=torch.cuda.current_device())
batch = [
tokens2use,
self.attention_mask[:, :context_length],
retrieved,
retrieved_mask,
setkey_value_array,
len_array,
neighbors_array,
positions2use,
]
tensor_shape = [tokens2use.shape[1], micro_batch_size, self.model.cfg.hidden_size]
return batch, tensor_shape
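# Worked example of the retrieval cadence above (illustrative numbers, assuming the
# hard-coded 64-token retrieval granularity):
#
#   context_length = 192
#   # init_batch              -> neighbor lookups for windows [0:64) and [64:128)
#   # step 0 (192 % 64 == 0)  -> lookup for window [128:192)
#   # later steps             -> a new lookup each additional 64 generated tokens,
#   #                            or every step when frequent_query is enabled
#
# After the np.array / transpose, the stacked neighbors have shape
# [batch, num_lookups, neighbors, retrieved_tokens] when handed to the model.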
class RetroQAModelTextGenerationStrategy(RetroModelTextGenerationStrategy):
def tokenize_batch(self, questions, max_len, add_BOS):
"""
convert the questions into lists of tokens, pad them to the same length, and add BOS tokens if needed
Args:
questions (List[str]): list of input questions in str format.
max_len (int): max number of tokens to generate.
add_BOS (bool): whether to add the BOS token at the beginning
Returns:
Tuple[torch.Tensor], the tokenized and padded torch tensor and the token context length tensor.
"""
tokenizer = self.model.tokenizer
all_lookups = self.service.get_knn(questions, 1 + self.neighbors)
# hack to add "source: " tag
prepend_ids = np.array(tokenizer.text_to_ids('source: '))
all_lookups = np.pad(all_lookups, ((0, 0), (0, 0), (len(prepend_ids), 0)))
all_lookups[:, :, : len(prepend_ids)] = prepend_ids
all_lookups = all_lookups[:, :, : -len(prepend_ids)]
reuse_neighbors = all_lookups[:, 1:]
self.store.set('reuse_neighbors', pickle.dumps(reuse_neighbors))
neighbor_tokens = [neighbors[0].tolist() for neighbors in all_lookups]
# combine question and context
context_tokens = [
n + tokenizer.text_to_ids('\nquestion: ' + q + ' \nanswer:') for n, q in zip(neighbor_tokens, questions)
]
if add_BOS:
context_tokens = [[tokenizer.bos_id] + s for s in context_tokens]
if self.pad_token_for_retrieval:
padded = []
for line in context_tokens:
pad_len = (self.chunk_size - len(line) % self.chunk_size) % self.chunk_size
if self.megatron_lm_compatible:
padded.append([tokenizer.eos_id] * pad_len + line)
else:
padded.append([tokenizer.pad_id] * pad_len + line)
context_tokens = padded
context_tokens, context_lengths = pad_batch(context_tokens, tokenizer.eos_id, max_len)
context_tokens_tensor = torch.cuda.LongTensor(context_tokens)
context_length_tensor = torch.cuda.LongTensor(context_lengths)
return context_tokens_tensor, context_length_tensor
def init_batch(self, context_tokens: torch.Tensor, context_length: int):
"""initialize the batch data before the inference steps."""
self.retrieved = []
self.retrieved_text = []
self.reuse_neighbors = pickle.loads(self.store.get('reuse_neighbors'))
# Move to GPU.
tokenizer = self.model.tokenizer
tokens = context_tokens.contiguous().cuda()
micro_batch_size, seq_length = tokens.size()
position_ids = torch.arange(seq_length, dtype=torch.long, device=tokens.device)
self.position_ids = position_ids.unsqueeze(0).repeat(micro_batch_size, 1)
if self.megatron_lm_compatible:
# all TRUE for megatron lm, there is no attention mask
self.attention_mask = torch.ones_like(tokens, dtype=torch.bool)
else:
self.attention_mask = tokens != tokenizer.pad_id
for i in range(0, context_length, 64):
if i > 0:
tokens = context_tokens[:, i - 64 : i]
chunks = self.reuse_neighbors
if self.store_retrieved:
self._store_retrieved(tokens, chunks)
self.retrieved.append(chunks)
def prepare_batch_at_step(
self, tokens: torch.Tensor, maxlen: int, micro_batch_size: int, step: int, context_length: int
) -> Tuple[List[torch.Tensor], List[int]]:
tokenizer = self.model.tokenizer
if context_length % 64 == 0:
# added a new retrieval context
token_context = tokens[:, context_length - 64 : context_length]
chunks = self.reuse_neighbors
if self.store_retrieved:
self._store_retrieved(token_context, chunks)
self.retrieved.append(chunks)
elif self.frequent_query and len(self.retrieved) > 0:
token_context = tokens[:, context_length - 64 : context_length]
chunks = self.reuse_neighbors
if self.store_retrieved:
self._store_retrieved(token_context, chunks)
self.retrieved[-1] = chunks
# types2use = None
if step == 0:
# Allocate memory for the entire context.
set_inference_key_value_memory = True
tokens2use = tokens[:, :context_length]
positions2use = self.position_ids[:, :context_length]
# not using type2use. uncomment it if it is used
# if type_ids is not None:
# types2use = type_ids[:, :context_length]
else:
# Set this to false so the memory is not reallocated.
set_inference_key_value_memory = False
tokens2use = tokens[:, context_length - 1].view(micro_batch_size, -1)
positions2use = self.position_ids[:, context_length - 1].view(micro_batch_size, -1)
# not using type2use. uncomment it if it is used
# if type_ids is not None:
# types2use = type_ids[:, context_length - 1].view(batch_size, -1)
retrieved = torch.tensor(np.array(self.retrieved), device=torch.cuda.current_device())
if retrieved.numel() != 0:
retrieved = retrieved.transpose(0, 1).contiguous()
if self.megatron_lm_compatible:
# all TRUE for megatron lm, there is no attention mask
retrieved_mask = torch.ones_like(retrieved, dtype=torch.bool)
else:
retrieved_mask = retrieved != tokenizer.pad_id
if retrieved.numel() == 0:
# add empty retrieved
retrieved = (
torch.tensor(self.service.get_knn(['a'], 0), device=torch.cuda.current_device())
.unsqueeze(0)
.repeat(1, len(self.retrieved), 1, 1)
)
retrieved_mask = retrieved != tokenizer.pad_id
"""Prepare batch for each of the inference steps"""
# attention_mask_repeat = torch.concat([self.attention_mask for _ in range(micro_batch_size)])
setkey_value_array = torch.tensor(
[set_inference_key_value_memory] * micro_batch_size, device=torch.cuda.current_device()
)
len_array = torch.tensor([maxlen] * micro_batch_size, device=torch.cuda.current_device())
if self.neighbors == 0:
# no retrieval, use 1 padding
neighbors_array = torch.tensor([1] * micro_batch_size, device=torch.cuda.current_device())
else:
neighbors_array = torch.tensor([self.neighbors] * micro_batch_size, device=torch.cuda.current_device())
batch = [
tokens2use,
self.attention_mask[:, :context_length],
retrieved,
retrieved_mask,
setkey_value_array,
len_array,
neighbors_array,
positions2use,
]
tensor_shape = [tokens2use.shape[1], micro_batch_size, self.model.cfg.hidden_size]
return batch, tensor_shape
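    # Hedged usage sketch (driver names are assumed, not part of this file): the text
    # generation loop calls init_batch(...) once with the prompt tokens and then calls
    # prepare_batch_at_step(...) at every decoding step to build the micro-batch:
    #
    #     # (init_batch is called once with the prompt tokens before the loop)
    #     for step in range(num_generation_steps):
    #         batch, tensor_shape = strategy.prepare_batch_at_step(
    #             tokens, maxlen, micro_batch_size, step, context_length
    #         )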
def post_generation_process(self, output):
sentences = output['sentences']
modified = []
for sentence in sentences:
sentence = 'answer:' + sentence.split(' \nanswer:')[1]
modified.append(sentence)
output['sentences'] = modified
return output
class RetroFileQAModelTextGenerationStrategy(RetroQAModelTextGenerationStrategy):
def __init__(self, model, **args):
super().__init__(model, **args)
# load the DPR to memory
self.context_db = {}
with open('/dataset/FiD/test.jsonl_title', 'r') as f:
for line in f:
obj = json.loads(line)
self.context_db[obj['question']] = obj
def tokenize_batch(self, questions, max_len, add_BOS):
"""
convert the sentences into lists of tokens, pad them to the same length, add bos tokens if it is needed
Args:
questions (List[str]): list of input questions in str format.
max_len (int): max number of tokens to generate.
add_BOS (bool): whether to add the BOS token at the beginning
Returns:
Tuple[torch.Tensor], the tokenized and padded torch tensor and the token context length tensor.
"""
tokenizer = self.model.tokenizer
# get context from memory
chunks = []
first_context = []
for question in questions:
hash_code = question
if hash_code not in self.context_db:
raise ValueError(f"wrong question is fed: {question}")
contexts = self.context_db[hash_code]['ctxs']
for i, neighbor in enumerate(contexts[: self.neighbors + 1]):
text = "title: " + neighbor["title"] + ", source: " + neighbor["text"]
if i == 0:
first_context.append(text)
tokens = tokenizer.text_to_ids(text)
tokens = tokens[:128]
if len(tokens) < 128:
tokens = tokens + [tokenizer.eos_id] * (128 - len(tokens))
chunks.append(tokens)
all_lookups = np.array(chunks).reshape(1, self.neighbors + 1, -1).astype(np.int64)
reuse_neighbors = all_lookups[:, 1:]
self.store.set('reuse_neighbors', pickle.dumps(reuse_neighbors))
# combine question and context
context_tokens = [
tokenizer.text_to_ids(n + '\nquestion: ' + q + ' \nanswer:') for n, q in zip(first_context, questions)
]
if add_BOS:
context_tokens = [[tokenizer.bos_id] + s for s in context_tokens]
if self.pad_token_for_retrieval:
padded = []
for line in context_tokens:
pad_len = (self.chunk_size - len(line) % self.chunk_size) % self.chunk_size
padded.append([tokenizer.eos_id] * pad_len + line)
context_tokens = padded
context_tokens, context_lengths = pad_batch(context_tokens, tokenizer.eos_id, max_len)
context_tokens_tensor = torch.cuda.LongTensor(context_tokens)
context_length_tensor = torch.cuda.LongTensor(context_lengths)
return context_tokens_tensor, context_length_tensor
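    # Hedged usage sketch (the question string is illustrative and must exist as a key in
    # the preloaded DPR context file): tokenize_batch returns padded token ids together
    # with the per-sample context lengths.
    #
    #     context_tokens, context_lengths = strategy.tokenize_batch(
    #         ["who wrote the declaration of independence?"], max_len=30, add_BOS=False
    #     )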
| NeMo-main | nemo/collections/nlp/modules/common/retro_inference_strategies.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
from nemo.collections.common.parts import MultiLayerPerceptron
from nemo.collections.nlp.modules.common.classifier import Classifier
from nemo.core.classes import typecheck
from nemo.core.neural_types import LogitsType, NeuralType
__all__ = ['SequenceTokenClassifier']
class SequenceTokenClassifier(Classifier):
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"intent_logits": NeuralType(('B', 'D'), LogitsType()),
"slot_logits": NeuralType(('B', 'T', 'D'), LogitsType()),
}
def __init__(
self,
hidden_size: int,
num_intents: int,
num_slots: int,
num_layers: int = 2,
activation: str = 'relu',
log_softmax: bool = False,
dropout: float = 0.0,
use_transformer_init: bool = True,
):
"""
Initializes the SequenceTokenClassifier module, could be used for tasks that train sequence and
token classifiers jointly, for example, for intent detection and slot tagging task.
Args:
hidden_size: hidden size of the mlp head on the top of the encoder
num_intents: number of the intents to predict
num_slots: number of the slots to predict
num_layers: number of the linear layers of the mlp head on the top of the encoder
activation: type of activations between layers of the mlp head
log_softmax: applies the log softmax on the output
dropout: the dropout used for the mlp head
use_transformer_init: initializes the weights with the same approach used in Transformer
"""
super().__init__(hidden_size=hidden_size, dropout=dropout)
self.intent_mlp = MultiLayerPerceptron(
hidden_size=hidden_size,
num_classes=num_intents,
num_layers=num_layers,
activation=activation,
log_softmax=log_softmax,
)
self.slot_mlp = MultiLayerPerceptron(
hidden_size=hidden_size,
num_classes=num_slots,
num_layers=num_layers,
activation=activation,
log_softmax=log_softmax,
)
self.post_init(use_transformer_init=use_transformer_init)
@typecheck()
def forward(self, hidden_states):
hidden_states = self.dropout(hidden_states)
        # the intent is classified from the first token's hidden state; slots from every position
intent_logits = self.intent_mlp(hidden_states[:, 0])
slot_logits = self.slot_mlp(hidden_states)
return intent_logits, slot_logits
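    # Hedged usage sketch (sizes are illustrative): given encoder outputs of shape
    # [batch, seq_len, hidden_size], the intent logits come from the first token position
    # and the slot logits from every position.
    #
    #     classifier = SequenceTokenClassifier(hidden_size=768, num_intents=10, num_slots=20)
    #     hidden = torch.randn(4, 32, 768)  # stand-in for BERT-style encoder outputs
    #     intent_logits, slot_logits = classifier(hidden_states=hidden)
    #     # intent_logits.shape -> [4, 10]; slot_logits.shape -> [4, 32, 20]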
| NeMo-main | nemo/collections/nlp/modules/common/sequence_token_classifier.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CSS = """
#chatbot .hll { background-color: #ffffcc }
#chatbot .c { color: #408080; font-style: italic }
#chatbot .err { border: 1px solid #FF0000 }
#chatbot .k { color: #008000; font-weight: bold }
#chatbot .o { color: #666666 }
#chatbot .ch { color: #408080; font-style: italic }
#chatbot .cm { color: #408080; font-style: italic }
#chatbot .cp { color: #BC7A00 }
#chatbot .cpf { color: #408080; font-style: italic }
#chatbot .c1 { color: #408080; font-style: italic }
#chatbot .cs { color: #408080; font-style: italic }
#chatbot .gd { color: #A00000 }
#chatbot .ge { font-style: italic }
#chatbot .gr { color: #FF0000 }
#chatbot .gh { color: #000080; font-weight: bold }
#chatbot .gi { color: #00A000 }
#chatbot .go { color: #888888 }
#chatbot .gp { color: #000080; font-weight: bold }
#chatbot .gs { font-weight: bold }
#chatbot .gu { color: #800080; font-weight: bold }
#chatbot .gt { color: #0044DD }
#chatbot .kc { color: #008000; font-weight: bold }
#chatbot .kd { color: #008000; font-weight: bold }
#chatbot .kn { color: #008000; font-weight: bold }
#chatbot .kp { color: #008000 }
#chatbot .kr { color: #008000; font-weight: bold }
#chatbot .kt { color: #B00040 }
#chatbot .m { color: #666666 }
#chatbot .s { color: #BA2121 }
#chatbot .na { color: #7D9029 }
#chatbot .nb { color: #008000 }
#chatbot .nc { color: #0000FF; font-weight: bold }
#chatbot .no { color: #880000 }
#chatbot .nd { color: #AA22FF }
#chatbot .ni { color: #999999; font-weight: bold }
#chatbot .ne { color: #D2413A; font-weight: bold }
#chatbot .nf { color: #0000FF }
#chatbot .nl { color: #A0A000 }
#chatbot .nn { color: #0000FF; font-weight: bold }
#chatbot .nt { color: #008000; font-weight: bold }
#chatbot .nv { color: #19177C }
#chatbot .ow { color: #AA22FF; font-weight: bold }
#chatbot .w { color: #bbbbbb }
#chatbot .mb { color: #666666 }
#chatbot .mf { color: #666666 }
#chatbot .mh { color: #666666 }
#chatbot .mi { color: #666666 }
#chatbot .mo { color: #666666 }
#chatbot .sa { color: #BA2121 }
#chatbot .sb { color: #BA2121 }
#chatbot .sc { color: #BA2121 }
#chatbot .dl { color: #BA2121 }
#chatbot .sd { color: #BA2121; font-style: italic }
#chatbot .s2 { color: #BA2121 }
#chatbot .se { color: #BB6622; font-weight: bold }
#chatbot .sh { color: #BA2121 }
#chatbot .si { color: #BB6688; font-weight: bold }
#chatbot .sx { color: #008000 }
#chatbot .sr { color: #BB6688 }
#chatbot .s1 { color: #BA2121 }
#chatbot .ss { color: #19177C }
#chatbot .bp { color: #008000 }
#chatbot .fm { color: #0000FF }
#chatbot .vc { color: #19177C }
#chatbot .vg { color: #19177C }
#chatbot .vi { color: #19177C }
#chatbot .vm { color: #19177C }
#chatbot .il { color: #666666 }
"""
| NeMo-main | nemo/collections/nlp/modules/common/chat_css.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import enum
from typing import Dict, Optional
import torch
import torch.nn.init as init
from torch import nn
from nemo.collections.nlp.modules.common.megatron.fused_bias_gelu import fused_bias_gelu
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults, init_method_normal
from nemo.core.classes import Exportable, NeuralModule
from nemo.core.classes.common import typecheck
try:
from megatron.core import ModelParallelConfig, tensor_parallel
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
__all__ = ["PromptEncoder", "PromptEncoderType"]
class PromptEncoderType(enum.Enum):
TPMLP = "tpmlp" # mlp model that support tensor parallel, better work together with a large language model
MLP = "mlp"
LSTM = "lstm"
EMBEDDING = "embedding"
class PromptEmbedding(NeuralModule, Exportable):
"""Prompt embeddings
Arugments:
init_from_prompt_text: Whether to intialize prompt embeddings
from from certain lm embeddings
corresponding to a prompt string
hidden_size: hidden size should match lm embedding size
total_virtual_tokens: length of prompt initalized from torch init method
"""
def __init__(
self, hidden_size, total_virtual_tokens,
):
super().__init__()
self.hidden_size = hidden_size
self.total_virtual_tokens = total_virtual_tokens
# Randomly init token and position embeddings
self.prompt_embeddings = torch.nn.Embedding(self.total_virtual_tokens, self.hidden_size)
self.prompt_embeddings.weight.data.fill_(0.0)
self.prompt_embeddings.weight.requires_grad = False
        # Set fixed indices for the forward pass
self.register_buffer("indices", torch.LongTensor(list(range(self.total_virtual_tokens))), persistent=False)
def clear_prompt_embedding_weights(self,):
"""
Method sets the prompt embedding weights to 0.0
"""
self.prompt_embeddings.weight.fill_(0.0)
def set_prompt_embedding_weights(self, weight: torch.Tensor):
"""
        Method sets the prompt embedding weights with the given weight tensor
"""
self.prompt_embeddings.weight.data = weight.type_as(self.prompt_embeddings.weight.data)
def forward(self,):
"""
Does forward pass
"""
return self.prompt_embeddings(self.indices)
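    # Hedged usage sketch: a PromptEmbedding holding 10 virtual tokens of size 768 returns
    # a [10, 768] tensor of (initially zero, frozen) embeddings on every forward call.
    #
    #     emb = PromptEmbedding(hidden_size=768, total_virtual_tokens=10)
    #     virtual_token_embs = emb()  # shape: [10, 768]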
class InferenceTable(NeuralModule, Exportable):
"""
A wrapper class that holds the output representations of the PromptEncoder Model.
At inference time we do not need to forward pass through the full PromptEncoder and can just use this class.
"""
def __init__(self, taskname, hidden_size, total_virtual_tokens, is_inference_ready=False):
super().__init__()
self.taskname = taskname
self.hidden_size = hidden_size
self.total_virtual_tokens = total_virtual_tokens
self.prompt_table = torch.nn.ModuleDict()
self.prompt_table[self.taskname] = PromptEmbedding(self.hidden_size, self.total_virtual_tokens)
self.prompt_table[self.taskname].clear_prompt_embedding_weights()
self.is_inference_ready = is_inference_ready
for p in self.prompt_table.parameters():
p.requires_grad = False
def set_prompt_table(self, prompt_representation: torch.Tensor):
"""
Method sets the prompt embedding inside self.prompt_table[taskname] with new weights
"""
self.prompt_table[self.taskname].set_prompt_embedding_weights(prompt_representation)
self.is_inference_ready = True
def get_prompt_table(self,):
"""
Returns the prompt representation cached in the prompt table
"""
return self.prompt_table[self.taskname].forward()
def clear_prompt_table(self,):
"""
Method "clears" the prompt embedding inside self.prompt_table[taskname] by setting it to zero.
"""
self.prompt_table[self.taskname].clear_prompt_embedding_weights()
self.is_inference_ready = False
class TPMLP(NeuralModule, Exportable):
"""
The Tensor Parallel MLP prompt encoder network that is used to generate the virtual
    token embeddings for p-tuning. It only has two layers.
"""
def __init__(
self,
config: ModelParallelConfig,
total_virtual_tokens: int,
hidden_size: int,
output_size: int,
init_std: float,
):
"""
Initializes the Tensor Model parallel MLP PromptEncoderMLP module.
Args:
            config: the model parallel config used by Megatron Core
            total_virtual_tokens: the total number of virtual tokens
hidden_size: hidden dimension
output_size: the output dimension
init_std: the MLP init std value
"""
super().__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.total_virtual_tokens = total_virtual_tokens
self.activation = "gelu"
config = copy.deepcopy(config)
config.sequence_parallel = False
config.gradient_accumulation_fusion = False
self.first = tensor_parallel.ColumnParallelLinear(
self.output_size,
self.hidden_size,
config=config,
gather_output=False,
init_method=init_method_normal(init_std),
skip_bias_add=True,
bias=True,
)
self.second = tensor_parallel.RowParallelLinear(
self.hidden_size,
self.output_size,
config=config,
input_is_parallel=True,
init_method=init_method_normal(init_std),
skip_bias_add=True,
bias=True,
)
def forward(self, input_embeds) -> torch.Tensor:
intermediate_parallel, bias_parallel = self.first(input_embeds)
intermediate_parallel = fused_bias_gelu(intermediate_parallel, bias_parallel)
output_embeds, bias_parallel = self.second(intermediate_parallel)
output_embeds = output_embeds + bias_parallel
return output_embeds
class PromptEncoder(NeuralModule, Exportable):
"""
The prompt encoder network that is used to generate the virtual
token embeddings for p-tuning.
"""
def __init__(
self,
config: ModelParallelConfig,
encoder_type: enum,
total_virtual_tokens: int,
token_dim: int,
hidden_size,
lstm_dropout: float,
num_layers: int,
init_std: float,
taskname: str = "taskname",
):
"""
Initializes the PromptEncoder module.
Args:
            config: the model parallel config used by Megatron Core
            encoder_type: the type of prompt encoder (TPMLP, MLP, LSTM, or EMBEDDING)
            total_virtual_tokens: the total number of virtual tokens
            token_dim: dimension of the output virtual token embeddings (should match the LM embedding size)
            hidden_size: hidden dimension
lstm_dropout: the dropout used for the LSTM
num_layers: number of layers used in the LSTM
init_std: used for TPMLP encoder type to initialize the mlp weights
"""
super().__init__()
self.token_dim = token_dim
self.input_size = token_dim
self.output_size = token_dim
self.hidden_size = hidden_size
self.total_virtual_tokens = total_virtual_tokens
self.encoder_type = encoder_type
self.activation = "gelu"
self.init_std = init_std
self.taskname = taskname
        # Set fixed indices for the forward pass
self.register_buffer("indices", torch.LongTensor(list(range(self.total_virtual_tokens))))
# embedding
self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim)
self.inference_table = InferenceTable(taskname, self.token_dim, self.total_virtual_tokens)
if self.encoder_type == PromptEncoderType.EMBEDDING:
init.xavier_normal_(self.embedding.weight)
elif self.encoder_type == PromptEncoderType.LSTM:
# LSTM
self.lstm_head = torch.nn.LSTM(
input_size=self.input_size,
hidden_size=self.hidden_size,
num_layers=num_layers,
dropout=lstm_dropout,
bidirectional=True,
batch_first=True,
)
self.mlp_head = nn.Sequential(
nn.Linear(self.hidden_size * 2, self.hidden_size * 2),
nn.ReLU(),
nn.Linear(self.hidden_size * 2, self.output_size),
)
elif self.encoder_type == PromptEncoderType.MLP:
if num_layers <= 1:
raise ValueError(
"The MLP prompt encoder must have at least 2 layers, and exactly 2 layers is recommended."
)
layers = [nn.Linear(self.input_size, self.hidden_size), nn.ReLU()]
for _ in range(num_layers - 2):
layers.extend([nn.Linear(self.hidden_size, self.hidden_size), nn.ReLU()])
layers.append(nn.Linear(self.hidden_size, self.output_size))
self.mlp_head = nn.Sequential(*layers)
elif self.encoder_type == PromptEncoderType.TPMLP:
self.tpmlp = TPMLP(config, self.total_virtual_tokens, self.hidden_size, self.output_size, self.init_std,)
else:
raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.")
def set_inference_table(self, prompt_representation: torch.Tensor):
"""
This method caches the output representation from the Encoder and saves it inside `self.inference_table`.
"""
prompt_representation = prompt_representation.detach().clone()
self.inference_table.set_prompt_table(prompt_representation)
def clear_inference_table(self,):
self.inference_table.clear_prompt_table()
def get_inference_table(self,):
return self.inference_table.get_prompt_table()
    def state_dict(self, destination=None, prefix=None, keep_vars=False):
_state_dict = {}
        # (@adithyare) this key is for backward compatibility with downstream users of the "inference ready" model.
        _state_dict['prompt_table'] = self.inference_table.state_dict()
_state_dict['embeddings'] = self.embedding.state_dict()
if self.encoder_type == PromptEncoderType.EMBEDDING:
pass
elif self.encoder_type == PromptEncoderType.LSTM:
_state_dict['mlp_head'] = self.mlp_head.state_dict()
_state_dict['lstm_head'] = self.lstm_head.state_dict()
elif self.encoder_type == PromptEncoderType.MLP:
_state_dict['mlp_head'] = self.mlp_head.state_dict()
elif self.encoder_type == PromptEncoderType.TPMLP:
_state_dict['tpmlp'] = self.tpmlp.state_dict()
else:
raise ValueError("Prompt encoder type not recognized. Pl.")
return _state_dict
def load_state_dict(self, state_dict, strict=True):
self.inference_table.load_state_dict(state_dict['prompt_table'])
self.embedding.load_state_dict(state_dict['embeddings'])
if self.encoder_type == PromptEncoderType.EMBEDDING:
pass
elif self.encoder_type == PromptEncoderType.LSTM:
self.mlp_head.load_state_dict(state_dict['mlp_head'])
            self.lstm_head.load_state_dict(state_dict['lstm_head'])
elif self.encoder_type == PromptEncoderType.MLP:
self.mlp_head.load_state_dict(state_dict['mlp_head'])
elif self.encoder_type == PromptEncoderType.TPMLP:
self.tpmlp.load_state_dict(state_dict['tpmlp'])
else:
raise ValueError("Prompt encoder type not recognized. Pl.")
return
def _forward(self,):
input_embeds = self.embedding(self.indices).unsqueeze(0)
if self.encoder_type == PromptEncoderType.EMBEDDING:
output_embeds = input_embeds
elif self.encoder_type == PromptEncoderType.LSTM:
output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0])
elif self.encoder_type == PromptEncoderType.MLP:
output_embeds = self.mlp_head(input_embeds)
elif self.encoder_type == PromptEncoderType.TPMLP:
output_embeds = self.tpmlp(input_embeds)
else:
raise ValueError("Prompt encoder type not recognized. Pl.")
return output_embeds
@typecheck()
def forward(self, batch_size: int, use_cached_reps: bool) -> torch.Tensor:
"""
Forward pass through the encoder with caching of prompt representations
"""
if use_cached_reps:
output_embeds = self.get_inference_table().unsqueeze(0)
else:
if self.training:
if self.inference_table.is_inference_ready:
self.clear_inference_table()
output_embeds = self._forward()
else:
if not self.inference_table.is_inference_ready:
output_embeds = self._forward()
self.set_inference_table(output_embeds.squeeze(0))
output_embeds = self.get_inference_table().unsqueeze(0)
output_embeds = output_embeds.expand(batch_size, self.total_virtual_tokens, self.token_dim)
return output_embeds
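    # Hedged usage sketch (values are illustrative; the MLP encoder type does not use the
    # ModelParallelConfig, so config can be left as None for a quick local check):
    #
    #     encoder = PromptEncoder(
    #         config=None,
    #         encoder_type=PromptEncoderType.MLP,
    #         total_virtual_tokens=10,
    #         token_dim=768,
    #         hidden_size=512,
    #         lstm_dropout=0.0,
    #         num_layers=2,
    #         init_std=0.023,
    #     )
    #     encoder.eval()
    #     embs = encoder(batch_size=4, use_cached_reps=False)  # shape: [4, 10, 768]
    #     # the first eval-mode call caches the representation in the inference table;
    #     # later calls with use_cached_reps=True reuse it without re-running the MLP head.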
| NeMo-main | nemo/collections/nlp/modules/common/prompt_encoder.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Optional, Union
from attr import asdict
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.nlp.modules.common.bert_module import BertModule
from nemo.collections.nlp.modules.common.decoder_module import DecoderModule
from nemo.collections.nlp.modules.common.encoder_module import EncoderModule
from nemo.collections.nlp.modules.common.huggingface.huggingface_utils import (
get_huggingface_lm_model,
get_huggingface_pretrained_lm_models_list,
)
from nemo.collections.nlp.modules.common.megatron.megatron_utils import get_megatron_pretrained_bert_models
from nemo.collections.nlp.modules.common.transformer.transformer import NeMoTransformerConfig
from nemo.collections.nlp.modules.common.transformer.transformer_utils import (
get_huggingface_transformer,
get_nemo_transformer,
)
from nemo.utils import AppState, logging
__all__ = ['get_pretrained_lm_models_list', 'get_lm_model', 'pad_batch']
def pad_batch(batch, pad_id, max_len):
    """Pad each token list in the batch to max_context_length + max_len and return the original context lengths."""
    context_lengths = []
max_context_length = max([len(tokens) for tokens in batch])
for tokens in batch:
context_length = len(tokens)
if context_length < max_context_length + max_len:
tokens.extend([pad_id] * (max_context_length + max_len - context_length))
context_lengths.append(context_length)
return batch, context_lengths
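# Hedged example of pad_batch (values are illustrative): with pad_id=0 and max_len=3 the
# longest context has 4 tokens, so every sample is padded to 4 + 3 = 7 tokens in place.
#
#     batch = [[5, 6], [7, 8, 9, 10]]
#     padded, lengths = pad_batch(batch, pad_id=0, max_len=3)
#     # padded  -> [[5, 6, 0, 0, 0, 0, 0], [7, 8, 9, 10, 0, 0, 0]]
#     # lengths -> [2, 4]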
def get_pretrained_lm_models_list(include_external: bool = False) -> List[str]:
"""
Returns the list of supported pretrained model names
Args:
        include_external: if True, includes all HuggingFace model names, not only the language models supported in NeMo.
"""
return get_huggingface_pretrained_lm_models_list(include_external=include_external)
def get_lm_model(
config_dict: Optional[dict] = None,
config_file: Optional[str] = None,
vocab_file: Optional[str] = None,
trainer: Optional[Trainer] = None,
cfg: DictConfig = None,
) -> BertModule:
"""
Helper function to instantiate a language model encoder, either from scratch or a pretrained model.
    If only a pretrained_model_name is passed, a pretrained model is returned.
If a configuration is passed, whether as a file or dictionary, the model is initialized with random weights.
Args:
        config_dict: model configuration dictionary
config_file: path to the model configuration file
vocab_file: path to vocab_file to be used with Megatron-LM
trainer: an instance of a PyTorch Lightning trainer
cfg: a model configuration
Returns:
Pretrained BertModule
"""
# check valid model type
if cfg.language_model.get('pretrained_model_name'):
if (
not cfg.language_model.pretrained_model_name
or cfg.language_model.pretrained_model_name not in get_pretrained_lm_models_list(include_external=False)
):
logging.warning(
f'{cfg.language_model.pretrained_model_name} is not in get_pretrained_lm_models_list(include_external=False), '
f'will be using AutoModel from HuggingFace.'
)
# warning when user passes both configuration dict and file
if config_dict and config_file:
logging.warning(
f"Both config_dict and config_file were found, defaulting to use config_file: {config_file} will be used."
)
pretrain_model_name = ''
if cfg.get('language_model') and cfg.language_model.get('pretrained_model_name', ''):
pretrain_model_name = cfg.language_model.get('pretrained_model_name', '')
all_pretrained_megatron_bert_models = get_megatron_pretrained_bert_models()
if (
cfg.tokenizer is not None
and cfg.tokenizer.get("tokenizer_name", "") is not None
and "megatron" in cfg.tokenizer.get("tokenizer_name", "")
) or pretrain_model_name in all_pretrained_megatron_bert_models:
import torch
from nemo.collections.nlp.models.language_modeling.megatron_bert_model import MegatronBertModel
class Identity(torch.nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x, *args):
return x
if cfg.language_model.get("lm_checkpoint"):
model = MegatronBertModel.restore_from(restore_path=cfg.language_model.lm_checkpoint, trainer=trainer)
else:
model = MegatronBertModel.from_pretrained(cfg.language_model.get('pretrained_model_name'), trainer=trainer)
        # remove the heads that are only relevant for pretraining
model.model.lm_head = Identity()
model.model.binary_head = Identity()
model.model.language_model.pooler = Identity()
else:
model = get_huggingface_lm_model(
config_dict=config_dict,
config_file=config_file,
pretrained_model_name=cfg.language_model.pretrained_model_name,
)
if cfg.language_model.get("lm_checkpoint"):
app_state = AppState()
if not app_state.is_model_being_restored and not os.path.exists(cfg.language_model.lm_checkpoint):
raise ValueError(f'{cfg.language_model.lm_checkpoint} not found')
model.restore_weights(restore_path=cfg.language_model.lm_checkpoint)
return model
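# Hedged usage sketch (config fields assumed from the checks above; the model name is
# illustrative): a minimal config with a HuggingFace pretrained model name is enough to
# obtain a BERT-style encoder.
#
#     from omegaconf import OmegaConf
#     cfg = OmegaConf.create(
#         {
#             "language_model": {"pretrained_model_name": "bert-base-uncased", "lm_checkpoint": None},
#             "tokenizer": None,
#         }
#     )
#     bert_encoder = get_lm_model(cfg=cfg)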
# @dataclass
# class TransformerConfig:
# library: str = 'nemo'
# model_name: Optional[str] = None
# pretrained: bool = False
# config_dict: Optional[dict] = None
# checkpoint_file: Optional[str] = None
# encoder: bool = True
def get_transformer(
library: str = 'nemo',
model_name: Optional[str] = None,
pretrained: bool = False,
config_dict: Optional[dict] = None,
checkpoint_file: Optional[str] = None,
encoder: bool = True,
pre_ln_final_layer_norm: bool = True,
padding_idx: int = 0,
) -> Union[EncoderModule, DecoderModule]:
"""Gets Transformer based model to be used as an Encoder or Decoder in NeMo NLP.
First choose the library to get the transformer from. This can be huggingface,
megatron, or nemo. Use the model_name arg to get a named model architecture
and use the pretrained arg to get the named model architecture with pretrained weights.
If model_name is None, then we can pass in a custom configuration via the config_dict.
For example, to instantiate a HuggingFace BERT model with custom configuration we would do:
encoder = get_transformer(library='huggingface',
config_dict={
'_target_': 'transformers.BertConfig',
'hidden_size': 1536
})
Args:
library (str, optional): Can be 'nemo', 'huggingface', or 'megatron'. Defaults to 'nemo'.
model_name (Optional[str], optional): Named model architecture from the chosen library. Defaults to None.
pretrained (bool, optional): Use True to get pretrained weights.
False will use the same architecture but with randomly initialized weights.
Defaults to False.
config_dict (Optional[dict], optional): Use for custom configuration of transformer. Defaults to None.
checkpoint_file (Optional[str], optional): Provide weights for the transformer from a local checkpoint. Defaults to None.
encoder (bool, optional): True returns an EncoderModule, False returns a DecoderModule. Defaults to True.
Returns:
Union[EncoderModule, DecoderModule]: Ensures that Encoder/Decoder will work in EncDecNLPModel
"""
model = None
if library == 'nemo':
if isinstance(config_dict, NeMoTransformerConfig):
config_dict = asdict(config_dict)
model = get_nemo_transformer(
model_name=model_name,
pretrained=pretrained,
config_dict=config_dict,
encoder=encoder,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
padding_idx=padding_idx,
)
if checkpoint_file is not None:
if os.path.isfile(checkpoint_file):
raise ValueError(f'Loading transformer weights from checkpoint file has not been implemented yet.')
elif library == 'huggingface':
model = get_huggingface_transformer(
model_name=model_name, pretrained=pretrained, config_dict=config_dict, encoder=encoder
)
elif library == 'megatron':
raise ValueError(
f'megatron-lm bert support has been deprecated in NeMo 1.5+. Please use NeMo 1.4 for support.'
)
# TODO: enable megatron bert in nemo
# model = get_megatron_transformer(
# model_name=model_name,
# pretrained=pretrained,
# config_dict=config_dict,
# encoder=encoder,
# checkpoint_file=checkpoint_file,
# )
else:
raise ValueError("Libary must be 'nemo', 'huggingface' or 'megatron'")
return model
| NeMo-main | nemo/collections/nlp/modules/common/lm_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
__all__ = ['VirtualPromptSource', 'VirtualPromptStyle', 'VirtualPromptPlaceholderToken']
class VirtualPromptStyle(enum.Enum):
P_TUNING = 'p-tuning'
NO_PROMPT = 'no-prompts'
class VirtualPromptSource(enum.Enum):
PROMPT_ENCODER = 'prompt_encoder'
NO_PROMPT = 'no-prompts'
class VirtualPromptPlaceholderToken(enum.Enum):
BASE = '<prompt_'
END = '>'
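# Hedged usage sketch: the placeholder pieces are typically assembled as BASE + index + END
# to build the virtual-token strings inserted into prompts.
#
#     tokens = [
#         VirtualPromptPlaceholderToken.BASE.value + str(i) + VirtualPromptPlaceholderToken.END.value
#         for i in range(3)
#     ]
#     # tokens -> ['<prompt_0>', '<prompt_1>', '<prompt_2>']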
| NeMo-main | nemo/collections/nlp/modules/common/prompt_table.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
try:
import gradio as gr
GRADIO_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
GRADIO_AVAILABLE = False
from nemo.collections.nlp.modules.common.chat_css import CSS
from nemo.collections.nlp.modules.common.megatron.retrieval_services.util import (
convert_retrieved_to_md,
request_data,
text_generation,
)
__all__ = ['RetroDemoWebApp', 'get_demo']
TURN_TOKEN = '<extra_id_1>'
PROMPT_PRESETS = {
"DIALOGUE": {
"SYSTEM_TURN_TOKEN": '',
"USER_TURN_TOKEN": '<extra_id_1>',
"BOT_TURN_TOKEN": '<extra_id_2>',
"END_OF_NAME": '',
"END_OF_TURN": '\n',
},
"DIALOGUE2": {
"SYSTEM_TURN_TOKEN": '<extra_id_0>System\n',
"USER_TURN_TOKEN": '<extra_id_1>',
"BOT_TURN_TOKEN": '<extra_id_1>',
"END_OF_NAME": '\n',
"END_OF_TURN": '\n',
},
}
PRESETS = {
"K1-Greedy": {"temperature": 1.0, "top_p": 0.9, "top_k": 1, "repetition_penalty": 1.0,},
"K50": {"temperature": 0.75, "top_p": 0.95, "top_k": 50, "repetition_penalty": 1.0,},
"K50-Creative": {"temperature": 0.85, "top_p": 0.95, "top_k": 50, "repetition_penalty": 1.0,},
"K50-Precise": {"temperature": 0.1, "top_p": 0.95, "top_k": 50, "repetition_penalty": 1.0,},
"K50-Original": {"temperature": 0.9, "top_p": 0.95, "top_k": 50, "repetition_penalty": 1.0,},
"Nucleus9": {"temperature": 0.8, "top_p": 0.9, "top_k": 10000, "repetition_penalty": 1.0,},
"Custom": {"temperature": 0.75, "top_p": 0.95, "top_k": 50, "repetition_penalty": 1.0,},
}
def check_gradio_import():
if not GRADIO_AVAILABLE:
msg = (
f"could not find the gradio library.\n"
f"****************************************************************\n"
f"To install it, please follow the steps below:\n"
f"pip install gradio==3.34.0\n"
)
raise ImportError(msg)
def create_gen_function(port=5555, chat=False):
def get_generation(prompt, greedy, add_BOS, token_to_gen, min_tokens, temp, top_p, top_k, repetition, end_strings):
data = {
"sentences": [prompt],
"tokens_to_generate": int(token_to_gen),
"temperature": temp,
"add_BOS": add_BOS,
"top_k": top_k,
"top_p": top_p,
"greedy": greedy,
"all_probs": False,
"repetition_penalty": repetition,
"min_tokens_to_generate": int(min_tokens),
"end_strings": [i.strip() for i in end_strings.split(',') if len(i) != 0],
}
response = text_generation(data, port=port)
sentences = response['sentences']
bot_message = sentences[0]
if bot_message.find('<extra_id_0') < 0:
# hack due to the problem that huggingface's tokenizer strips out the <extra_id_x> token
prompt = prompt.replace('<extra_id_0>', '').replace('<extra_id_1>', '').replace('<extra_id_2>', '')
bot_message = bot_message[len(prompt) :]
return bot_message
return get_generation
def get_demo(share, username, password, server_port=5555, web_port=9889, loop=None):
check_gradio_import()
asyncio.set_event_loop(loop)
with gr.Blocks(css=CSS) as demo:
with gr.Row():
with gr.Column(scale=2, width=200):
                # store the multiple-turn conversation
token_to_gen = gr.Number(label='Number of Tokens to generate', value=300, type=int)
min_token_to_gen = gr.Number(label='Min number of Tokens to generate', value=1, type=int)
seed = gr.Number(label='Random seed', value=0, type=int)
end_strings = gr.Textbox(label="End strings (comma separated)", value="<extra_id_1>,", lines=1,)
add_BOS = gr.Checkbox(label="Add BOS token", value=False)
sampling_method = gr.Dropdown(
list(PRESETS.keys()), label='Sampling Presets', default='K50', value='K50'
)
temperature = gr.Slider(minimum=0.0, maximum=5.0, value=0.75, label='Temperature', step=0.1)
top_p = gr.Slider(minimum=0.0, maximum=1.0, step=0.02, value=0.95, label='Top P')
top_k = gr.Slider(minimum=0, maximum=1024, step=2, value=50, label='Top K')
repetition_penality = gr.Slider(
minimum=1.0, maximum=5.0, step=0.02, value=1.0, label='Repetition penalty'
)
def set_sampling(x):
return list(PRESETS[x].values())
sampling_method.change(
set_sampling, inputs=[sampling_method], outputs=[temperature, top_p, top_k, repetition_penality]
)
with gr.Column(scale=1, min_width=900):
text = gr.Textbox(label="Playground", value="", lines=60, placeholder="Type something here...",)
submit_btn = gr.Button("Generate")
clear = gr.Button("Clear")
def on_submit(
prompt_text,
token_to_gen,
temperature,
top_p,
top_k,
repetition_penality,
seed,
end_strings,
add_BOS,
min_token_to_gen,
):
output = create_gen_function(server_port)(
prompt_text,
False,
add_BOS,
token_to_gen,
min_token_to_gen,
temperature,
top_p,
top_k,
repetition_penality,
end_strings,
)
print(output)
print('-------------------')
return prompt_text + output
def clear_fun():
return ''
submit_btn.click(
on_submit,
[
text,
token_to_gen,
temperature,
top_p,
top_k,
repetition_penality,
seed,
end_strings,
add_BOS,
min_token_to_gen,
],
[text],
queue=False,
)
clear.click(clear_fun, None, text, queue=False)
demo.queue(concurrency_count=16).launch(
share=share, server_port=web_port, server_name='0.0.0.0', auth=(username, password)
)
def get_chatbot_demo(
share, username, password, server_port=5555, web_port=9889, loop=None, value=False, defaults=None, attributes=None,
):
check_gradio_import()
from nemo.collections.nlp.modules.common.chatbot_component import Chatbot
asyncio.set_event_loop(loop)
with gr.Blocks(css=CSS) as demo:
with gr.Row():
with gr.Column(scale=2, width=200):
                # store the multiple-turn conversation
session_state = gr.State(value=[])
token_to_gen = gr.Number(label='Number of Tokens to generate', value=300, type=int)
seed = gr.Number(label='Random seed', value=0, type=int)
prompt_presets = gr.Dropdown(
list(PROMPT_PRESETS.keys()), label='Template Presets', default='DIALOGUE2', value='DIALOGUE2'
)
sampling_method = gr.Dropdown(
list(PRESETS.keys()), label='Sampling Presets', default='K50', value='K50'
)
with gr.Accordion("Sampling Parameters", open=False):
temperature = gr.Slider(
minimum=0.0, maximum=5.0, value=0.75, label='Temperature', step=0.1, interactive=False
)
top_p = gr.Slider(
minimum=0.0, maximum=1.0, step=0.02, value=0.95, label='Top P', interactive=False
)
top_k = gr.Slider(minimum=0, maximum=1024, step=2, value=50, label='Top K', interactive=False)
repetition_penality = gr.Slider(
minimum=1.0, maximum=5.0, step=0.02, value=1.0, label='Repetition penalty', interactive=False
)
with gr.Accordion("Value Parameters", open=True, visible=value):
keys = [k.key for k in attributes]
# keys = ['quality', 'toxicity', 'humor', 'creativity', 'violence', 'helpfulness', 'not_appropriate']
widgets = []
for item in attributes:
if item.type == 'int':
slider = gr.Slider(
minimum=item.min, maximum=item.max, step=1, value=item.default, label=item.name
)
widgets.append(slider)
elif item.type == 'list':
dropdown = gr.Dropdown(
item.choices, label=item.name, default=item.default, value=item.default
)
widgets.append(dropdown)
used_value = gr.CheckboxGroup(keys, value=keys)
def change_visibility(x):
values = []
for key in keys:
if key in x:
values.append(gr.update(visible=True))
else:
values.append(gr.update(visible=False))
return values
used_value.change(
change_visibility, inputs=[used_value], outputs=widgets,
)
def set_sampling(x):
if x == 'Custom':
values = [gr.update(value=v, interactive=True) for v in PRESETS[x].values()]
return values
else:
values = [gr.update(value=v, interactive=False) for v in PRESETS[x].values()]
return values
sampling_method.change(
set_sampling, inputs=[sampling_method], outputs=[temperature, top_p, top_k, repetition_penality]
)
gr.HTML("<hr>")
human_name = gr.Textbox(label="Human Name", value=defaults['user'], line=1,)
assistant_name = gr.Textbox(label="Assistant Name", value=defaults['assistant'], line=1,)
preamble = gr.Textbox(label="System", value=defaults['system'], lines=2,)
def set_prompt(x):
if x == "DIALOGUE":
return '', ''
return defaults['user'], defaults['assistant']
prompt_presets.change(set_prompt, inputs=[prompt_presets], outputs=[human_name, assistant_name])
with gr.Column(scale=1, min_width=900):
chatbot = Chatbot(elem_id="chatbot").style(height=800)
msg = gr.Textbox(label="User", value="", lines=1,)
clear = gr.Button("Clear")
def user(user_message, history, session_state):
session_state.append(user_message)
user_message = user_message.replace('\n', '<br>')
return "", history + [[user_message, None]]
def get_value_str(values_array, used_value):
if len(used_value) == 0:
return ''
assert len(values_array) == len(keys)
value_str = '<extra_id_2>'
elements = []
for i, key in enumerate(keys):
if key in used_value:
elements.append(f'{key}:{values_array[i]}')
value_str += ','.join(elements) + '\n'
return value_str
def bot(
history,
preamble,
token_to_gen,
temperature,
top_p,
top_k,
repetition_penality,
seed,
human_name,
assistant_name,
session_state,
prompts_presets,
used_value,
*values,
):
values_array = values
if value:
value_str = get_value_str(values_array, used_value)
else:
value_str = ''
prompt_preset = PROMPT_PRESETS[prompts_presets]
prompt_text = ''
names = [human_name, assistant_name]
turn_tokens = [prompt_preset['USER_TURN_TOKEN'], prompt_preset['BOT_TURN_TOKEN']]
for i, meg in enumerate(session_state):
name = names[i % 2]
turn = turn_tokens[i % 2]
prompt_text += turn + name + prompt_preset['END_OF_NAME'] + meg + prompt_preset['END_OF_TURN']
prompt_text += (
prompt_preset['BOT_TURN_TOKEN'] + assistant_name + prompt_preset['END_OF_NAME'] + value_str
)
prompt_text = prompt_preset['SYSTEM_TURN_TOKEN'] + preamble + prompt_text
bot_message = create_gen_function(server_port)(
prompt_text,
False,
False,
token_to_gen,
1,
temperature,
top_p,
top_k,
repetition_penality,
'<extra_id_1>',
)
if bot_message.endswith(TURN_TOKEN):
bot_message = bot_message[: -len(TURN_TOKEN)]
history[-1][1] = bot_message
print(prompt_text)
print(bot_message)
print('-------------------')
session_state.append(value_str + bot_message.strip())
return history
msg.submit(user, [msg, chatbot, session_state], [msg, chatbot], queue=False).then(
bot,
[
chatbot,
preamble,
token_to_gen,
temperature,
top_p,
top_k,
repetition_penality,
seed,
human_name,
assistant_name,
session_state,
prompt_presets,
used_value,
*widgets,
],
[chatbot],
)
def clear_fun(session_state):
session_state.clear()
return None
clear.click(clear_fun, [session_state], chatbot, queue=False)
demo.launch(share=share, server_port=web_port, server_name='0.0.0.0', auth=(username, password))
class RetroDemoWebApp:
def __init__(self, text_service_ip, text_service_port, combo_service_ip, combo_service_port):
self.text_service_ip = text_service_ip
self.text_service_port = text_service_port
self.combo_service_ip = combo_service_ip
self.combo_service_port = combo_service_port
def get_retro_generation(
self,
prompt,
greedy,
add_BOS,
token_to_gen,
min_tokens,
temp,
top_p,
top_k,
repetition,
neighbors,
weight,
end_strings,
):
data = {
"sentences": [prompt],
"tokens_to_generate": int(token_to_gen),
"temperature": temp,
"add_BOS": add_BOS,
"top_k": top_k,
"top_p": top_p,
"greedy": greedy,
"all_probs": False,
"repetition_penalty": repetition,
"min_tokens_to_generate": int(min_tokens),
"neighbors": int(neighbors),
"end_strings": [i.strip() for i in end_strings.split(',') if len(i) != 0],
}
self.update_weight(weight)
output_json = text_generation(data, self.text_service_ip, self.text_service_port)
sentences = output_json['sentences']
retrieved = output_json['retrieved']
return sentences[0], convert_retrieved_to_md(retrieved)
def update_weight(self, weight):
data = {"update_weight": [weight, 1.0 - weight]}
return request_data(data, self.combo_service_ip, self.combo_service_port)
def add_doc(self, doc, add_eos):
data = {
"sentences": [doc],
"add_eos": add_eos,
}
return request_data(data, self.combo_service_ip, self.combo_service_port)
def reset_index(self):
data = {"reset": None}
return request_data(data, self.combo_service_ip, self.combo_service_port)
def run_demo(self, share, username, password, port):
check_gradio_import()
with gr.Blocks(css="table, th, td { border: 1px solid blue; table-layout: fixed; width: 100%; }") as demo:
with gr.Row():
with gr.Column(scale=2, width=200):
greedy_flag = gr.Checkbox(label="Greedy", value=True)
add_BOS = gr.Checkbox(label="Add BOS token", value=False)
token_to_gen = gr.Number(label='Number of Tokens to generate', value=30, type=int)
min_token_to_gen = gr.Number(label='Min number of Tokens to generate', value=1, type=int)
temperature = gr.Slider(minimum=0.0, maximum=10.0, value=1.0, label='Temperature', step=0.1)
top_p = gr.Slider(minimum=0.0, maximum=1.0, step=0.02, value=0.9, label='Top P')
top_k = gr.Slider(minimum=0, maximum=10000, step=2, value=0, label='Top K')
repetition_penality = gr.Slider(
minimum=1.0, maximum=5.0, step=0.02, value=1.2, label='Repetition penalty'
)
end_strings = gr.Textbox(label="End strings (comma separated)", value="<|endoftext|>,", lines=1,)
k_neighbors = gr.Slider(minimum=0, maximum=50, step=1, value=2, label='Retrieved Documents')
weight = gr.Slider(
minimum=0.0, maximum=1.0, value=1.0, label='Weight for the Static Retrieval DB', step=0.02
)
add_retrival_doc = gr.Textbox(label="Add New Retrieval Doc", value="", lines=5,)
add_EOS = gr.Checkbox(label="Add EOS token to Retrieval Doc", value=False)
with gr.Row():
add_btn = gr.Button(value="Add")
reset_btn = gr.Button(value="Reset Index")
output_status = gr.Label(value='')
add_btn.click(self.add_doc, inputs=[add_retrival_doc, add_EOS], outputs=[output_status])
reset_btn.click(self.reset_index, inputs=[], outputs=[output_status])
with gr.Column(scale=1, min_width=800):
input_prompt = gr.Textbox(
label="Input",
value="Ariel was playing basketball. 1 of her shots went in the hoop. 2 of her shots did not go in the hoop. How many shots were there in total?",
lines=5,
)
output_box = gr.Textbox(value="", label="Output")
btn = gr.Button(value="Submit")
output_retrieval = gr.HTML()
btn.click(
self.get_retro_generation,
inputs=[
input_prompt,
greedy_flag,
add_BOS,
token_to_gen,
min_token_to_gen,
temperature,
top_p,
top_k,
repetition_penality,
k_neighbors,
weight,
end_strings,
],
outputs=[output_box, output_retrieval],
)
demo.launch(share=share, server_port=port, server_name='0.0.0.0', auth=(username, password))
| NeMo-main | nemo/collections/nlp/modules/common/megatron_web_server.py |