python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from typing import Dict, Optional
import torch
from nemo.core.classes import NeuralModule
from nemo.core.classes.exportable import Exportable
from nemo.core.neural_types import ChannelType, FloatType, IntType, MaskType, NeuralType, StringType, VoidType
from nemo.utils import logging
__all__ = ['GPTModule']
class GPTModule(NeuralModule, Exportable):
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"token_type_ids": NeuralType(('B', 'T'), ChannelType(), optional=True),
"attention_mask": NeuralType(('B', 'T'), MaskType(), optional=True),
"labels": NeuralType(('B', 'T'), ChannelType(), optional=True),
'past_key_values': [[NeuralType(None, StringType(), optional=True)]],
'use_cache': NeuralType(None, VoidType(), optional=True),
'position_ids': NeuralType(('B', 'T'), ChannelType(), optional=True),
"return_dict": NeuralType(None, StringType(), optional=True),
"output_attentions": NeuralType(None, StringType(), optional=True),
"output_hidden_states": NeuralType(None, StringType(), optional=True),
"max_length": NeuralType(None, IntType(), optional=True),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {
'loss': NeuralType(None, FloatType(), optional=True),
'hidden_states': NeuralType(('B', 'T', 'D'), ChannelType()),
}
def restore_weights(self, restore_path: str):
"""Restores module/model's weights"""
logging.info(f"Restoring weights from {restore_path}")
if not os.path.exists(restore_path):
logging.warning(f'Path {restore_path} not found')
return
pretrained_dict = torch.load(restore_path)
# backward compatibility with NeMo 0.11
if "state_dict" in pretrained_dict.keys():
pretrained_dict = pretrained_dict["state_dict"]
# remove prefix from pretrained dict
m = re.match(r"^gpt.*?\.", list(pretrained_dict.keys())[0])
if m:
prefix = m.group(0)
pretrained_dict = {k[len(prefix) :]: v for k, v in pretrained_dict.items()}
model_dict = self.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# starting with transformers 3.1.0, embeddings.position_ids is added to the model's state dict and could be
# missing in checkpoints trained with older transformers version
if 'embeddings.position_ids' in model_dict and 'embeddings.position_ids' not in pretrained_dict:
pretrained_dict['embeddings.position_ids'] = model_dict['embeddings.position_ids']
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
logging.info(f"Weights for {type(self).__name__} restored from {restore_path}")
def input_example(self):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
sample = next(self.parameters())
input_ids = torch.randint(low=0, high=2048, size=(2, 16), device=sample.device)
token_type_ids = torch.randint(low=0, high=1, size=(2, 16), device=sample.device)
attention_mask = torch.randint(low=0, high=1, size=(2, 16), device=sample.device)
return tuple([input_ids, token_type_ids, attention_mask])
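# Illustrative sketch (not part of the original file): the checkpoint-prefix stripping
# performed by ``restore_weights`` above, demonstrated on a plain dict. The key names
# here are hypothetical and chosen only to show how the regex removes the leading
# ``gpt*.`` prefix.
if __name__ == "__main__":
    _ckpt = {"gpt2.transformer.wte.weight": 0, "gpt2.transformer.wpe.weight": 1}
    _m = re.match(r"^gpt.*?\.", list(_ckpt.keys())[0])
    if _m:
        _prefix = _m.group(0)  # "gpt2."
        _ckpt = {k[len(_prefix) :]: v for k, v in _ckpt.items()}
    assert "transformer.wte.weight" in _ckpt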
| NeMo-main | nemo/collections/nlp/modules/common/gpt_module.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, Optional
from torch import nn as nn
from nemo.collections.common.parts import MultiLayerPerceptron
from nemo.collections.nlp.modules.common.classifier import Classifier
from nemo.core.classes import typecheck
from nemo.core.neural_types import LogitsType, LogprobsType, NeuralType
__all__ = ['BertPretrainingTokenClassifier', 'TokenClassifier']
ACT2FN = {"gelu": nn.functional.gelu, "relu": nn.functional.relu}
@dataclass
class TokenClassifierConfig:
num_layers: int = 1
activation: str = 'relu'
log_softmax: bool = True
dropout: float = 0.0
use_transformer_init: bool = True
class TokenClassifier(Classifier):
"""
A module to perform token-level classification tasks such as Named Entity Recognition (NER).
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""
Returns definitions of module output ports.
"""
if not self.log_softmax:
return {"logits": NeuralType(('B', 'T', 'C'), LogitsType())}
else:
return {"log_probs": NeuralType(('B', 'T', 'C'), LogprobsType())}
def __init__(
self,
hidden_size: int,
num_classes: int,
num_layers: int = 1,
activation: str = 'relu',
log_softmax: bool = True,
dropout: float = 0.0,
use_transformer_init: bool = True,
) -> None:
"""
Initializes the Token Classifier module.
Args:
hidden_size: the size of the hidden dimension
num_classes: number of classes
num_layers: number of fully connected layers in the multilayer perceptron (MLP)
activation: activation to use between the fully connected layers in the MLP
log_softmax: whether to apply log softmax to the output of the MLP
dropout: dropout to apply to the input hidden states
use_transformer_init: whether to initialize the weights of the classifier head with the same approach used in Transformer
"""
super().__init__(hidden_size=hidden_size, dropout=dropout)
self.log_softmax = log_softmax
self.mlp = MultiLayerPerceptron(
hidden_size, num_classes, num_layers=num_layers, activation=activation, log_softmax=log_softmax
)
self.post_init(use_transformer_init=use_transformer_init)
@typecheck()
def forward(self, hidden_states):
"""
Performs the forward step of the module.
Args:
hidden_states: batch of hidden states (for example, from the BERT encoder module)
[BATCH_SIZE x SEQ_LENGTH x HIDDEN_SIZE]
Returns: logits value for each class [BATCH_SIZE x SEQ_LENGTH x NUM_CLASSES]
"""
hidden_states = self.dropout(hidden_states)
logits = self.mlp(hidden_states)
return logits
class BertPretrainingTokenClassifier(Classifier):
"""
A module to perform token-level classification tasks for BERT pretraining.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""
Returns definitions of module output ports.
"""
if not self.log_softmax:
return {"logits": NeuralType(('B', 'T', 'C'), LogitsType())}
else:
return {"log_probs": NeuralType(('B', 'T', 'C'), LogprobsType())}
def __init__(
self,
hidden_size: int,
num_classes: int,
num_layers: int = 1,
activation: str = 'relu',
log_softmax: bool = True,
dropout: float = 0.0,
use_transformer_init: bool = True,
) -> None:
"""
Initializes the Token Classifier module.
Args:
hidden_size: the size of the hidden dimension
num_classes: number of classes
num_layers: number of fully connected layers in the multilayer perceptron (MLP)
activation: activation to use between the fully connected layers in the MLP
log_softmax: whether to apply log softmax to the output of the MLP
dropout: dropout to apply to the input hidden states
use_transformer_init: whether to initialize the weights of the classifier head with the same approach used in Transformer
"""
super().__init__(hidden_size=hidden_size, dropout=dropout)
self.log_softmax = log_softmax
if activation not in ACT2FN:
raise ValueError(f'activation "{activation}" not found')
self.dense = nn.Linear(hidden_size, hidden_size)
self.act = ACT2FN[activation]
self.norm = nn.LayerNorm(hidden_size, eps=1e-12)
self.mlp = MultiLayerPerceptron(
hidden_size, num_classes, num_layers=num_layers, activation=activation, log_softmax=log_softmax
)
self.post_init(use_transformer_init=use_transformer_init)
@typecheck()
def forward(self, hidden_states):
"""
Performs the forward step of the module.
Args:
hidden_states: batch of hidden states (for example, from the BERT encoder module)
[BATCH_SIZE x SEQ_LENGTH x HIDDEN_SIZE]
Returns: logits value for each class [BATCH_SIZE x SEQ_LENGTH x NUM_CLASSES]
"""
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = self.act(hidden_states)
transform = self.norm(hidden_states)
logits = self.mlp(transform)
return logits
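# Illustrative usage sketch (not part of the original file), assuming NeMo and its
# dependencies are installed. The shapes are arbitrary: batch 2, sequence length 8,
# hidden size 16, 5 classes.
if __name__ == "__main__":
    import torch

    classifier = TokenClassifier(hidden_size=16, num_classes=5, num_layers=1, log_softmax=True)
    hidden = torch.randn(2, 8, 16)
    log_probs = classifier(hidden_states=hidden)
    print(log_probs.shape)  # expected: torch.Size([2, 8, 5])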
| NeMo-main | nemo/collections/nlp/modules/common/token_classifier.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import torch
import torch.nn as nn
from nemo.collections.common.parts import form_attention_mask
from nemo.collections.nlp.modules.common.transformer.transformer_modules import MultiHeadAttention, PositionWiseFF
__all__ = ["TransformerEncoder"]
class TransformerEncoderBlock(nn.Module):
"""
Building block of Transformer encoder.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
inner_size: number of neurons in the intermediate part of feed-forward
net, usually is (4-8 x hidden_size) in the papers
num_attention_heads: number of heads in multi-head attention
attn_score_dropout: probability of dropout applied to attention scores
attn_layer_dropout: probability of dropout applied to the output of the
attention layers, but before layer normalization
ffn_dropout: probability of dropout applied to FFN output
hidden_act: activation function used between two linear layers in FFN
"""
def __init__(
self,
hidden_size: int,
inner_size: int,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
):
super().__init__()
self.pre_ln = pre_ln
self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=1e-5)
self.first_sub_layer = MultiHeadAttention(
hidden_size, num_attention_heads, attn_score_dropout, attn_layer_dropout
)
self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=1e-5)
self.second_sub_layer = PositionWiseFF(hidden_size, inner_size, ffn_dropout, hidden_act)
def forward_preln(self, encoder_query, encoder_mask, encoder_keys):
"""
Pre-LayerNorm block
Order of operations: LN -> Self-Attn -> Residual -> LN -> FFN -> Residual
"""
residual = encoder_query
encoder_query = self.layer_norm_1(encoder_query)
encoder_keys = self.layer_norm_1(encoder_keys)
self_attn_output = self.first_sub_layer(encoder_query, encoder_keys, encoder_keys, encoder_mask)
self_attn_output += residual
residual = self_attn_output
self_attn_output = self.layer_norm_2(self_attn_output)
output_states = self.second_sub_layer(self_attn_output)
output_states += residual
return output_states
def forward_postln(self, encoder_query, encoder_mask, encoder_keys):
"""
Post-LayerNorm block
Order of operations: Self-Attn -> Residual -> LN -> FFN -> Residual -> LN
"""
self_attn_output = self.first_sub_layer(encoder_query, encoder_keys, encoder_keys, encoder_mask)
self_attn_output += encoder_query
self_attn_output = self.layer_norm_1(self_attn_output)
output_states = self.second_sub_layer(self_attn_output)
output_states += self_attn_output
output_states = self.layer_norm_2(output_states)
return output_states
def forward(self, encoder_query, encoder_mask, encoder_keys):
if self.pre_ln:
return self.forward_preln(encoder_query, encoder_mask, encoder_keys)
else:
return self.forward_postln(encoder_query, encoder_mask, encoder_keys)
class TransformerEncoder(nn.Module):
def __init__(
self,
num_layers: int,
hidden_size: int,
inner_size: int,
mask_future: bool = False,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
):
super().__init__()
if pre_ln and pre_ln_final_layer_norm:
self.final_layer_norm = nn.LayerNorm(hidden_size, eps=1e-5)
else:
self.final_layer_norm = None
layer = TransformerEncoderBlock(
hidden_size,
inner_size,
num_attention_heads,
attn_score_dropout,
attn_layer_dropout,
ffn_dropout,
hidden_act,
pre_ln,
)
self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])
self.diag = 0 if mask_future else None
def _get_memory_states(self, encoder_states, encoder_mems_list=None, i=0):
if encoder_mems_list is not None:
memory_states = torch.cat((encoder_mems_list[i], encoder_states), dim=1)
else:
memory_states = encoder_states
return memory_states
def forward(self, encoder_states, encoder_mask, encoder_mems_list=None, return_mems=False):
"""
Args:
encoder_states: output of the embedding_layer (B x L_enc x H)
encoder_mask: encoder inputs mask (B x L_enc)
encoder_mems_list: list of the cached encoder hidden states
for fast autoregressive generation which will be used instead
of encoder_states as keys and values if not None
return_mems: bool, whether to return outputs of all encoder layers
or the last layer only
"""
encoder_attn_mask = form_attention_mask(encoder_mask, self.diag)
memory_states = self._get_memory_states(encoder_states, encoder_mems_list, 0)
cached_mems_list = [memory_states]
for i, layer in enumerate(self.layers):
encoder_states = layer(encoder_states, encoder_attn_mask, memory_states)
memory_states = self._get_memory_states(encoder_states, encoder_mems_list, i + 1)
cached_mems_list.append(memory_states)
if self.final_layer_norm is not None:
encoder_states = self.final_layer_norm(encoder_states)
memory_states = self._get_memory_states(encoder_states, encoder_mems_list, i + 1)
cached_mems_list.append(memory_states)
if return_mems:
return cached_mems_list
else:
return cached_mems_list[-1]
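# Illustrative usage sketch (not part of the original file), assuming NeMo is installed.
# A 2-layer post-LN encoder over a batch of 2 sequences of length 8 with hidden size 16;
# the padding mask marks every position as valid.
if __name__ == "__main__":
    encoder = TransformerEncoder(num_layers=2, hidden_size=16, inner_size=64, num_attention_heads=4)
    states = torch.randn(2, 8, 16)
    mask = torch.ones(2, 8)
    out = encoder(encoder_states=states, encoder_mask=mask)
    print(out.shape)  # expected: torch.Size([2, 8, 16])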
| NeMo-main | nemo/collections/nlp/modules/common/transformer/transformer_encoders.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import torch
from nemo.collections.common.parts import NEG_INF, mask_padded_tokens
__all__ = [
"GreedySequenceGenerator",
"TopKSequenceGenerator",
"BeamSearchSequenceGenerator",
"BeamSearchSequenceGeneratorWithLanguageModel",
"EnsembleBeamSearchSequenceGenerator",
]
class GreedySequenceGenerator:
"""
Greedy sequence generator based on the decoder followed by log_softmax.
Args:
embedding: nn.Module, transforms input_ids into vector embeddings
decoder: nn.Module, takes embeddings and produces hidden_states
log_softmax: nn.Module, takes hidden_states and produces log_probs
which correspond to probability distribution of tokens (ids)
pad: index of padding token in the vocabulary
bos: index of beginning of sequence token in the vocabulary
eos: index of end of sequence token in the vocabulary
max_sequence_length: maximum allowed length for generated sequences
max_delta_length: in case of encoder-decoder generation (e.g. NMT),
forbids generated sequences to be longer than the length of
source sequences plus max_delta_length
batch_size: size of the batch of generated sequences if neither
source nor target starting sequences are provided
"""
def __init__(
self,
embedding,
decoder,
log_softmax,
pad=0,
bos=1,
eos=2,
max_sequence_length=512,
max_delta_length=20,
batch_size=1,
):
super().__init__()
self.embedding = embedding
self.decoder = decoder
self.log_softmax = log_softmax
self.pad, self.bos, self.eos = pad, bos, eos
self.max_seq_length = max_sequence_length
self.max_delta_len = max_delta_length
self.batch_size = batch_size
def _one_step_forward(
self,
decoder_input_ids=None,
encoder_hidden_states=None,
encoder_input_mask=None,
decoder_mems_list=None,
pos=0,
):
"""
One step of autoregressive output generation.
Args:
decoder_input_ids: starting sequence of tokens to generate from;
if None, generation will start from a batch of <bos> tokens
encoder_hidden_states: output of the encoder for conditional
sequence generation; if None, generator will use unconditional
mode (e.g., language modeling)
encoder_input_mask: input mask used in the encoder
decoder_mems_list: list of size num_layers with cached activations
of sequence (x[1], ..., x[k-1]) for fast generation of x[k]
pos: starting position in positional encoding
"""
decoder_hidden_states = self.embedding.forward(decoder_input_ids, start_pos=pos)
decoder_input_mask = mask_padded_tokens(decoder_input_ids, self.pad).float()
if encoder_hidden_states is not None:
decoder_mems_list = self.decoder.forward(
decoder_hidden_states,
decoder_input_mask,
encoder_hidden_states,
encoder_input_mask,
decoder_mems_list,
return_mems=True,
)
else:
decoder_mems_list = self.decoder.forward(
decoder_hidden_states, decoder_input_mask, decoder_mems_list, return_mems=True
)
log_probs = self.log_softmax.forward(hidden_states=decoder_mems_list[-1][:, -1:])
return log_probs, decoder_mems_list
def _prepare_for_search(self, decoder_input_ids=None, encoder_hidden_states=None):
"""
Helper function which defines starting sequence to begin generating
with and maximum allowed number of tokens to be generated.
"""
decoder_parameter = next(self.decoder.parameters())
batch_size = self.batch_size
# for encoder-decoder generation, maximum length of generated sequence
# is min(max_sequence_length, src_len + max_delta_length)
if encoder_hidden_states is not None:
batch_size, src_len, _ = encoder_hidden_states.size()
if self.max_delta_len >= 0:
max_seq_length = min(self.max_seq_length, src_len + self.max_delta_len)
else:
max_seq_length = self.max_seq_length
else:
max_seq_length = self.max_seq_length
# if no input is provided, start with the batch of <bos> tokens
if decoder_input_ids is not None:
tgt = decoder_input_ids
batch_size, tgt_len = decoder_input_ids.size()
else:
tgt = torch.zeros(batch_size, 1).long().fill_(self.bos).to(decoder_parameter.device)
tgt_len = 1
max_generation_length = max_seq_length - tgt_len
return tgt, batch_size, max_generation_length
def _forward(
self, decoder_input_ids=None, encoder_hidden_states=None, encoder_input_mask=None, return_beam_scores=False
):
assert not return_beam_scores
tgt, batch_size, max_generation_length = self._prepare_for_search(decoder_input_ids, encoder_hidden_states)
# pad profile tracks sequences ending with <eos> token to replace
# everything after <eos> with <pad> token
decoder_parameter = next(self.decoder.parameters())
pad_profile = torch.zeros(batch_size, 1).long().to(decoder_parameter.device)
decoder_mems_list = None
for i in range(max_generation_length):
log_probs, decoder_mems_list = self._one_step_forward(
tgt[:, -1:], encoder_hidden_states, encoder_input_mask, decoder_mems_list, i
)
next_tokens = torch.argmax(log_probs[:, -1], dim=-1, keepdim=True)
next_tokens = self.pad * pad_profile + next_tokens * (1 - pad_profile)
pad_profile = torch.max(pad_profile, (next_tokens == self.eos).long())
tgt = torch.cat((tgt, next_tokens), dim=-1)
# abort generation if all sequences end with <eos>
if pad_profile.sum() == batch_size:
break
return tgt
def __call__(
self, decoder_input_ids=None, encoder_hidden_states=None, encoder_input_mask=None, return_beam_scores=False
):
with self.as_frozen():
return self._forward(
decoder_input_ids, encoder_hidden_states, encoder_input_mask, return_beam_scores=return_beam_scores
)
def freeze(self) -> None:
"""Freeze weights of embedding, decoder, and classification layers to prevent memory leak.
"""
for param in self.embedding.parameters():
param.requires_grad = False
self.embedding.eval()
for param in self.decoder.parameters():
param.requires_grad = False
self.decoder.eval()
for param in self.log_softmax.parameters():
param.requires_grad = False
self.log_softmax.eval()
def unfreeze(self) -> None:
"""Unfreeze weights of embedding, decoder, and classification layers.
"""
for param in self.embedding.parameters():
param.requires_grad = True
self.embedding.train()
for param in self.decoder.parameters():
param.requires_grad = True
self.decoder.train()
for param in self.log_softmax.parameters():
param.requires_grad = True
self.log_softmax.train()
@contextmanager
def as_frozen(self):
"""
Context manager which temporarily freezes embedding, decoder, and log_softmax modules,
yields control and finally unfreezes the modules.
"""
self.freeze()
try:
yield
finally:
self.unfreeze()
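# Illustrative sketch (not part of the original file): how ``pad_profile`` in
# ``GreedySequenceGenerator._forward`` forces <pad> tokens once <eos> has been generated.
# The token ids below are hypothetical (pad=0, eos=2).
if __name__ == "__main__":
    pad_id, eos_id = 0, 2
    pad_profile = torch.tensor([[0], [1]])  # second sequence has already finished
    next_tokens = torch.tensor([[7], [5]])  # raw argmax predictions
    next_tokens = pad_id * pad_profile + next_tokens * (1 - pad_profile)
    pad_profile = torch.max(pad_profile, (next_tokens == eos_id).long())
    print(next_tokens.tolist())  # [[7], [0]] -- the finished sequence keeps emitting <pad>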
class TopKSequenceGenerator(GreedySequenceGenerator):
"""
Top-k sequence generator based on the decoder followed by log_softmax.
Args:
*all args of GreedySequenceGenerator class
beam_size: size of the beam (parameter k in top-k)
temperature: temperature of top-k sampling; all logits are divided
by the temperature before rescaling. A high temperature leads to a
near-uniform distribution, a low temperature to a delta-like one.
Kwargs:
all remaining parameters of GreedySequenceGenerator class
"""
def __init__(self, embedding, decoder, log_softmax, beam_size=1, temperature=1.0, **kwargs):
super().__init__(embedding, decoder, log_softmax, **kwargs)
self.beam_size = beam_size
self.temp = temperature
# @torch.no_grad()
def _one_step_forward(
self,
decoder_input_ids=None,
encoder_hidden_states=None,
encoder_input_mask=None,
decoder_mems_list=None,
pos=0,
):
log_probs, decoder_mems_list = super()._one_step_forward(
decoder_input_ids, encoder_hidden_states, encoder_input_mask, decoder_mems_list, pos
)
batch_size, seq_len, vocab_size = log_probs.size()
scores, indices = torch.topk(log_probs, self.beam_size, dim=-1)
rescaled_logexp = torch.zeros_like(log_probs).scatter(-1, indices, scores.div(self.temp).exp())
probs = rescaled_logexp / rescaled_logexp.norm(1, -1, keepdim=True)
# We randomly sample next tokens from rescaled probability distribution
# over top-k candidates and return a binary tensor which indicates
# candidates that have been selected. We call this object
# `pseudo_log_probs` as genuine log_probs should have -infs instead of
# 0s and 0s instead of 1s.
ids = torch.multinomial(probs.view(-1, vocab_size), 1).view(-1, seq_len, 1)
pseudo_log_probs = torch.zeros_like(log_probs).scatter(-1, ids, 1.0)
return pseudo_log_probs, decoder_mems_list
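# Illustrative sketch (not part of the original file): the top-k renormalization and
# sampling used in ``TopKSequenceGenerator._one_step_forward``, on hypothetical
# log-probs for a vocabulary of 6 tokens (batch 1, sequence length 1, k=3).
if __name__ == "__main__":
    lp = torch.log_softmax(torch.randn(1, 1, 6), dim=-1)
    k, temp = 3, 1.0
    scores, indices = torch.topk(lp, k, dim=-1)
    rescaled = torch.zeros_like(lp).scatter(-1, indices, scores.div(temp).exp())
    probs = rescaled / rescaled.norm(1, -1, keepdim=True)  # renormalize over the top-k
    sampled = torch.multinomial(probs.view(-1, 6), 1)
    print(sampled.item())  # index of one of the 3 most likely tokens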
class BeamSearchSequenceGenerator(GreedySequenceGenerator):
def __init__(self, embedding, decoder, log_softmax, beam_size=1, len_pen=0, **kwargs):
"""
Beam Search sequence generator based on the decoder followed by
log_softmax.
Args:
*all args of GreedySequenceGenerator class
beam_size: size of the beam
len_pen: length penalty parameter
Kwargs:
all remaining parameters of GreedySequenceGenerator class
"""
super().__init__(embedding, decoder, log_softmax, **kwargs)
self.beam_size = beam_size
self.len_pen = len_pen
@staticmethod
def compute_len_penalty(lengths, alpha):
"""Returns length penalty according to https://arxiv.org/pdf/1609.08144.pdf"""
return ((5 + lengths) / 6).pow(alpha)
def _forward(
self, decoder_input_ids=None, encoder_hidden_states=None, encoder_input_mask=None, return_beam_scores=False
):
tgt, batch_size, max_generation_length = self._prepare_for_search(decoder_input_ids, encoder_hidden_states)
# generate initial buffer of beam_size prefixes-hypotheses
log_probs, decoder_mems_list = self._one_step_forward(tgt, encoder_hidden_states, encoder_input_mask, None, 0)
scores, prefixes = torch.topk(log_probs.permute(0, 2, 1), self.beam_size, dim=1)
scores, prefixes = scores.view(-1, 1), prefixes.view(-1, 1)
# repeat init target prefixes and cached memory states beam_size times
prefixes = torch.cat((tgt.repeat(1, self.beam_size).view(-1, 1), prefixes), dim=1)
for j in range(len(decoder_mems_list)):
decoder_mems_list[j] = decoder_mems_list[j].repeat(self.beam_size, 1, 1)
# repeat source sequence beam_size times for beam search
if encoder_hidden_states is not None:
_, src_length, hidden_size = encoder_hidden_states.size()
encoder_input_mask = encoder_input_mask.repeat(1, self.beam_size).view(-1, src_length)
encoder_hidden_states = encoder_hidden_states.repeat(1, self.beam_size, 1).view(
-1, src_length, hidden_size
)
else:
hidden_size = decoder_mems_list[0].size(2)
# pad_profile tracks finished hypotheses to generate only <pad> tokens
# if <eos> or <pad> has been generated
pad_profile = torch.zeros_like(scores).long()
# prefixes_len tracks lengths of generated hypotheses to perform
# length penalty correction
prefixes_len = torch.zeros_like(scores).fill_(prefixes.size(1) + 1)
for i in range(max_generation_length):
# mask all finished hypotheses to exclude them from beam
pad_mask = pad_profile.repeat(1, self.beam_size)
# generate and score candidates for prefixes continuation
log_probs, decoder_mems_list = self._one_step_forward(
prefixes[:, -1:], encoder_hidden_states, encoder_input_mask, decoder_mems_list, i + 1
)
scores_i, prefixes_i = torch.topk(log_probs[:, -1, :], self.beam_size, dim=-1)
# for all prefixes ending with <eos> or <pad> replace generated
# continuations with <pad>
prefixes_i = self.pad * pad_mask + prefixes_i * (1 - pad_mask)
# force all hypotheses but one generated from already finished
# hypotheses to have extremely low score, so they will not be
# considered during beam re-ranking
pad_mask[:, 1:] = pad_mask[:, 1:] * NEG_INF
scores = scores + scores_i * (1 - pad_mask).to(scores.dtype)
# choose top-k hypotheses with length penalty applied
len_penalties = self.compute_len_penalty(prefixes_len, self.len_pen)
scores = scores / len_penalties
scores, indices_i = torch.topk(scores.view(-1, self.beam_size ** 2), self.beam_size, dim=1)
scores = scores.view(-1, 1) * len_penalties
# select prefixes which correspond to the chosen hypotheses
prefixes = prefixes.unsqueeze(1).repeat(1, self.beam_size, 1)
prefixes = torch.cat((prefixes, prefixes_i.unsqueeze(2)), dim=2)
prefixes = prefixes.view(batch_size, self.beam_size ** 2, -1)
p_len = prefixes.size(2)
prefixes_ids = indices_i.unsqueeze(2).repeat(1, 1, p_len)
prefixes = prefixes.gather(1, prefixes_ids).view(-1, p_len)
# reshuffle cached decoder memory states to restore the order
# of hypotheses broken after top-k selection
mems_ids = indices_i.unsqueeze(2).unsqueeze(3).repeat(1, 1, p_len - 1, hidden_size) // self.beam_size
for j in range(len(decoder_mems_list)):
decoder_mems_list[j] = (
decoder_mems_list[j]
.view(-1, self.beam_size, p_len - 1, hidden_size)
.gather(1, mems_ids)
.view(-1, p_len - 1, hidden_size)
)
# update prefixes_len and pad_profile
not_eos_pad = prefixes.ne(self.eos) & prefixes.ne(self.pad)
prefixes_len = 1 + not_eos_pad.sum(dim=1, keepdim=True).to(scores.dtype)
pad_profile = (~not_eos_pad[:, -1:]).long()
# if all hypotheses end with <eos> or <pad>, interrupt search
if pad_profile.sum() == batch_size * self.beam_size:
break
# select best performing hypotheses in each element of the batch
len_penalties = self.compute_len_penalty(prefixes_len, self.len_pen)
scores = scores / len_penalties
best_guesses = (
torch.argmax(scores.view(-1, self.beam_size), dim=1, keepdim=True).repeat(1, prefixes.size(1)).unsqueeze(1)
)
tgt = prefixes.view(batch_size, self.beam_size, -1).gather(1, best_guesses).squeeze(1)
if return_beam_scores:
return prefixes, scores * len_penalties, tgt
else:
return tgt
class EnsembleBeamSearchSequenceGenerator:
def __init__(
self,
encoders,
embeddings,
decoders,
log_softmaxes,
beam_size=1,
len_pen=0,
pad=0,
bos=1,
eos=2,
max_sequence_length=512,
max_delta_length=20,
batch_size=1,
language_model=None,
fusion_coef=None,
):
"""
Ensemble Beam Search sequence generator based on the decoder followed by
log_softmax. Averages the probabilities of different models.
NOTE: All models must have been trained with the same BPE tokenizers.
Args:
encoders: A list of encoders
embeddings: A list of decoder embedding layers
decoders: A list of decoders
log_softmaxes: A list of decoder output layers
beam_size: Beam size
len_pen: Length penalty to adjust logprob scores to favor longer sequences
pad: pad id
bos: beginning of sequence id
eos: end of sequence id
max_sequence_length: maximum sequence length
max_delta_length: maximum length difference between input and output
batch_size: batch size if not inferrable from input sequence
"""
self.encoders = encoders
self.embeddings = embeddings
self.decoders = decoders
self.log_softmaxes = log_softmaxes
self.beam_size = beam_size
self.len_pen = len_pen
self.pad, self.bos, self.eos = pad, bos, eos
self.max_seq_length = max_sequence_length
self.max_delta_len = max_delta_length
self.batch_size = batch_size
assert len(embeddings) == len(decoders) == len(log_softmaxes) == len(encoders)
self.num_models = len(encoders)
self.language_model = language_model
self.fusion_coef = fusion_coef
@staticmethod
def compute_len_penalty(lengths, alpha):
"""Returns length penalty according to https://arxiv.org/pdf/1609.08144.pdf"""
return ((5 + lengths) / 6).pow(alpha)
def _one_step_forward_lm(self, decoder_input_ids=None, lm_mems_list=None, pos=0):
input_mask = mask_padded_tokens(decoder_input_ids, self.pad).float()
lm_hidden_states = self.language_model.encoder.embedding.forward(decoder_input_ids, start_pos=pos)
lm_mems_list = self.language_model.encoder.encoder.forward(
lm_hidden_states, input_mask, lm_mems_list, return_mems=True,
)
lm_log_probs = self.language_model.log_softmax.forward(hidden_states=lm_mems_list[-1][:, -1:])
return lm_log_probs, lm_mems_list
def _one_step_forward(
self,
ensemble_index,
decoder_input_ids=None,
encoder_hidden_states=None,
encoder_input_mask=None,
decoder_mems_list=None,
pos=0,
):
"""
One step of autoregressive output generation for one particular model.
Args:
decoder_input_ids: starting sequence of tokens to generate from;
if None, generation will start from a batch of <bos> tokens
encoder_hidden_states: output of the encoder for conditional
sequence generation; if None, generator will use unconditional
mode (e.g., language modeling)
encoder_input_mask: input mask used in the encoder
decoder_mems_list: list of size num_layers with cached activations
of sequence (x[1], ..., x[k-1]) for fast generation of x[k]
pos: starting position in positional encoding
"""
decoder_hidden_states = self.embeddings[ensemble_index].forward(decoder_input_ids, start_pos=pos)
decoder_input_mask = mask_padded_tokens(decoder_input_ids, self.pad).float()
if encoder_hidden_states is not None:
decoder_mems_list = self.decoders[ensemble_index].forward(
decoder_hidden_states,
decoder_input_mask,
encoder_hidden_states,
encoder_input_mask,
decoder_mems_list,
return_mems=True,
)
else:
decoder_mems_list = self.decoders[ensemble_index].forward(
decoder_hidden_states, decoder_input_mask, decoder_mems_list, return_mems=True
)
log_probs = self.log_softmaxes[ensemble_index].forward(hidden_states=decoder_mems_list[-1][:, -1:])
return log_probs, decoder_mems_list
def _prepare_for_search(self, decoder_input_ids=None, encoder_hidden_states=None):
"""
Helper function which defines starting sequence to begin generating
with and maximum allowed number of tokens to be generated.
"""
decoder_parameter = next(self.decoders[0].parameters())
batch_size = self.batch_size
# for encoder-decoder generation, maximum length of generated sequence
# is min(max_sequence_length, src_len + max_delta_length)
if encoder_hidden_states is not None:
batch_size, src_len, _ = encoder_hidden_states.size()
if self.max_delta_len >= 0:
max_seq_length = min(self.max_seq_length, src_len + self.max_delta_len)
else:
max_seq_length = self.max_seq_length
else:
max_seq_length = self.max_seq_length
# if no input is provided, start with the batch of <bos> tokens
if decoder_input_ids is not None:
tgt = decoder_input_ids
batch_size, tgt_len = decoder_input_ids.size()
else:
tgt = torch.zeros(batch_size, 1).long().fill_(self.bos).to(decoder_parameter.device)
tgt_len = 1
max_generation_length = max_seq_length - tgt_len
return tgt, batch_size, max_generation_length
def _get_encoder_hidden_states(self, src_ids, encoder_input_mask, ensemble_index):
return self.encoders[ensemble_index](input_ids=src_ids, encoder_mask=encoder_input_mask)
def _average_probs(self, probs_list):
probs_list = torch.stack(probs_list)
return torch.log(torch.exp(probs_list).mean(0))
# probs = torch.stack(probs_list) # Ens x B x T x V
# return torch.log(probs.sum(0) / probs.sum(-1).sum(0).unsqueeze(-1))
def _forward(self, src_ids, encoder_input_mask, decoder_input_ids=None, return_beam_scores=False):
encoder_hidden_states = [
self._get_encoder_hidden_states(src_ids, encoder_input_mask, i) for i in range(self.num_models)
]
tgt, batch_size, max_generation_length = self._prepare_for_search(decoder_input_ids, encoder_hidden_states[0])
# generate initial buffer of beam_size prefixes-hypotheses
outputs = [
self._one_step_forward(i, tgt, encoder_hidden_states[i], encoder_input_mask, None, 0)
for i in range(self.num_models)
]
nmt_log_probs = self._average_probs([x[0] for x in outputs])
decoder_mems_lists = [x[1] for x in outputs]
if self.language_model is not None:
lm_log_probs, lm_mems_list = self._one_step_forward_lm(tgt, None, 0)
log_probs = nmt_log_probs + self.fusion_coef * lm_log_probs
else:
log_probs = nmt_log_probs
scores, prefixes = torch.topk(log_probs.permute(0, 2, 1), self.beam_size, dim=1)
scores, prefixes = scores.view(-1, 1), prefixes.view(-1, 1)
# repeat init target prefixes and cached memory states beam_size times
prefixes = torch.cat((tgt.repeat(1, self.beam_size).view(-1, 1), prefixes), dim=1)
for i in range(self.num_models):
for j in range(len(decoder_mems_lists[i])):
decoder_mems_lists[i][j] = decoder_mems_lists[i][j].repeat(self.beam_size, 1, 1)
if self.language_model is not None:
for j in range(len(lm_mems_list)):
lm_mems_list[j] = lm_mems_list[j].repeat(self.beam_size, 1, 1)
lm_hidden_size = lm_mems_list[0].size(2)
encoder_input_mask = encoder_input_mask.repeat(1, self.beam_size).view(-1, encoder_input_mask.size(1))
for i in range(self.num_models):
_, src_length, hidden_size = encoder_hidden_states[i].size()
encoder_hidden_states[i] = (
encoder_hidden_states[i].repeat(1, self.beam_size, 1).view(-1, src_length, hidden_size)
)
# pad_profile tracks finished hypotheses to generate only <pad> tokens
# if <eos> or <pad> has been generated
pad_profile = torch.zeros_like(scores).long()
# prefixes_len tracks lengths of generated hypotheses to perform
# length penalty correction
prefixes_len = torch.zeros_like(scores).fill_(prefixes.size(1) + 1)
for i in range(max_generation_length):
# mask all finished hypotheses to exclude them from beam
pad_mask = pad_profile.repeat(1, self.beam_size)
# generate and score candidates for prefixes continuation
outputs = [
self._one_step_forward(
model_num,
prefixes[:, -1:],
encoder_hidden_states[model_num],
encoder_input_mask,
decoder_mems_lists[model_num],
i + 1,
)
for model_num in range(self.num_models)
]
nmt_log_probs = self._average_probs([x[0] for x in outputs])
decoder_mems_lists = [x[1] for x in outputs]
if self.language_model is not None:
lm_log_probs, lm_mems_list = self._one_step_forward_lm(prefixes[:, -1:], lm_mems_list, i + 1)
log_probs = nmt_log_probs + self.fusion_coef * lm_log_probs
else:
log_probs = nmt_log_probs
scores_i, prefixes_i = torch.topk(log_probs[:, -1, :], self.beam_size, dim=-1)
# for all prefixes ending with <eos> or <pad> replace generated
# continuations with <pad>
prefixes_i = self.pad * pad_mask + prefixes_i * (1 - pad_mask)
# force all hypotheses but one generated from already finished
# hypotheses to have extremely low score, so they will not be
# considered during beam re-ranking
pad_mask[:, 1:] = pad_mask[:, 1:] * NEG_INF
scores = scores + scores_i * (1 - pad_mask).to(scores.dtype)
# choose top-k hypotheses with length penalty applied
len_penalties = self.compute_len_penalty(prefixes_len, self.len_pen)
scores = scores / len_penalties
scores, indices_i = torch.topk(scores.view(-1, self.beam_size ** 2), self.beam_size, dim=1)
scores = scores.view(-1, 1) * len_penalties
# select prefixes which correspond to the chosen hypotheses
prefixes = prefixes.unsqueeze(1).repeat(1, self.beam_size, 1)
prefixes = torch.cat((prefixes, prefixes_i.unsqueeze(2)), dim=2)
prefixes = prefixes.view(batch_size, self.beam_size ** 2, -1)
p_len = prefixes.size(2)
prefixes_ids = indices_i.unsqueeze(2).repeat(1, 1, p_len)
prefixes = prefixes.gather(1, prefixes_ids).view(-1, p_len)
# reshuffle cached decoder memory states to restore the order
# of hypotheses broken after top-k selection
for model_num in range(self.num_models):
hidden_size = decoder_mems_lists[model_num][0].size(2)
mems_ids = indices_i.unsqueeze(2).unsqueeze(3).repeat(1, 1, p_len - 1, hidden_size) // self.beam_size
for j in range(len(decoder_mems_lists[model_num])):
decoder_mems_lists[model_num][j] = (
decoder_mems_lists[model_num][j]
.view(-1, self.beam_size, p_len - 1, hidden_size)
.gather(1, mems_ids)
.view(-1, p_len - 1, hidden_size)
)
if self.language_model is not None:
lm_mems_ids = (
indices_i.unsqueeze(2).unsqueeze(3).repeat(1, 1, p_len - 1, lm_hidden_size) // self.beam_size
)
for j in range(len(lm_mems_list)):
lm_mems_list[j] = (
lm_mems_list[j]
.view(-1, self.beam_size, p_len - 1, lm_hidden_size)
.gather(1, lm_mems_ids)
.view(-1, p_len - 1, lm_hidden_size)
)
# update prefixes_len and pad_profile
not_eos_pad = prefixes.ne(self.eos) & prefixes.ne(self.pad)
prefixes_len = 1 + not_eos_pad.sum(dim=1, keepdim=True).to(scores.dtype)
pad_profile = (~not_eos_pad[:, -1:]).long()
# if all hypotheses end with <eos> or <pad>, interrupt search
if pad_profile.sum() == batch_size * self.beam_size:
break
# select best performing hypotheses in each element of the batch
len_penalties = self.compute_len_penalty(prefixes_len, self.len_pen)
scores = scores / len_penalties
best_guesses = (
torch.argmax(scores.view(-1, self.beam_size), dim=1, keepdim=True).repeat(1, prefixes.size(1)).unsqueeze(1)
)
tgt = prefixes.view(batch_size, self.beam_size, -1).gather(1, best_guesses).squeeze(1)
if return_beam_scores:
return prefixes, scores * len_penalties, tgt
else:
return tgt
def __call__(self, src_ids, encoder_input_mask, decoder_input_ids=None, return_beam_scores=False):
with self.as_frozen():
return self._forward(src_ids, encoder_input_mask, decoder_input_ids, return_beam_scores)
def freeze(self) -> None:
"""Freeze weights of embedding, decoder, and classification layers to prevent memory leak.
"""
for model_num in range(self.num_models):
for param in self.embeddings[model_num].parameters():
param.requires_grad = False
self.embeddings[model_num].eval()
for param in self.decoders[model_num].parameters():
param.requires_grad = False
self.decoders[model_num].eval()
for param in self.log_softmaxes[model_num].parameters():
param.requires_grad = False
self.log_softmaxes[model_num].eval()
for param in self.encoders[model_num].parameters():
param.requires_grad = False
self.encoders[model_num].eval()
def unfreeze(self) -> None:
"""Unfreeze weights of embedding, decoder, and classification layers.
"""
for model_num in range(self.num_models):
for param in self.embeddings[model_num].parameters():
param.requires_grad = True
self.embeddings[model_num].train()
for param in self.decoders[model_num].parameters():
param.requires_grad = True
self.decoders[model_num].train()
for param in self.log_softmaxes[model_num].parameters():
param.requires_grad = True
self.log_softmaxes[model_num].train()
for param in self.encoders[model_num].parameters():
param.requires_grad = True
self.encoders[model_num].train()
@contextmanager
def as_frozen(self):
"""
Context manager which temporarily freezes embedding, decoder, and log_softmax modules,
yields control and finally unfreezes the modules.
"""
self.freeze()
try:
yield
finally:
self.unfreeze()
class BeamSearchSequenceGeneratorWithLanguageModel(GreedySequenceGenerator):
def __init__(
self, embedding, decoder, log_softmax, language_model, beam_size=1, len_pen=0, fusion_coef=0.0, **kwargs
):
"""
Beam Search sequence generator based on the decoder followed by log_softmax
with external language model fusion.
Args:
*all args of BeamSearchSequenceGenerator class
language_model: nemo TransformerLMModel
fusion_coef: coefficient before language model score, the resulting score is
score = log P_NMT(y|x) + fusion_coef * log P_LM(y)
Kwargs:
all remaining parameters of GreedySequenceGenerator class
"""
super().__init__(embedding, decoder, log_softmax, **kwargs)
self.language_model = language_model
self.beam_size = beam_size
self.len_pen = len_pen
self.fusion_coef = fusion_coef
def _one_step_forward(
self,
decoder_input_ids=None,
encoder_hidden_states=None,
encoder_input_mask=None,
decoder_mems_list=None,
lm_mems_list=None,
pos=0,
):
nmt_log_probs, decoder_mems_list = super()._one_step_forward(
decoder_input_ids, encoder_hidden_states, encoder_input_mask, decoder_mems_list, pos,
)
input_mask = mask_padded_tokens(decoder_input_ids, self.pad).float()
lm_hidden_states = self.language_model.encoder.embedding.forward(decoder_input_ids, start_pos=pos)
lm_mems_list = self.language_model.encoder.encoder.forward(
lm_hidden_states, input_mask, lm_mems_list, return_mems=True,
)
lm_log_probs = self.language_model.log_softmax.forward(hidden_states=lm_mems_list[-1][:, -1:])
log_probs = nmt_log_probs + self.fusion_coef * lm_log_probs
return log_probs, decoder_mems_list, lm_mems_list
@staticmethod
def compute_len_penalty(lengths, alpha):
"""Returns length penalty according to https://arxiv.org/pdf/1609.08144.pdf"""
return ((5 + lengths) / 6).pow(alpha)
def _forward(
self, decoder_input_ids=None, encoder_hidden_states=None, encoder_input_mask=None, return_beam_scores=False
):
tgt, batch_size, max_generation_length = self._prepare_for_search(decoder_input_ids, encoder_hidden_states)
# generate initial buffer of beam_size prefixes-hypotheses
log_probs, decoder_mems_list, lm_mems_list = self._one_step_forward(
tgt, encoder_hidden_states, encoder_input_mask, None, None, 0
)
scores, prefixes = torch.topk(log_probs.permute(0, 2, 1), self.beam_size, dim=1)
scores, prefixes = scores.view(-1, 1), prefixes.view(-1, 1)
# repeat init target prefixes and cached memory states beam_size times
prefixes = torch.cat((tgt.repeat(1, self.beam_size).view(-1, 1), prefixes), dim=1)
for j in range(len(decoder_mems_list)):
decoder_mems_list[j] = decoder_mems_list[j].repeat(self.beam_size, 1, 1)
for j in range(len(lm_mems_list)):
lm_mems_list[j] = lm_mems_list[j].repeat(self.beam_size, 1, 1)
# repeat source sequence beam_size times for beam search
if encoder_hidden_states is not None:
_, src_length, hidden_size = encoder_hidden_states.size()
encoder_input_mask = encoder_input_mask.repeat(1, self.beam_size).view(-1, src_length)
encoder_hidden_states = encoder_hidden_states.repeat(1, self.beam_size, 1).view(
-1, src_length, hidden_size
)
else:
hidden_size = decoder_mems_list[0].size(2)
lm_hidden_size = lm_mems_list[0].size(2)
# pad_profile tracks finished hypotheses to generate only <pad> tokens
# if <eos> or <pad> has been generated
pad_profile = torch.zeros_like(scores).long()
# prefixes_len tracks lengths of generated hypotheses to perform
# length penalty correction
prefixes_len = torch.zeros_like(scores).fill_(prefixes.size(1) + 1)
for i in range(max_generation_length):
# mask all finished hypotheses to exclude them from beam
pad_mask = pad_profile.repeat(1, self.beam_size)
# generate and score candidates for prefixes continuation
log_probs, decoder_mems_list, lm_mems_list = self._one_step_forward(
prefixes[:, -1:], encoder_hidden_states, encoder_input_mask, decoder_mems_list, lm_mems_list, i + 1
)
scores_i, prefixes_i = torch.topk(log_probs[:, -1, :], self.beam_size, dim=-1)
# for all prefixes ending with <eos> or <pad> replace generated
# continuations with <pad>
prefixes_i = self.pad * pad_mask + prefixes_i * (1 - pad_mask)
# force all hypotheses but one generated from already finished
# hypotheses to have extremely low score, so they will not be
# considered during beam re-ranking
pad_mask[:, 1:] = pad_mask[:, 1:] * NEG_INF
scores = scores + scores_i * (1 - pad_mask).to(scores.dtype)
# choose top-k hypotheses with length penalty applied
len_penalties = self.compute_len_penalty(prefixes_len, self.len_pen)
scores = scores / len_penalties
scores, indices_i = torch.topk(scores.view(-1, self.beam_size ** 2), self.beam_size, dim=1)
scores = scores.view(-1, 1) * len_penalties
# select prefixes which correspond to the chosen hypotheses
prefixes = prefixes.unsqueeze(1).repeat(1, self.beam_size, 1)
prefixes = torch.cat((prefixes, prefixes_i.unsqueeze(2)), dim=2)
prefixes = prefixes.view(batch_size, self.beam_size ** 2, -1)
p_len = prefixes.size(2)
prefixes_ids = indices_i.unsqueeze(2).repeat(1, 1, p_len)
prefixes = prefixes.gather(1, prefixes_ids).view(-1, p_len)
# reshuffle cached decoder memory states to restore the order
# of hypotheses broken after top-k selection
mems_ids = indices_i.unsqueeze(2).unsqueeze(3).repeat(1, 1, p_len - 1, hidden_size) // self.beam_size
for j in range(len(decoder_mems_list)):
decoder_mems_list[j] = (
decoder_mems_list[j]
.view(-1, self.beam_size, p_len - 1, hidden_size)
.gather(1, mems_ids)
.view(-1, p_len - 1, hidden_size)
)
lm_mems_ids = indices_i.unsqueeze(2).unsqueeze(3).repeat(1, 1, p_len - 1, lm_hidden_size) // self.beam_size
for j in range(len(lm_mems_list)):
lm_mems_list[j] = (
lm_mems_list[j]
.view(-1, self.beam_size, p_len - 1, lm_hidden_size)
.gather(1, lm_mems_ids)
.view(-1, p_len - 1, lm_hidden_size)
)
# update prefixes_len and pad_profile
not_eos_pad = prefixes.ne(self.eos) & prefixes.ne(self.pad)
prefixes_len = 1 + not_eos_pad.sum(dim=1, keepdim=True).to(scores.dtype)
pad_profile = (~not_eos_pad[:, -1:]).long()
# if all hypotheses end with <eos> or <pad>, interrupt search
if pad_profile.sum() == batch_size * self.beam_size:
break
# select best performing hypotheses in each element of the batch
len_penalties = self.compute_len_penalty(prefixes_len, self.len_pen)
scores = scores / len_penalties
best_guesses = (
torch.argmax(scores.view(-1, self.beam_size), dim=1, keepdim=True).repeat(1, prefixes.size(1)).unsqueeze(1)
)
tgt = prefixes.view(batch_size, self.beam_size, -1).gather(1, best_guesses).squeeze(1)
if return_beam_scores:
return prefixes, scores * len_penalties, tgt
else:
return tgt
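# Illustrative sketch (not part of the original file): the GNMT length penalty shared by
# the beam search generators above, evaluated for a few hypothesis lengths. Beam scores
# are divided by these values before re-ranking, which offsets the naturally lower
# cumulative log-probability of longer hypotheses.
if __name__ == "__main__":
    lengths = torch.tensor([4.0, 8.0, 16.0])
    alpha = 0.6  # hypothetical length-penalty coefficient
    penalties = ((5.0 + lengths) / 6.0).pow(alpha)
    print(penalties.tolist())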
| NeMo-main | nemo/collections/nlp/modules/common/transformer/transformer_generators.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, Optional
from nemo.collections.nlp.modules.common.transformer.bridge_encoders import BridgeEncoder
from nemo.collections.nlp.modules.common.transformer.perceiver_encoders import PerceiverEncoder
from nemo.collections.nlp.modules.common.transformer.reduction_encoders import PoolingEncoder
from nemo.collections.nlp.modules.common.transformer.transformer import (
NeMoTransformerConfig,
TransformerDecoderNM,
TransformerEncoderNM,
)
from nemo.core.classes.common import typecheck
from nemo.core.neural_types import MaskType, NeuralType
from nemo.core.neural_types.elements import BoolType
__all__ = [
"NeMoTransformerBottleneckConfig",
"NeMoTransformerBottleneckEncoderConfig",
"NeMoTransformerBottleneckDecoderConfig",
"TransformerBottleneckEncoderNM",
]
@dataclass
class NeMoTransformerBottleneckConfig(NeMoTransformerConfig):
# architecture details (default is no bottleneck)
arch: str = ''
hidden_steps: int = -1
hidden_blocks: int = 1
hidden_init_method: str = "params"
@dataclass
class NeMoTransformerBottleneckEncoderConfig(NeMoTransformerBottleneckConfig):
mask_future: bool = False
# change return_mask to False to return hidden states only (default for non-bottleneck encoder)
return_mask: bool = True
@dataclass
class NeMoTransformerBottleneckDecoderConfig(NeMoTransformerBottleneckConfig):
r2l: bool = False
class TransformerBottleneckEncoderNM(TransformerEncoderNM):
_SUPPORTED_ARCH = ["seq2seq", "bridge", "perceiver", "max_pool", "avg_pool"]
def __init__(
self,
vocab_size: int,
hidden_size: int,
num_layers: int,
inner_size: int,
num_attention_heads: int,
max_sequence_length: int = 512,
num_token_types: int = 2,
embedding_dropout: float = 0.0,
learn_positional_encodings: bool = False,
ffn_dropout: float = 0.0,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
hidden_act: str = 'relu',
mask_future: bool = False,
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
arch: str = '',
hidden_steps: int = -1,
hidden_blocks: int = 1,
hidden_init_method: str = "default",
padding_idx: int = 0,
# default for whether the forward() method returns hidden states only or (hidden states, hidden mask)
return_mask=True,
):
super().__init__(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_layers=num_layers,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
max_sequence_length=max_sequence_length,
num_token_types=num_token_types,
embedding_dropout=embedding_dropout,
learn_positional_encodings=learn_positional_encodings,
ffn_dropout=ffn_dropout,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
hidden_act=hidden_act,
mask_future=mask_future,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
padding_idx=padding_idx,
)
self._arch = arch
self._return_mask = return_mask
# replace encoder
self._encoder = self._build_encoder(
arch=arch,
hidden_steps=hidden_steps,
hidden_blocks=hidden_blocks,
hidden_init_method=hidden_init_method,
hidden_size=hidden_size,
num_layers=num_layers,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
ffn_dropout=ffn_dropout,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
hidden_act=hidden_act,
mask_future=mask_future,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
def _build_encoder(self, arch, **kwargs):
"""
Returns an encoder based on architecture arch and kwargs
"""
# default non-bottleneck transformer encoder
if (not arch) or (arch == "seq2seq"):
encoder = self.encoder
elif arch == "bridge":
encoder = BridgeEncoder(
num_layers=kwargs["num_layers"],
hidden_size=kwargs["hidden_size"],
inner_size=kwargs["inner_size"],
num_attention_heads=kwargs["num_attention_heads"],
attn_score_dropout=kwargs["attn_score_dropout"],
attn_layer_dropout=kwargs["attn_layer_dropout"],
ffn_dropout=kwargs["ffn_dropout"],
hidden_act=kwargs["hidden_act"],
mask_future=kwargs["mask_future"],
pre_ln=kwargs["pre_ln"],
pre_ln_final_layer_norm=kwargs["pre_ln_final_layer_norm"],
hidden_steps=kwargs["hidden_steps"],
hidden_blocks=kwargs["hidden_blocks"],
hidden_init_method=kwargs["hidden_init_method"],
)
elif arch == "perceiver":
encoder = PerceiverEncoder(
num_layers=kwargs["num_layers"],
hidden_size=kwargs["hidden_size"],
inner_size=kwargs["inner_size"],
num_attention_heads=kwargs["num_attention_heads"],
attn_score_dropout=kwargs["attn_score_dropout"],
attn_layer_dropout=kwargs["attn_layer_dropout"],
ffn_dropout=kwargs["ffn_dropout"],
hidden_act=kwargs["hidden_act"],
mask_future=kwargs["mask_future"],
pre_ln=kwargs["pre_ln"],
pre_ln_final_layer_norm=kwargs["pre_ln_final_layer_norm"],
hidden_steps=kwargs["hidden_steps"],
hidden_blocks=kwargs["hidden_blocks"],
hidden_init_method=kwargs["hidden_init_method"],
)
elif arch == "max_pool":
encoder = PoolingEncoder(
num_layers=kwargs["num_layers"],
hidden_size=kwargs["hidden_size"],
inner_size=kwargs["inner_size"],
num_attention_heads=kwargs["num_attention_heads"],
attn_score_dropout=kwargs["attn_score_dropout"],
attn_layer_dropout=kwargs["attn_layer_dropout"],
ffn_dropout=kwargs["ffn_dropout"],
hidden_act=kwargs["hidden_act"],
mask_future=kwargs["mask_future"],
pre_ln=kwargs["pre_ln"],
pre_ln_final_layer_norm=kwargs["pre_ln_final_layer_norm"],
hidden_steps=kwargs["hidden_steps"],
hidden_blocks=kwargs["hidden_blocks"],
hidden_init_method=kwargs["hidden_init_method"],
pooling_type="max",
)
elif arch == "avg_pool":
encoder = PoolingEncoder(
num_layers=kwargs["num_layers"],
hidden_size=kwargs["hidden_size"],
inner_size=kwargs["inner_size"],
num_attention_heads=kwargs["num_attention_heads"],
attn_score_dropout=kwargs["attn_score_dropout"],
attn_layer_dropout=kwargs["attn_layer_dropout"],
ffn_dropout=kwargs["ffn_dropout"],
hidden_act=kwargs["hidden_act"],
mask_future=kwargs["mask_future"],
pre_ln=kwargs["pre_ln"],
pre_ln_final_layer_norm=kwargs["pre_ln_final_layer_norm"],
hidden_steps=kwargs["hidden_steps"],
hidden_blocks=kwargs["hidden_blocks"],
hidden_init_method=kwargs["hidden_init_method"],
pooling_type="avg",
)
else:
raise ValueError(f"Unknown arch = {self.arch}, supported arch = {self.supported_arch}")
return encoder
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
input_types = super().input_types
input_types.update(
{"return_mask": NeuralType((), BoolType(), True),}
)
return input_types
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
output_types = super().output_types
output_types.update(
{"hidden_mask": NeuralType(('B', 'T'), MaskType(), True),}
)
return output_types
@property
def supported_arch(self):
return self._SUPPORTED_ARCH
@property
def arch(self):
return self._arch
@typecheck()
def forward(self, input_ids, encoder_mask, return_mask=None):
if return_mask is None:
return_mask = self._return_mask
embeddings = self._embedding(input_ids=input_ids)
if (not self.arch) or (self.arch == "seq2seq"):
encoder_hidden_states = self._encoder(encoder_states=embeddings, encoder_mask=encoder_mask)
encoder_hidden_mask = encoder_mask
else:
encoder_hidden_states, encoder_hidden_mask = self._encoder(
encoder_states=embeddings, encoder_mask=encoder_mask,
)
if return_mask:
return encoder_hidden_states, encoder_hidden_mask
else:
return encoder_hidden_states
class TransformerBottleneckDecoderNM(TransformerDecoderNM):
_SUPPORTED_ARCH = ["seq2seq"]
def __init__(
self,
vocab_size: int,
hidden_size: int,
num_layers: int,
inner_size: int,
num_attention_heads: int,
max_sequence_length: int = 512,
num_token_types: int = 2,
embedding_dropout: float = 0.0,
learn_positional_encodings: bool = False,
ffn_dropout: float = 0.0,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
hidden_act: str = 'relu',
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
arch='',
):
super().__init__(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_layers=num_layers,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
max_sequence_length=max_sequence_length,
num_token_types=num_token_types,
embedding_dropout=embedding_dropout,
learn_positional_encodings=learn_positional_encodings,
ffn_dropout=ffn_dropout,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
self._arch = arch
# replace decoder
self._decoder = self._build_decoder(
arch=arch,
hidden_size=hidden_size,
num_layers=num_layers,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
max_sequence_length=max_sequence_length,
num_token_types=num_token_types,
embedding_dropout=embedding_dropout,
learn_positional_encodings=learn_positional_encodings,
ffn_dropout=ffn_dropout,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
def _build_decoder(self, arch, **kwargs):
"""
Returns a decoder based on architecture arch and kwargs
"""
# usual non-bottleneck transformer decoder
if (not arch) or (arch == "seq2seq"):
decoder = self.decoder
else:
raise ValueError(f"Unknown arch = {self.arch}, supported arch = {self.supported_arch}")
return decoder
@property
def supported_arch(self):
return self._SUPPORTED_ARCH
@property
def arch(self):
return self._arch
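# Illustrative usage sketch (not part of the original API surface; all sizes below are arbitrary
# assumptions chosen for demonstration): how the `arch` option of TransformerBottleneckEncoderNM
# selects a bottleneck encoder and changes what forward returns.
def _example_bottleneck_encoder_usage():
    import torch

    encoder = TransformerBottleneckEncoderNM(
        vocab_size=1024,
        hidden_size=64,
        num_layers=2,
        inner_size=128,
        num_attention_heads=4,
        arch="max_pool",  # bottleneck architecture built by _build_encoder above
        hidden_steps=4,  # pooling is applied only while the sequence is at least this long
        hidden_blocks=1,
        hidden_init_method="default",
    )
    input_ids = torch.randint(0, 1024, (2, 16))
    encoder_mask = torch.ones(2, 16, dtype=torch.long)
    # With a bottleneck arch the (possibly shortened) hidden mask is returned alongside the states;
    # here one max-pool block halves the sequence length from 16 to 8.
    hidden_states, hidden_mask = encoder(input_ids=input_ids, encoder_mask=encoder_mask, return_mask=True)
    return hidden_states.shape, hidden_mask.shape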
| NeMo-main | nemo/collections/nlp/modules/common/transformer/transformer_bottleneck.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import torch
import torch.nn as nn
from nemo.collections.common.parts import form_attention_mask
from nemo.collections.nlp.modules.common.transformer.transformer_modules import MultiHeadAttention, PositionWiseFF
__all__ = ["TransformerDecoder"]
class TransformerDecoderBlock(nn.Module):
"""
Building block of Transformer decoder.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
inner_size: number of neurons in the intermediate part of feed-forward
net, usually is (4-8 x hidden_size) in the papers
num_attention_heads: number of heads in multi-head attention
attn_score_dropout: probability of dropout applied to attention scores
attn_layer_dropout: probability of dropout applied to the output of the
attention layers, but before layer normalization
ffn_dropout: probability of dropout applied to FFN output
hidden_act: activation function used between two linear layers in FFN
"""
def __init__(
self,
hidden_size: int,
inner_size: int,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
):
super().__init__()
self.pre_ln = pre_ln
self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=1e-5)
self.first_sub_layer = MultiHeadAttention(
hidden_size, num_attention_heads, attn_score_dropout, attn_layer_dropout
)
self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=1e-5)
self.second_sub_layer = MultiHeadAttention(
hidden_size, num_attention_heads, attn_score_dropout, attn_layer_dropout
)
self.layer_norm_3 = nn.LayerNorm(hidden_size, eps=1e-5)
self.third_sub_layer = PositionWiseFF(hidden_size, inner_size, ffn_dropout, hidden_act)
def forward_preln(self, decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask):
"""
Pre-LayerNorm block
Order of operations: LN -> Self-Attn -> Residual -> LN -> Cross-Attn -> Residual -> LN -> FFN
"""
residual = decoder_query
decoder_query = self.layer_norm_1(decoder_query)
decoder_keys = self.layer_norm_1(decoder_keys)
self_attn_output = self.first_sub_layer(decoder_query, decoder_keys, decoder_keys, decoder_mask)
self_attn_output += residual
residual = self_attn_output
self_attn_output = self.layer_norm_2(self_attn_output)
enc_dec_attn_output = self.second_sub_layer(self_attn_output, encoder_states, encoder_states, encoder_mask)
enc_dec_attn_output += residual
residual = enc_dec_attn_output
enc_dec_attn_output = self.layer_norm_3(enc_dec_attn_output)
output_states = self.third_sub_layer(enc_dec_attn_output)
output_states += residual
return output_states
def forward_postln(self, decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask):
"""
Post-LayerNorm block
Order of operations: Self-Attn -> Residual -> LN -> Cross-Attn -> Residual -> LN -> FFN -> Residual -> LN
"""
self_attn_output = self.first_sub_layer(decoder_query, decoder_keys, decoder_keys, decoder_mask)
self_attn_output += decoder_query
self_attn_output = self.layer_norm_1(self_attn_output)
enc_dec_attn_output = self.second_sub_layer(self_attn_output, encoder_states, encoder_states, encoder_mask)
enc_dec_attn_output += self_attn_output
enc_dec_attn_output = self.layer_norm_2(enc_dec_attn_output)
output_states = self.third_sub_layer(enc_dec_attn_output)
output_states += enc_dec_attn_output
return self.layer_norm_3(output_states)
def forward(self, decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask):
if self.pre_ln:
return self.forward_preln(decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask)
else:
return self.forward_postln(decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask)
class TransformerDecoder(nn.Module):
def __init__(
self,
num_layers: int,
hidden_size: int,
inner_size: int,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
):
super().__init__()
if pre_ln and pre_ln_final_layer_norm:
self.final_layer_norm = nn.LayerNorm(hidden_size, eps=1e-5)
else:
self.final_layer_norm = None
layer = TransformerDecoderBlock(
hidden_size,
inner_size,
num_attention_heads,
attn_score_dropout,
attn_layer_dropout,
ffn_dropout,
hidden_act,
pre_ln,
)
self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])
self.diagonal = 0
def _get_memory_states(self, decoder_states, decoder_mems_list=None, i=0):
if decoder_mems_list is not None:
inp1 = torch.transpose(decoder_mems_list[i], 1, 2) # Putting seq_len to last dim to handle export cases
inp2 = torch.transpose(decoder_states, 1, 2)
memory_states = torch.cat((inp1, inp2), dim=2)
memory_states = torch.transpose(memory_states, 1, 2) # Transposing back
else:
memory_states = decoder_states
return memory_states
def forward(
self,
decoder_states,
decoder_mask,
encoder_states,
encoder_mask,
decoder_mems_list=None,
return_mems=False,
return_mems_as_list=True,
):
"""
Args:
decoder_states: output of the embedding layer (B x L_dec x H)
decoder_mask: decoder inputs mask (B x L_dec)
encoder_states: output of the encoder (B x L_enc x H)
encoder_mask: encoder inputs mask (B x L_enc)
decoder_mems_list: list of the cached decoder hidden states
for fast autoregressive generation which will be used instead
of decoder_states as keys and values if not None
return_mems: bool, whether to return outputs of all decoder layers
or the last layer only
return_mems_as_list: bool, when True, cached mems are returned as a list; otherwise they are returned as a single stacked Tensor

"""
decoder_attn_mask = form_attention_mask(decoder_mask, diagonal=self.diagonal)
encoder_attn_mask = form_attention_mask(encoder_mask)
memory_states = self._get_memory_states(decoder_states, decoder_mems_list, 0)
if return_mems_as_list:
cached_mems_list = [memory_states]
else:
cached_mems_list = memory_states.unsqueeze(0)
for i, layer in enumerate(self.layers):
decoder_states = layer(decoder_states, decoder_attn_mask, memory_states, encoder_states, encoder_attn_mask)
memory_states = self._get_memory_states(decoder_states, decoder_mems_list, i + 1)
if return_mems_as_list:
cached_mems_list.append(memory_states)
else:
cached_mems_list = torch.cat((cached_mems_list, memory_states.unsqueeze(0)), dim=0)
if self.final_layer_norm is not None:
decoder_states = self.final_layer_norm(decoder_states)
memory_states = self._get_memory_states(decoder_states, decoder_mems_list, i + 2)
if return_mems_as_list:
cached_mems_list.append(memory_states)
else:
cached_mems_list = torch.cat((cached_mems_list, memory_states.unsqueeze(0)), dim=0)
if return_mems:
return cached_mems_list
else:
return cached_mems_list[-1]
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
sample = next(self.parameters())
input_ids = torch.randint(low=0, high=2048, size=(max_batch, max_dim, 1024), device=sample.device)
encoder_mask = torch.randint(low=0, high=1, size=(max_batch, max_dim), device=sample.device)
return tuple([input_ids, encoder_mask, input_ids, encoder_mask])
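# Minimal sketch of cached autoregressive decoding (illustrative only; sizes are arbitrary):
# a first pass over the full prefix collects per-layer memories with return_mems=True, and a
# second pass feeds a single new step while the cached memories supply the past keys/values.
def _example_cached_decoding():
    decoder = TransformerDecoder(num_layers=2, hidden_size=16, inner_size=32, num_attention_heads=2)
    batch, dec_len, enc_len, hidden = 2, 5, 7, 16
    decoder_states = torch.rand(batch, dec_len, hidden)
    decoder_mask = torch.ones(batch, dec_len)
    encoder_states = torch.rand(batch, enc_len, hidden)
    encoder_mask = torch.ones(batch, enc_len)
    # list of num_layers + 1 tensors (no final layer norm with the default post-LN setup)
    mems = decoder(decoder_states, decoder_mask, encoder_states, encoder_mask, return_mems=True)
    new_step = torch.rand(batch, 1, hidden)
    new_mask = torch.ones(batch, 1)
    out = decoder(new_step, new_mask, encoder_states, encoder_mask, decoder_mems_list=mems)
    return out.shape  # (batch, dec_len + 1, hidden): the new step concatenated onto the cached states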
| NeMo-main | nemo/collections/nlp/modules/common/transformer/transformer_decoders.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union
from omegaconf.dictconfig import DictConfig
from nemo.collections.nlp.modules.common.huggingface.huggingface_decoder import HuggingFaceDecoderModule
from nemo.collections.nlp.modules.common.huggingface.huggingface_encoder import HuggingFaceEncoderModule
from nemo.collections.nlp.modules.common.transformer.transformer import TransformerDecoderNM, TransformerEncoderNM
from nemo.collections.nlp.modules.common.transformer.transformer_bottleneck import TransformerBottleneckEncoderNM
def get_nemo_transformer(
model_name: Optional[str] = None,
pretrained: bool = False,
config_dict: Optional[Union[dict, DictConfig]] = None,
encoder: bool = True,
pre_ln_final_layer_norm: bool = True,
padding_idx: int = 0,
) -> Union[TransformerEncoderNM, TransformerDecoderNM]:
"""Returns NeMo transformer.
The following configurations are mandatory:
vocab_size: int
hidden_size: int
num_layers: int
inner_size: int
and must be specified if using config_dict.
Args:
model_name (Optional[str]): model name to download from NGC
pretrained: (bool): False will instantiate the named model architecture with random weights.
config_dict (Optional[dict], optional): model configuration parameters. Defaults to None.
encoder (bool, optional): True will use TransformerEncoderNM, False will use TransformerDecoderNM. Defaults to True.
pre_ln_final_layer_norm (bool, optional): whether to apply a final layer norm when pre-LN is used. Defaults to True.
padding_idx (int, optional): index of the padding token in the vocabulary. Defaults to 0.
"""
if model_name is not None:
raise ValueError(f'NeMo transformers cannot be loaded from NGC yet. model_name should be None')
if pretrained:
raise ValueError(f'NeMo transformers cannot be loaded from NGC yet. pretrained should be False')
cfg = None
if not pretrained:
assert (
config_dict.get('vocab_size') is not None
and config_dict.get('hidden_size') is not None
and config_dict.get('num_layers') is not None
and config_dict.get('inner_size') is not None
), f'Using config_dict: {config_dict}. vocab_size, hidden_size, num_layers, and inner_size are mandatory arguments'
cfg = config_dict
if encoder:
# if arch exists in cfg we return TransformerBottleneckEncoderNM
arch = cfg.get('arch', '')
if not arch:
model = TransformerEncoderNM(
vocab_size=cfg.get('vocab_size'),
hidden_size=cfg.get('hidden_size'),
num_layers=cfg.get('num_layers'),
inner_size=cfg.get('inner_size'),
max_sequence_length=cfg.get('max_sequence_length', 512),
embedding_dropout=cfg.get('embedding_dropout', 0.0),
learn_positional_encodings=cfg.get('learn_positional_encodings', False),
num_attention_heads=cfg.get('num_attention_heads'),
ffn_dropout=cfg.get('ffn_dropout', 0.0),
attn_score_dropout=cfg.get('attn_score_dropout', 0.0),
attn_layer_dropout=cfg.get('attn_layer_dropout', 0.0),
hidden_act=cfg.get('hidden_act', 'relu'),
mask_future=cfg.get('mask_future', True),
pre_ln=cfg.get('pre_ln', False),
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
num_token_types=cfg.get('num_token_types', 2),
padding_idx=padding_idx,
)
elif arch in TransformerBottleneckEncoderNM._SUPPORTED_ARCH:
model = TransformerBottleneckEncoderNM(
vocab_size=cfg.get('vocab_size'),
hidden_size=cfg.get('hidden_size'),
num_layers=cfg.get('num_layers'),
inner_size=cfg.get('inner_size'),
max_sequence_length=cfg.get('max_sequence_length', 512),
embedding_dropout=cfg.get('embedding_dropout', 0.0),
learn_positional_encodings=cfg.get('learn_positional_encodings', False),
num_attention_heads=cfg.get('num_attention_heads'),
ffn_dropout=cfg.get('ffn_dropout', 0.0),
attn_score_dropout=cfg.get('attn_score_dropout', 0.0),
attn_layer_dropout=cfg.get('attn_layer_dropout', 0.0),
hidden_act=cfg.get('hidden_act', 'relu'),
mask_future=cfg.get('mask_future', False),
pre_ln=cfg.get('pre_ln', False),
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
num_token_types=cfg.get('num_token_types', 2),
arch=cfg.get('arch', 'full'),
hidden_steps=cfg.get('hidden_steps', -1),
hidden_blocks=cfg.get('hidden_blocks', 1),
hidden_init_method=cfg.get('hidden_init_method', 'default'),
return_mask=cfg.get('return_mask', True),
padding_idx=padding_idx,
)
else:
raise ValueError(f"Unknown arch = {arch}")
else:
model = TransformerDecoderNM(
vocab_size=cfg.get('vocab_size'),
hidden_size=cfg.get('hidden_size'),
num_layers=cfg.get('num_layers'),
inner_size=cfg.get('inner_size'),
max_sequence_length=cfg.get('max_sequence_length', 512),
embedding_dropout=cfg.get('embedding_dropout', 0.0),
learn_positional_encodings=cfg.get('learn_positional_encodings', False),
num_attention_heads=cfg.get('num_attention_heads'),
ffn_dropout=cfg.get('ffn_dropout', 0.0),
attn_score_dropout=cfg.get('attn_score_dropout', 0.0),
attn_layer_dropout=cfg.get('attn_layer_dropout', 0.0),
hidden_act=cfg.get('hidden_act', 'relu'),
pre_ln=cfg.get('pre_ln', False),
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
num_token_types=cfg.get('num_token_types', 2),
padding_idx=padding_idx,
)
return model
def get_huggingface_transformer(
model_name: Optional[str] = None,
pretrained: bool = False,
config_dict: Optional[Union[dict, DictConfig]] = None,
encoder: bool = True,
) -> Union[HuggingFaceEncoderModule, HuggingFaceDecoderModule]:
if encoder:
model = HuggingFaceEncoderModule(model_name, pretrained, config_dict)
else:
model = HuggingFaceDecoderModule(model_name, pretrained, config_dict)
return model
def get_megatron_transformer(
model_name: Optional[str] = None,
pretrained: bool = True,
config_dict: Optional[Union[dict, DictConfig]] = None,
encoder: bool = True,
checkpoint_file: str = None,
) -> None:
raise ValueError(
"megatron-lm bert encoders are deprecated in NeMo 1.5.0. Please use NeMo 1.4.0 until megatron bert support is added again."
)
# vocab_file = config_dict.pop('vocab_file', None)
# if encoder:
# model = MegatronEncoderModule(
# model_name=model_name,
# pretrained=pretrained,
# config_dict=config_dict,
# checkpoint_file=checkpoint_file,
# vocab_file=vocab_file,
# )
# else:
# raise ValueError('Megatron decoders are not currently supported.')
# return model
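# Hedged usage sketch (values are arbitrary): building a small randomly initialized NeMo encoder
# from a plain config dict. Besides the four mandatory keys checked above, num_attention_heads is
# included because the builder reads it with cfg.get() and would otherwise pass None through.
def _example_get_nemo_transformer():
    config = {
        'vocab_size': 1000,
        'hidden_size': 64,
        'num_layers': 2,
        'inner_size': 128,
        'num_attention_heads': 4,
    }
    return get_nemo_transformer(model_name=None, pretrained=False, config_dict=config, encoder=True)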
| NeMo-main | nemo/collections/nlp/modules/common/transformer/transformer_utils.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.modules.common.transformer.bridge_encoders import *
from nemo.collections.nlp.modules.common.transformer.perceiver_encoders import *
from nemo.collections.nlp.modules.common.transformer.transformer_bottleneck import *
from nemo.collections.nlp.modules.common.transformer.transformer_decoders import *
from nemo.collections.nlp.modules.common.transformer.transformer_encoders import *
from nemo.collections.nlp.modules.common.transformer.transformer_generators import *
from nemo.collections.nlp.modules.common.transformer.transformer_modules import *
| NeMo-main | nemo/collections/nlp/modules/common/transformer/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import torch
from nemo.collections.nlp.modules.common.transformer.transformer_decoders import TransformerDecoder
from nemo.collections.nlp.modules.common.transformer.transformer_encoders import TransformerEncoder
from nemo.collections.nlp.modules.common.transformer.transformer_modules import AttentionBridge
__all__ = ["PerceiverEncoder"]
class PerceiverEncoder(torch.nn.Module):
def __init__(
self,
num_layers: int,
hidden_size: int,
inner_size: int,
mask_future: bool = False,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
hidden_steps: int = 32,
hidden_init_method: str = "default",
hidden_blocks: int = 2,
):
super().__init__()
self._hidden_steps = hidden_steps
self._hidden_init_method = hidden_init_method
self._hidden_blocks = hidden_blocks
if self._hidden_init_method == "default":
self._hidden_init_method = "params"
if self.hidden_init_method not in self.supported_init_methods:
raise ValueError(
"Unknown hidden_init_method = {hidden_init_method}, supported methods are {supported_init_methods}".format(
hidden_init_method=self.hidden_init_method, supported_init_methods=self.supported_init_methods,
)
)
diagonal = 0 if mask_future else None
if self.hidden_init_method == "params":
# learnable initial hidden values
self.init_hidden = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(hidden_steps, hidden_size)))
self.init_cross_att = TransformerDecoder(
num_layers=1,
hidden_size=hidden_size,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
ffn_dropout=ffn_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
self.init_cross_att.diagonal = diagonal
elif self.hidden_init_method == "bridge":
# initialize latent with attention bridge
self.att_bridge = AttentionBridge(hidden_size=hidden_size, k=hidden_steps, bridge_size=inner_size,)
# cross-attention encoder
layer = TransformerDecoder(
num_layers=1,
hidden_size=hidden_size,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
ffn_dropout=ffn_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
layer.diagonal = diagonal
self.cross_att_layers = torch.nn.ModuleList([copy.deepcopy(layer) for _ in range(hidden_blocks)])
# self-attention encoder
layer = TransformerEncoder(
num_layers=num_layers,
hidden_size=hidden_size,
inner_size=inner_size,
mask_future=mask_future,
num_attention_heads=num_attention_heads,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
ffn_dropout=ffn_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
self.self_att_layers = torch.nn.ModuleList([copy.deepcopy(layer) for _ in range(hidden_blocks)])
@property
def supported_init_methods(self):
return ["params", "bridge"]
@property
def hidden_steps(self):
return self._hidden_steps
@property
def hidden_blocks(self):
return self._hidden_blocks
@property
def hidden_init_method(self):
return self._hidden_init_method
def forward(self, encoder_states, encoder_mask):
"""
Args:
encoder_states: output of the encoder (B x L_enc x H)
encoder_mask: encoder inputs mask (B x L_enc)
"""
# all hidden values are active
hidden_mask = torch.ones(
encoder_states.shape[0], self._hidden_steps, dtype=encoder_mask.dtype, device=encoder_mask.device
)
# initialize hidden state
if self._hidden_init_method == "params":
# initialize latent with learned parameters
hidden_states = self.init_hidden.unsqueeze(0).expand(encoder_states.shape[0], -1, -1)
hidden_states = self.init_cross_att(
decoder_states=hidden_states,
decoder_mask=hidden_mask,
encoder_states=encoder_states,
encoder_mask=encoder_mask,
)
elif self._hidden_init_method == "bridge":
# initialize latent with attention bridge
hidden_states = self.att_bridge(hidden=encoder_states, hidden_mask=encoder_mask,)
# apply block (cross-attention, self-attention) multiple times
for self_att, cross_att in zip(self.self_att_layers, self.cross_att_layers):
residual = hidden_states
# cross attention of hidden over encoder states
hidden_states = cross_att(
decoder_states=hidden_states,
decoder_mask=hidden_mask,
encoder_states=encoder_states,
encoder_mask=encoder_mask,
)
# self-attention over hidden
hidden_states = self_att(encoder_states=hidden_states, encoder_mask=hidden_mask,)
# residual connection
hidden_states += residual
return hidden_states, hidden_mask
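# Shape sketch (illustrative only; sizes are arbitrary): the Perceiver encoder compresses a
# variable-length input (B x L_enc x H) into a fixed number of hidden_steps latent vectors,
# so the returned mask always has hidden_steps positions.
def _example_perceiver_encoder():
    encoder = PerceiverEncoder(
        num_layers=2, hidden_size=16, inner_size=32, num_attention_heads=2, hidden_steps=8, hidden_blocks=2
    )
    encoder_states = torch.rand(3, 20, 16)
    encoder_mask = torch.ones(3, 20)
    hidden_states, hidden_mask = encoder(encoder_states, encoder_mask)
    return hidden_states.shape, hidden_mask.shape  # (3, 8, 16), (3, 8)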
| NeMo-main | nemo/collections/nlp/modules/common/transformer/perceiver_encoders.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.collections.nlp.modules.common.transformer.transformer_encoders import TransformerEncoder
from nemo.collections.nlp.modules.common.transformer.transformer_modules import AttentionBridge
__all__ = ["BridgeEncoder"]
class BridgeEncoder(torch.nn.Module):
def __init__(
self,
num_layers: int,
hidden_size: int,
inner_size: int,
mask_future: bool = False,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
hidden_steps: int = 32,
hidden_init_method: str = "default",
hidden_blocks: int = 0,
):
super().__init__()
self._hidden_steps = hidden_steps
self._hidden_init_method = hidden_init_method
self._hidden_blocks = hidden_blocks
if self._hidden_init_method == "default":
self._hidden_init_method = "enc_shared"
if self.hidden_init_method not in self.supported_init_methods:
raise ValueError(
"Unknown hidden_init_method = {hidden_init_method}, supported methods are {supported_init_methods}".format(
hidden_init_method=self.hidden_init_method, supported_init_methods=self.supported_init_methods,
)
)
# attention bridge
self.att_bridge = AttentionBridge(hidden_size=hidden_size, k=hidden_steps, bridge_size=inner_size,)
if self.hidden_init_method == "enc":
self.init_hidden_enc = TransformerEncoder(
num_layers=num_layers,
hidden_size=hidden_size,
inner_size=inner_size,
mask_future=mask_future,
num_attention_heads=num_attention_heads,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
ffn_dropout=ffn_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
# self attention
self.hidden_enc = TransformerEncoder(
num_layers=num_layers,
hidden_size=hidden_size,
inner_size=inner_size,
mask_future=mask_future,
num_attention_heads=num_attention_heads,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
ffn_dropout=ffn_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
@property
def supported_init_methods(self):
return ["enc_shared", "identity", "enc"]
@property
def hidden_steps(self):
return self._hidden_steps
@property
def hidden_blocks(self):
return self._hidden_blocks
@property
def hidden_init_method(self):
return self._hidden_init_method
def forward(self, encoder_states, encoder_mask):
"""
Args:
encoder_states: output of the encoder (B x L_enc x H)
encoder_mask: encoder inputs mask (B x L_enc)
"""
# self-attention over input
if self.hidden_init_method == "enc_shared":
residual = encoder_states
hidden_states = self.hidden_enc(encoder_states=encoder_states, encoder_mask=encoder_mask)
# residual connection
hidden_states += residual
elif self.hidden_init_method == "identity":
hidden_states = encoder_states
elif self.hidden_init_method == "enc":
residual = encoder_states
hidden_states = self.init_hidden_enc(encoder_states=encoder_states, encoder_mask=encoder_mask)
# residual connection
hidden_states += residual
# project encoder states to a fixed steps hidden using k attention heads
hidden_states = self.att_bridge(hidden=hidden_states, hidden_mask=encoder_mask)
# all hidden values are active
hidden_mask = torch.ones(
encoder_states.shape[0], self._hidden_steps, dtype=encoder_mask.dtype, device=encoder_mask.device
)
# apply self-attention over fixed-size hidden_states
for block in range(self._hidden_blocks):
residual = hidden_states
hidden_states = self.hidden_enc(encoder_states=hidden_states, encoder_mask=hidden_mask)
# residual connection
hidden_states += residual
return hidden_states, hidden_mask
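# Shape sketch (illustrative only; sizes are arbitrary): the attention bridge projects the
# (B x L_enc x H) input onto hidden_steps fixed vectors, which are then refined by
# hidden_blocks rounds of shared self-attention.
def _example_bridge_encoder():
    encoder = BridgeEncoder(
        num_layers=2, hidden_size=16, inner_size=32, num_attention_heads=2, hidden_steps=8, hidden_blocks=1
    )
    encoder_states = torch.rand(3, 20, 16)
    encoder_mask = torch.ones(3, 20)
    hidden_states, hidden_mask = encoder(encoder_states, encoder_mask)
    return hidden_states.shape, hidden_mask.shape  # (3, 8, 16), (3, 8)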
| NeMo-main | nemo/collections/nlp/modules/common/transformer/bridge_encoders.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, Optional
import torch
from omegaconf.omegaconf import MISSING
from nemo.collections.nlp.modules.common.decoder_module import DecoderModule
from nemo.collections.nlp.modules.common.encoder_module import EncoderModule
from nemo.collections.nlp.modules.common.transformer.transformer_decoders import TransformerDecoder
from nemo.collections.nlp.modules.common.transformer.transformer_encoders import TransformerEncoder
from nemo.collections.nlp.modules.common.transformer.transformer_modules import TransformerEmbedding
from nemo.core.classes.common import typecheck
from nemo.core.classes.exportable import Exportable
from nemo.core.neural_types import ChannelType, NeuralType
# @dataclass
# class TransformerConfig:
# # named model arguments
# library: str = 'nemo'
# model_name: Optional[str] = None
# pretrained: bool = False
@dataclass
class NeMoTransformerConfig:
# must be configured by the user
hidden_size: int = MISSING
num_layers: int = MISSING
inner_size: int = MISSING
num_attention_heads: int = MISSING
# embedding
max_sequence_length: int = 512
num_token_types: int = 2
embedding_dropout: float = 0.0
learn_positional_encodings: bool = False
# transformer
ffn_dropout: float = 0.0
attn_score_dropout: float = 0.0
attn_layer_dropout: float = 0.0
hidden_act: str = 'relu'
pre_ln: bool = False
pre_ln_final_layer_norm: bool = True
# named model arguments
library: str = 'nemo'
model_name: Optional[str] = None
pretrained: bool = False
@dataclass
class NeMoTransformerEncoderConfig(NeMoTransformerConfig):
mask_future: bool = False
@dataclass
class NeMoTransformerDecoderConfig(NeMoTransformerConfig):
r2l: bool = False
class TransformerEncoderNM(EncoderModule, Exportable):
def __init__(
self,
vocab_size: int,
hidden_size: int,
num_layers: int,
inner_size: int,
num_attention_heads: int,
max_sequence_length: int = 512,
num_token_types: int = 2,
embedding_dropout: float = 0.0,
learn_positional_encodings: bool = False,
ffn_dropout: float = 0.0,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
hidden_act: str = 'relu',
mask_future: bool = False,
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
padding_idx: int = 0,
):
super().__init__()
self._vocab_size = vocab_size
self._hidden_size = hidden_size
self._max_sequence_length = max_sequence_length
self._embedding = TransformerEmbedding(
vocab_size=self._vocab_size,
hidden_size=self._hidden_size,
max_sequence_length=max_sequence_length,
num_token_types=num_token_types,
embedding_dropout=embedding_dropout,
learn_positional_encodings=learn_positional_encodings,
padding_idx=padding_idx,
)
self._encoder = TransformerEncoder(
hidden_size=self._hidden_size,
num_layers=num_layers,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
ffn_dropout=ffn_dropout,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
hidden_act=hidden_act,
mask_future=mask_future,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
@typecheck()
def forward(self, input_ids, encoder_mask):
embeddings = self._embedding(input_ids=input_ids)
encoder_hidden_states = self._encoder(encoder_states=embeddings, encoder_mask=encoder_mask)
return encoder_hidden_states
@property
def hidden_size(self):
return self._hidden_size
@property
def vocab_size(self):
return self._vocab_size
@property
def max_sequence_length(self):
return self._max_sequence_length
@property
def embedding(self):
return self._embedding
@property
def encoder(self):
return self._encoder
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
sample = next(self.parameters())
sz = (max_batch, max_dim)
input_ids = torch.randint(low=0, high=2048, size=sz, device=sample.device)
encoder_mask = torch.randint(low=0, high=1, size=sz, device=sample.device)
return tuple([input_ids, encoder_mask])
class TransformerDecoderNM(DecoderModule, Exportable):
def __init__(
self,
vocab_size: int,
hidden_size: int,
num_layers: int,
inner_size: int,
num_attention_heads: int,
max_sequence_length: int = 512,
num_token_types: int = 2,
embedding_dropout: float = 0.0,
learn_positional_encodings: bool = False,
ffn_dropout: float = 0.0,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
hidden_act: str = 'relu',
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
padding_idx: int = 0,
):
super().__init__()
self._vocab_size = vocab_size
self._hidden_size = hidden_size
self._max_sequence_length = max_sequence_length
self.num_states = num_layers + 1
self.return_mems = False
if pre_ln_final_layer_norm:
self.num_states += 1
self._embedding = TransformerEmbedding(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
max_sequence_length=max_sequence_length,
num_token_types=num_token_types,
embedding_dropout=embedding_dropout,
learn_positional_encodings=learn_positional_encodings,
padding_idx=padding_idx,
)
self._decoder = TransformerDecoder(
hidden_size=self.hidden_size,
num_layers=num_layers,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
ffn_dropout=ffn_dropout,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
@typecheck()
def forward(
self, input_ids, decoder_mask, encoder_embeddings, encoder_mask, decoder_mems=None,
):
start_pos = 0
if decoder_mems is not None:
start_pos = input_ids.shape[1] - 1
input_ids = input_ids[:, -1:]
decoder_mask = decoder_mask[:, -1:]
decoder_mems = torch.transpose(decoder_mems, 0, 1)
decoder_embeddings = self._embedding(input_ids=input_ids, start_pos=start_pos)
decoder_hidden_states = self._decoder(
decoder_states=decoder_embeddings,
decoder_mask=decoder_mask,
encoder_states=encoder_embeddings,
encoder_mask=encoder_mask,
decoder_mems_list=decoder_mems,
return_mems=self.return_mems,
return_mems_as_list=False,
)
if self.return_mems:
decoder_hidden_states = torch.transpose(decoder_hidden_states, 0, 1)
return decoder_hidden_states
@property
def hidden_size(self):
return self._hidden_size
@property
def vocab_size(self):
return self._vocab_size
@property
def max_sequence_length(self):
return self._max_sequence_length
@property
def embedding(self):
return self._embedding
@property
def decoder(self):
return self._decoder
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
sample = next(self.parameters())
sz = (max_batch, max_dim)
input_ids = torch.randint(low=0, high=2048, size=sz, device=sample.device)
encoder_mask = torch.randint(low=0, high=1, size=sz, device=sample.device)
mem_size = [max_batch, self.num_states, max_dim - 1, self._hidden_size]
decoder_mems = torch.rand(mem_size, device=sample.device)
return tuple([input_ids, encoder_mask, self._embedding(input_ids), encoder_mask, decoder_mems])
def _prepare_for_export(self, **kwargs):
self._decoder.diagonal = None
self.return_mems = True
super()._prepare_for_export(**kwargs)
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
if self.return_mems:
return {"last_hidden_states": NeuralType(('B', 'D', 'T', 'D'), ChannelType())}
else:
return {"last_hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}
| NeMo-main | nemo/collections/nlp/modules/common/transformer/transformer.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import torch
from torch import nn
from torch.nn.functional import gelu
from nemo.collections.common.parts import form_attention_mask
from nemo.utils import logging
__all__ = ["TransformerEmbedding", "AttentionBridge"]
class FixedPositionalEncoding(nn.Module):
"""
Fixed positional encoding (embedding layer) from sine and cosine functions
of different frequencies according to https://arxiv.org/abs/1706.03762
Args:
hidden_size: size of the embeddings in the model, also known as d_model
max_sequence_length: maximum allowed length of the input sequence
"""
def __init__(self, hidden_size, max_sequence_length=512):
super().__init__()
self._hidden_size = hidden_size
self._max_sequence_length = max_sequence_length
self._build_pos_enc(hidden_size=self._hidden_size, max_sequence_length=self._max_sequence_length)
def _build_pos_enc(self, hidden_size, max_sequence_length, device=None):
"""
Builds/replaces pre-computed positional encoding.
"""
pos_enc = torch.zeros(max_sequence_length, hidden_size, device=device)
position = torch.arange(0.0, max_sequence_length).unsqueeze(1)
coef = -math.log(10000.0) / hidden_size
div_term = torch.exp(coef * torch.arange(0.0, hidden_size, 2))
pos_enc[:, 0::2] = torch.sin(position * div_term)
pos_enc[:, 1::2] = torch.cos(position * div_term)
pos_enc.div_(math.sqrt(hidden_size))
self.register_buffer('pos_enc', pos_enc)
def forward(self, position_ids):
max_pos_id = position_ids.max()
# update positional encoding if needed
if max_pos_id >= self._max_sequence_length:
logging.warning(
f'Max position id {max_pos_id} is greater than max sequence length {self._max_sequence_length}. Expanding position embeddings just for this batch. This is not expected to work very well. Consider chunking your input into smaller sequences.'
)
self._build_pos_enc(
hidden_size=self._hidden_size, max_sequence_length=max_pos_id + 1, device=position_ids.device,
)
embeddings = torch.embedding(self.pos_enc, position_ids)
# Revert the expansion of position embeddings, since otherwise it will cause checkpoint size mismatches.
if max_pos_id >= self._max_sequence_length:
self._build_pos_enc(
hidden_size=self._hidden_size,
max_sequence_length=self._max_sequence_length,
device=position_ids.device,
)
return embeddings
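# Note on the table built by _build_pos_enc: it follows the standard sinusoidal scheme of
# "Attention Is All You Need", pos_enc[pos, 2i] = sin(pos / 10000^(2i / hidden_size)) and
# pos_enc[pos, 2i + 1] = cos(pos / 10000^(2i / hidden_size)), with an additional division by
# sqrt(hidden_size) that is specific to this implementation.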
class TransformerEmbedding(nn.Module):
"""
Embedding from token and position embeddings.
Optionally add token_type embedding (e.g. type of the sentence in BERT).
Args:
vocab_size: size of the vocabulary
hidden_size: size of the embeddings in the model, also known as d_model
max_sequence_length: maximum allowed length of the input sequence
num_token_types: number of different token types
(e.g. tokens of sentence A and tokens of sentence B in BERT)
embedding_dropout: probability of dropout applied to embeddings
learn_positional_encodings: whether to learn positional encodings or
use fixed (sine-cosine) ones
"""
def __init__(
self,
vocab_size: int,
hidden_size: int,
max_sequence_length: int = 512,
num_token_types: int = 2,
embedding_dropout: float = 0.0,
learn_positional_encodings: bool = False,
padding_idx: int = 0,
):
super().__init__()
self.max_sequence_length = max_sequence_length
self.learn_positional_encodings = learn_positional_encodings
self.token_embedding = nn.Embedding(vocab_size, hidden_size, padding_idx=padding_idx)
if learn_positional_encodings:
self.position_embedding = nn.Embedding(max_sequence_length, hidden_size)
else:
self.position_embedding = FixedPositionalEncoding(hidden_size, max_sequence_length)
if num_token_types > 0:
self.token_type_embedding = nn.Embedding(num_token_types, hidden_size)
self.layer_norm = nn.LayerNorm(hidden_size, eps=1e-5)
self.dropout = nn.Dropout(embedding_dropout)
def forward(self, input_ids, token_type_ids=None, start_pos=0):
seq_length = input_ids.size(1)
# we only raise here for learned positional embeddings; FixedPositionalEncoding extends itself automatically.
if self.learn_positional_encodings and (seq_length > self.max_sequence_length):
raise ValueError(
f"Input sequence is longer than maximum allowed sequence length for positional encoding. "
f"Got {seq_length} and {self.max_sequence_length}"
)
position_ids = torch.arange(
start=start_pos, end=start_pos + seq_length, dtype=torch.long, device=input_ids.device
)
position_ids = position_ids.unsqueeze(0).repeat(input_ids.size(0), 1)
token_embeddings = self.token_embedding(input_ids)
position_embeddings = self.position_embedding(position_ids)
embeddings = token_embeddings + position_embeddings
if token_type_ids is not None:
token_type_embeddings = self.token_type_embedding(token_type_ids)
embeddings = embeddings + token_type_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class MultiHeadAttention(nn.Module):
"""
Multi-head scaled dot-product attention layer.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
num_attention_heads: number of heads in multi-head attention
attn_score_dropout: probability of dropout applied to attention scores
attn_layer_dropout: probability of dropout applied to the output of the
whole layer, but before layer normalization
"""
def __init__(self, hidden_size, num_attention_heads, attn_score_dropout=0.0, attn_layer_dropout=0.0):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number "
"of attention heads (%d)" % (hidden_size, num_attention_heads)
)
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.attn_head_size = int(hidden_size / num_attention_heads)
self.attn_scale = math.sqrt(math.sqrt(self.attn_head_size))
self.query_net = nn.Linear(hidden_size, hidden_size)
self.key_net = nn.Linear(hidden_size, hidden_size)
self.value_net = nn.Linear(hidden_size, hidden_size)
self.out_projection = nn.Linear(hidden_size, hidden_size)
self.attn_dropout = nn.Dropout(attn_score_dropout)
self.layer_dropout = nn.Dropout(attn_layer_dropout)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attn_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, queries, keys, values, attention_mask):
# attention_mask is needed to hide the tokens which correspond to [PAD]
# in the case of BERT, or to hide the future tokens in the case of
# vanilla language modeling and translation
query = self.query_net(queries)
key = self.key_net(keys)
value = self.value_net(values)
query = self.transpose_for_scores(query) / self.attn_scale
key = self.transpose_for_scores(key) / self.attn_scale
value = self.transpose_for_scores(value)
# for numerical stability we pre-divide query and key by sqrt(sqrt(d))
attention_scores = torch.matmul(query, key.transpose(-1, -2))
if attention_mask is not None:
attention_scores = attention_scores + attention_mask.to(attention_scores.dtype)
attention_probs = torch.softmax(attention_scores, dim=-1)
attention_probs = self.attn_dropout(attention_probs)
context = torch.matmul(attention_probs, value)
context = context.permute(0, 2, 1, 3).contiguous()
new_context_shape = context.size()[:-2] + (self.hidden_size,)
context = context.view(*new_context_shape)
# output projection
output_states = self.out_projection(context)
output_states = self.layer_dropout(output_states)
return output_states
class PositionWiseFF(nn.Module):
"""
Position-wise feed-forward network of Transformer block.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
inner_size: number of neurons in the intermediate part of feed-forward
net, usually is (4-8 x hidden_size) in the papers
ffn_dropout: probability of dropout applied to net output
hidden_act: activation function used between two linear layers
"""
def __init__(self, hidden_size, inner_size, ffn_dropout=0.0, hidden_act="relu"):
super().__init__()
self.dense_in = nn.Linear(hidden_size, inner_size)
self.dense_out = nn.Linear(inner_size, hidden_size)
self.layer_dropout = nn.Dropout(ffn_dropout)
ACT2FN = {"gelu": gelu, "relu": torch.relu}
self.act_fn = ACT2FN[hidden_act]
def forward(self, hidden_states):
output_states = self.dense_in(hidden_states)
output_states = self.act_fn(output_states)
output_states = self.dense_out(output_states)
output_states = self.layer_dropout(output_states)
return output_states
class AttentionBridge(torch.nn.Module):
"""
A multi-head attention bridge to project a variable-size hidden states
to k hidden states (per attention head).
Code is based on the paper https://arxiv.org/pdf/1703.03130.pdf
"""
def __init__(self, hidden_size, k, bridge_size):
"""
hidden_size - size of input hidden state
k - number of attention heads
bridge_size - size of internal feed forward weights (i.e., attention head size)
"""
super().__init__()
self.hidden_size = hidden_size
self.k = k
self.bridge_size = bridge_size
self.attn_scale = np.sqrt(np.sqrt(self.bridge_size))
# build model
self.W1 = torch.nn.Linear(hidden_size, bridge_size, bias=False)
self.W2 = torch.nn.Linear(bridge_size, k, bias=False)
self.act = torch.nn.ReLU()
def forward(self, hidden, hidden_mask=None, return_ortho_loss=False):
"""
Project hidden [B x N x H] to fixed-size [B x k x H]
return_ortho_loss - if True returns loss term to encourage
orthogonal attention vectors
"""
attention_scores = self.W2(self.act(self.W1(hidden) / self.attn_scale) / self.attn_scale).transpose(-1, -2)
attention_mask = form_attention_mask(hidden_mask)
if attention_mask is not None:
attention_mask.squeeze_(1)
attention_scores = attention_scores + attention_mask.to(attention_scores.dtype)
A = torch.softmax(attention_scores, dim=-1)
M = A @ hidden
if return_ortho_loss:
ortho_loss = ((A @ A.transpose(-1, -2)) - torch.eye(self.k).type_as(A)).pow(2).sum()
return M, ortho_loss
else:
return M
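# Usage sketch (illustrative only; sizes are arbitrary): projecting a variable-length sequence
# onto k fixed attention-bridge vectors, optionally returning the orthogonality penalty
# described in https://arxiv.org/pdf/1703.03130.pdf.
def _example_attention_bridge():
    bridge = AttentionBridge(hidden_size=16, k=4, bridge_size=32)
    hidden = torch.rand(2, 10, 16)
    hidden_mask = torch.ones(2, 10)
    fixed, ortho_loss = bridge(hidden, hidden_mask, return_ortho_loss=True)
    return fixed.shape, ortho_loss  # fixed has shape (2, 4, 16)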
| NeMo-main | nemo/collections/nlp/modules/common/transformer/transformer_modules.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from typing import List, Tuple, Union
from torch import Tensor
if sys.version_info >= (3, 8):
from typing import TypedDict
else:
from typing_extensions import TypedDict
class LengthParam(TypedDict):
max_length: int # The maximum length of the sequence to be generated.
min_length: int # The minimum length of the sequence to be generated.
class SamplingParam(TypedDict):
use_greedy: bool # Whether to use greedy decoding instead of sampling
temperature: float # sampling temperature
top_k: int # The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p: float # If set to float < 1, only the most probable tokens with probabilities that add up to top_p or higher are kept for generation.
repetition_penalty: float # The parameter for repetition penalty. 1.0 means no penalty.
add_BOS: bool # add the bos token at the beginning of the prompt
all_probs: bool # whether return the log prob for all the tokens in vocab
compute_logprob: bool # a flag used to compute logprob of all the input text, a very special case of running inference, default False
end_strings: List[str] # generation will stop when one of these tokens is generated
class OutputType(TypedDict):
sentences: List[str] # output sentences
tokens: List[List[str]] # output sentences broken into tokens
logprob: List[List[float]] # log prob of generated tokens
full_logprob: List[List[float]] # log prob of all the tokens in the vocab
token_ids: List[List[int]] # output sentence token ids
offsets: List[List[int]] # list of tokens start positions in text
class TextGeneration:
"""
Interface for all text generation models.
"""
def generate(
self,
inputs: Union[List[str], Tuple[Tensor, Tensor], List[dict]],
length_params: LengthParam,
sampling_params: SamplingParam = None,
) -> OutputType:
"""
Public method to generate text.
Args:
inputs (Union[List[str], Tensor, List[dict]]):
Can be one of the 3 types:
1. List of strings. Each element of the list provides input prompt. The model will apply tokenizer on it.
E.g. ['sentence', 'sentence2', ...]
2. Tuple of PyTorch tensors (context_tokens, context_lengths). `context_tokens` has shape (batch_size, seq_length); it holds the batched sequences of tokens used as a prompt for the generation or as model inputs to the encoder.
The generative model will skip the tokenization and padding step. The `context_lengths` has shape (batch_size,), it indicates the length of the context tokens for each of the input sequences.
E.g. ( torch.tensor([[23,5234,23,35,…], [223,323,23,23232,232,...] …]), torch.tensor([20, 30, …]))
3. List of python dict objects. Used for prompt/p-tuning inputs where a set of key-value pairs are converted into input token embeddings for the model.
E.g. [{"prompt-tag": "sentiment", "sentence": "this is a good movie"},
{"prompt-tag": "qa", "context": "some context text", "question": "a simple question"} ... ]
where 'prompt-tag' is used to identify the type of NLP task to solve.
length_params (LengthParam):
a dictionary type which controls the sampling length.
max_length: int, The maximum length of the sequence to be generated.
min_length: int, The minimum length of the sequence to be generated.
If None, max_length is set to 30, and min_length is set to None
sampling_params (SamplingParam):
a dictionary type which contains the parameters for text sampling. It has the following keys
use_greedy: bool, Whether to use greedy decoding instead of sampling
top_k: int, The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p: float, If set to float < 1, only the most probable tokens with probabilities that add up to top_p or higher are kept for generation.
repetition_penalty: float, The parameter for repetition penalty. 1.0 means no penalty.
add_BOS: bool, Whether to add the bos token at the beginning of the prompt
all_probs: bool # whether return the log prob for all the tokens in vocab
compute_logprob: bool # a flag used to compute logprob of all the input text, a very special case of running inference, default False
end_strings: List[str] # generation will stop when one of these tokens is generated
Default None. If it is None, use_greedy is set to True.
Returns:
OutputType: It generates the output in a dictionary type. It has the following keys:
sentences: List[str], output sentences
tokens: List[List[str]], output sentences broken into tokens
logprob: List[List[float]], log prob of generated tokens
full_logprob: List[List[float]], log prob of all the tokens in the vocab
token_ids: List[List[int]], output sentence token ids
offsets: List[List[int]] # list of tokens start positions in text
"""
raise NotImplementedError("please implement this method")
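# Hedged calling sketch: any concrete implementation of TextGeneration is driven with the two
# TypedDicts defined above. The prompt, the end string and all numeric values below are
# illustrative assumptions, not defaults of this interface.
def _example_generate_call(model: TextGeneration) -> OutputType:
    length_params: LengthParam = {"max_length": 30, "min_length": 0}
    sampling_params: SamplingParam = {
        "use_greedy": False,
        "temperature": 0.8,
        "top_k": 0,
        "top_p": 0.9,
        "repetition_penalty": 1.2,
        "add_BOS": True,
        "all_probs": False,
        "compute_logprob": False,
        "end_strings": ["<|endoftext|>"],
    }
    return model.generate(["Deep learning is"], length_params, sampling_params)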
| NeMo-main | nemo/collections/nlp/modules/common/transformer/text_generation.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import torch
from nemo.collections.nlp.modules.common.transformer.transformer_encoders import TransformerEncoder
__all__ = ["PoolingEncoder"]
class PoolingEncoder(torch.nn.Module):
_SUPPORTED_ARCH = ["max", "avg"]
def __init__(
self,
num_layers: int,
hidden_size: int,
inner_size: int,
mask_future: bool = False,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
hidden_steps: int = 4,
hidden_init_method: str = "default",
hidden_blocks: int = 2,
pooling_type: str = "max",
):
super().__init__()
# minimal steps to allow reduction
self._hidden_steps = hidden_steps
self._hidden_init_method = hidden_init_method
self._hidden_blocks = hidden_blocks
self._pooling_type = pooling_type
if self._hidden_steps < 2:
raise ValueError("Expected hidden_steps >= 2 but received hidden_steps = {self._hidden_steps}")
if self.hidden_init_method not in self.supported_init_methods:
raise ValueError(
"Unknown hidden_init_method = {hidden_init_method}, supported methods are {supported_init_methods}".format(
hidden_init_method=self.hidden_init_method, supported_init_methods=self.supported_init_methods,
)
)
if self._pooling_type not in self.supported_arch:
raise ValueError(f"Unknown pooling_type = {pooling_type}. Available values = {self.supported_arch}")
# self-attention encoder
layer = TransformerEncoder(
num_layers=num_layers,
hidden_size=hidden_size,
inner_size=inner_size,
mask_future=mask_future,
num_attention_heads=num_attention_heads,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
ffn_dropout=ffn_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
self.self_att_layers = torch.nn.ModuleList([copy.deepcopy(layer) for _ in range(hidden_blocks)])
self.pooling = self._build_pooling_module()
def _build_pooling_module(self):
"""
Returns pooling module.
Allows to override for child classes.
"""
if self._pooling_type == "max":
pooling = torch.nn.MaxPool1d(kernel_size=2, stride=2)
elif self._pooling_type == "avg":
pooling = torch.nn.AvgPool1d(kernel_size=2, stride=2)
return pooling
@property
def supported_arch(self):
return self._SUPPORTED_ARCH
@property
def supported_init_methods(self):
return ["default"]
@property
def hidden_steps(self):
return self._hidden_steps
@property
def hidden_blocks(self):
return self._hidden_blocks
@property
def hidden_init_method(self):
return self._hidden_init_method
def forward(self, encoder_states, encoder_mask):
"""
Args:
encoder_states: output of the encoder (B x L_enc x H)
encoder_mask: encoder inputs mask (B x L_enc)
"""
# initialize hidden state
hidden_mask = encoder_mask
hidden_states = encoder_states
# apply block (self-attention, max-pool) multiple times
for self_att in self.self_att_layers:
residual = hidden_states
# self-attention over hidden
hidden_states = self_att(encoder_states=hidden_states, encoder_mask=hidden_mask)
hidden_states += residual
# max pool reduction if possible
if hidden_states.shape[1] >= self.hidden_steps:
# max pool hidden states
hidden_states = hidden_states.permute(0, 2, 1)
hidden_states = self.pooling(hidden_states)
hidden_states = hidden_states.permute(0, 2, 1)
# max pool mask
hidden_mask = (
self.pooling(hidden_mask.unsqueeze(0).type_as(hidden_states)).squeeze(0).type_as(hidden_mask)
)
return hidden_states, hidden_mask
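# Shape sketch (illustrative only; sizes are arbitrary): every (self-attention, pooling) block
# halves the sequence length as long as it is still at least hidden_steps, so a length-16 input
# with two blocks and hidden_steps=4 comes out with length 4.
def _example_pooling_encoder():
    encoder = PoolingEncoder(
        num_layers=1, hidden_size=8, inner_size=16, num_attention_heads=2, hidden_steps=4, hidden_blocks=2
    )
    encoder_states = torch.rand(2, 16, 8)
    encoder_mask = torch.ones(2, 16)
    hidden_states, hidden_mask = encoder(encoder_states, encoder_mask)
    return hidden_states.shape, hidden_mask.shape  # (2, 4, 8), (2, 4)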
| NeMo-main | nemo/collections/nlp/modules/common/transformer/reduction_encoders.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from hydra.utils import instantiate
from transformers import AutoConfig, AutoModel
from nemo.collections.nlp.modules.common.encoder_module import EncoderModule
from nemo.collections.nlp.modules.common.huggingface.huggingface_utils import get_huggingface_pretrained_lm_models_list
from nemo.core.classes.common import typecheck
from nemo.utils import logging
class HuggingFaceEncoderModule(EncoderModule):
""" Class for using HuggingFace encoders in NeMo NLP."""
def __init__(
self,
model_name: Optional[str] = None,
pretrained: bool = False,
config_dict: Optional[dict] = None,
checkpoint_file: Optional[str] = None,
):
"""Gets HuggingFace based model to be used as an Encoder in NeMo NLP.
Use the model_name arg to get a named model architecture.
Available model names can be found with get_huggingface_pretrained_lm_models_list() or
by going to https://huggingface.co/models.
Use the pretrained arg to get the named model architecture with or without pretrained weights.
If model_name is None, then we can pass in a custom configuration via the config_dict.
For example, to instantiate a HuggingFace BERT model with custom configuration we would do:
config_dict={
'_target_': 'transformers.BertConfig',
'hidden_size': 1536
}
Args:
model_name (Optional[str]): Named model architecture from HuggingFace. Defaults to None.
pretrained (bool): Use True to get pretrained weights.
False will use the same architecture but with randomly initialized weights.
Defaults to False.
config_dict (Optional[dict], optional): Use for custom configuration of the HuggingFace model. Defaults to None.
checkpoint_file (Optional[str], optional): Provide weights for the transformer from a local checkpoint. Defaults to None.
"""
super().__init__()
if checkpoint_file:
raise NotImplementedError('Restoring from checkpoint file not implemented yet.')
model = None
if model_name is not None:
if model_name in get_huggingface_pretrained_lm_models_list(include_external=False):
if pretrained:
# tolerate a missing or None config_dict; drop any leftover vocab_size entry before the emptiness check below
if config_dict is not None: config_dict.pop('vocab_size', None)
if config_dict:
raise ValueError(
f'When using pretrained model, config_dict should be None or empty. Got: {config_dict}'
)
model = AutoModel.from_pretrained(model_name)
else:
cfg = AutoConfig.from_pretrained(model_name)
model = AutoModel.from_config(cfg)
else:
logging.error(f'{model_name} not found in list of HuggingFace pretrained models')
else:
if pretrained:
raise ValueError(f'If not using model_name, then pretrained should be False. Got: {pretrained}.')
cfg = instantiate(config_dict)
model = AutoModel.from_config(cfg)
self._hidden_size = model.config.hidden_size
self._vocab_size = model.config.vocab_size
self._encoder = model
@typecheck()
def forward(self, input_ids, encoder_mask):
encoder_hidden_states = self._encoder.forward(input_ids=input_ids, attention_mask=encoder_mask)[0]
return encoder_hidden_states
@property
def hidden_size(self) -> Optional[int]:
return self._hidden_size
@property
def vocab_size(self) -> Optional[int]:
return self._vocab_size
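# Hedged usage sketch (added for illustration, not part of the upstream module): exercises the
# custom-config path described in the constructor docstring, assuming the base EncoderModule
# declares input_ids/encoder_mask input types as the forward signature suggests. The BertConfig
# values are small, arbitrary assumptions so the randomly initialized model builds quickly.
if __name__ == "__main__":
    import torch
    encoder = HuggingFaceEncoderModule(
        model_name=None,
        pretrained=False,
        config_dict={
            '_target_': 'transformers.BertConfig',
            'vocab_size': 1000,
            'hidden_size': 128,
            'num_hidden_layers': 2,
            'num_attention_heads': 4,
            'intermediate_size': 256,
        },
    )
    input_ids = torch.randint(0, 1000, (2, 8))
    hidden = encoder(input_ids=input_ids, encoder_mask=torch.ones_like(input_ids))
    print(encoder.hidden_size, hidden.shape)  # expected: 128 torch.Size([2, 8, 128])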
| NeMo-main | nemo/collections/nlp/modules/common/huggingface/huggingface_encoder.py |
# Copyright 2020 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import CamembertModel
from nemo.collections.nlp.modules.common.bert_module import BertModule
from nemo.core.classes import typecheck
__all__ = ['CamembertEncoder']
class CamembertEncoder(CamembertModel, BertModule):
"""
    Wraps the HuggingFace transformers implementation for easy use within NeMo.
"""
@typecheck()
def forward(self, input_ids, attention_mask, token_type_ids):
res = super().forward(input_ids=input_ids, attention_mask=attention_mask)[0]
return res
| NeMo-main | nemo/collections/nlp/modules/common/huggingface/camembert.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import RobertaModel
from nemo.collections.nlp.modules.common.bert_module import BertModule
from nemo.core.classes import typecheck
__all__ = ['RobertaEncoder']
class RobertaEncoder(RobertaModel, BertModule):
"""
    Wraps the HuggingFace transformers implementation for easy use within NeMo.
"""
@typecheck()
def forward(self, input_ids, attention_mask, token_type_ids):
res = super().forward(input_ids=input_ids, attention_mask=attention_mask)[0]
return res
| NeMo-main | nemo/collections/nlp/modules/common/huggingface/roberta.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import GPT2LMHeadModel
from nemo.collections.nlp.modules.common.gpt_module import GPTModule
from nemo.core.classes import typecheck
__all__ = ['GPT2Encoder']
class GPT2Encoder(GPT2LMHeadModel, GPTModule):
"""
    Wraps the HuggingFace transformers implementation for easy use within NeMo.
"""
@typecheck()
def forward(
self,
input_ids,
attention_mask=None,
token_type_ids=None,
labels=None,
return_dict=False,
output_attentions=False,
output_hidden_states=False,
past_key_values=None,
use_cache=False,
position_ids=None,
max_length=128,
):
res = super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
return_dict=return_dict,
labels=labels,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
past_key_values=past_key_values,
position_ids=position_ids,
use_cache=use_cache,
)
        # The HuggingFace output (a tuple, or a ModelOutput when return_dict=True) is returned as-is.
        return res
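# Hedged usage sketch (added for illustration, not part of the upstream module): builds a tiny,
# randomly initialized GPT-2 from a config, mirroring the construction path get_huggingface_lm_model()
# uses when a config_dict is given, and computes the LM loss by reusing the inputs as labels.
# The GPT2Config values are arbitrary assumptions.
if __name__ == "__main__":
    import torch
    from transformers import GPT2Config
    cfg = GPT2Config(vocab_size=128, n_positions=64, n_embd=64, n_layer=2, n_head=2)
    model = GPT2Encoder(config=cfg)
    input_ids = torch.randint(0, 128, (2, 8))
    out = model(input_ids=input_ids, attention_mask=torch.ones_like(input_ids), labels=input_ids)
    print(out[0])  # language-modeling loss; out[1] holds the [2, 8, 128] logits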
| NeMo-main | nemo/collections/nlp/modules/common/huggingface/gpt2.py |
# Copyright 2020 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import DistilBertModel
from nemo.collections.nlp.modules.common.bert_module import BertModule
from nemo.core.classes import typecheck
__all__ = ['DistilBertEncoder']
class DistilBertEncoder(DistilBertModel, BertModule):
"""
    Wraps the HuggingFace transformers implementation for easy use within NeMo.
"""
@typecheck()
def forward(self, input_ids, attention_mask, token_type_ids=None):
        # DistilBERT does not use token_type_ids, unlike most other BERT models
res = super().forward(input_ids=input_ids, attention_mask=attention_mask)[0]
return res
| NeMo-main | nemo/collections/nlp/modules/common/huggingface/distilbert.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import AlbertModel
from nemo.collections.nlp.modules.common.bert_module import BertModule
from nemo.core.classes import typecheck
__all__ = ['AlbertEncoder']
class AlbertEncoder(AlbertModel, BertModule):
"""
    Wraps the HuggingFace transformers implementation for easy use within NeMo.
"""
@typecheck()
def forward(self, input_ids, attention_mask, token_type_ids):
res = super().forward(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)[0]
return res
| NeMo-main | nemo/collections/nlp/modules/common/huggingface/albert.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.modules.common.huggingface.albert import AlbertEncoder
from nemo.collections.nlp.modules.common.huggingface.bert import BertEncoder
from nemo.collections.nlp.modules.common.huggingface.camembert import CamembertEncoder
from nemo.collections.nlp.modules.common.huggingface.distilbert import DistilBertEncoder
from nemo.collections.nlp.modules.common.huggingface.huggingface_utils import (
get_huggingface_lm_model,
get_huggingface_pretrained_lm_models_list,
)
from nemo.collections.nlp.modules.common.huggingface.roberta import RobertaEncoder
| NeMo-main | nemo/collections/nlp/modules/common/huggingface/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from hydra.utils import instantiate
from transformers import AutoConfig, AutoModel
from nemo.collections.nlp.modules.common.decoder_module import DecoderModule
from nemo.collections.nlp.modules.common.huggingface.huggingface_utils import get_huggingface_pretrained_lm_models_list
from nemo.utils import logging
class HuggingFaceDecoderModule(DecoderModule):
"""Gets HuggingFace based model to be used as an Decoder in NeMo NLP.
Use the model_name arg to get a named model architecture.
Available model names can be found with get_huggingface_pretrained_lm_models_list() or
by going to https://huggingface.co/models.
Use the pretrained arg to get the named model architecture with or without pretrained weights.
If model_name is None, then we can pass in a custom configuration via the config_dict.
For example, to instantiate a HuggingFace BERT model with custom configuration we would do:
config_dict={
'_target_': 'transformers.BertConfig',
'hidden_size': 1536
}
Args:
model_name (Optional[str]): Named model architecture from HuggingFace. Defaults to None.
pretrained (bool): Use True to get pretrained weights.
False will use the same architecture but with randomly initialized weights.
Defaults to False.
config_dict (Optional[dict], optional): Use for custom configuration of the HuggingFace model. Defaults to None.
checkpoint_file (Optional[str], optional): Provide weights for the transformer from a local checkpoint. Defaults to None.
"""
def __init__(
self,
model_name: Optional[str] = None,
pretrained: bool = False,
config_dict: Optional[dict] = None,
checkpoint_file: Optional[str] = None,
):
super().__init__()
model = None
if model_name is not None:
if model_name in get_huggingface_pretrained_lm_models_list():
if pretrained:
model = AutoModel.from_pretrained(model_name)
else:
cfg = AutoConfig.from_pretrained(model_name)
model = AutoModel.from_config(cfg)
else:
logging.error(f'{model_name} not found in list of HuggingFace pretrained models')
else:
cfg = instantiate(config_dict)
model = AutoModel.from_config(cfg)
self._hidden_size = model.config.hidden_size
self._vocab_size = model.config.vocab_size
@property
def hidden_size(self) -> Optional[int]:
return self._hidden_size
@property
def vocab_size(self) -> Optional[int]:
return self._vocab_size
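# Hedged usage sketch (added for illustration, not part of the upstream module): builds the decoder
# wrapper from a custom config, as described in the class docstring. Only the hidden and vocab sizes
# are retained by the wrapper; the config values below are arbitrary assumptions.
if __name__ == "__main__":
    decoder = HuggingFaceDecoderModule(
        model_name=None,
        pretrained=False,
        config_dict={
            '_target_': 'transformers.BertConfig',
            'vocab_size': 1000,
            'hidden_size': 128,
            'num_hidden_layers': 2,
            'num_attention_heads': 4,
            'intermediate_size': 256,
        },
    )
    print(decoder.hidden_size, decoder.vocab_size)  # expected: 128 1000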
| NeMo-main | nemo/collections/nlp/modules/common/huggingface/huggingface_decoder.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Optional
from transformers import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertConfig,
AutoModel,
BertConfig,
CamembertConfig,
DistilBertConfig,
GPT2Config,
RobertaConfig,
)
from nemo.collections.nlp.modules.common.huggingface.albert import AlbertEncoder
from nemo.collections.nlp.modules.common.huggingface.bert import BertEncoder
from nemo.collections.nlp.modules.common.huggingface.camembert import CamembertEncoder
from nemo.collections.nlp.modules.common.huggingface.distilbert import DistilBertEncoder
from nemo.collections.nlp.modules.common.huggingface.gpt2 import GPT2Encoder
from nemo.collections.nlp.modules.common.huggingface.roberta import RobertaEncoder
from nemo.utils import logging
__all__ = ["get_huggingface_lm_model", "get_huggingface_pretrained_lm_models_list", "VOCAB_FILE_NAME"]
HUGGINGFACE_MODELS = {
"BertModel": {
"default": "bert-base-uncased",
"class": BertEncoder,
"config": BertConfig,
"pretrained_model_list": BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
},
"DistilBertModel": {
"default": "distilbert-base-uncased",
"class": DistilBertEncoder,
"config": DistilBertConfig,
"pretrained_model_list": DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
},
"CamembertModel": {
"default": "camembert-base-uncased",
"class": CamembertEncoder,
"config": CamembertConfig,
"pretrained_model_list": CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
},
"RobertaModel": {
"default": "roberta-base",
"class": RobertaEncoder,
"config": RobertaConfig,
"pretrained_model_list": ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
},
"AlbertModel": {
"default": "albert-base-v2",
"class": AlbertEncoder,
"config": AlbertConfig,
"pretrained_model_list": ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
},
"GPT2Model": {
"default": "gpt2",
"class": GPT2Encoder,
"config": GPT2Config,
"pretrained_model_list": GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
},
}
VOCAB_FILE_NAME = {
'AlbertTokenizer': "spiece.model",
'RobertaTokenizer': "vocab.json",
'BertTokenizer': "vocab.txt",
'DistilBertTokenizer': "vocab.txt",
'CamembertTokenizer': "sentencepiece.bpe.model",
'GPT2Tokenizer': "vocab.json",
'T5Tokenizer': "spiece.model",
"BartTokenizer": "vocab.json",
}
def get_huggingface_lm_model(
pretrained_model_name: str, config_dict: Optional[dict] = None, config_file: Optional[str] = None,
):
"""
Returns lm model instantiated with Huggingface
Args:
        pretrained_model_name: specify this to instantiate a pretrained model from HuggingFace,
e.g. bert-base-cased. For entire list, see get_huggingface_pretrained_lm_models_list().
config_dict: model configuration dictionary used to instantiate Huggingface model from scratch
config_file: path to model configuration file used to instantiate Huggingface model from scratch
Returns:
BertModule
"""
try:
automodel = AutoModel.from_pretrained(pretrained_model_name)
except Exception as e:
raise ValueError(f"{pretrained_model_name} is not supported by HuggingFace. {e}")
model_type = type(automodel).__name__
if model_type in HUGGINGFACE_MODELS:
model_class = HUGGINGFACE_MODELS[model_type]["class"]
if config_file:
if not os.path.exists(config_file):
logging.warning(
f"Config file was not found at {config_file}. Will attempt to use config_dict or pretrained_model_name."
)
else:
config_class = HUGGINGFACE_MODELS[model_type]["config"]
return model_class(config_class.from_json_file(config_file))
if config_dict:
config_class = HUGGINGFACE_MODELS[model_type]["config"]
return model_class(config=config_class(**config_dict))
else:
return model_class.from_pretrained(pretrained_model_name)
else:
raise ValueError(f"Use HuggingFace API directly in NeMo for {pretrained_model_name}")
def get_huggingface_pretrained_lm_models_list(include_external: bool = False,) -> List[str]:
"""
Returns the list of pretrained HuggingFace language models
Args:
        include_external: if True, includes all HuggingFace model names, not only the language models supported in NeMo.
Returns the list of HuggingFace models
"""
huggingface_models = []
for model in HUGGINGFACE_MODELS:
model_names = HUGGINGFACE_MODELS[model]["pretrained_model_list"]
huggingface_models.extend(model_names)
return huggingface_models
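# Hedged usage sketch (added for illustration): listing the supported names is an in-memory lookup
# over the archive lists above, so it runs without network access; get_huggingface_lm_model()
# would additionally download pretrained weights, so it is only hinted at in a comment.
if __name__ == "__main__":
    names = get_huggingface_pretrained_lm_models_list()
    print(len(names), names[:3])
    # model = get_huggingface_lm_model("bert-base-uncased")  # downloads pretrained weights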
| NeMo-main | nemo/collections/nlp/modules/common/huggingface/huggingface_utils.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import BertModel
from nemo.collections.nlp.modules.common.bert_module import BertModule
from nemo.core.classes import typecheck
__all__ = ['BertEncoder']
class BertEncoder(BertModel, BertModule):
"""
    Wraps the HuggingFace transformers implementation for easy use within NeMo.
"""
@typecheck()
def forward(self, input_ids, attention_mask=None, token_type_ids=None):
res = super().forward(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)[0]
return res
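# Hedged usage sketch (added for illustration, not part of the upstream module): builds a tiny,
# randomly initialized BERT from a config, mirroring the construction path get_huggingface_lm_model()
# uses when a config_dict is given. The BertConfig values are arbitrary assumptions.
if __name__ == "__main__":
    import torch
    from transformers import BertConfig
    cfg = BertConfig(
        vocab_size=1000, hidden_size=128, num_hidden_layers=2, num_attention_heads=4, intermediate_size=256
    )
    encoder = BertEncoder(config=cfg)
    input_ids = torch.randint(0, 1000, (2, 8))
    hidden = encoder(
        input_ids=input_ids,
        attention_mask=torch.ones_like(input_ids),
        token_type_ids=torch.zeros_like(input_ids),
    )
    print(hidden.shape)  # expected: torch.Size([2, 8, 128])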
| NeMo-main | nemo/collections/nlp/modules/common/huggingface/bert.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
try:
from megatron.core.parallel_state import (
get_tensor_model_parallel_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
)
from megatron.core.tensor_parallel.utils import VocabUtility
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
__all__ = ["vocab_parallel_cross_entropy"]
class _VocabParallelCrossEntropy(torch.autograd.Function):
@staticmethod
def forward(ctx, vocab_parallel_logits, target, label_smoothing=0.0):
# Maximum value along vocab dimension across all GPUs.
logits_max = torch.max(vocab_parallel_logits, dim=-1)[0]
torch.distributed.all_reduce(
logits_max, op=torch.distributed.ReduceOp.MAX, group=get_tensor_model_parallel_group()
)
# Subtract the maximum value.
vocab_parallel_logits = vocab_parallel_logits - logits_max.unsqueeze(dim=-1)
        # Get the partition's vocab indices
get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size
partition_vocab_size = vocab_parallel_logits.size()[-1]
rank = get_tensor_model_parallel_rank()
world_size = get_tensor_model_parallel_world_size()
vocab_start_index, vocab_end_index = get_vocab_range(partition_vocab_size, rank, world_size)
# Create a mask of valid vocab ids (1 means it needs to be masked).
target_mask = (target < vocab_start_index) | (target >= vocab_end_index)
masked_target = target.clone() - vocab_start_index
masked_target[target_mask] = 0
# Get predicted-logits = logits[target].
# For Simplicity, we convert logits to a 2-D tensor with size
# [*, partition-vocab-size] and target to a 1-D tensor of size [*].
logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size)
masked_target_1d = masked_target.view(-1)
arange_1d = torch.arange(start=0, end=logits_2d.size()[0], device=logits_2d.device)
predicted_logits_1d = logits_2d[arange_1d, masked_target_1d]
predicted_logits_1d = predicted_logits_1d.clone().contiguous()
predicted_logits = predicted_logits_1d.view_as(target)
predicted_logits[target_mask] = 0.0
# All reduce is needed to get the chunks from other GPUs.
torch.distributed.all_reduce(
predicted_logits, op=torch.distributed.ReduceOp.SUM, group=get_tensor_model_parallel_group()
)
# Sum of exponential of logits along vocab dimension across all GPUs.
exp_logits = vocab_parallel_logits
torch.exp(vocab_parallel_logits, out=exp_logits)
sum_exp_logits = exp_logits.sum(dim=-1)
torch.distributed.all_reduce(
sum_exp_logits, op=torch.distributed.ReduceOp.SUM, group=get_tensor_model_parallel_group()
)
# Loss = log(sum(exp(logits))) - predicted-logit.
loss = torch.log(sum_exp_logits) - predicted_logits
# Store softmax, target-mask and masked-target for backward pass.
exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
vocab_size = exp_logits.size(-1)
if label_smoothing > 0:
"""
We'd like to assign 1 / (K - 1) probability mass to every index that is not the ground truth.
= (1 - alpha) * y_gt + alpha * mean(y_{i for i != gt})
= (1 - alpha) * y_gt + (alpha / (K - 1)) * \sum_{i != gt} y_i
= ((K - 1) * (1 - alpha) / (K - 1)) * y_gt + (alpha / (K - 1)) * \sum_{i != gt} y_i
            = ((K * (1 - alpha) - 1) / (K - 1)) * y_gt + (alpha / (K - 1)) * \sum_{i} y_i
= (1 - (alpha * K) / (K - 1)) * y_gt + ( (alpha * K) / (K - 1) ) * \sum_{i} y_i / K
From: https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/common/losses/smoothed_cross_entropy.py
"""
assert 1.0 > label_smoothing > 0.0
smoothing = label_smoothing * vocab_size / (vocab_size - 1)
# Exp logits at this point are normalized probabilities. So we can just take the log to get log-probs.
log_probs = torch.log(exp_logits)
mean_log_probs = log_probs.mean(dim=-1)
loss = (1.0 - smoothing) * loss - smoothing * mean_log_probs
ctx.save_for_backward(
exp_logits, target_mask, masked_target_1d, torch.Tensor([label_smoothing]), torch.LongTensor([vocab_size])
)
return loss
@staticmethod
def backward(ctx, grad_output):
        # Retrieve tensors saved in the forward pass.
softmax, target_mask, masked_target_1d, label_smoothing, vocab_size = ctx.saved_tensors
label_smoothing = label_smoothing.item()
vocab_size = vocab_size.item()
        # All the inputs have softmax as their gradient.
grad_input = softmax
# For simplicity, work with the 2D gradient.
partition_vocab_size = softmax.size()[-1]
grad_2d = grad_input.view(-1, partition_vocab_size)
# Add the gradient from matching classes.
arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=grad_2d.device)
softmax_update = 1.0 - target_mask.view(-1).float()
if label_smoothing > 0:
smoothing = label_smoothing * vocab_size / (vocab_size - 1)
grad_2d[arange_1d, masked_target_1d] -= (1.0 - smoothing) * softmax_update
average_grad = 1 / vocab_size
grad_2d[arange_1d, :] -= smoothing * average_grad
else:
grad_2d[arange_1d, masked_target_1d] -= softmax_update
# Finally elementwise multiplication with the output gradients.
grad_input.mul_(grad_output.unsqueeze(dim=-1))
return grad_input, None, None
def vocab_parallel_cross_entropy(vocab_parallel_logits, target, label_smoothing):
"""Helper function for the cross entropy."""
return _VocabParallelCrossEntropy.apply(vocab_parallel_logits, target, label_smoothing)
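# Hedged reference sketch (added for illustration): reproduces, on a single process with no tensor
# parallelism, the arithmetic the autograd function above performs, i.e.
#   loss = log(sum_j exp(logit_j)) - logit_target
# blended with the mean log-prob when label smoothing is enabled. This is only a numerical
# illustration of the formula, not the parallel implementation.
if __name__ == "__main__":
    torch.manual_seed(0)
    num_tokens, vocab_size = 4, 10
    logits = torch.randn(num_tokens, vocab_size)
    target = torch.randint(0, vocab_size, (num_tokens,))
    label_smoothing = 0.1
    log_probs = torch.log_softmax(logits, dim=-1)
    nll = -log_probs[torch.arange(num_tokens), target]  # log(sum(exp(logits))) - logit_target
    smoothing = label_smoothing * vocab_size / (vocab_size - 1)
    loss = (1.0 - smoothing) * nll - smoothing * log_probs.mean(dim=-1)
    print(loss)  # per-token smoothed cross-entropy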
| NeMo-main | nemo/collections/nlp/modules/common/megatron/vocab_parallel_cross_entropy.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from apex.transformer.layers.layer_norm import FastLayerNorm, MixedFusedLayerNorm
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
def get_layer_norm(hidden_size, eps=1e-5, persist_layer_norm=False, sequence_parallel=False):
    # List of hidden sizes supported by the persistent layer norm kernel.
# If the hidden size is not supported, fall back to the non-persistent
# kernel.
persist_ln_hidden_sizes = [
1024,
1536,
2048,
2304,
3072,
3840,
4096,
5120,
6144,
8192,
10240,
12288,
12800,
15360,
16384,
18432,
20480,
24576,
25600,
30720,
32768,
40960,
49152,
65536,
]
if hidden_size not in persist_ln_hidden_sizes:
persist_layer_norm = False
if persist_layer_norm:
return FastLayerNorm(hidden_size, eps, sequence_parallel_enabled=sequence_parallel)
else:
return MixedFusedLayerNorm(hidden_size, eps, sequence_parallel_enabled=sequence_parallel)
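# Hedged usage sketch (added for illustration, requires Apex): 1024 is in the persistent-kernel
# list above while 1000 is not, so the second call should silently fall back to
# MixedFusedLayerNorm even though persist_layer_norm=True is requested.
if __name__ == "__main__" and HAVE_APEX:
    print(type(get_layer_norm(1024, persist_layer_norm=True)).__name__)  # FastLayerNorm
    print(type(get_layer_norm(1000, persist_layer_norm=True)).__name__)  # MixedFusedLayerNorm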
| NeMo-main | nemo/collections/nlp/modules/common/megatron/fused_layer_norm.py |
# Copyright 2020 The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from typing import Dict, List
import torch
import wget
from torch.hub import _get_torch_home
from nemo.utils import logging
__all__ = [
"get_megatron_lm_model",
"get_megatron_lm_models_list",
"get_megatron_checkpoint",
"is_lower_cased_megatron",
"get_megatron_tokenizer",
"get_megatron_pretrained_bert_models",
]
torch_home = _get_torch_home()
if not isinstance(torch_home, str):
logging.info("Torch home not found, caching megatron in cwd")
torch_home = os.getcwd()
MEGATRON_CACHE = os.path.join(torch_home, "megatron")
CONFIGS = {"345m": {"hidden_size": 1024, "num_attention_heads": 16, "num_layers": 24, "max_position_embeddings": 512}}
MEGATRON_CONFIG_MAP = {
"megatron-gpt-345m": {
"config": CONFIGS["345m"],
"checkpoint": "models/nvidia/megatron_lm_345m/versions/v0.0/files/release/mp_rank_00/model_optim_rng.pt",
"vocab": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json",
"merges_file": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt",
"do_lower_case": False,
"tokenizer_name": "gpt2",
},
"megatron-bert-345m-uncased": {
"config": CONFIGS["345m"],
"checkpoint": "https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.0/files/release/mp_rank_00/model_optim_rng.pt",
"vocab": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
"do_lower_case": True,
"tokenizer_name": "bert-large-uncased",
},
"megatron-bert-345m-cased": {
"config": CONFIGS["345m"],
"checkpoint": "https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.1_cased/files/release/mp_rank_00/model_optim_rng.pt",
"vocab": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
"do_lower_case": False,
"tokenizer_name": "bert-large-cased",
},
"megatron-bert-uncased": {
"config": None,
"checkpoint": None,
"vocab": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
"do_lower_case": True,
"tokenizer_name": "bert-large-uncased",
},
"megatron-bert-cased": {
"config": None,
"checkpoint": None,
"vocab": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
"do_lower_case": False,
"tokenizer_name": "bert-large-cased",
},
"biomegatron-bert-345m-uncased": {
"config": CONFIGS["345m"],
"checkpoint": "https://api.ngc.nvidia.com/v2/models/nvidia/biomegatron345muncased/versions/0/files/MegatronBERT.pt",
"vocab": "https://api.ngc.nvidia.com/v2/models/nvidia/biomegatron345muncased/versions/0/files/vocab.txt",
"do_lower_case": True,
"tokenizer_name": "bert-large-uncased",
},
"biomegatron-bert-345m-cased": {
"config": CONFIGS["345m"],
"checkpoint": "https://api.ngc.nvidia.com/v2/models/nvidia/biomegatron345mcased/versions/0/files/MegatronBERT.pt",
"vocab": "https://api.ngc.nvidia.com/v2/models/nvidia/biomegatron345mcased/versions/0/files/vocab.txt",
"do_lower_case": False,
"tokenizer_name": "bert-large-cased",
},
}
def compute_model_parallel_rank(local_rank, model_parallel_size):
return local_rank % model_parallel_size
def get_megatron_pretrained_bert_models() -> List[str]:
from nemo.collections.nlp.models.language_modeling.megatron_bert_model import MegatronBertModel
all_pretrained_megatron_bert_models = [
model.pretrained_model_name for model in MegatronBertModel.list_available_models()
]
return all_pretrained_megatron_bert_models
def get_megatron_lm_models_list() -> List[str]:
"""
Returns the list of supported Megatron-LM models
"""
return list(MEGATRON_CONFIG_MAP.keys())
def get_megatron_config(pretrained_model_name: str) -> Dict[str, int]:
"""
    Returns the Megatron-LM model config dict
Args:
pretrained_model_name (str): pretrained model name
Returns:
config (dict): contains model configuration: number of hidden layers, number of attention heads, etc
"""
_check_megatron_name(pretrained_model_name)
return MEGATRON_CONFIG_MAP[pretrained_model_name]["config"]
def _check_megatron_name(pretrained_model_name: str) -> None:
megatron_model_list = get_megatron_lm_models_list()
if pretrained_model_name not in megatron_model_list:
raise ValueError(f'For Megatron-LM models, choose from the following list: {megatron_model_list}')
def get_megatron_vocab_file(pretrained_model_name: str) -> str:
"""
Gets vocabulary file from cache or downloads it
Args:
pretrained_model_name: pretrained model name
Returns:
path: path to the vocab file
"""
_check_megatron_name(pretrained_model_name)
url = MEGATRON_CONFIG_MAP[pretrained_model_name]["vocab"]
path = os.path.join(MEGATRON_CACHE, pretrained_model_name + "_vocab")
path = _download(path, url)
return path
def get_megatron_merges_file(pretrained_model_name: str) -> str:
"""
Gets merge file from cache or downloads it
Args:
pretrained_model_name: pretrained model name
Returns:
        path: path to the merges file
"""
if 'gpt' not in pretrained_model_name.lower():
return None
_check_megatron_name(pretrained_model_name)
url = MEGATRON_CONFIG_MAP[pretrained_model_name]["merges_file"]
path = os.path.join(MEGATRON_CACHE, pretrained_model_name + "_merges")
path = _download(path, url)
return path
def get_megatron_checkpoint(pretrained_model_name: str) -> str:
"""
Gets checkpoint file from cache or downloads it
Args:
pretrained_model_name: pretrained model name
Returns:
path: path to model checkpoint
"""
_check_megatron_name(pretrained_model_name)
url = MEGATRON_CONFIG_MAP[pretrained_model_name]["checkpoint"]
path = os.path.join(MEGATRON_CACHE, pretrained_model_name)
return _download(path, url)
def _download(path: str, url: str):
"""
Gets a file from cache or downloads it
Args:
path: path to the file in cache
url: url to the file
Returns:
path: path to the file in cache
"""
if url is None:
return None
if (not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0) and not os.path.exists(path):
os.makedirs(MEGATRON_CACHE, exist_ok=True)
logging.info(f"Downloading from {url} to {path}")
downloaded_path = wget.download(url)
shutil.move(downloaded_path, path)
# wait until the master process downloads the file and writes it to the cache dir
if torch.distributed.is_initialized():
torch.distributed.barrier()
return path
def is_lower_cased_megatron(pretrained_model_name):
"""
    Returns whether the Megatron model is cased or uncased
Args:
pretrained_model_name (str): pretrained model name
Returns:
        do_lower_case (bool): whether the model uses lowercased data
"""
_check_megatron_name(pretrained_model_name)
return MEGATRON_CONFIG_MAP[pretrained_model_name]["do_lower_case"]
def get_megatron_tokenizer(pretrained_model_name: str):
"""
    Takes a pretrained_model_name for megatron such as "megatron-bert-cased" and returns the corresponding
    tokenizer name for tokenizer instantiation.
Args:
pretrained_model_name: pretrained_model_name for megatron such as "megatron-bert-cased"
Returns:
tokenizer name for tokenizer instantiating
"""
_check_megatron_name(pretrained_model_name)
return MEGATRON_CONFIG_MAP[pretrained_model_name]["tokenizer_name"]
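# Hedged usage sketch (added for illustration): these helpers are plain lookups into
# MEGATRON_CONFIG_MAP and do not download anything, so they can be exercised directly.
if __name__ == "__main__":
    print(get_megatron_lm_models_list())
    print(get_megatron_config("megatron-gpt-345m"))               # the shared "345m" config dict
    print(is_lower_cased_megatron("megatron-bert-345m-uncased"))  # True
    print(get_megatron_tokenizer("megatron-bert-345m-cased"))     # bert-large-cased
    print(compute_model_parallel_rank(local_rank=5, model_parallel_size=4))  # 1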
| NeMo-main | nemo/collections/nlp/modules/common/megatron/megatron_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer based language model."""
from ast import Mod
from nemo.collections.nlp.modules.common.megatron.megatron_transformer_decoder import MegatronTransformerDecoderModule
from nemo.collections.nlp.modules.common.megatron.retrieval_transformer import (
MegatronRetrievalTransformerDecoderModule,
)
from nemo.collections.nlp.modules.common.megatron.utils import (
ApexGuardDefaults,
init_method_normal,
scaled_init_method_normal,
)
try:
from apex.transformer.enums import AttnMaskType, ModelType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
AttnMaskType = ApexGuardDefaults()
ModelType = ApexGuardDefaults()
try:
from megatron.core import ModelParallelConfig
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
__all__ = []
AVAILABLE_DECODERS = ["transformer", "retro"]
def get_decoder_model(
config: ModelParallelConfig,
arch,
hidden_size,
ffn_hidden_size,
num_layers,
num_attention_heads,
apply_query_key_layer_scaling=False,
kv_channels=None,
init_method=None,
scaled_init_method=None,
decoder_attn_mask_type=AttnMaskType.causal,
pre_process=True,
post_process=True,
init_method_std=0.02,
megatron_amp_O2=False,
hidden_dropout=0.1,
attention_dropout=0.1,
ffn_dropout=0.0,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=1,
activations_checkpoint_granularity=None,
layernorm_epsilon=1e-5,
bias_activation_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
persist_layer_norm=False,
openai_gelu=False,
activation="gelu",
onnx_safe=False,
bias=True,
normalization="layernorm",
headscale=False,
transformer_block_type="pre_ln",
hidden_steps=-1,
parent_model_type=ModelType.encoder_or_decoder,
layer_type=None,
chunk_size=64,
    layer_number_offset=0,  # this is used only for attention norm_factor scaling
megatron_legacy=False,
normalize_attention_scores=True,
sequence_parallel=False,
num_moe_experts=1,
moe_frequency=1,
moe_dropout=0.0,
    turn_off_rop=False,  # turn off the RoPE (rotary) positional embedding
version=1,
position_embedding_type='learned_absolute',
use_flash_attention=False,
):
"""Build language model and return along with the key to save."""
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
if init_method is None:
init_method = init_method_normal(init_method_std)
if scaled_init_method is None:
scaled_init_method = scaled_init_method_normal(init_method_std, num_layers)
if arch == "transformer":
# Language model.
decoder = MegatronTransformerDecoderModule(
config=config,
init_method=init_method,
output_layer_init_method=scaled_init_method,
hidden_size=hidden_size,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
ffn_hidden_size=ffn_hidden_size,
decoder_attn_mask_type=decoder_attn_mask_type,
pre_process=pre_process,
post_process=post_process,
megatron_amp_O2=megatron_amp_O2,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
activations_checkpoint_granularity=activations_checkpoint_granularity,
layernorm_epsilon=layernorm_epsilon,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
bias=bias,
normalization=normalization,
transformer_block_type=transformer_block_type,
headscale=headscale,
parent_model_type=parent_model_type,
megatron_legacy=megatron_legacy,
normalize_attention_scores=normalize_attention_scores,
num_moe_experts=num_moe_experts,
moe_frequency=moe_frequency,
moe_dropout=moe_dropout,
position_embedding_type=position_embedding_type,
use_flash_attention=use_flash_attention,
)
elif arch == "retro":
decoder = MegatronRetrievalTransformerDecoderModule(
config=config,
init_method=init_method,
output_layer_init_method=scaled_init_method,
hidden_size=hidden_size,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
layer_type=layer_type,
ffn_hidden_size=ffn_hidden_size,
pre_process=pre_process,
post_process=post_process,
megatron_amp_O2=megatron_amp_O2,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
activations_checkpoint_granularity=activations_checkpoint_granularity,
layernorm_epsilon=layernorm_epsilon,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
bias=bias,
normalization=normalization,
transformer_block_type=transformer_block_type,
parent_model_type=parent_model_type,
chunk_size=chunk_size,
layer_number_offset=layer_number_offset,
megatron_legacy=megatron_legacy,
normalize_attention_scores=normalize_attention_scores,
turn_off_rop=turn_off_rop,
version=version,
)
else:
raise ValueError(f"Unknown decoder arch = {arch}. Available decoder arch = {AVAILABLE_DECODERS}")
return decoder
| NeMo-main | nemo/collections/nlp/modules/common/megatron/megatron_decoders.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer based language model."""
from ast import Mod
import torch
from nemo.collections.nlp.modules.common.megatron.adapters.parallel_adapters import (
AdapterName,
PromptEncoderAdapterConfig,
)
from nemo.collections.nlp.modules.common.megatron.layer_type import LayerType
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.position_embedding import (
ALiBiRelativePositionEmbedding,
KERPLERelativePositionEmbedding,
RotaryEmbedding,
SandwichRelativePositionEmbedding,
)
from nemo.collections.nlp.modules.common.megatron.transformer import ParallelTransformer
from nemo.collections.nlp.modules.common.megatron.utils import (
ApexGuardDefaults,
get_linear_layer,
init_method_normal,
scaled_init_method_normal,
)
from nemo.collections.nlp.parts import utils_funcs
from nemo.core import adapter_mixins
try:
from apex.transformer.enums import AttnMaskType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
AttnMaskType = ApexGuardDefaults()
LayerType = ApexGuardDefaults()
try:
from megatron.core import ModelParallelConfig, parallel_state, tensor_parallel
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
def get_language_model(
config: ModelParallelConfig,
hidden_size,
ffn_hidden_size,
num_layers,
max_position_embeddings,
num_tokentypes,
add_pooler,
vocab_size,
num_attention_heads,
encoder_attn_mask_type,
apply_query_key_layer_scaling=False,
kv_channels=None,
init_method=None,
scaled_init_method=None,
add_decoder=False,
decoder_attn_mask_type=AttnMaskType.causal,
pre_process=True,
post_process=True,
init_method_std=0.02,
megatron_amp_O2=False,
hidden_dropout=0.1,
attention_dropout=0.1,
ffn_dropout=0.0,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=1,
normalization='layernorm',
layernorm_epsilon=1e-5,
bias_activation_fusion=True,
masked_softmax_fusion=True,
activation='gelu',
headscale=False,
transformer_block_type='pre_ln',
normalize_attention_scores=True,
position_embedding_type='learned_absolute',
attention_type='multihead',
share_embeddings_and_output_weights=True,
rotary_percentage=1.0,
multi_query_attention=False,
bias_dropout_add_fusion=True,
bias=True,
persist_layer_norm=False,
openai_gelu=False,
onnx_safe=False,
megatron_legacy=False,
activations_checkpoint_granularity=None,
activations_checkpoint_layers_per_pipeline=None,
transformer_engine=False,
fp8=False,
fp8_e4m3=False,
fp8_hybrid=False,
fp8_margin=0,
fp8_interval=1,
fp8_amax_history_len=1,
fp8_amax_compute_algo='most_recent',
reduce_amax=True,
use_emha=False,
ub_tp_comm_overlap=False,
use_flash_attention=False,
seq_len_interpolation_factor=None,
):
"""Build language model and return along with the key to save."""
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
if init_method is None:
init_method = init_method_normal(init_method_std)
if scaled_init_method is None:
scaled_init_method = scaled_init_method_normal(init_method_std, num_layers)
# Language model.
language_model = TransformerLanguageModel(
config=config,
init_method=init_method,
output_layer_init_method=scaled_init_method,
encoder_attn_mask_type=encoder_attn_mask_type,
num_tokentypes=num_tokentypes,
vocab_size=vocab_size,
max_position_embeddings=max_position_embeddings,
hidden_size=hidden_size,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
ffn_hidden_size=ffn_hidden_size,
add_decoder=add_decoder,
decoder_attn_mask_type=decoder_attn_mask_type,
add_pooler=add_pooler,
pre_process=pre_process,
post_process=post_process,
megatron_amp_O2=megatron_amp_O2,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
normalization=normalization,
layernorm_epsilon=layernorm_epsilon,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
bias=bias,
rotary_percentage=rotary_percentage,
share_embeddings_and_output_weights=share_embeddings_and_output_weights,
masked_softmax_fusion=masked_softmax_fusion,
activation=activation,
headscale=headscale,
transformer_block_type=transformer_block_type,
normalize_attention_scores=normalize_attention_scores,
position_embedding_type=position_embedding_type,
multi_query_attention=multi_query_attention,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
megatron_legacy=megatron_legacy,
activations_checkpoint_granularity=activations_checkpoint_granularity,
activations_checkpoint_layers_per_pipeline=activations_checkpoint_layers_per_pipeline,
transformer_engine=transformer_engine,
fp8=fp8,
fp8_e4m3=fp8_e4m3,
fp8_hybrid=fp8_hybrid,
fp8_margin=fp8_margin,
fp8_interval=fp8_interval,
fp8_amax_history_len=fp8_amax_history_len,
fp8_amax_compute_algo=fp8_amax_compute_algo,
reduce_amax=reduce_amax,
use_emha=use_emha,
ub_tp_comm_overlap=ub_tp_comm_overlap,
use_flash_attention=use_flash_attention,
seq_len_interpolation_factor=seq_len_interpolation_factor,
)
# key used for checkpoints.
language_model_key = 'language_model'
return language_model, language_model_key
class Pooler(MegatronModule):
"""Pooler layer.
Pool hidden states of a specific token (for example start of the
sequence) and add a linear transformation followed by a tanh.
Arguments:
hidden_size: hidden size
init_method: weight initialization method for the linear layer.
bias is set to zero.
"""
def __init__(self, hidden_size, init_method, sequence_parallel=False):
super(Pooler, self).__init__()
self.dense = get_linear_layer(hidden_size, hidden_size, init_method)
self.sequence_parallel = sequence_parallel
def forward(self, hidden_states, sequence_index=0):
        # hidden_states: [s, b, h]
# sequence_index: index of the token to pool.
# gather data along sequence dimensions
# same pooler is run on all tensor parallel nodes
if self.sequence_parallel:
hidden_states = tensor_parallel.mappings.gather_from_sequence_parallel_region(hidden_states)
pooled = hidden_states[sequence_index, :, :]
pooled = self.dense(pooled)
pooled = torch.tanh(pooled)
return pooled
class Embedding(MegatronModule):
"""Language model embeddings.
Arguments:
hidden_size: hidden size
vocab_size: vocabulary size
max_sequence_length: maximum size of sequence. This
is used for positional embedding
embedding_dropout_prob: dropout probability for embeddings
init_method: weight initialization method
num_tokentypes: size of the token-type embeddings. 0 value
will ignore this embedding
position_embedding_type: position embedding type determines whether we instantiate a learnable position embedding table.
"""
def __init__(
self,
config: ModelParallelConfig,
hidden_size,
vocab_size,
max_sequence_length,
embedding_dropout_prob,
init_method,
num_tokentypes=0,
dtype=torch.float32,
fp32_residual_connection=False,
position_embedding_type='learned_absolute',
transpose_batch_sequence=True,
):
super(Embedding, self).__init__(config=config)
self.hidden_size = hidden_size
self.init_method = init_method
self.num_tokentypes = num_tokentypes
self.position_embedding_type = position_embedding_type
self.transpose_batch_sequence = transpose_batch_sequence
# Word embeddings (parallel).
self.word_embeddings = tensor_parallel.VocabParallelEmbedding(
vocab_size, self.hidden_size, init_method=self.init_method, config=config,
)
self._word_embeddings_key = 'word_embeddings'
if self.position_embedding_type == 'learned_absolute':
# Position embedding (serial).
self.position_embeddings = torch.nn.Embedding(max_sequence_length, self.hidden_size, dtype=dtype)
self._position_embeddings_key = 'position_embeddings'
# Initialize the position embeddings.
self.init_method(self.position_embeddings.weight)
# Token type embedding.
# Add this as an optional field that can be added through
        # method call so we can load a pretrained model without
# token types and add them as needed.
self._tokentype_embeddings_key = 'tokentype_embeddings'
if self.num_tokentypes > 0:
self.tokentype_embeddings = torch.nn.Embedding(self.num_tokentypes, self.hidden_size, dtype=dtype)
# Initialize the token-type embeddings.
self.init_method(self.tokentype_embeddings.weight)
else:
self.tokentype_embeddings = None
self.fp32_residual_connection = fp32_residual_connection
self.sequence_parallel = config.sequence_parallel
# Embeddings dropout
self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob)
def zero_parameters(self):
"""Zero out all parameters in embedding."""
self.word_embeddings.weight.data.fill_(0)
self.word_embeddings.weight.shared = True
if self.position_embedding_type == 'learned_absolute':
self.position_embeddings.weight.data.fill_(0)
self.position_embeddings.weight.shared = True
if self.num_tokentypes > 0:
self.tokentype_embeddings.weight.data.fill_(0)
self.tokentype_embeddings.weight.shared = True
def add_tokentype_embeddings(self, num_tokentypes):
"""Add token-type embedding. This function is provided so we can add
token-type embeddings in case the pretrained model does not have it.
This allows us to load the model normally and then add this embedding.
"""
if self.tokentype_embeddings is not None:
raise Exception('tokentype embeddings is already initialized')
if torch.distributed.get_rank() == 0:
print('adding embedding for {} tokentypes'.format(num_tokentypes), flush=True)
self.num_tokentypes = num_tokentypes
self.tokentype_embeddings = torch.nn.Embedding(num_tokentypes, self.hidden_size)
# Initialize the token-type embeddings.
self.init_method(self.tokentype_embeddings.weight)
def forward(self, input_ids, position_ids=None, token_type_ids=None):
# Embeddings.
words_embeddings = self.word_embeddings(input_ids)
if self.position_embedding_type == 'learned_absolute':
assert position_ids is not None
position_embeddings = self.position_embeddings(position_ids)
embeddings = words_embeddings + position_embeddings
else:
embeddings = words_embeddings
if token_type_ids is not None:
assert self.tokentype_embeddings is not None
embeddings = embeddings + self.tokentype_embeddings(token_type_ids)
else:
assert self.tokentype_embeddings is None
        # Data format change to avoid explicit transposes: [b s h] --> [s b h].
if self.transpose_batch_sequence:
embeddings = embeddings.transpose(0, 1).contiguous()
        # If the input flag for fp32 residual connection is set, convert to float.
if self.fp32_residual_connection:
embeddings = embeddings.float()
# Dropout.
if self.sequence_parallel:
embeddings = tensor_parallel.mappings.scatter_to_sequence_parallel_region(embeddings)
with tensor_parallel.random.get_cuda_rng_tracker().fork():
embeddings = self.embedding_dropout(embeddings)
else:
embeddings = self.embedding_dropout(embeddings)
return embeddings
def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
"""For easy load."""
state_dict_ = {}
state_dict_[self._word_embeddings_key] = self.word_embeddings.state_dict(destination, prefix, keep_vars)
if self.position_embedding_type == 'learned_absolute':
state_dict_[self._position_embeddings_key] = self.position_embeddings.state_dict(
destination, prefix, keep_vars
)
if self.num_tokentypes > 0:
state_dict_[self._tokentype_embeddings_key] = self.tokentype_embeddings.state_dict(
destination, prefix, keep_vars
)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
# Word embedding.
if self._word_embeddings_key in state_dict:
state_dict_ = state_dict[self._word_embeddings_key]
else:
# for backward compatibility.
state_dict_ = {}
for key in state_dict.keys():
if 'word_embeddings' in key:
state_dict_[key.split('word_embeddings.')[1]] = state_dict[key]
self.word_embeddings.load_state_dict(state_dict_, strict=strict)
if self.position_embedding_type == 'learned_absolute':
# Position embedding.
if self._position_embeddings_key in state_dict:
state_dict_ = state_dict[self._position_embeddings_key]
else:
# for backward compatibility.
state_dict_ = {}
for key in state_dict.keys():
if 'position_embeddings' in key:
state_dict_[key.split('position_embeddings.')[1]] = state_dict[key]
self.position_embeddings.load_state_dict(state_dict_, strict=strict)
# Tokentype embedding.
if self.num_tokentypes > 0:
state_dict_ = {}
if self._tokentype_embeddings_key in state_dict:
state_dict_ = state_dict[self._tokentype_embeddings_key]
else:
# for backward compatibility.
for key in state_dict.keys():
if 'tokentype_embeddings' in key:
state_dict_[key.split('tokentype_embeddings.')[1]] = state_dict[key]
if len(state_dict_.keys()) > 0:
self.tokentype_embeddings.load_state_dict(state_dict_, strict=strict)
else:
print(
'***WARNING*** expected tokentype embeddings in the ' 'checkpoint but could not find it',
flush=True,
)
class TransformerLanguageModel(MegatronModule, adapter_mixins.AdapterModuleMixin):
"""Transformer language model.
Arguments:
transformer_hparams: transformer hyperparameters
vocab_size: vocabulary size
max_sequence_length: maximum size of sequence. This
is used for positional embedding
embedding_dropout_prob: dropout probability for embeddings
num_tokentypes: size of the token-type embeddings. 0 value
will ignore this embedding
"""
def __init__(
self,
config: ModelParallelConfig,
init_method,
output_layer_init_method,
encoder_attn_mask_type,
vocab_size,
max_position_embeddings,
hidden_size,
ffn_hidden_size,
num_layers,
num_tokentypes,
num_attention_heads,
apply_query_key_layer_scaling=True,
kv_channels=None,
add_decoder=False,
decoder_attn_mask_type=AttnMaskType.causal,
add_pooler=False,
pre_process=True,
post_process=True,
megatron_amp_O2=False,
hidden_dropout=0.1,
attention_dropout=0.1,
ffn_dropout=0.0,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=1,
normalization='layernorm',
layernorm_epsilon=1e-5,
bias_activation_fusion=True,
bias_dropout_add_fusion=True,
bias=True,
masked_softmax_fusion=True,
activation='gelu',
headscale=False,
transformer_block_type='pre_ln',
normalize_attention_scores=True,
position_embedding_type='learned_absolute',
rotary_percentage=1.0,
multi_query_attention=False,
share_embeddings_and_output_weights=True,
persist_layer_norm=False,
openai_gelu=False,
onnx_safe=False,
megatron_legacy=False,
activations_checkpoint_granularity=None,
activations_checkpoint_layers_per_pipeline=None,
transformer_engine=False,
fp8=False,
fp8_e4m3=False,
fp8_hybrid=False,
fp8_margin=0,
fp8_interval=1,
fp8_amax_history_len=1,
fp8_amax_compute_algo='most_recent',
reduce_amax=True,
use_emha=False,
ub_tp_comm_overlap=False,
use_flash_attention=False,
seq_len_interpolation_factor=None,
):
super(TransformerLanguageModel, self).__init__(
config=config, share_token_embeddings=share_embeddings_and_output_weights
)
self.pre_process = pre_process
self.post_process = post_process
self.hidden_size = hidden_size
self.num_layers = num_layers
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.num_tokentypes = num_tokentypes
self.init_method = init_method
self.encoder_attn_mask_type = encoder_attn_mask_type
self.add_decoder = add_decoder
self.decoder_attn_mask_type = decoder_attn_mask_type
self.add_pooler = add_pooler
self.hidden_dropout = hidden_dropout
self.output_layer_init_method = output_layer_init_method
self.position_embedding_type = position_embedding_type
self.share_embeddings_and_output_weights = share_embeddings_and_output_weights
self.sequence_parallel = config.sequence_parallel
self.dtype = utils_funcs.torch_dtype_from_precision(precision, megatron_amp_O2)
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
# Embeddings.
if self.pre_process:
self.embedding = Embedding(
config=config,
hidden_size=self.hidden_size,
vocab_size=self.vocab_size,
max_sequence_length=self.max_position_embeddings,
init_method=self.init_method,
num_tokentypes=self.num_tokentypes,
embedding_dropout_prob=self.hidden_dropout,
position_embedding_type=position_embedding_type,
fp32_residual_connection=fp32_residual_connection,
dtype=self.dtype,
)
self._embedding_key = 'embedding'
if position_embedding_type == 'rope':
rotary_dim = self.hidden_size // num_attention_heads if kv_channels is None else kv_channels
assert 0 < rotary_percentage <= 1
if rotary_percentage < 1:
rotary_dim = int(rotary_dim * rotary_percentage)
self.rotary_pos_emb = RotaryEmbedding(
rotary_dim, seq_len_interpolation_factor=seq_len_interpolation_factor
)
elif position_embedding_type == 'alibi':
            # TODO: If this is used for an encoder-decoder model, implement the proper logic and the
            # corresponding addition for the decoder. Currently it is only used for decoder-only models.
# Encoder-decoder model, such as T5 is implemented in token_level_encoder_decoder.py
self.encoder_relative_position_embedding = ALiBiRelativePositionEmbedding(
bidirectional=encoder_attn_mask_type != AttnMaskType.causal,
num_attention_heads=num_attention_heads,
layer_type=LayerType.encoder,
num_attention_heads_alibi=None,
max_seq_len=max_position_embeddings,
)
elif position_embedding_type == 'kerple':
            # TODO: If this is used for an encoder-decoder model, implement the proper logic and the
            # corresponding addition for the decoder. Currently it is only used for decoder-only models.
# Encoder-decoder model, such as T5 is implemented in token_level_encoder_decoder.py
self.encoder_relative_position_embedding = KERPLERelativePositionEmbedding(
bidirectional=encoder_attn_mask_type != AttnMaskType.causal,
num_attention_heads=num_attention_heads,
layer_type=LayerType.encoder,
num_attention_heads_kerple=None,
max_seq_len=max_position_embeddings,
)
            assert not use_flash_attention  # flash attention is not supported with kerple at this point
elif position_embedding_type == 'sandwich':
self.encoder_relative_position_embedding = SandwichRelativePositionEmbedding(
bidirectional=encoder_attn_mask_type != AttnMaskType.causal,
num_attention_heads=num_attention_heads,
layer_type=LayerType.encoder,
hidden_size=self.hidden_size // num_attention_heads if kv_channels is None else kv_channels,
max_seq_len=max_position_embeddings,
)
# Transformer.
self.encoder = ParallelTransformer(
config=config,
init_method=self.init_method,
output_layer_init_method=self.output_layer_init_method,
num_layers=self.num_layers,
hidden_size=self.hidden_size,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
ffn_hidden_size=ffn_hidden_size,
self_attn_mask_type=self.encoder_attn_mask_type,
pre_process=self.pre_process,
post_process=self.post_process,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
normalization=normalization,
layernorm_epsilon=layernorm_epsilon,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
megatron_amp_O2=megatron_amp_O2,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
bias=bias,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
activation=activation,
headscale=headscale,
transformer_block_type=transformer_block_type,
normalize_attention_scores=normalize_attention_scores,
multi_query_attention=multi_query_attention,
megatron_legacy=megatron_legacy,
activations_checkpoint_granularity=activations_checkpoint_granularity,
activations_checkpoint_layers_per_pipeline=activations_checkpoint_layers_per_pipeline,
transformer_engine=transformer_engine,
fp8=fp8,
fp8_e4m3=fp8_e4m3,
fp8_hybrid=fp8_hybrid,
fp8_margin=fp8_margin,
fp8_interval=fp8_interval,
fp8_amax_history_len=fp8_amax_history_len,
fp8_amax_compute_algo=fp8_amax_compute_algo,
reduce_amax=reduce_amax,
use_emha=use_emha,
ub_tp_comm_overlap=ub_tp_comm_overlap,
position_embedding_type=position_embedding_type,
use_flash_attention=use_flash_attention,
)
self._encoder_key = 'encoder'
# Decoder
if self.add_decoder:
self.decoder = ParallelTransformer(
config=config,
layer_type=LayerType.decoder,
self_attn_mask_type=self.decoder_attn_mask_type,
init_method=self.init_method,
output_layer_init_method=self.output_layer_init_method,
num_layers=self.num_layers,
hidden_size=self.hidden_size,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
ffn_hidden_size=ffn_hidden_size,
pre_process=self.pre_process,
post_process=self.post_process,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
normalization=normalization,
layernorm_epsilon=layernorm_epsilon,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
megatron_amp_O2=megatron_amp_O2,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
megatron_legacy=megatron_legacy,
activations_checkpoint_granularity=activations_checkpoint_granularity,
activations_checkpoint_layers_per_pipeline=activations_checkpoint_layers_per_pipeline,
transformer_engine=transformer_engine,
position_embedding_type=position_embedding_type,
use_flash_attention=use_flash_attention,
)
self._decoder_key = 'decoder'
if self.post_process:
# Pooler.
if self.add_pooler:
self.pooler = Pooler(self.hidden_size, self.init_method, sequence_parallel=self.sequence_parallel)
self._pooler_key = 'pooler'
if not self.share_embeddings_and_output_weights:
self.output_layer = tensor_parallel.ColumnParallelLinear(
self.hidden_size,
self.vocab_size,
config=config,
bias=False, # Setting bias to False always to keep it consistent with embedding tying that also does not have a bias.
init_method=self.init_method,
)
self._output_layer_key = 'output_layer'
self.set_accepted_adapter_types([PromptEncoderAdapterConfig._target_])
def set_input_tensor(self, input_tensor):
""" See megatron.model.transformer.set_input_tensor()"""
# This is usually handled in schedules.py but some inference code still
# gives us non-lists or None
if not isinstance(input_tensor, list):
input_tensor = [input_tensor]
self.encoder.set_input_tensor(input_tensor[0])
def forward(
self,
enc_input_ids,
enc_position_ids,
enc_attn_mask,
dec_input_ids=None,
dec_position_ids=None,
dec_attn_mask=None,
enc_dec_attn_mask=None,
token_type_ids=None,
layer_past=None,
get_key_value=False,
pooling_sequence_index=0,
enc_hidden_states=None,
output_enc_hidden_only=False,
encoder_input=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
checkpoint_activations_all_layers=None,
):
# Embeddings.
if self.pre_process and encoder_input is None:
encoder_input = self.embedding(enc_input_ids, enc_position_ids, token_type_ids=token_type_ids)
if self.is_adapter_available():
_sq, _bs, _hs = encoder_input.size()
ptuning_adapter = self.get_adapter_module(AdapterName.PTUNING_ADAPTER)
                if ptuning_adapter and _sq >= ptuning_adapter.virtual_tokens:
                    # the sequence must be longer than the number of virtual tokens for them to be inserted
                    v = ptuning_adapter.virtual_tokens
virtual_embeddings = ptuning_adapter(_bs)
encoder_input = encoder_input[
v:, :, :
] # the first v tokens are pads so that they can be swapped out with virtual embeddings.
encoder_input = torch.concat([virtual_embeddings, encoder_input], dim=0)
else:
pass
# enc_attn_mask: [1, 1, s, s]
if inference_max_sequence_len is not None:
enc_seq_length = inference_max_sequence_len
elif self.encoder.input_tensor is not None:
if self.sequence_parallel:
enc_seq_length = (
self.encoder.input_tensor.size(0) * parallel_state.get_tensor_model_parallel_world_size()
)
else:
enc_seq_length = self.encoder.input_tensor.size(0)
else:
if self.sequence_parallel:
enc_seq_length = encoder_input.size(0) * parallel_state.get_tensor_model_parallel_world_size()
else:
enc_seq_length = encoder_input.size(0)
rotary_pos_emb = None
encoder_self_attention_relative_position_bias = None
if self.position_embedding_type == 'rope':
rotary_pos_emb = self.rotary_pos_emb(enc_seq_length)
elif (
self.position_embedding_type == 'alibi'
or self.position_embedding_type == 'sandwich'
or self.position_embedding_type == 'kerple'
):
encoder_self_attention_relative_position_bias = self.encoder_relative_position_embedding(
query_seq_length=enc_seq_length, key_seq_length=enc_seq_length,
)
# causal attention bias: [1, head, 1, k]
# non-causal attention bias: [1, head, q, k]
# encoder.
if enc_hidden_states is None:
encoder_output = self.encoder(
encoder_input,
enc_attn_mask,
layer_past=layer_past,
get_key_value=get_key_value,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
checkpoint_activations_all_layers=checkpoint_activations_all_layers,
rotary_pos_emb=(rotary_pos_emb, None, None)
if rotary_pos_emb is not None
                else None,  # This assumes the model is being used as a GPT/BERT-style model only (no cross-attention)
self_attention_relative_position_bias=encoder_self_attention_relative_position_bias,
)
else:
encoder_output = enc_hidden_states.to(encoder_input.dtype)
if self.post_process:
if self.add_pooler:
pooled_output = self.pooler(encoder_output, pooling_sequence_index)
        # output_enc_hidden_only is used when we only need the encoder's output.
        # For example, it is helpful for computing the similarity between two
        # sequences by average pooling.
if not self.add_decoder or output_enc_hidden_only:
if self.add_pooler and self.post_process:
return encoder_output, pooled_output
else:
return encoder_output
# Decoder Embedding
dec_embedding_output = self.embedding(dec_input_ids, dec_position_ids)
# decoder
decoder_output = self.decoder(
dec_embedding_output,
dec_attn_mask,
layer_past=layer_past,
get_key_value=get_key_value,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
checkpoint_activations_all_layers=checkpoint_activations_all_layers,
)
if self.add_pooler and self.post_process:
return decoder_output, encoder_output, pooled_output
else:
return decoder_output, encoder_output
def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
"""For easy load."""
state_dict_ = {}
if self.pre_process:
state_dict_[self._embedding_key] = self.embedding.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
state_dict_[self._encoder_key] = self.encoder.state_dict_for_save_checkpoint(destination, prefix, keep_vars)
if self.post_process:
if self.add_pooler:
state_dict_[self._pooler_key] = self.pooler.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
if self.add_decoder:
state_dict_[self._decoder_key] = self.decoder.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
# Embedding.
if self.pre_process:
if self._embedding_key in state_dict:
state_dict_ = state_dict[self._embedding_key]
else:
# for backward compatibility.
state_dict_ = {}
for key in state_dict.keys():
if '_embeddings' in key:
state_dict_[key] = state_dict[key]
self.embedding.load_state_dict(state_dict_, strict=strict)
# Encoder.
if self._encoder_key in state_dict:
state_dict_ = state_dict[self._encoder_key]
# for backward compatibility.
elif 'transformer' in state_dict:
state_dict_ = state_dict['transformer']
else:
# for backward compatibility.
state_dict_ = {}
for key in state_dict.keys():
if 'transformer.' in key:
state_dict_[key.split('transformer.')[1]] = state_dict[key]
# for backward compatibility.
state_dict_self_attention = {}
for key in state_dict_.keys():
if '.attention.' in key:
state_dict_self_attention[key.replace(".attention.", ".self_attention.")] = state_dict_[key]
else:
state_dict_self_attention[key] = state_dict_[key]
state_dict_ = state_dict_self_attention
self.encoder.load_state_dict(state_dict_, strict=strict)
if self.post_process:
# pooler
if self.add_pooler:
assert 'pooler' in state_dict, 'could not find data for pooler in the checkpoint'
self.pooler.load_state_dict(state_dict[self._pooler_key], strict=strict)
# decoder
if self.add_decoder:
            assert 'decoder' in state_dict, 'could not find data for decoder in the checkpoint'
self.decoder.load_state_dict(state_dict[self._decoder_key], strict=strict)
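# --- Illustrative sketch (not part of the original NeMo module) --------------
# A minimal, self-contained example of the prompt-tuning insertion pattern used
# in TransformerLanguageModel.forward above: the first `v` positions of a
# [seq, batch, hidden] embedding tensor (assumed to be padding) are swapped out
# for learned virtual-token embeddings. All names and sizes below are made up
# purely for illustration.
def _demo_ptuning_virtual_token_swap():
    import torch

    seq, batch, hidden, v = 8, 2, 4, 3
    encoder_input = torch.randn(seq, batch, hidden)  # [s, b, h]
    virtual_embeddings = torch.randn(v, batch, hidden)  # [v, b, h]
    # drop the first v (pad) positions and prepend the virtual embeddings
    swapped = torch.cat([virtual_embeddings, encoder_input[v:, :, :]], dim=0)
    assert swapped.shape == (seq, batch, hidden)
    return swapped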
| NeMo-main | nemo/collections/nlp/modules/common/megatron/language_model.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from nemo.collections.nlp.modules.common.megatron.adapters.parallel_adapters import (
AdapterName,
InfusedAdapterConfig,
LoraKQVAdapterConfig,
LoraKQVAdapterWeightTyingConfig,
LoraKVAdapterConfig,
LoraQAdapterConfig,
)
from nemo.collections.nlp.modules.common.megatron.fused_softmax import MatchedScaleMaskSoftmax
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.position_embedding import XPOSPositionEmbedding
from nemo.collections.nlp.modules.common.megatron.position_embedding.rotary_position_embedding import (
apply_rotary_pos_emb,
)
from nemo.collections.nlp.modules.common.megatron.utils import (
ApexGuardDefaults,
_cast_if_autocast_enabled,
attention_mask_func,
)
from nemo.collections.nlp.parts import utils_funcs
from nemo.core import adapter_mixins
try:
from apex.transformer.enums import AttnMaskType, AttnType
from apex.transformer.utils import divide as safe_divide
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
ModelType = AttnMaskType = AttnType = LayerType = ApexGuardDefaults()
try:
from megatron.core import ModelParallelConfig, parallel_state, tensor_parallel
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
try:
# Flash Attention 1.X
from flash_attn.bert_padding import pad_input, unpad_input
from flash_attn.flash_attn_interface import flash_attn_unpadded_func
from flash_attn.flash_attn_triton import flash_attn_func as flash_attn_func_triton
HAVE_FLASH_ATTENTION = True
flash_attn_func = None
except (ImportError, ModuleNotFoundError):
try:
# Flash Attention 2.X
from flash_attn import flash_attn_func
from flash_attn.flash_attn_interface import flash_attn_varlen_func as flash_attn_unpadded_func
HAVE_FLASH_ATTENTION = True
except (ImportError, ModuleNotFoundError):
HAVE_FLASH_ATTENTION = False
flash_attn_unpadded_func, flash_attn_func_triton, flash_attn_func = None, None, None
unpad_input, pad_input = None, None
""" We use the following notation throughout this file:
h: hidden size
n: number of attention heads
p: number of model parallel partitions
np: n/p
hp: h/p
hn: h/n
b: batch size
s: sequence length
l: number of layers
Transformer takes input of size [s, b, h] and returns a
tensor of the same size. We use the following arguments:
hyperparameters: transformer hyperparameters
"""
class ParallelAttention(MegatronModule, adapter_mixins.AdapterModuleMixin):
"""Parallel self-attention layer abstract class.
Self-attention layer takes input with size [s, b, h]
and returns output of the same size.
"""
def __init__(
self,
config: ModelParallelConfig,
init_method,
output_layer_init_method,
layer_number,
num_attention_heads,
hidden_size,
attention_type=AttnType.self_attn,
attn_mask_type=AttnMaskType.padding,
precision=16,
apply_query_key_layer_scaling=False,
kv_channels=None,
megatron_amp_O2=False,
masked_softmax_fusion=True,
attention_dropout=0.1,
layer_type=None,
megatron_legacy=False,
bias=True,
headscale=False,
position_embedding_type='learned_absolute',
multi_query_attention=False,
normalize_attention_scores=True,
use_flash_attention=False,
):
super(ParallelAttention, self).__init__(config=config)
self.layer_number = max(1, layer_number)
self.attention_type = attention_type
self.attn_mask_type = attn_mask_type
self.normalize_attention_scores = normalize_attention_scores
self.position_embedding_type = position_embedding_type
self.multi_query_attention = multi_query_attention
self.megatron_legacy = megatron_legacy
self.dtype = utils_funcs.torch_dtype_from_precision(precision, megatron_amp_O2)
self.set_accepted_adapter_types(
[
InfusedAdapterConfig._target_,
LoraKQVAdapterConfig._target_,
LoraQAdapterConfig._target_,
LoraKVAdapterConfig._target_,
LoraKQVAdapterWeightTyingConfig._target_,
]
)
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
projection_size = kv_channels * num_attention_heads
# Per attention head and per partition values.
world_size = parallel_state.get_tensor_model_parallel_world_size()
self.hidden_size_per_attention_head = safe_divide(projection_size, num_attention_heads)
self.num_attention_heads_per_partition = safe_divide(num_attention_heads, world_size)
self.num_attention_heads_partition_offset = (
self.num_attention_heads_per_partition * parallel_state.get_tensor_model_parallel_rank()
)
# Strided linear layer.
if attention_type == AttnType.self_attn:
self.query_key_value = tensor_parallel.ColumnParallelLinear(
hidden_size,
3 * projection_size,
config=config,
gather_output=False,
init_method=init_method,
bias=bias,
)
else:
assert attention_type == AttnType.cross_attn
self.query = tensor_parallel.ColumnParallelLinear(
hidden_size, projection_size, config=config, gather_output=False, init_method=init_method, bias=bias,
)
self.key_value = tensor_parallel.ColumnParallelLinear(
hidden_size,
2 * projection_size,
config=config,
gather_output=False,
init_method=init_method,
bias=bias,
)
self.core_attention = CoreAttention(
config=config,
layer_number=self.layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
attention_type=self.attention_type,
attn_mask_type=self.attn_mask_type,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
multi_query_attention=multi_query_attention,
normalize_attention_scores=normalize_attention_scores,
position_embedding_type=position_embedding_type,
use_flash_attention=use_flash_attention,
)
# Output.
self.dense = tensor_parallel.RowParallelLinear(
projection_size,
hidden_size,
config=config,
input_is_parallel=True,
init_method=output_layer_init_method,
skip_bias_add=True,
bias=bias,
)
self.headscale = headscale
if headscale:
self.head_scale_tensor = torch.nn.Parameter(
torch.ones(1, self.num_attention_heads_per_partition, 1, 1), requires_grad=True
)
# Inference key-value memory
self.inference_key_memory = None
self.inference_value_memory = None
self.inference_current_sequence_len = 0
# relative position embedding
self.layer_type = layer_type
def _checkpointed_attention_forward(
self,
query_layer,
key_layer,
value_layer,
attention_mask,
rotary_pos_emb=None,
relative_position_bias=None,
headscale_tensor=None,
inference_mode=None,
):
"""Forward method with activation checkpointing."""
def custom_forward(*inputs):
if len(inputs) == 7:
query_layer = inputs[0]
key_layer = inputs[1]
value_layer = inputs[2]
attention_mask = inputs[3]
rotary_pos_emb = inputs[4]
relative_position_bias = inputs[5]
headscale_tensor = inputs[6]
elif len(inputs) == 8:
query_layer = inputs[0]
key_layer = inputs[1]
value_layer = inputs[2]
attention_mask = inputs[3]
rotary_pos_emb = (inputs[4], inputs[5])
relative_position_bias = inputs[6]
headscale_tensor = inputs[7]
else:
raise ValueError('unexpected number of inputs')
output_ = self.core_attention(
query_layer,
key_layer,
value_layer,
attention_mask,
rotary_pos_emb=rotary_pos_emb,
relative_position_bias=relative_position_bias,
headscale_tensor=headscale_tensor,
inference_mode=inference_mode,
)
return output_
if rotary_pos_emb is None:
rot_tuple = (rotary_pos_emb,)
else:
rot_tuple = (rotary_pos_emb[0], rotary_pos_emb[1])
hidden_states = tensor_parallel.checkpoint(
custom_forward,
False,
query_layer,
key_layer,
value_layer,
attention_mask,
*rot_tuple,
relative_position_bias,
headscale_tensor,
)
return hidden_states
def _allocate_memory(self, inference_max_sequence_len, batch_size, dtype, device):
return torch.empty(
inference_max_sequence_len,
batch_size,
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
dtype=dtype,
device=device,
)
def _transpose_last_dim(self, mixed_layer, num_splits, num_splits_first):
input_shape = mixed_layer.size()
if num_splits_first:
"""[s, b, num_splits * np * hn]
-->(view) [s, b, num_splits, np, hn]
            -->(transpose) [s, b, np, num_splits, hn]
-->(view) [s, b, np * num_splits * hn] """
intermediate_shape = input_shape[:-1] + (
num_splits,
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
)
mixed_layer = mixed_layer.view(*intermediate_shape)
mixed_layer = mixed_layer.transpose(-2, -3).contiguous()
else:
"""[s, b, np * hn * num_splits]
-->(view) [s, b, np, hn, num_splits]
            -->(transpose) [s, b, np, num_splits, hn]
-->(view) [s, b, np * num_splits * hn] """
intermediate_shape = input_shape[:-1] + (
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
num_splits,
)
mixed_layer = mixed_layer.view(*intermediate_shape)
mixed_layer = mixed_layer.transpose(-1, -2).contiguous()
mixed_layer = mixed_layer.view(*input_shape)
return mixed_layer
def forward(
self,
hidden_states,
attention_mask,
layer_past=None,
get_key_value=False,
encoder_output=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
rotary_pos_emb=None, # rotary positional embedding
relative_position_bias=None,
checkpoint_core_attention=False,
):
# hidden_states: [sq, b, h]
# =================================================
# Pre-allocate memory for key-values for inference.
# =================================================
if set_inference_key_value_memory:
assert inference_max_sequence_len and inference_max_sequence_len > 0
self.inference_key_memory = self._allocate_memory(
inference_max_sequence_len, hidden_states.size(1), hidden_states.dtype, hidden_states.device
)
self.inference_value_memory = self._allocate_memory(
inference_max_sequence_len, hidden_states.size(1), hidden_states.dtype, hidden_states.device
)
self.inference_current_sequence_len = 0
# Some consistency check.
if inference_max_sequence_len:
assert self.inference_current_sequence_len < self.inference_key_memory.size(0)
assert inference_max_sequence_len == self.inference_key_memory.size(0)
# This is added for safety. In case inference_max_sequence_len
# is not provided, make sure there is no potential memory left
# from previous inference.
if not inference_max_sequence_len:
self.inference_key_memory = None
self.inference_value_memory = None
# =====================
# Query, Key, and Value
# =====================
if self.attention_type == AttnType.self_attn:
# Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
mixed_x_layer, _ = self.query_key_value(hidden_states)
if self.is_adapter_available():
lora_kqv_adapter = self.get_adapter_module(AdapterName.LORA_KQV_ADAPTER)
if lora_kqv_adapter:
lora_mixed_x_layer = lora_kqv_adapter(hidden_states)
mixed_x_layer = mixed_x_layer + lora_mixed_x_layer
# [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
new_tensor_shape = mixed_x_layer.size()[:-1] + (
self.num_attention_heads_per_partition,
3 * self.hidden_size_per_attention_head,
)
if self.megatron_legacy:
mixed_x_layer = self._transpose_last_dim(mixed_x_layer, 3, True)
mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
# [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
(query_layer, key_layer, value_layer) = tensor_parallel.split_tensor_along_last_dim(
mixed_x_layer, 3, contiguous_split_chunks=True
)
else:
# Attention heads [sk, b, h] --> [sk, b, (np * 2 * hn)]
mixed_kv_layer, _ = self.key_value(encoder_output)
if self.is_adapter_available():
lora_kv_adapter = self.get_adapter_module(AdapterName.LORA_KV_ADAPTER)
if lora_kv_adapter:
lora_mixed_kv_layer = lora_kv_adapter(encoder_output)
mixed_kv_layer = mixed_kv_layer + lora_mixed_kv_layer
# [sk, b, (np * 2 * hn)] --> [sk, b, np, 2 * hn]
new_tensor_shape = mixed_kv_layer.size()[:-1] + (
self.num_attention_heads_per_partition,
2 * self.hidden_size_per_attention_head,
)
if self.megatron_legacy:
mixed_kv_layer = self._transpose_last_dim(mixed_kv_layer, 2, True)
mixed_kv_layer = mixed_kv_layer.view(*new_tensor_shape)
# [sk, b, np, 2 * hn] --> 2 [sk, b, np, hn]
(key_layer, value_layer) = tensor_parallel.split_tensor_along_last_dim(
mixed_kv_layer, 2, contiguous_split_chunks=True
)
# Attention head [sq, b, h] --> [sq, b, hp]
query_layer, _ = self.query(hidden_states)
if self.is_adapter_available():
lora_q_adapter = self.get_adapter_module(AdapterName.LORA_Q_ADAPTER)
if lora_q_adapter:
lora_q_layer = lora_q_adapter(hidden_states)
query_layer = query_layer + lora_q_layer
# [sq, b, hp] --> [sq, b, np, hn]
new_tensor_shape = query_layer.size()[:-1] + (
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
)
query_layer = query_layer.view(*new_tensor_shape)
if self.is_adapter_available():
key_infused_adapter = self.get_adapter_module(AdapterName.KEY_INFUSED)
value_infused_adapter = self.get_adapter_module(AdapterName.VALUE_INFUSED)
if key_infused_adapter:
assert value_infused_adapter is not None, "Expected value_infused_adapter not found!"
kls = key_layer.shape
key_layer = key_infused_adapter(key_layer.reshape(kls[0], kls[1], -1)).reshape(kls)
if value_infused_adapter:
assert key_infused_adapter is not None, "Expected key_infused_adapter not found!"
vls = value_layer.shape
value_layer = value_infused_adapter(value_layer.reshape(vls[0], vls[1], -1)).reshape(vls)
# ===================================================
# Adjust key, value, and attention mask for inference
# ===================================================
# duplicate the pos_emb for self attention
if rotary_pos_emb is not None:
rotary_pos_emb = rotary_pos_emb if isinstance(rotary_pos_emb, tuple) else ((rotary_pos_emb,) * 2)
if inference_max_sequence_len:
# Adjust the range variables.
start = self.inference_current_sequence_len
self.inference_current_sequence_len += key_layer.size(0)
end = self.inference_current_sequence_len
# Copy key and values.
self.inference_key_memory[start:end, ...] = key_layer
self.inference_value_memory[start:end, ...] = value_layer
key_layer = self.inference_key_memory[:end, ...]
value_layer = self.inference_value_memory[:end, ...]
# Adjust attention mask
if attention_mask is not None:
attention_mask = attention_mask[..., start:end, :end]
# adjust the key rotary positional embedding
if rotary_pos_emb is not None:
q_pos_emb, k_pos_emb = rotary_pos_emb
if not set_inference_key_value_memory:
# In inference, we compute one token at a time.
# Select the correct positional embedding.
q_pos_emb = q_pos_emb[end - 1 : end]
else:
q_pos_emb = q_pos_emb[:end, :, :, :]
k_pos_emb = k_pos_emb[:end, :, :, :]
rotary_pos_emb = (q_pos_emb, k_pos_emb)
if layer_past is not None:
past_key, past_value = layer_past
key_layer = torch.cat((past_key.type_as(key_layer), key_layer), dim=0)
value_layer = torch.cat((past_value.type_as(value_layer), value_layer), dim=0)
if get_key_value:
present = (key_layer, value_layer)
if checkpoint_core_attention:
context_layer = self._checkpointed_attention_forward(
query_layer,
key_layer,
value_layer,
attention_mask,
rotary_pos_emb=rotary_pos_emb,
relative_position_bias=relative_position_bias,
headscale_tensor=self.head_scale_tensor if self.headscale else None,
inference_mode=inference_max_sequence_len is not None and query_layer.shape[0] == 1,
)
else:
context_layer = self.core_attention(
query_layer,
key_layer,
value_layer,
attention_mask,
layer_past=layer_past,
get_key_value=get_key_value,
rotary_pos_emb=rotary_pos_emb,
relative_position_bias=relative_position_bias,
headscale_tensor=self.head_scale_tensor if self.headscale else None,
inference_mode=inference_max_sequence_len is not None and query_layer.shape[0] == 1,
)
# =================
# Output. [sq, b, h]
# =================
output, bias = self.dense(context_layer)
if get_key_value:
output = [output, present]
return output, bias
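# --- Illustrative sketch (not part of the original NeMo module) --------------
# A minimal stand-alone version of the inference key/value memory pattern used
# in ParallelAttention.forward above: a [max_len, b, np, hn] buffer is allocated
# once, each decoding step copies its new keys/values into the next slot, and
# attention then runs over the filled prefix. Names and sizes are illustrative.
def _demo_inference_kv_memory(max_len=4, b=1, np_=2, hn=8):
    import torch

    key_memory = torch.empty(max_len, b, np_, hn)
    value_memory = torch.empty(max_len, b, np_, hn)
    current_len = 0
    keys_so_far = values_so_far = None
    for _ in range(max_len):
        new_key = torch.randn(1, b, np_, hn)  # one new token per step
        new_value = torch.randn(1, b, np_, hn)
        key_memory[current_len : current_len + 1] = new_key
        value_memory[current_len : current_len + 1] = new_value
        current_len += 1
        keys_so_far = key_memory[:current_len]  # attend over the filled prefix
        values_so_far = value_memory[:current_len]
    return keys_so_far, values_so_far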
class ParallelChunkedCrossAttention(MegatronModule):
"""Parallel chunked cross-attention layer class.
    Cross-attention layer takes input with size [s, b, h]
    and returns output of the same size.
"""
def __init__(
self,
config: ModelParallelConfig,
init_method,
output_layer_init_method,
layer_number,
num_attention_heads,
hidden_size,
precision=16,
apply_query_key_layer_scaling=False,
kv_channels=None,
megatron_amp_O2=False,
masked_softmax_fusion=True,
attention_dropout=0.1,
megatron_legacy=False,
        chunk_size=64,  # number of tokens in each chunk
bias=True,
headscale=False,
normalize_attention_scores=True,
):
super(ParallelChunkedCrossAttention, self).__init__(config=config)
self.cross_attention = ParallelAttention(
config=config,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
attention_type=AttnType.cross_attn,
attn_mask_type=AttnMaskType.padding,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
megatron_amp_O2=megatron_amp_O2,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
megatron_legacy=megatron_legacy,
bias=bias,
headscale=headscale,
normalize_attention_scores=normalize_attention_scores,
)
self.chunk_size = chunk_size
def forward(
self,
hidden_states,
attention_mask,
encoder_output=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
rotary_pos_emb=None,
checkpoint_core_attention=False,
):
if checkpoint_core_attention:
raise ValueError(
'checkpoint_core_attention during forward not implemented yet for ParallelChunkedCrossAttention'
)
# hidden_states is assumed to have dimension [token length, batch, dimension]
# derive variables
# encoder_output here is the retrieved context
context = encoder_output
# context is assumed to have dimension [num_chunks, num_neighbors, context_token_len, batch, dimension]
chunk_size = self.chunk_size
b, n, dim = (
hidden_states.shape[1],
hidden_states.shape[0],
hidden_states.shape[2],
)
default_bias = self.cross_attention.dense.bias
if set_inference_key_value_memory:
seq_index = (n // chunk_size) * chunk_size
self.current_len = n
elif inference_max_sequence_len is not None:
# only handles single token increment
assert n == 1
self.current_len += n
chunk_id = self.current_len // chunk_size
if chunk_id <= 0:
# if sequence length less than chunk size, do an early return
return torch.zeros_like(hidden_states), default_bias
causal_padding = chunk_size - 1
# pad it as a full chunk, put it at the end of the chunk position
hidden_states = F.pad(hidden_states, (0, 0, 0, 0, causal_padding, 0), value=0.0)
# only use the relevant context
context = context[chunk_id - 1 : chunk_id, :, :, :, :]
attention_mask = rearrange(attention_mask, '(b k) 1 q v -> b k 1 q v', b=b)
# select the relevant chunk attn mask
attention_mask = attention_mask[:, chunk_id - 1]
seq_index = chunk_size
else:
# this is normal forward without inference
seq_index = (n // chunk_size) * chunk_size
# if sequence length less than chunk size, do an early return
if n < self.chunk_size and set_inference_key_value_memory and inference_max_sequence_len is not None:
return torch.zeros_like(hidden_states), default_bias
num_chunks, num_retrieved = (
context.shape[-5],
context.shape[-4],
)
# causal padding
causal_padding = chunk_size - 1
x = F.pad(hidden_states, (0, 0, 0, 0, -causal_padding, causal_padding), value=0.0)
# remove sequence which is ahead of the neighbors retrieved (during inference)
# seq_index = (n // chunk_size) * chunk_size
x, x_remainder = x[:seq_index], x[seq_index:]
seq_remain_len = x_remainder.shape[0]
# take care of rotary positional embedding
# make sure queries positions are properly shifted to the future
if rotary_pos_emb is not None:
q_pos_emb, k_pos_emb = rotary_pos_emb
            # the current implementation is broken:
            # q needs to be extended by causal_padding, i.e. just do
# q_pos_emb = F.pad(q_pos_emb, (0, 0, -causal_padding, 0), value = 0.)
if inference_max_sequence_len is not None and not set_inference_key_value_memory:
token_pos = (self.current_len - 1) % chunk_size
q_pos_emb = F.pad(
q_pos_emb, (0, 0, 0, 0, 0, 0, -causal_padding - token_pos, -causal_padding + token_pos), value=0.0
)
else:
q_pos_emb = F.pad(q_pos_emb, (0, 0, 0, 0, 0, 0, -causal_padding, 0), value=0.0)
k_pos_emb = repeat(k_pos_emb, 'n b h d -> (r n) b h d', r=num_retrieved)
rotary_pos_emb = (q_pos_emb, k_pos_emb)
        # make sure the number of context chunks is sufficient
assert x.shape[0] // chunk_size == num_chunks
# reshape so we have chunk to chunk attention, without breaking causality
x = rearrange(x, '(k n) b d -> n (b k) d', k=num_chunks)
context = rearrange(context, 'k r n b d -> (r n) (b k) d')
# cross attention
out, bias = self.cross_attention(x, attention_mask, encoder_output=context, rotary_pos_emb=rotary_pos_emb)
# reshape back to original sequence
out = rearrange(out, 'n (b k) d -> (k n) b d', b=b)
# pad back to original, with 0s at the beginning (which will be added to the residual and be fine)
out = F.pad(out, (0, 0, 0, 0, causal_padding, -causal_padding + seq_remain_len), value=0.0)
if not set_inference_key_value_memory and inference_max_sequence_len is not None:
out = out[-1:]
return out, bias
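# --- Illustrative sketch (not part of the original NeMo module) --------------
# A minimal example of the chunking rearranges used in
# ParallelChunkedCrossAttention.forward above: a [k*n, b, d] sequence is folded
# so that every chunk attends to its own retrieved neighbors, and the result is
# unfolded back afterwards. All sizes and names are made up for illustration.
def _demo_chunk_rearrange(k=3, n=4, b=2, d=8, r=2):
    import torch
    from einops import rearrange

    x = torch.randn(k * n, b, d)  # [k*n, b, d] hidden states
    context = torch.randn(k, r, n, b, d)  # [k, r, n, b, d] retrieved neighbors
    x_chunks = rearrange(x, '(k n) b d -> n (b k) d', k=k)
    context_chunks = rearrange(context, 'k r n b d -> (r n) (b k) d')
    # ... chunk-wise cross-attention between x_chunks and context_chunks here ...
    out = rearrange(x_chunks, 'n (b k) d -> (k n) b d', b=b)
    assert out.shape == x.shape
    return x_chunks, context_chunks, out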
class CoreAttention(MegatronModule):
""" Region where selective activation recomputation is applied.
See Figure 3. in Reducing Activation Recomputation in Large Transformer Models
https://arxiv.org/pdf/2205.05198.pdf for more details.
"""
def __init__(
self,
config: ModelParallelConfig,
layer_number,
num_attention_heads,
hidden_size,
attention_type=AttnType.self_attn,
attn_mask_type=AttnMaskType.padding,
precision=16,
apply_query_key_layer_scaling=False,
kv_channels=None,
masked_softmax_fusion=True,
attention_dropout=0.1,
normalize_attention_scores=True,
multi_query_attention=False,
position_embedding_type='learned_absolute',
use_flash_attention=False,
):
super(CoreAttention, self).__init__(config=config)
self.precision = precision
self.fp16 = False
self.bf16 = False
if precision in ['bf16', 'bf16-mixed']:
self.bf16 = True
elif precision in [16, '16', '16-mixed']:
self.fp16 = True
self.multi_query_attention = multi_query_attention
self.position_embedding_type = position_embedding_type
self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
self.attention_softmax_in_fp32 = False
if self.apply_query_key_layer_scaling:
self.attention_softmax_in_fp32 = True
self.layer_number = max(1, layer_number)
self.attention_type = attention_type
self.attn_mask_type = attn_mask_type
self.sequence_parallel = config.sequence_parallel
# If True, will scale attention scores by 1 / sqrt(hidden_size_per_attention_head).
        # This arg has been provided mostly to support weight conversion of HuggingFace models (e.g., T5v1.1).
self.normalize_attention_scores = normalize_attention_scores
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
projection_size = kv_channels * num_attention_heads
# Per attention head and per partition values.
world_size = parallel_state.get_tensor_model_parallel_world_size()
self.hidden_size_per_partition = safe_divide(projection_size, world_size)
self.hidden_size_per_attention_head = safe_divide(projection_size, num_attention_heads)
self.num_attention_heads_per_partition = safe_divide(num_attention_heads, world_size)
self.num_attention_heads_partition_offset = (
self.num_attention_heads_per_partition * parallel_state.get_tensor_model_parallel_rank()
)
coeff = None
self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
if self.apply_query_key_layer_scaling:
coeff = self.layer_number
self.norm_factor *= coeff
self.scale_mask_softmax = MatchedScaleMaskSoftmax(
self.fp16,
self.bf16,
self.attn_mask_type,
masked_softmax_fusion,
attention_mask_func,
self.attention_softmax_in_fp32,
coeff,
)
# Dropout. Note that for a single iteration, this layer will generate
        # different outputs for different numbers of parallel partitions, but
# on average it should not be partition dependent.
self.attention_dropout_p = attention_dropout
self.attention_dropout = torch.nn.Dropout(attention_dropout)
if use_flash_attention:
self.attn_fn = self.flash_attention
else:
self.attn_fn = self.torch_attention
if position_embedding_type.lower() == 'xpos':
self.xpos = XPOSPositionEmbedding(kv_channels)
def forward(
self,
query_layer,
key_layer,
value_layer,
attention_mask,
layer_past=None,
get_key_value=False,
rotary_pos_emb=None,
relative_position_bias=None,
headscale_tensor=None,
inference_mode=None,
):
b, np, sq, sk, hn = (
query_layer.size(1),
query_layer.size(2),
query_layer.size(0),
key_layer.size(0),
query_layer.size(3),
)
# ==================================================
# Update attention mask for inference. [b, np, sq, sk]
# ==================================================
if get_key_value:
with torch.no_grad():
if layer_past is not None:
attention_mask = attention_mask[..., sq - 1, :sk].unsqueeze(2)
else:
attention_mask = attention_mask[..., :sq, :sk]
# ==================================================
# Update attention bias. [b, np, sq, sk]
# ==================================================
if relative_position_bias is not None:
relative_position_bias = relative_position_bias[
:,
self.num_attention_heads_partition_offset : self.num_attention_heads_partition_offset
+ self.num_attention_heads_per_partition,
-sq:,
-sk:,
]
# ==================================================
# Update query_layer, key_layer, value_layer
# ==================================================
# TODO: figure out how to do this
# apply relative positional encoding (rotary embedding)
if rotary_pos_emb is not None:
q_pos_emb, k_pos_emb = rotary_pos_emb
query_layer = apply_rotary_pos_emb(query_layer, q_pos_emb)
key_layer = apply_rotary_pos_emb(key_layer, k_pos_emb)
# TODO, can apply positional embedding to value_layer so it has
# absolute positional embedding.
# otherwise, only relative positional embedding takes effect
# value_layer = apply_rotary_pos_emb(value_layer, k_pos_emb)
if self.position_embedding_type.lower() == 'xpos':
query_layer = self.xpos(query_layer, offset=key_layer.shape[-2] - query_layer.shape[-2], downscale=False)
key_layer = self.xpos(key_layer, offset=0, downscale=True)
# ==================================================
# query_layer [sq, b, np, hn]
# key_layer [sk, b, np, hn]
# value_layer [sk, b, np, hn]
# attention_mask [b, 1, sq, sk] or [b, s]
# relative_position_bias [b, np, sq, sk]
# context_layer [b, np, sq, hn]
# ==================================================
context_layer = self.attn_fn(
query_layer, key_layer, value_layer, attention_mask, relative_position_bias, inference_mode
)
if headscale_tensor is not None:
context_layer = context_layer * headscale_tensor
# [b, np, sq, hn] --> [sq, b, np, hn]
context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
# [sq, b, np, hn] --> [sq, b, hp]
new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
def torch_attention(self, query_layer, key_layer, value_layer, attention_mask, attention_bias, inference_mode):
sq, b, np, hn = query_layer.shape
sk = key_layer.shape[0]
if self.multi_query_attention:
query_layer = rearrange(query_layer, 'sq b np hn -> b (np sq) hn')
key_layer = rearrange(key_layer, 'sk b 1 hn -> b hn sk')
value_layer = rearrange(value_layer, 'sv b np hn -> (b np) sv hn')
else:
query_layer = rearrange(query_layer, 'sq b np hn -> (b np) sq hn')
key_layer = rearrange(key_layer, 'sk b np hn -> (b np) hn sk')
value_layer = rearrange(value_layer, 'sv b np hn -> (b np) sv hn')
matmul_input_buffer = torch.empty(
query_layer.shape[0],
query_layer.shape[1],
key_layer.shape[2],
dtype=query_layer.dtype,
device=query_layer.device,
)
matmul_result = torch.baddbmm(
matmul_input_buffer,
query_layer,
key_layer,
beta=0.0,
alpha=(1.0 / self.norm_factor) if self.normalize_attention_scores else 1.0,
)
# change view to [b, np, sq, sk]
attention_scores = matmul_result.view(b, np, sq, sk)
if attention_bias is not None:
attention_scores += attention_bias
attention_probs = self.scale_mask_softmax(attention_scores, attention_mask)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
if not self.sequence_parallel:
with tensor_parallel.random.get_cuda_rng_tracker().fork():
attention_probs = self.attention_dropout(attention_probs)
else:
attention_probs = self.attention_dropout(attention_probs)
# change view [b * np, sq, sk]
attention_probs = rearrange(attention_probs, 'b np sq sk -> (b np) sq sk')
# matmul: [b * np, sq, hn]
context_layer = torch.bmm(attention_probs, value_layer)
# change view [b, np, sq, hn]
context_layer = rearrange(context_layer, '(b np) sq hn -> b np sq hn', np=np)
return context_layer
def flash_attention(self, query_layer, key_layer, value_layer, attention_mask, attention_bias, inference_mode):
query_layer = rearrange(query_layer, 'sq b np hn -> b sq np hn')
key_layer = rearrange(key_layer, 'sk b np hn -> b sk np hn')
value_layer = rearrange(value_layer, 'sv b np hn -> b sv np hn')
        # Used to ensure the dtype is cast to fp16 or bf16
query_layer = _cast_if_autocast_enabled(query_layer)
key_layer = _cast_if_autocast_enabled(key_layer)
value_layer = _cast_if_autocast_enabled(value_layer)
attention_bias = _cast_if_autocast_enabled(attention_bias)
is_causal = self.attn_mask_type == AttnMaskType.causal and not inference_mode
if attention_bias is not None:
return self.flash_attention_triton(
query_layer, key_layer, value_layer, attention_mask, attention_bias, is_causal,
)
else:
return self.flash_attention_cuda(query_layer, key_layer, value_layer, attention_mask, is_causal,)
def flash_attention_cuda(self, query_layer, key_layer, value_layer, attention_mask, is_causal):
batch_size, seqlen, nheads, _ = query_layer.shape
# True: attend / False: not attend
if attention_mask is None:
attention_mask_q = torch.ones(batch_size, query_layer.shape[1], device=query_layer.device).bool()
attention_mask_kv = torch.ones(batch_size, key_layer.shape[1], device=query_layer.device).bool()
elif len(attention_mask.shape) == 4:
# [b, 1, sq, sk] -> [b, sq] / [b, sk]
attention_mask_q = torch.any(torch.eq(attention_mask, False), dim=3).squeeze(1)
attention_mask_kv = torch.any(torch.eq(attention_mask, False), dim=2).squeeze(1)
else:
assert len(attention_mask.shape) == 2
attention_mask_q = ~attention_mask
attention_mask_kv = ~attention_mask
seqlens_q_in_batch = len(attention_mask_q.sum(dim=-1, dtype=torch.int32).unique())
seqlens_kv_in_batch = len(attention_mask_kv.sum(dim=-1, dtype=torch.int32).unique())
if seqlens_q_in_batch == 1 and seqlens_kv_in_batch == 1 and flash_attn_func is not None:
# [b, sq, np, hn]
context_layer = flash_attn_func(
query_layer,
key_layer,
value_layer,
dropout_p=self.attention_dropout_p if self.training else 0.0,
causal=is_causal,
)
else:
q, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input(query_layer, attention_mask_q)
k, _, cu_seqlens_k, max_seqlen_k = unpad_input(key_layer, attention_mask_kv)
v, _, _, _ = unpad_input(value_layer, attention_mask_kv)
context_layer = flash_attn_unpadded_func(
q,
k,
v,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
dropout_p=self.attention_dropout_p if self.training else 0.0,
causal=is_causal,
)
# [b, sq, np, hn]
context_layer = pad_input(context_layer, indices_q, batch_size, seqlen)
# [b, sq, np, hn] -> [b, np, sq, hn]
context_layer = context_layer.permute(0, 2, 1, 3)
return context_layer
def flash_attention_triton(self, query_layer, key_layer, value_layer, attention_mask, attention_bias, is_causal):
if self.attention_dropout_p > 0.0:
            raise NotImplementedError('attention_dropout not implemented for flash_attention with attention bias')
if attention_mask is not None:
if len(attention_mask.shape) == 4:
# [b, 1, sq, sk] -> [b, 1, sq, 1] / [b, 1, 1, sk]
attention_mask_q = torch.any(torch.eq(attention_mask, False), dim=3).unsqueeze(3)
attention_mask_kv = torch.any(torch.eq(attention_mask, False), dim=2).unsqueeze(2)
else:
# [b, s] -> [b, 1, s, 1] / [b, 1, 1, s]
assert len(attention_mask.shape) == 2
attention_mask_q = (~attention_mask).unsqueeze(1).unsqueeze(3)
attention_mask_kv = (~attention_mask).unsqueeze(1).unsqueeze(2)
if attention_bias.shape[2] == attention_mask_q.shape[2]:
attention_bias = attention_bias.masked_fill(~attention_mask_q, torch.finfo(query_layer.dtype).min)
if attention_bias.shape[3] == attention_mask_kv.shape[3]:
attention_bias = attention_bias.masked_fill(~attention_mask_kv, torch.finfo(query_layer.dtype).min)
context_layer = flash_attn_func_triton(query_layer, key_layer, value_layer, attention_bias, is_causal,)
# [b, sq, np, hn] -> [b, np, sq, hn]
context_layer = context_layer.permute(0, 2, 1, 3)
if attention_mask is not None:
context_layer = context_layer * attention_mask_q
return context_layer
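# --- Illustrative sketch (not part of the original NeMo module) --------------
# A plain-PyTorch version of the scaled dot-product attention that
# CoreAttention.torch_attention implements above with baddbmm and a fused
# masked softmax: scores = (Q K^T) / sqrt(hn), softmax over the key axis, then
# a weighted sum of the values. Names and sizes below are illustrative only.
def _demo_scaled_dot_product_attention(b=2, np_=3, sq=4, sk=4, hn=8):
    import math

    import torch

    q = torch.randn(b * np_, sq, hn)
    k = torch.randn(b * np_, sk, hn)
    v = torch.randn(b * np_, sk, hn)
    scores = torch.baddbmm(
        torch.empty(b * np_, sq, sk),
        q,
        k.transpose(1, 2),
        beta=0.0,  # the (uninitialized) input buffer is ignored when beta is 0
        alpha=1.0 / math.sqrt(hn),  # the norm_factor used above
    )  # [b*np, sq, sk]
    probs = torch.softmax(scores, dim=-1)
    context = torch.bmm(probs, v)  # [b*np, sq, hn]
    return context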
| NeMo-main | nemo/collections/nlp/modules/common/megatron/attention.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer based language model."""
from nemo.collections.nlp.modules.common.megatron.layer_type import LayerType
from nemo.collections.nlp.modules.common.megatron.megatron_decoder_module import MegatronDecoderModule
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.transformer import ParallelTransformer
from nemo.collections.nlp.modules.common.megatron.utils import (
ApexGuardDefaults,
attn_mask_postprocess,
build_attention_mask_3d,
)
from nemo.core.classes.exportable import Exportable
try:
from apex.transformer.enums import AttnMaskType, ModelType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
AttnMaskType = ApexGuardDefaults()
LayerType = ApexGuardDefaults()
ModelType = ApexGuardDefaults()
try:
from megatron.core import ModelParallelConfig
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
__all__ = ["MegatronTransformerDecoderModule"]
class MegatronTransformerDecoderModule(MegatronModule, Exportable, MegatronDecoderModule):
"""Transformer decoder model.
"""
def __init__(
self,
config: ModelParallelConfig,
init_method,
output_layer_init_method,
hidden_size,
ffn_hidden_size,
num_layers,
num_attention_heads,
apply_query_key_layer_scaling=True,
kv_channels=None,
pre_process=True,
post_process=True,
megatron_amp_O2=False,
decoder_attn_mask_type=AttnMaskType.causal,
hidden_dropout=0.1,
attention_dropout=0.1,
ffn_dropout=0.0,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=1,
activations_checkpoint_granularity=None,
layernorm_epsilon=1e-5,
bias_activation_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
persist_layer_norm=False,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
bias=True,
normalization='layernorm',
transformer_block_type='pre_ln',
headscale=False,
parent_model_type=ModelType.encoder_or_decoder,
megatron_legacy=False,
normalize_attention_scores=True,
num_moe_experts=1,
moe_frequency=1,
moe_dropout=0.0,
position_embedding_type='learned_absolute',
use_flash_attention=False,
):
super(MegatronTransformerDecoderModule, self).__init__(config=config)
self.pre_process = pre_process
self.post_process = post_process
self.hidden_size = hidden_size
self.num_layers = num_layers
self.init_method = init_method
self.model_attn_mask_type = decoder_attn_mask_type
self.hidden_dropout = hidden_dropout
self.output_layer_init_method = output_layer_init_method
self.parent_model_type = parent_model_type
self.normalization = normalization
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
# Transformer.
self.model = ParallelTransformer(
config=config,
layer_type=LayerType.decoder,
init_method=self.init_method,
output_layer_init_method=self.output_layer_init_method,
num_layers=self.num_layers,
hidden_size=self.hidden_size,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
ffn_hidden_size=ffn_hidden_size,
self_attn_mask_type=self.model_attn_mask_type,
pre_process=self.pre_process,
post_process=self.post_process,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
activations_checkpoint_granularity=activations_checkpoint_granularity,
layernorm_epsilon=layernorm_epsilon,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
megatron_amp_O2=megatron_amp_O2,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
bias=bias,
normalization=normalization,
model_type=parent_model_type,
transformer_block_type=transformer_block_type,
headscale=headscale,
megatron_legacy=megatron_legacy,
normalize_attention_scores=normalize_attention_scores,
num_moe_experts=num_moe_experts,
moe_frequency=moe_frequency,
moe_dropout=moe_dropout,
position_embedding_type=position_embedding_type,
use_flash_attention=use_flash_attention,
)
self._model_key = 'model'
def set_input_tensor(self, input_tensor):
""" See megatron.model.transformer.set_input_tensor()"""
self.model.set_input_tensor(input_tensor)
def forward(
self,
dec_input,
dec_attn_mask,
enc_output,
enc_attn_mask,
layer_past=None,
get_key_value=False,
dec_self_attention_relative_position_bias=None,
dec_cross_attention_relative_position_bias=None,
):
# convert to Megatron mask
dec_attn_mask_3d = build_attention_mask_3d(
source_mask=dec_attn_mask, target_mask=dec_attn_mask, attn_mask_type=self.model_attn_mask_type,
)
enc_dec_attn_mask_3d = build_attention_mask_3d(
source_mask=dec_attn_mask, target_mask=enc_attn_mask, attn_mask_type=AttnMaskType.padding,
)
# transformer decoder
dec_output = self.model(
dec_input,
attn_mask_postprocess(dec_attn_mask_3d),
layer_past=layer_past,
get_key_value=get_key_value,
encoder_output=enc_output,
enc_dec_attn_mask=attn_mask_postprocess(enc_dec_attn_mask_3d),
self_attention_relative_position_bias=dec_self_attention_relative_position_bias,
cross_attention_relative_position_bias=dec_cross_attention_relative_position_bias,
)
return dec_output
def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
"""For easy load."""
state_dict_ = {}
state_dict_[self._model_key] = self.model.state_dict_for_save_checkpoint(destination, prefix, keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
        # Decoder.
if self._model_key in state_dict:
state_dict_ = state_dict[self._model_key]
# for backward compatibility.
elif 'transformer' in state_dict:
state_dict_ = state_dict['transformer']
else:
# for backward compatibility.
state_dict_ = {}
for key in state_dict.keys():
if 'transformer.' in key:
state_dict_[key.split('transformer.')[1]] = state_dict[key]
# for backward compatibility.
state_dict_self_attention = {}
for key in state_dict_.keys():
if '.attention.' in key:
state_dict_self_attention[key.replace(".attention.", ".self_attention.")] = state_dict_[key]
else:
state_dict_self_attention[key] = state_dict_[key]
state_dict_ = state_dict_self_attention
self.model.load_state_dict(state_dict_, strict=strict)
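# --- Illustrative sketch (not part of the original NeMo module) --------------
# A plain-PyTorch illustration of the two mask conversions performed in
# MegatronTransformerDecoderModule.forward above: a causal self-attention mask
# and a padding cross-attention mask, both expanded from [b, s] boolean masks
# (True = real token). This is not the NeMo `build_attention_mask_3d` helper,
# only a sketch of the idea with made-up tensors.
def _demo_build_3d_masks():
    import torch

    dec_mask = torch.tensor([[True, True, True, False]])  # [b, s_dec]
    enc_mask = torch.tensor([[True, True, False]])  # [b, s_enc]
    s_dec = dec_mask.size(1)
    # causal self-attention: query i may attend to key j <= i, padding excluded
    causal = torch.tril(torch.ones(s_dec, s_dec, dtype=torch.bool))
    dec_self_3d = causal.unsqueeze(0) & dec_mask.unsqueeze(1) & dec_mask.unsqueeze(2)
    # cross-attention: every decoder query may attend to any real encoder token
    enc_dec_3d = dec_mask.unsqueeze(2) & enc_mask.unsqueeze(1)  # [b, s_dec, s_enc]
    return dec_self_3d, enc_dec_3d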
| NeMo-main | nemo/collections/nlp/modules/common/megatron/megatron_transformer_decoder.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults
from nemo.utils import logging
try:
from apex.transformer.functional.fused_softmax import FusedScaleMaskSoftmax
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
if HAVE_APEX:
class MatchedScaleMaskSoftmax(FusedScaleMaskSoftmax):
"""
fused operation: scaling + mask + softmax
match the behavior of fused softmax and torch softmax.
This is a workaround for https://github.com/NVIDIA/apex/issues/1493.
Arguments:
            input_in_fp16: flag to indicate if input is in fp16 data format.
            input_in_bf16: flag to indicate if input is in bf16 data format.
            attn_mask_type: attention mask type (pad or causal)
            scaled_masked_softmax_fusion: flag to indicate whether the user wants to use softmax fusion
            mask_func: mask function to be applied.
            softmax_in_fp32: if true, softmax is performed at fp32 precision.
scale: scaling factor used in input tensor scaling.
"""
def forward_torch_softmax(self, input, mask):
if self.input_in_float16 and self.softmax_in_fp32:
input = input.float()
if self.scale is not None:
input = input * self.scale
mask_output = self.mask_func(input, mask) if mask is not None else input
probs = torch.nn.Softmax(dim=-1)(mask_output)
if mask is not None:
all_k_masked = mask.all(axis=-1)
zero_attention_mask = (1.0 - all_k_masked.type(probs.type()))[:, :, :, None]
probs = probs * zero_attention_mask
if self.input_in_float16 and self.softmax_in_fp32:
if self.input_in_fp16:
probs = probs.half()
else:
probs = probs.bfloat16()
return probs
else:
class MatchedScaleMaskSoftmax(ApexGuardDefaults):
def __init__(self):
super().__init__()
logging.warning(
"Apex was not found. ColumnLinear will not work. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
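# --- Illustrative sketch (not part of the original NeMo module) --------------
# A plain-PyTorch version of the behaviour that forward_torch_softmax above is
# matching: masked positions receive a large negative score before the softmax,
# and query rows whose keys are all masked are zeroed out afterwards instead of
# yielding a uniform distribution. All tensors below are made up for
# illustration.
def _demo_masked_softmax():
    import torch

    scores = torch.randn(1, 1, 2, 3)  # [b, np, sq, sk]
    mask = torch.tensor([[[[False, True, True], [True, True, True]]]])  # True = masked out
    masked_scores = scores.masked_fill(mask, -10000.0)
    probs = torch.nn.Softmax(dim=-1)(masked_scores)
    all_k_masked = mask.all(dim=-1)  # [b, np, sq]
    probs = probs * (1.0 - all_k_masked.type(probs.dtype))[:, :, :, None]
    return probs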
| NeMo-main | nemo/collections/nlp/modules/common/megatron/fused_softmax.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
try:
from apex._autocast_utils import _cast_if_autocast_enabled
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
def dropout_add(x, bias, residual, prob, training):
# type: (Tensor, None, Tensor, float, bool) -> Tensor
if bias is not None:
        raise ValueError("bias is expected to be None when using the bias_dropout function.")
out = torch.nn.functional.dropout(x, p=prob, training=training)
out = residual + out
return out
def bias_dropout_add(x, bias, residual, prob, training):
# type: (Tensor, Tensor, Tensor, float, bool) -> Tensor
out = torch.nn.functional.dropout(x + bias, p=prob, training=training)
out = residual + out
return out
@torch.jit.script
def bias_dropout_add_fused_train_(
x: torch.Tensor, bias: torch.Tensor, residual: torch.Tensor, prob: float
) -> torch.Tensor:
# type: (Tensor, Tensor, Tensor, float) -> Tensor
return bias_dropout_add(x, bias, residual, prob, True)
def bias_dropout_add_fused_train(x, bias, residual, prob):
# re-enable torch grad to enable fused optimization.
with torch.enable_grad():
args = _cast_if_autocast_enabled(x, bias, residual, prob)
with torch.cuda.amp.autocast(enabled=False):
return bias_dropout_add_fused_train_(*args)
@torch.jit.script
def bias_dropout_add_fused_inference_(
x: torch.Tensor, bias: torch.Tensor, residual: torch.Tensor, prob: float
) -> torch.Tensor:
# type: (Tensor, Tensor, Tensor, float) -> Tensor
return bias_dropout_add(x, bias, residual, prob, False)
def bias_dropout_add_fused_inference(x, bias, residual, prob):
args = _cast_if_autocast_enabled(x, bias, residual, prob)
with torch.cuda.amp.autocast(enabled=False):
return bias_dropout_add_fused_inference_(*args)
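# NOTE: minimal illustrative sketch, not used by NeMo itself. With dropout disabled
# (training=False) the un-fused helper above reduces to residual + (x + bias); the
# @torch.jit.script fused variants compute the same expression, only wrapped for JIT and
# autocast handling. Tensor shapes below are arbitrary demo values.
def _demo_bias_dropout_add():
    import torch
    x = torch.randn(4, 8)
    bias = torch.randn(8)
    residual = torch.randn(4, 8)
    out = bias_dropout_add(x, bias, residual, prob=0.5, training=False)
    assert torch.allclose(out, residual + (x + bias))
    return out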
| NeMo-main | nemo/collections/nlp/modules/common/megatron/fused_bias_dropout_add.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.collections.nlp.modules.common.megatron.language_model import Embedding
from nemo.collections.nlp.modules.common.megatron.layer_type import LayerType
from nemo.collections.nlp.modules.common.megatron.megatron_decoders import get_decoder_model
from nemo.collections.nlp.modules.common.megatron.megatron_encoders import get_encoder_model
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.mup.layer import MuReadout
from nemo.collections.nlp.modules.common.megatron.utils import (
ApexGuardDefaults,
init_method_normal,
scaled_init_method_normal,
)
from nemo.collections.nlp.parts import utils_funcs
try:
from apex.transformer.enums import ModelType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
AttnMaskType = ApexGuardDefaults()
ModelType = ApexGuardDefaults()
try:
from megatron.core import ModelParallelConfig, tensor_parallel
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
    HAVE_MEGATRON_CORE = False
__all__ = ["MegatronRetrievalTokenLevelEncoderDecoderModule"]
class MegatronRetrievalTokenLevelEncoderDecoderModule(MegatronModule):
"""Token-based (input/output is tokens) retrieval encoder-decoder model"""
def __init__(
self,
config: ModelParallelConfig,
vocab_size,
hidden_size,
max_position_embeddings,
num_attention_heads,
ffn_hidden_size,
apply_query_key_layer_scaling=True,
kv_channels=None,
num_tokentypes=0,
parallel_output=True,
pre_process=True,
post_process=True,
init_method_std=0.02,
fp16_cross_entropy=False,
megatron_amp_O2=False,
hidden_dropout=0.1,
attention_dropout=0.1,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=1,
layernorm_epsilon=1e-5,
persist_layer_norm=False,
bias_gelu_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
openai_gelu=False,
activation='gelu',
onnx_safe=False,
bias=True,
normalization='layernorm',
headscale=False,
transformer_block_type='pre_ln',
hidden_steps=-1,
add_encoder=True,
add_decoder=True,
chunk_size=64,
enc_num_layers=4, # total number of encoder layers
dec_num_layers=6, # total number of decoder layers
enc_cross_attention=[3], # layer numbers for cross attention
dec_cross_attention=[3, 5], # layer numbers for chunked cross attention
add_position_embedding=False,
tokenizer=None, # tokenizer
normalize_attention_scores=True,
activations_checkpoint_granularity=None,
megatron_lm_compatible=False,
version=1,
):
super(MegatronRetrievalTokenLevelEncoderDecoderModule, self).__init__()
if megatron_lm_compatible:
assert (
apply_query_key_layer_scaling
), "megatron lm compatible model has to set apply_query_key_layer_scaling"
assert add_position_embedding, "megatron lm compatible model has to set add_position_embedding"
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
self.fp16_cross_entropy = fp16_cross_entropy
self.precision = precision
self.add_encoder = add_encoder
self.add_decoder = add_decoder
self.add_abs_position_embedding = add_position_embedding # whether use absolute position embedding
self.tokenizer = tokenizer
self.eod_id = tokenizer.eos_id
self.transformer_block_type = transformer_block_type
self.num_chunked_cross_attention = len(dec_cross_attention)
self.megatron_lm_compatible = megatron_lm_compatible
self.dtype = utils_funcs.torch_dtype_from_precision(precision, megatron_amp_O2)
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
if pre_process:
self.encoder_embedding = Embedding(
config=config,
hidden_size=hidden_size,
vocab_size=vocab_size,
max_sequence_length=max_position_embeddings,
init_method=init_method_normal(init_method_std),
num_tokentypes=num_tokentypes,
embedding_dropout_prob=hidden_dropout,
position_embedding_type='learned_absolute' if add_position_embedding else '',
transpose_batch_sequence=False,
dtype=self.dtype,
)
self._embedding_key = "embedding"
encoder_init = init_method_normal(init_method_std)
encoder_scaled_init = scaled_init_method_normal(init_method_std, dec_num_layers)
pre_decoder_init = init_method_normal(init_method_std)
pre_decoder_scaled_init = scaled_init_method_normal(init_method_std, dec_num_layers)
post_decoder_init = init_method_normal(init_method_std)
post_decoder_scaled_init = scaled_init_method_normal(init_method_std, dec_num_layers)
if add_encoder:
enc_layer_types = []
for i in range(enc_num_layers):
if i in enc_cross_attention:
enc_layer_types.append(LayerType.retrieval_encoder)
else:
enc_layer_types.append(LayerType.encoder)
self.encoder = get_encoder_model(
config=config,
arch="retro",
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
num_layers=enc_num_layers,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
init_method=encoder_init,
scaled_init_method=encoder_scaled_init,
pre_process=pre_process,
post_process=False
if megatron_lm_compatible
else post_process, # megatron lm model has no final layer_norm
init_method_std=init_method_std,
megatron_amp_O2=megatron_amp_O2,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
activations_checkpoint_granularity=activations_checkpoint_granularity,
layernorm_epsilon=layernorm_epsilon,
bias_activation_fusion=bias_gelu_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
hidden_steps=hidden_steps,
activation=activation,
bias=bias,
normalization=normalization,
transformer_block_type=transformer_block_type,
headscale=headscale,
parent_model_type=ModelType.encoder_and_decoder,
layer_type=enc_layer_types,
chunk_size=chunk_size,
layer_number_offset=0,
normalize_attention_scores=normalize_attention_scores,
turn_off_rop=megatron_lm_compatible,
version=version,
)
self._encoder_key = "encoder"
if add_decoder:
pre_decoder_num_layers = min(dec_cross_attention)
pre_decoder_layer_types = []
for i in range(pre_decoder_num_layers):
pre_decoder_layer_types.append(LayerType.encoder)
pre_decoder_layer_types.append(LayerType.decoder_pre_mlp)
post_decoder_num_layers = dec_num_layers - pre_decoder_num_layers
post_decoder_layer_types = []
# the first layer in post decoder has to be chunked cross attention without self attention
assert pre_decoder_num_layers in dec_cross_attention
for i in range(post_decoder_num_layers):
if i == 0:
post_decoder_layer_types.append(LayerType.retrieval_decoder_after_self_attn)
elif i + pre_decoder_num_layers in dec_cross_attention:
post_decoder_layer_types.append(LayerType.retrieval_decoder)
else:
post_decoder_layer_types.append(LayerType.encoder)
            # the pre_decoder processes the decoder inputs into the context (H in the paper) consumed by the encoder
self.pre_decoder = get_decoder_model(
config=config,
arch="retro",
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
num_layers=pre_decoder_num_layers + 1,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
init_method=pre_decoder_init,
scaled_init_method=pre_decoder_scaled_init,
pre_process=pre_process,
post_process=False, # no need for post process
init_method_std=init_method_std,
megatron_amp_O2=megatron_amp_O2,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
activations_checkpoint_granularity=activations_checkpoint_granularity,
layernorm_epsilon=layernorm_epsilon,
bias_activation_fusion=bias_gelu_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
hidden_steps=hidden_steps,
activation=activation,
bias=bias,
normalization=normalization,
transformer_block_type=transformer_block_type,
headscale=headscale,
parent_model_type=ModelType.encoder_and_decoder,
layer_type=pre_decoder_layer_types,
chunk_size=chunk_size,
layer_number_offset=0,
normalize_attention_scores=normalize_attention_scores,
turn_off_rop=megatron_lm_compatible,
version=version,
)
# it is where the chunked cross attention happens
self.post_decoder = get_decoder_model(
config=config,
arch="retro",
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
num_layers=post_decoder_num_layers,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
init_method=post_decoder_init,
scaled_init_method=post_decoder_scaled_init,
pre_process=False, # directly take the pre_decoder output, skip preprocess
post_process=post_process,
init_method_std=init_method_std,
megatron_amp_O2=megatron_amp_O2,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
activations_checkpoint_granularity=activations_checkpoint_granularity,
layernorm_epsilon=layernorm_epsilon,
bias_activation_fusion=bias_gelu_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
hidden_steps=hidden_steps,
activation=activation,
bias=bias,
normalization=normalization,
headscale=headscale,
transformer_block_type=transformer_block_type,
parent_model_type=ModelType.encoder_and_decoder,
layer_type=post_decoder_layer_types,
chunk_size=chunk_size,
layer_number_offset=pre_decoder_num_layers + 1,
normalize_attention_scores=normalize_attention_scores,
turn_off_rop=megatron_lm_compatible,
version=version,
)
self._pre_decoder_key = "pre_decoder"
self._post_decoder_key = "post_decoder"
self.initialize_word_embeddings(
init_method=init_method_normal(init_method_std), vocab_size=vocab_size, hidden_size=hidden_size
)
if add_decoder and post_process:
self.tokens_head = MuReadout(self.word_embeddings_weight().size(0), parallel_output)
self._tokens_head_key = 'tokens_head'
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
self.input_tensor = input_tensor
def forward(
self,
input_ids,
input_attn_mask,
retrieved_ids,
retrieved_attn_mask,
token_type_ids=None,
labels=None,
input_emb=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
neighbors=None,
position_ids=None,
):
"""
Return value is per token / per dimension (i.e., non collapsed loss value)
"""
eod_positions = None
retrieved_emb = None
if input_ids is not None and self.eod_id is not None and not self.megatron_lm_compatible:
# do not reset attention for megatron lm compatible model
eod_positions = torch.where(input_ids == self.eod_id)
if input_emb is None:
if self.pre_process and self.add_encoder:
# encoder embeddings
if self.add_abs_position_embedding:
input_position_ids = position_ids
else:
input_position_ids = None
input_emb = self.encoder_embedding(input_ids, input_position_ids, token_type_ids=token_type_ids)
else:
input_emb = None
if retrieved_ids is not None:
if self.add_abs_position_embedding:
seq_length = retrieved_ids.size(-1)
retrieved_position_ids = torch.arange(seq_length, dtype=torch.long, device=retrieved_ids.device)
retrieved_position_ids = retrieved_position_ids.unsqueeze(0).expand_as(retrieved_ids).clone()
else:
retrieved_position_ids = None
retrieved_emb = self.encoder_embedding(retrieved_ids, retrieved_position_ids)
if self.add_decoder:
hidden = self.pre_decoder(
input_emb,
input_attn_mask,
eod_positions=eod_positions,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
)
# hidden is a tuple, (layernorm_input, layernorm_output)
self.post_decoder.set_input_tensor(hidden)
encoder_output = hidden[1].transpose(0, 1).contiguous()
if self.add_encoder:
if retrieved_emb is not None and neighbors is None:
neighbors = retrieved_emb.shape[2]
retrieved_emb = self.encoder(
retrieved_emb,
retrieved_attn_mask,
context_attn_mask=input_attn_mask,
encoder_output=encoder_output,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
neighbors=neighbors,
)
if self.add_decoder:
dec_output = self.post_decoder(
hidden,
input_attn_mask,
retrieved_attn_mask=retrieved_attn_mask,
retrieved_emb=retrieved_emb,
eod_positions=eod_positions,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
)
# only transpose it for post_ln
token_logits = self.tokens_head(dec_output, self.word_embeddings_weight())
if labels is not None:
# [b, s] -> [s, b]
labels = labels.transpose(0, 1).contiguous()
# tensor_parallel.vocab_parallel_cross_entropy performs log_softmax and return log p(x_i|z) per token i
if self.fp16_cross_entropy:
assert token_logits.dtype == torch.half
tokens_loss = tensor_parallel.vocab_parallel_cross_entropy(token_logits, labels)
else:
tokens_loss = tensor_parallel.vocab_parallel_cross_entropy(token_logits.float(), labels)
# [s, b] -> [b, s]
tokens_loss = tokens_loss.transpose(0, 1).contiguous()
return tokens_loss
else:
# [s, b, h] -> [b, s, h]
token_logits = token_logits.transpose(0, 1).contiguous()
return token_logits
def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
        state_dict_[self._embedding_key] = self.encoder_embedding.state_dict_for_save_checkpoint(
            destination, prefix, keep_vars
        )
state_dict_[self._encoder_key] = self.encoder.state_dict_for_save_checkpoint(destination, prefix, keep_vars)
state_dict_[self._pre_decoder_key] = self.pre_decoder.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
state_dict_[self._post_decoder_key] = self.post_decoder.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
state_dict_[self._tokens_head_key] = self.tokens_head.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
        self.encoder_embedding.load_state_dict(state_dict[self._embedding_key], strict=strict)
self.encoder.load_state_dict(state_dict[self._encoder_key], strict=strict)
self.pre_decoder.load_state_dict(state_dict[self._pre_decoder_key], strict=strict)
self.post_decoder.load_state_dict(state_dict[self._post_decoder_key], strict=strict)
self.tokens_head.load_state_dict(state_dict[self._tokens_head_key], strict=strict)
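# NOTE: minimal illustrative sketch, not executed by NeMo. It only spells out the tensor
# shapes the forward() above expects, as described in its comments: decoder inputs are
# [batch, seq_len] with seq_len a multiple of chunk_size, and retrieved neighbors are
# [batch, num_chunks, neighbors, retrieval_seq_len] where num_chunks = seq_len // chunk_size
# and retrieval_seq_len is typically 2 * chunk_size (the retrieved chunk plus its
# continuation). All sizes below are demo assumptions.
def _demo_retro_input_shapes(batch=2, seq_len=128, chunk_size=64, neighbors=2, vocab_size=32000):
    import torch
    assert seq_len % chunk_size == 0
    num_chunks = seq_len // chunk_size
    input_ids = torch.randint(0, vocab_size, (batch, seq_len))
    input_attn_mask = torch.ones(batch, seq_len, dtype=torch.bool)
    retrieved_ids = torch.randint(0, vocab_size, (batch, num_chunks, neighbors, 2 * chunk_size))
    retrieved_attn_mask = torch.ones(batch, num_chunks, neighbors, 2 * chunk_size, dtype=torch.bool)
    return input_ids, input_attn_mask, retrieved_ids, retrieved_attn_mask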
| NeMo-main | nemo/collections/nlp/modules/common/megatron/retrieval_token_level_encoder_decoder.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Retrieval Transformer."""
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.position_embedding import RotaryEmbedding
from nemo.collections.nlp.modules.common.megatron.transformer import ParallelTransformer
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults, build_attention_mask_3d
try:
from apex.transformer.enums import AttnMaskType, ModelType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
# fake missing classes with None attributes
AttnMaskType = ApexGuardDefaults()
ModelType = ApexGuardDefaults()
HAVE_APEX = False
try:
from megatron.core import ModelParallelConfig
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
MIN_DIM_HEAD = 32
class MegatronRetrievalTransformerEncoderModule(MegatronModule):
"""Transformer encoder model.
"""
def __init__(
self,
config: ModelParallelConfig,
init_method,
output_layer_init_method,
hidden_size,
ffn_hidden_size,
num_layers,
num_attention_heads,
apply_query_key_layer_scaling=True,
kv_channels=None,
layer_type=[],
pre_process=True,
post_process=True,
megatron_amp_O2=False,
hidden_dropout=0.1,
attention_dropout=0.1,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=1,
activations_checkpoint_granularity=None,
layernorm_epsilon=1e-5,
bias_activation_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
persist_layer_norm=False,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
bias=True,
normalization='layernorm',
transformer_block_type='pre_ln',
parent_model_type=ModelType.encoder_or_decoder,
chunk_size=64,
        layer_number_offset=0,  # this is used only for attention norm_factor scaling
normalize_attention_scores=True,
megatron_legacy=False,
turn_off_rop=False,
version=1, # model version
):
super(MegatronRetrievalTransformerEncoderModule, self).__init__(config=config)
self.transformer_block_type = transformer_block_type
self.pre_process = pre_process
self.post_process = post_process
self.hidden_size = hidden_size
self.num_layers = num_layers
self.init_method = init_method
self.hidden_dropout = hidden_dropout
self.output_layer_init_method = output_layer_init_method
self.parent_model_type = parent_model_type
self.turn_off_rop = turn_off_rop
self.version = version
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
# Transformer.
self.model = ParallelTransformer(
config=config,
init_method=self.init_method,
output_layer_init_method=self.output_layer_init_method,
num_layers=self.num_layers,
hidden_size=self.hidden_size,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
layer_type=layer_type,
ffn_hidden_size=ffn_hidden_size,
self_attn_mask_type=AttnMaskType.padding,
pre_process=self.pre_process,
post_process=self.post_process,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
activations_checkpoint_granularity=activations_checkpoint_granularity,
layernorm_epsilon=layernorm_epsilon,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
megatron_amp_O2=megatron_amp_O2,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
bias=bias,
normalization=normalization,
transformer_block_type=transformer_block_type,
model_type=parent_model_type,
chunk_size=chunk_size,
layer_number_offset=layer_number_offset,
normalize_attention_scores=normalize_attention_scores,
megatron_legacy=megatron_legacy,
)
rot_dim = hidden_size // num_attention_heads if kv_channels is None else kv_channels
# partial rotary embeddings, which is better than full rotary
# Wang and Komatsuzaki et al https://github.com/kingoflolz/mesh-transformer-jax/
if not turn_off_rop:
self.rotary_pos_emb = RotaryEmbedding(min(rot_dim, MIN_DIM_HEAD))
self.chunk_size = chunk_size
self._model_key = 'model'
def set_input_tensor(self, input_tensor):
""" See megatron.model.transformer.set_input_tensor()"""
self.model.set_input_tensor(input_tensor)
def _allocate_memory(self, *shape, dtype):
return torch.empty(*shape, dtype=dtype, device=torch.cuda.current_device())
def forward(
self,
enc_input,
enc_attn_mask,
context_attn_mask=None,
encoder_output=None,
layer_past=None,
get_key_value=False,
        set_inference_key_value_memory=False,  # when doing inference, set this to True once to allocate the full cache; on later calls set it to False for incremental decoding
inference_max_sequence_len=None,
neighbors=2,
):
# expected enc_input shape [batch, num_chunks, num_neighbors, retrieval_seq_len, dim]
# expected enc_attn_mask shape [batch, num_chunks, num_neighbors, retrieval_seq_len]
# expected encoder_output shape [batch, seq_len, dim]
# batch, seq_len, dim
b, n, dim = encoder_output.shape
if set_inference_key_value_memory:
# run once to setup the cache
chunk_start = 0
num_seq_chunks = n // self.chunk_size
num_chunks = inference_max_sequence_len // self.chunk_size
self.cache_output = self._allocate_memory(
b, num_chunks, neighbors, self.chunk_size * 2, dim, dtype=encoder_output.dtype
)
self.seq_pos_in_chunk = n
self.current_chunk = n // self.chunk_size
self.encoder_output = self._allocate_memory(b, self.chunk_size, dim, dtype=encoder_output.dtype)
self.context_attn_mask = self._allocate_memory(b, self.chunk_size, dtype=context_attn_mask.dtype)
chunk_beg = self.chunk_size * num_seq_chunks
chunk_end = self.chunk_size * num_seq_chunks + self.seq_pos_in_chunk % self.chunk_size
# store the remainders
self.encoder_output[:, : self.seq_pos_in_chunk % self.chunk_size, :] = encoder_output[
:, chunk_beg:chunk_end, :
]
self.context_attn_mask[:, : self.seq_pos_in_chunk % self.chunk_size] = context_attn_mask[
:, chunk_beg:chunk_end
]
elif inference_max_sequence_len is not None:
# second time of running
# only support one token at a time
assert n == 1
self.seq_pos_in_chunk += n
self.current_chunk = self.seq_pos_in_chunk // self.chunk_size
# if exceed the chunk size
pos_beg = (self.seq_pos_in_chunk - 1) % self.chunk_size
# if self.seq_pos_in_chunk - 1 >= self.chunk_size:
# self.current_chunk += 1
# self.seq_pos_in_chunk -= self.chunk_size
chunk_start = self.current_chunk - 1
self.encoder_output[:, pos_beg : pos_beg + 1, :] = encoder_output
self.context_attn_mask[:, pos_beg : pos_beg + 1] = context_attn_mask[
:, self.seq_pos_in_chunk - 1 : self.seq_pos_in_chunk
]
encoder_output = self.encoder_output[:, : pos_beg + 1, :]
context_attn_mask = self.context_attn_mask[:, : pos_beg + 1]
num_seq_chunks = 1
            if self.seq_pos_in_chunk % self.chunk_size != 0:
# still accumulate the encoder_output
# return the cached results
if self.current_chunk == 0:
return None
return self.cache_output[:, : self.current_chunk]
if enc_input is not None:
# only need one chunk for the later calculation
enc_input = enc_input[:, self.current_chunk - 1 : self.current_chunk]
enc_attn_mask = enc_attn_mask[:, self.current_chunk - 1 : self.current_chunk]
if enc_input is None:
return None
_, k, r, rn, _ = enc_input.shape
assert r == neighbors
if inference_max_sequence_len is None:
num_seq_chunks = n // self.chunk_size
assert k == num_seq_chunks, f'sequence requires {num_seq_chunks} retrieved chunks, but only {k} passed in'
else:
pass
seq_index = num_seq_chunks * self.chunk_size
retrieved = rearrange(enc_input, 'b k r n d -> n (b k r) d')
enc_attn_mask = rearrange(enc_attn_mask, 'b k r n -> (b k r) n')
# embed_as_context = repeat(encoder_output[:, :seq_index], 'b (k n) d -> (b k r) n d', n=self.chunk_size, r=r)
# context_attn_mask = repeat(context_attn_mask[:, :seq_index], 'b (k n) -> (b k r) n', n=self.chunk_size, r=r)
if inference_max_sequence_len is not None and not set_inference_key_value_memory:
embed_as_context = repeat(encoder_output[:, :seq_index], 'b (k n) d -> n (b k r) d', n=pos_beg + 1, r=r)
context_attn_mask = repeat(context_attn_mask[:, :seq_index], 'b (k n) -> (b k r) n', n=pos_beg + 1, r=r)
else:
embed_as_context = repeat(
encoder_output[:, :seq_index], 'b (k n) d -> n (b k r) d', n=self.chunk_size, r=r
)
context_attn_mask = repeat(
context_attn_mask[:, :seq_index], 'b (k n) -> (b k r) n', n=self.chunk_size, r=r
)
if not self.turn_off_rop:
if inference_max_sequence_len is not None and not set_inference_key_value_memory:
cross_attn_k_pos_emb = self.rotary_pos_emb(n % self.chunk_size, offset=pos_beg)
else:
cross_attn_k_pos_emb = self.rotary_pos_emb(self.chunk_size, offset=0)
cross_attn_q_pos_emb = self.rotary_pos_emb(rn, offset=0)
attn_pos_emb = (cross_attn_q_pos_emb, cross_attn_q_pos_emb, cross_attn_k_pos_emb)
else:
attn_pos_emb = None
        # convert to Megatron mask
enc_attn_mask_3d = build_attention_mask_3d(
source_mask=enc_attn_mask, target_mask=enc_attn_mask, attn_mask_type=AttnMaskType.padding,
)
enc_attn_mask_3d = enc_attn_mask_3d[:, None, :, :]
enc_dec_attn_mask_3d = build_attention_mask_3d(
source_mask=enc_attn_mask, target_mask=context_attn_mask, attn_mask_type=AttnMaskType.padding,
)
enc_dec_attn_mask_3d = enc_dec_attn_mask_3d[:, None, :, :]
# transformer encoder
enc_output = self.model(
retrieved,
enc_attn_mask_3d,
layer_past=layer_past,
get_key_value=get_key_value,
encoder_output=embed_as_context,
enc_dec_attn_mask=enc_dec_attn_mask_3d,
rotary_pos_emb=attn_pos_emb,
)
# revert back to original retrieved shape
enc_output = rearrange(enc_output, 'n (b k r) d -> b k r n d', b=b, k=k)
if inference_max_sequence_len is not None:
# update encoded for current chunk
self.cache_output[:, chunk_start : self.current_chunk, :, :, :] = enc_output
# read all encodings
enc_output = self.cache_output[:, : self.current_chunk]
return enc_output
def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
"""For easy load."""
state_dict_ = {}
state_dict_[self._model_key] = self.model.state_dict_for_save_checkpoint(destination, prefix, keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
# Encoder.
if self._model_key in state_dict:
state_dict_ = state_dict[self._model_key]
self.model.load_state_dict(state_dict_, strict=strict)
class MegatronRetrievalTransformerDecoderModule(MegatronModule):
"""Transformer decoder model.
"""
def __init__(
self,
config: ModelParallelConfig,
init_method,
output_layer_init_method,
hidden_size,
ffn_hidden_size,
num_layers,
num_attention_heads,
apply_query_key_layer_scaling=True,
kv_channels=None,
layer_type=[],
pre_process=True,
post_process=True,
megatron_amp_O2=False,
hidden_dropout=0.1,
attention_dropout=0.1,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=1,
activations_checkpoint_granularity=None,
layernorm_epsilon=1e-5,
bias_activation_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
persist_layer_norm=False,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
bias=True,
normalization='layernorm',
transformer_block_type='pre_ln',
parent_model_type=ModelType.encoder_or_decoder,
chunk_size=64,
        layer_number_offset=0,  # this is used only for attention norm_factor scaling
normalize_attention_scores=True,
megatron_legacy=False,
turn_off_rop=False,
version=1, # model version
):
super(MegatronRetrievalTransformerDecoderModule, self).__init__(config=config)
self.pre_process = pre_process
self.post_process = post_process
self.hidden_size = hidden_size
self.num_layers = num_layers
self.init_method = init_method
self.hidden_dropout = hidden_dropout
self.output_layer_init_method = output_layer_init_method
self.parent_model_type = parent_model_type
self.turn_off_rop = turn_off_rop
self.version = version
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
# Transformer.
self.model = ParallelTransformer(
config=config,
init_method=self.init_method,
output_layer_init_method=self.output_layer_init_method,
num_layers=self.num_layers,
hidden_size=self.hidden_size,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
layer_type=layer_type,
ffn_hidden_size=ffn_hidden_size,
self_attn_mask_type=AttnMaskType.padding, # we use attention mask reset, enforce to use padding AttnMaskType, otherwise it has numeric issues
pre_process=self.pre_process,
post_process=self.post_process,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
activations_checkpoint_granularity=activations_checkpoint_granularity,
layernorm_epsilon=layernorm_epsilon,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
megatron_amp_O2=megatron_amp_O2,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
bias=bias,
normalization=normalization,
transformer_block_type=transformer_block_type,
model_type=parent_model_type,
chunk_size=chunk_size,
layer_number_offset=layer_number_offset,
normalize_attention_scores=normalize_attention_scores,
megatron_legacy=megatron_legacy,
)
rot_dim = hidden_size // num_attention_heads if kv_channels is None else kv_channels
# partial rotary embeddings, which is better than full rotary
# Wang and Komatsuzaki et al https://github.com/kingoflolz/mesh-transformer-jax/
if not turn_off_rop:
self.rotary_pos_emb = RotaryEmbedding(min(rot_dim, MIN_DIM_HEAD))
self.chunk_size = chunk_size
self._model_key = 'model'
def set_input_tensor(self, input_tensor):
""" See megatron.model.transformer.set_input_tensor()"""
self.model.set_input_tensor(input_tensor)
def _calculate_dec_att_mask(self, dec_attn_mask, eod_positions):
        # convert to Megatron mask
# customized attention mask, starts with causal attention mask
dec_attn_mask_3d = build_attention_mask_3d(
source_mask=dec_attn_mask, target_mask=dec_attn_mask, attn_mask_type=AttnMaskType.causal,
)
# add the attention mask reset
if eod_positions is not None:
# to mask out the token ids [id, id, eod, id, pad, eod, id, id]
# so attention is not across eod, mask should be:
# [false, true, true, true, true, true, true, true]
# [false, false, true, true, true, true, true, true]
# [false, false, false,true, true, true, true, true]
# [true, true, true, false, true, true, true, true]
# [true, true, true, true, true, true, true, true]
# [true, true, true, false, true, false, true, true]
# [true, true, true, true, true, true, false, true]
# [true, true, true, true, true, true, false, false]
for batch, eod_pos in zip(*eod_positions):
eod_plus_one = eod_pos.item() + 1
dec_attn_mask_3d[batch][eod_plus_one:, :eod_plus_one] = True
dec_attn_mask_3d = dec_attn_mask_3d[:, None, :, :]
return dec_attn_mask_3d
def forward(
self,
dec_input,
dec_attn_mask,
retrieved_attn_mask=None,
retrieved_emb=None,
layer_past=None,
get_key_value=False,
eod_positions=None, # this is a tuple of eod positions returned from tensor.where(tensor == eod_id)
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
):
# expected dec_input shape [batch, seq_len, dim]
# expected dec_attn_mask shape [batch, seq_len]
        # expected retrieved_emb shape [batch, num_chunks, num_neighbors, retrieval_seq_len, dim]
        # expected retrieved_attn_mask shape [batch, num_chunks, num_neighbors, retrieval_seq_len]
# batch, seq_len, dim
if isinstance(dec_input, tuple):
n, _, _ = dec_input[1].shape
else:
_, n, _ = dec_input.shape
if set_inference_key_value_memory:
# seq_index = (n // chunk_size) * chunk_size
self.current_len = n
num_seq_chunks = self.current_len // self.chunk_size
elif inference_max_sequence_len is not None:
# only handles single token increment
assert n == 1
self.current_len += n
num_seq_chunks = self.current_len // self.chunk_size
else:
# this is normal forward without inference
num_seq_chunks = n // self.chunk_size
if retrieved_emb is not None:
b, k, r, rn, dim = retrieved_emb.shape
assert (
k == num_seq_chunks
), f'sequence requires {num_seq_chunks} retrieved chunks, but only {k} passed in' # need to add extra chunk size, since it will be shifted
if not self.turn_off_rop:
if set_inference_key_value_memory:
self_attn_emb = self.rotary_pos_emb(self.current_len)
elif inference_max_sequence_len is not None:
self_attn_emb = self.rotary_pos_emb(self.current_len)
else:
self_attn_emb = self.rotary_pos_emb(n)
if retrieved_emb is not None:
# -63, -62, ... 63 will be cut into -> [0, ... 63] in the chunk cross attention layer
cross_attn_q_pos_emb = self.rotary_pos_emb(self.chunk_size * 2 - 1, offset=-self.chunk_size + 1)
if self.version == 1:
cross_attn_k_pos_emb = self.rotary_pos_emb(rn, offset=0)
elif self.version > 1:
                    # the first 64 tokens in retrieved are from the last chunk; align the continuation part with the query tokens
# use the following in the future. [-63, -62, ..., 63, 64]
cross_attn_k_pos_emb = self.rotary_pos_emb(rn, offset=-self.chunk_size + 1)
else:
raise ValueError(f'incorrect version number {self.version}')
attn_pos_emb = (self_attn_emb, cross_attn_q_pos_emb, cross_attn_k_pos_emb)
else:
attn_pos_emb = (self_attn_emb, None, None)
else:
attn_pos_emb = None
dec_attn_mask_3d = self._calculate_dec_att_mask(dec_attn_mask, eod_positions)
if retrieved_emb is not None:
            # need to shift the dec_attn_mask as the first causal_padding elements are ignored
            # also pad it to be a multiple of self.chunk_size
            causal_padding = self.chunk_size - 1
            remainder = (self.chunk_size - (dec_attn_mask.shape[1] + 1)) % self.chunk_size
            dec_attn_mask = F.pad(dec_attn_mask, (-causal_padding, remainder), value=False)
dec_attn_mask = rearrange(dec_attn_mask, 'b (k n) -> (b k) n', k=k)
retrieved_attn_mask = rearrange(retrieved_attn_mask, 'b k r n -> (b k) (r n)')
enc_dec_attn_mask_3d = build_attention_mask_3d(
source_mask=dec_attn_mask, target_mask=retrieved_attn_mask, attn_mask_type=AttnMaskType.padding,
)
enc_dec_attn_mask_3d = enc_dec_attn_mask_3d[:, None, :, :]
else:
enc_dec_attn_mask_3d = None
# transformer encoder
if not isinstance(dec_input, tuple):
dec_input = rearrange(dec_input, 'b s d -> s b d').contiguous()
enc_output = self.model(
dec_input,
dec_attn_mask_3d,
layer_past=layer_past,
get_key_value=get_key_value,
encoder_output=None,
retrieved_emb=retrieved_emb,
enc_dec_attn_mask=enc_dec_attn_mask_3d,
rotary_pos_emb=attn_pos_emb,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
)
# enc_output = rearrange(dec_input, 's b d -> b s d')
return enc_output
def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
"""For easy load."""
state_dict_ = {}
state_dict_[self._model_key] = self.model.state_dict_for_save_checkpoint(destination, prefix, keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
# Encoder.
if self._model_key in state_dict:
state_dict_ = state_dict[self._model_key]
self.model.load_state_dict(state_dict_, strict=strict)
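# NOTE: minimal illustrative sketch, independent of the modules above. The retrieval encoder
# folds the chunk (k) and neighbor (r) axes into the batch axis and puts the sequence axis
# first before calling ParallelTransformer, then unfolds the output; the two rearranges are
# exact inverses. The dummy tensor sizes below are demo assumptions.
def _demo_chunk_neighbor_rearrange(b=2, k=3, r=2, n=128, d=16):
    import torch
    from einops import rearrange
    retrieved = torch.randn(b, k, r, n, d)
    folded = rearrange(retrieved, 'b k r n d -> n (b k r) d')  # layout consumed by the transformer
    unfolded = rearrange(folded, 'n (b k r) d -> b k r n d', b=b, k=k)
    assert torch.equal(unfolded, retrieved)
    return folded.shape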
| NeMo-main | nemo/collections/nlp/modules/common/megatron/retrieval_transformer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gradient clipping."""
import itertools
import torch
from torch import inf
from nemo.collections.nlp.modules.common.megatron.module import param_is_not_shared
from nemo.utils import logging
try:
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
HAVE_APEX_DISTRIBUTED_ADAM = False
if HAVE_APEX:
try:
from apex.contrib.optimizers.distributed_fused_adam import DistributedFusedAdam
HAVE_APEX_DISTRIBUTED_ADAM = True
except (ImportError, ModuleNotFoundError):
pass
try:
from megatron.core import parallel_state
from megatron.core.tensor_parallel.layers import param_is_not_tensor_parallel_duplicate
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
def clip_grad_norm_fp32(parameters, max_norm, norm_type=2):
"""Clips gradient norm of an iterable of parameters whose gradients
are in fp32.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
# Filter parameters based on:
# - grad should not be none
# - parameter should not be shared
# - should not be a replica due to tensor model parallelism
grads = []
grads_for_norm = []
for param in parameters:
grad_not_none = param.grad is not None
is_not_shared = param_is_not_shared(param)
is_not_tp_duplicate = param_is_not_tensor_parallel_duplicate(param)
if grad_not_none:
grad = param.grad.detach()
# Make sure the grads are in fp32
assert isinstance(param.grad, torch.cuda.FloatTensor)
grads.append(grad)
if grad_not_none and is_not_shared and is_not_tp_duplicate:
grads_for_norm.append(grad)
if not grads_for_norm:
logging.warning("No grads found, consider disabling gradient clipping")
# Norm parameters.
max_norm = float(max_norm)
norm_type = float(norm_type)
total_norm = 0.0
# Calculate norm.
if norm_type == inf:
if grads_for_norm: # (@adithyare) grads_for_norm can be empty for adapter training with pp>1
total_norm = max(grad.abs().max() for grad in grads_for_norm)
total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
# Take max across all model-parallel GPUs.
torch.distributed.all_reduce(
total_norm_cuda, op=torch.distributed.ReduceOp.MAX, group=parallel_state.get_model_parallel_group()
)
total_norm = total_norm_cuda[0].item()
else:
if norm_type == 2.0:
dummy_overflow_buf = torch.cuda.IntTensor([0])
# Use apex's multi-tensor applier for efficiency reasons.
# Multi-tensor applier takes a function and a list of list
# and performs the operation on that list all in one kernel.
if grads_for_norm: # (@adithyare) grads_for_norm can be empty for adapter training with pp>1
grad_norm, _ = multi_tensor_applier(
amp_C.multi_tensor_l2norm, dummy_overflow_buf, [grads_for_norm], False # no per-parameter norm
)
else:
grad_norm = 0.0
# Since we will be summing across data parallel groups,
# we need the pow(norm-type).
total_norm = grad_norm ** norm_type
else:
for grad in grads_for_norm:
grad_norm = torch.norm(grad, norm_type)
total_norm += grad_norm ** norm_type
# Sum across all model-parallel GPUs.
total_norm_cuda = torch.cuda.FloatTensor(
[float(total_norm)]
) # (@adithyare) total_norm can be a float at this point so we convert it to cuda.FloatTensor
torch.distributed.all_reduce(
total_norm_cuda, op=torch.distributed.ReduceOp.SUM, group=parallel_state.get_model_parallel_group()
)
total_norm = total_norm_cuda[0].item()
total_norm = total_norm ** (1.0 / norm_type)
# Scale.
clip_coeff = max_norm / (total_norm + 1.0e-6)
if clip_coeff < 1.0 and grads: # (@adithyare) grads can be empty for adapter training.
dummy_overflow_buf = torch.cuda.IntTensor([0])
multi_tensor_applier(amp_C.multi_tensor_scale, dummy_overflow_buf, [grads, grads], clip_coeff)
return total_norm
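# NOTE: minimal illustrative sketch of the math above without Apex's multi_tensor_applier or
# the model-parallel all-reduce: compute the global L2 norm of all gradients, then scale the
# gradients in place by max_norm / (total_norm + eps) whenever that coefficient is below 1.
# It is not a drop-in replacement for clip_grad_norm_fp32.
def _demo_clip_grad_norm_l2(parameters, max_norm, eps=1.0e-6):
    grads = [p.grad.detach() for p in parameters if p.grad is not None]
    if not grads:
        return 0.0
    total_norm = torch.norm(torch.stack([torch.norm(g, 2.0) for g in grads]), 2.0)
    clip_coeff = float(max_norm) / (total_norm + eps)
    if clip_coeff < 1.0:
        for g in grads:
            g.mul_(clip_coeff)
    return total_norm.item()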
def count_zeros_fp32(parameters):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
# Filter parameters based on:
# - grad should not be none
# - parameter should not be shared
# - should not be a replica due to tensor model parallelism
total_num_zeros = 0.0
for param in parameters:
grad_not_none = param.grad is not None
is_not_shared = param_is_not_shared(param)
is_not_tp_duplicate = param_is_not_tensor_parallel_duplicate(param)
if grad_not_none and is_not_shared and is_not_tp_duplicate:
grad = param.grad.detach()
num_zeros = grad.numel() - torch.count_nonzero(grad)
total_num_zeros = num_zeros + total_num_zeros
# Sum across all model-parallel GPUs.
torch.distributed.all_reduce(
total_num_zeros, op=torch.distributed.ReduceOp.SUM, group=parallel_state.get_model_parallel_group()
)
total_num_zeros = total_num_zeros.item()
return total_num_zeros
def clip_grad_norm_distributed_optimizer(optimizer, max_norm, norm_type=2):
"""Clips gradient norm of parameters in distributed optimizer
This is a wrapper around DistributedFusedAdam.clip_grad_norm with
added functionality to handle model parallel parameters.
Arguments:
parameters (DistributedFusedAdam): distributed optimizer
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Currently
only 2-norm is supported.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
assert isinstance(optimizer, DistributedFusedAdam)
# Filter parameters based on:
# - parameter should not be shared
# - should not be a replica due to tensor model parallelism
params_for_norm = []
for param in optimizer.parameters():
is_not_shared = param_is_not_shared(param)
is_not_tp_duplicate = param_is_not_tensor_parallel_duplicate(param)
if is_not_shared and is_not_tp_duplicate:
params_for_norm.append(param)
# Compute grad norm
# Note: DistributedFusedAdam caches grad norm to avoid redundant
# communication.
optimizer.grad_norm(parameters=params_for_norm, norm_type=norm_type)
return optimizer.clip_grad_norm(max_norm, norm_type=norm_type)
| NeMo-main | nemo/collections/nlp/modules/common/megatron/clip_grads.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from typing import Dict, List, Optional
from nemo.core.classes import NeuralModule
from nemo.core.neural_types import ChannelType, MaskType, NeuralType
__all__ = ['MegatronEncoderModule']
class MegatronEncoderModule(NeuralModule, ABC):
""" Base class for encoder neural module to be used in NLP models. """
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"encoder_mask": NeuralType(('B', 'T'), MaskType()),
}
@property
def input_names(self) -> List[str]:
return ['input_ids', 'encoder_mask']
@property
def output_names(self) -> List[str]:
return ['encoder_output']
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"encoder_output": NeuralType(('B', 'T', 'D'), ChannelType())}
| NeMo-main | nemo/collections/nlp/modules/common/megatron/megatron_encoder_module.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer based language model."""
from nemo.collections.nlp.modules.common.megatron.megatron_perceiver_encoders import MegatronPerceiverEncoderModule
from nemo.collections.nlp.modules.common.megatron.megatron_transformer_encoder import MegatronTransformerEncoderModule
from nemo.collections.nlp.modules.common.megatron.retrieval_transformer import (
MegatronRetrievalTransformerEncoderModule,
)
from nemo.collections.nlp.modules.common.megatron.utils import (
ApexGuardDefaults,
init_method_normal,
scaled_init_method_normal,
)
try:
from apex.transformer.enums import AttnMaskType, ModelType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
AttnMaskType = ApexGuardDefaults()
ModelType = ApexGuardDefaults()
try:
from megatron.core import ModelParallelConfig
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
__all__ = []
AVAILABLE_ENCODERS = ["transformer", "perceiver", "retro"]
def get_encoder_model(
config: ModelParallelConfig,
arch,
hidden_size,
ffn_hidden_size,
num_layers,
num_attention_heads,
apply_query_key_layer_scaling=False,
kv_channels=None,
init_method=None,
scaled_init_method=None,
encoder_attn_mask_type=AttnMaskType.padding,
pre_process=True,
post_process=True,
init_method_std=0.02,
megatron_amp_O2=False,
hidden_dropout=0.1,
attention_dropout=0.1,
ffn_dropout=0.0,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=1,
activations_checkpoint_granularity=None,
layernorm_epsilon=1e-5,
bias_activation_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
persist_layer_norm=False,
openai_gelu=False,
activation="gelu",
onnx_safe=False,
bias=True,
normalization="layernorm",
headscale=False,
transformer_block_type="pre_ln",
hidden_steps=32,
parent_model_type=ModelType.encoder_or_decoder,
layer_type=None,
chunk_size=64,
num_self_attention_per_cross_attention=1,
    layer_number_offset=0,  # this is used only for attention norm_factor scaling
megatron_legacy=False,
normalize_attention_scores=True,
sequence_parallel=False,
num_moe_experts=1,
moe_frequency=1,
moe_dropout=0.0,
turn_off_rop=False, # turn off the RoP positional embedding
version=1, # model version
position_embedding_type='learned_absolute',
use_flash_attention=False,
):
"""Build language model and return along with the key to save."""
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
if init_method is None:
init_method = init_method_normal(init_method_std)
if scaled_init_method is None:
scaled_init_method = scaled_init_method_normal(init_method_std, num_layers)
if arch == "transformer":
# Language encoder.
encoder = MegatronTransformerEncoderModule(
config=config,
init_method=init_method,
output_layer_init_method=scaled_init_method,
hidden_size=hidden_size,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
ffn_hidden_size=ffn_hidden_size,
encoder_attn_mask_type=encoder_attn_mask_type,
pre_process=pre_process,
post_process=post_process,
megatron_amp_O2=megatron_amp_O2,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
activations_checkpoint_granularity=activations_checkpoint_granularity,
layernorm_epsilon=layernorm_epsilon,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
bias=bias,
normalization=normalization,
transformer_block_type=transformer_block_type,
headscale=headscale,
parent_model_type=parent_model_type,
megatron_legacy=megatron_legacy,
normalize_attention_scores=normalize_attention_scores,
num_moe_experts=num_moe_experts,
moe_frequency=moe_frequency,
moe_dropout=moe_dropout,
position_embedding_type=position_embedding_type,
use_flash_attention=use_flash_attention,
)
elif arch == "retro":
encoder = MegatronRetrievalTransformerEncoderModule(
config=config,
init_method=init_method,
output_layer_init_method=scaled_init_method,
hidden_size=hidden_size,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
layer_type=layer_type,
ffn_hidden_size=ffn_hidden_size,
pre_process=pre_process,
post_process=post_process,
megatron_amp_O2=megatron_amp_O2,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
activations_checkpoint_granularity=activations_checkpoint_granularity,
layernorm_epsilon=layernorm_epsilon,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
bias=bias,
normalization=normalization,
transformer_block_type=transformer_block_type,
parent_model_type=parent_model_type,
chunk_size=chunk_size,
layer_number_offset=layer_number_offset,
megatron_legacy=megatron_legacy,
normalize_attention_scores=normalize_attention_scores,
turn_off_rop=turn_off_rop,
version=version,
)
elif arch == "perceiver":
encoder = MegatronPerceiverEncoderModule(
config=config,
init_method=init_method,
output_layer_init_method=scaled_init_method,
hidden_size=hidden_size,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
ffn_hidden_size=ffn_hidden_size,
encoder_attn_mask_type=encoder_attn_mask_type,
pre_process=pre_process,
post_process=post_process,
megatron_amp_O2=megatron_amp_O2,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
activations_checkpoint_granularity=activations_checkpoint_granularity,
layernorm_epsilon=layernorm_epsilon,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
bias=bias,
normalization=normalization,
transformer_block_type=transformer_block_type,
headscale=headscale,
parent_model_type=parent_model_type,
hidden_steps=hidden_steps,
num_self_attention_per_cross_attention=num_self_attention_per_cross_attention,
megatron_legacy=megatron_legacy,
normalize_attention_scores=normalize_attention_scores,
)
else:
raise ValueError(f"Unknown encoder arch = {arch}. Available encoder arch = {AVAILABLE_ENCODERS}")
return encoder
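# NOTE: minimal illustrative sketch, not called anywhere in NeMo; it only spells out the two
# defaults applied above before an encoder is built: kv_channels falls back to
# hidden_size // num_attention_heads, and the output-layer init std is scaled by
# 1 / sqrt(2 * num_layers) (the usual Megatron-style scaled init). Treat the exact scaling
# rule as an assumption documented by scaled_init_method_normal, not as new API.
def _demo_encoder_defaults(hidden_size=1024, num_attention_heads=16, num_layers=24, init_method_std=0.02):
    import math
    assert hidden_size % num_attention_heads == 0
    kv_channels = hidden_size // num_attention_heads
    scaled_std = init_method_std / math.sqrt(2.0 * num_layers)
    return kv_channels, scaled_std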
| NeMo-main | nemo/collections/nlp/modules/common/megatron/megatron_encoders.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Dict, List, Optional
import torch
import torch.nn.functional as F
from nemo.collections.nlp.modules.common.megatron.utils import build_position_ids
from nemo.core.classes.exportable import Exportable
from nemo.core.neural_types import ChannelType, EncodedRepresentation, MaskType, NeuralType
__all__ = ["TokensHeadEmb", "DecEmb", "EncEmb"]
class TokensHeadEmb(torch.nn.Module, Exportable):
"""
Combines decoder_embedding with the tokens_head layer to simulate the classifier in NemoNMT
"""
def __init__(self, decoder_embedding, tokens_head, device):
super(TokensHeadEmb, self).__init__()
self.decoder_embedding = decoder_embedding
self.tokens_head_bias = tokens_head.bias
self.device = device
# properties needed for export
self.training = False
def train(self, dummy_input):
return None
def modules(self):
return []
def forward(self, dec_output):
if isinstance(dec_output, list):
dec_output = dec_output[0]
if self.tokens_head_bias is not None:
return F.linear(dec_output, self.decoder_embedding.word_embeddings.weight, self.tokens_head_bias).float()
return F.linear(dec_output, self.decoder_embedding.word_embeddings.weight).float()
def input_example(self, max_batch=1, max_dim=768, seq_len=6):
return [
torch.randint(low=-3, high=3, size=(max_batch, seq_len, max_dim), device=self.device, dtype=torch.float32)
]
def freeze(self):
for param in self.parameters():
param.requires_grad = False
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"hidden_states": NeuralType(('B', 'T', 'D'), ChannelType()),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"log_probs": NeuralType(('B', 'T', 'D'), ChannelType())}
@property
def input_names(self) -> List[str]:
return ['hidden_states']
@property
def output_names(self) -> List[str]:
return ['log_probs']
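# Illustration (hedged, with hypothetical shapes): the forward above is a weight-tied
# output projection. For hidden_states of shape [B, T, D] and a decoder word-embedding
# matrix W_emb of shape [V, D], it computes
#   logits = torch.nn.functional.linear(hidden_states, W_emb, tokens_head_bias)  # [B, T, V]
# i.e. the decoder embedding weights are reused as the classifier weights.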
class DecEmb(torch.nn.Module, Exportable):
"""
Combines decoder_embedding with the decoder component
"""
def __init__(self, decoder_embedding, decoder, rpe, device):
super(DecEmb, self).__init__()
self.decoder_embedding = decoder_embedding
self.decoder = decoder
self.device = device
self.rpe = rpe
# properties needed for export
self.training = False
def train(self, dummy_input):
return None
def modules(self):
return (self.decoder_embedding, self.decoder)
def forward(self, input_ids, decoder_mask, encoder_mask, encoder_embeddings, decoder_mems):
position_ids = build_position_ids(input_ids)
dec_input = self.decoder_embedding(input_ids, position_ids, token_type_ids=None)
rpe = None
if self.rpe is not None:
rpe = self.rpe(query_seq_length=input_ids.size(1), key_seq_length=input_ids.size(1),)
dec_out = (
self.decoder(
dec_input,
decoder_mask,
encoder_embeddings.permute(1, 0, 2),
encoder_mask,
dec_self_attention_relative_position_bias=rpe,
)
.permute(1, 0, 2)
.float()
)
zeros = torch.zeros(
(decoder_mems.shape[0], self.decoder.num_layers, dec_out.shape[1], decoder_mems.shape[-1])
).to(self.device)
return torch.cat((zeros, dec_out.unsqueeze(1)), dim=1)
def freeze(self):
for param in self.parameters():
param.requires_grad = False
def input_example(self, max_batch=1, max_dim=768, seq_len=6):
enc_output = torch.randint(
low=-3, high=3, size=(max_batch, seq_len, max_dim), device=self.device, dtype=torch.float32
)
enc_attn_mask = torch.tensor([[1 for _ in range(seq_len)]]).to(self.device)
dec_len = random.randint(10, 128)
dec_input = torch.randint(low=0, high=1000, size=(max_batch, dec_len), device=self.device)
dec_attn_mask = torch.tensor([[1 for _ in range(dec_len)]]).to(self.device)
# constant decoder_mems as placeholder for now
decoder_mems = torch.zeros([max_batch, self.decoder.num_layers + 1, seq_len, max_dim], dtype=torch.float32).to(
self.device
)
        # input_ids, decoder_mask, encoder_mask, encoder_embeddings, decoder_mems
return tuple([dec_input, dec_attn_mask, enc_attn_mask, enc_output, decoder_mems])
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"input_ids": NeuralType(('B', 'T', 'D'), ChannelType()),
"decoder_mask": NeuralType(('B', 'T'), MaskType()),
"encoder_mask": NeuralType(('B', 'T', 'D'), ChannelType()),
"encoder_embeddings": NeuralType(('B', 'T'), MaskType()),
"decoder_mems": NeuralType(('B', 'S', 'T', 'D'), ChannelType()),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"last_hidden_states": NeuralType(('B', 'S', 'T', 'D'), EncodedRepresentation()),
}
@property
def input_names(self) -> List[str]:
return ['input_ids', 'decoder_mask', 'encoder_mask', 'encoder_embeddings', 'decoder_mems']
@property
def output_names(self) -> List[str]:
return ['last_hidden_states']
class EncEmb(torch.nn.Module, Exportable):
"""
Combines encoder_embedding with the encoder component
"""
def __init__(self, encoder_embedding, encoder, rpe, device):
super(EncEmb, self).__init__()
self.encoder_embedding = encoder_embedding
self.encoder = encoder
self.device = device
self.rpe = rpe
# properties needed for export
self.training = False
def train(self, dummy_input):
return None
def modules(self):
return (self.encoder_embedding, self.encoder)
def forward(self, input_ids, encoder_mask):
if self.rpe is None:
position_ids = build_position_ids(input_ids)
else:
position_ids = None
enc_input = self.encoder_embedding(input_ids, position_ids, token_type_ids=None)
# pass input through the encoder
enc_seq_length = input_ids.size(1)
rpe = None
if self.rpe is not None:
rpe = self.rpe(query_seq_length=enc_seq_length, key_seq_length=enc_seq_length,)
return (
self.encoder(
enc_input=enc_input, enc_attn_mask=encoder_mask, enc_self_attention_relative_position_bias=rpe
)
.permute(1, 0, 2)
.float()
)
def input_example(self, max_batch=1, max_dim=30000, seq_len=6):
seq_len = random.randint(0, 128)
return (
torch.randint(0, max_dim, (max_batch, seq_len)).to(self.device),
torch.ones((max_batch, seq_len), dtype=int).to(self.device),
)
def freeze(self):
for param in self.parameters():
param.requires_grad = False
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"encoder_mask": NeuralType(('B', 'T'), MaskType()),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"last_hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}
@property
def input_names(self) -> List[str]:
return ['input_ids', 'encoder_mask']
@property
def output_names(self) -> List[str]:
return ['last_hidden_states']
| NeMo-main | nemo/collections/nlp/modules/common/megatron/megatron_export.py |
# coding=utf-8
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.collections.nlp.modules.common.megatron.utils import _cast_if_autocast_enabled
try:
from apex.contrib.layer_norm.layer_norm import FastLayerNorm as OrigFastLayerNorm
from apex.contrib.layer_norm.layer_norm import _fast_layer_norm
from apex.transformer.layers.layer_norm import FastLayerNorm
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
if HAVE_APEX:
# TODO: use Apex implementation
class LayerNorm1P(FastLayerNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
assert isinstance(
self, OrigFastLayerNorm
), 'LayerNorm1P implemented only as an apex.contrib.layer_norm.FastLayerNorm extension'
def reset_parameters(self):
torch.nn.init.zeros_(self.weight)
torch.nn.init.zeros_(self.bias)
def forward(self, x):
return _fast_layer_norm(x, self.weight + 1, self.bias, self.epsilon)
else:
class LayerNorm1P(torch.nn.Module):
def __init__(self, *args, **kwargs):
raise NotImplementedError('LayerNorm1P available only with apex installed')
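# Note on the "1P" parameterization above: the learnable gain is stored as a deviation
# from one, so reset_parameters() zero-initializes self.weight and forward() applies
# (self.weight + 1) as the effective layer-norm scale; a freshly reset module therefore
# behaves like a standard layer norm with unit gain.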
class LPLayerNorm(torch.nn.LayerNorm):
def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True, device=None, dtype=None):
super().__init__(
normalized_shape=normalized_shape,
eps=eps,
elementwise_affine=elementwise_affine,
device=device,
dtype=dtype,
)
def forward(self, x):
module_device = x.device
downcast_x = _cast_if_autocast_enabled(x)
downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
downcast_bias = _cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias
with torch.autocast(enabled=False, device_type=module_device.type):
return torch.nn.functional.layer_norm(
downcast_x, self.normalized_shape, downcast_weight, downcast_bias, self.eps
)
| NeMo-main | nemo/collections/nlp/modules/common/megatron/layer_norm_1p.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer based language model."""
from nemo.collections.nlp.modules.common.megatron.layer_type import LayerType
from nemo.collections.nlp.modules.common.megatron.megatron_encoder_module import MegatronEncoderModule
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.transformer import ParallelTransformer
from nemo.collections.nlp.modules.common.megatron.utils import (
ApexGuardDefaults,
attn_mask_postprocess,
build_attention_mask_3d,
)
from nemo.core.classes.exportable import Exportable
try:
from apex.transformer.enums import AttnMaskType, ModelType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
AttnMaskType = ApexGuardDefaults()
ModelType = ApexGuardDefaults()
try:
from megatron.core import ModelParallelConfig
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
__all__ = ["MegatronTransformerEncoderModule"]
class MegatronTransformerEncoderModule(MegatronModule, Exportable, MegatronEncoderModule):
"""Transformer encoder model."""
def __init__(
self,
config: ModelParallelConfig,
init_method,
output_layer_init_method,
hidden_size,
ffn_hidden_size,
num_layers,
num_attention_heads,
apply_query_key_layer_scaling=True,
kv_channels=None,
pre_process=True,
post_process=True,
megatron_amp_O2=False,
encoder_attn_mask_type=AttnMaskType.padding,
hidden_dropout=0.1,
attention_dropout=0.1,
ffn_dropout=0.0,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=1,
activations_checkpoint_granularity=None,
layernorm_epsilon=1e-5,
bias_activation_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
persist_layer_norm=False,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
bias=True,
normalization='layernorm',
transformer_block_type='pre_ln',
headscale=False,
parent_model_type=ModelType.encoder_or_decoder,
megatron_legacy=False,
normalize_attention_scores=True,
num_moe_experts=1,
moe_frequency=1,
moe_dropout=0.0,
position_embedding_type='learned_absolute',
use_flash_attention=False,
):
super(MegatronTransformerEncoderModule, self).__init__(config=config)
self.pre_process = pre_process
self.post_process = post_process
self.hidden_size = hidden_size
self.num_layers = num_layers
self.init_method = init_method
self.model_attn_mask_type = encoder_attn_mask_type
self.hidden_dropout = hidden_dropout
self.output_layer_init_method = output_layer_init_method
self.parent_model_type = parent_model_type
self.normalization = normalization
self.transformer_block_type = transformer_block_type
self.use_flash_attention = use_flash_attention
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
# Transformer.
self.model = ParallelTransformer(
config=config,
layer_type=LayerType.encoder,
init_method=self.init_method,
output_layer_init_method=self.output_layer_init_method,
num_layers=self.num_layers,
hidden_size=self.hidden_size,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
ffn_hidden_size=ffn_hidden_size,
self_attn_mask_type=self.model_attn_mask_type,
pre_process=self.pre_process,
post_process=self.post_process,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
activations_checkpoint_granularity=activations_checkpoint_granularity,
layernorm_epsilon=layernorm_epsilon,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
megatron_amp_O2=megatron_amp_O2,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
bias=bias,
normalization=normalization,
transformer_block_type=transformer_block_type,
headscale=headscale,
model_type=parent_model_type,
megatron_legacy=megatron_legacy,
normalize_attention_scores=normalize_attention_scores,
num_moe_experts=num_moe_experts,
moe_frequency=moe_frequency,
moe_dropout=moe_dropout,
position_embedding_type=position_embedding_type,
use_flash_attention=use_flash_attention,
)
self._model_key = 'model'
def set_input_tensor(self, input_tensor):
""" See megatron.model.transformer.set_input_tensor()"""
self.model.set_input_tensor(input_tensor)
def forward(
self,
enc_input,
enc_attn_mask,
layer_past=None,
get_key_value=False,
enc_self_attention_relative_position_bias=None,
):
# convert to Megatron mask
if self.use_flash_attention:
enc_attn_mask_3d = enc_attn_mask < 0.5
else:
enc_attn_mask_3d = attn_mask_postprocess(
build_attention_mask_3d(
source_mask=enc_attn_mask, target_mask=enc_attn_mask, attn_mask_type=self.model_attn_mask_type,
)
)
# transformer encoder
enc_output = self.model(
enc_input,
enc_attn_mask_3d,
layer_past=layer_past,
get_key_value=get_key_value,
self_attention_relative_position_bias=enc_self_attention_relative_position_bias,
cross_attention_relative_position_bias=None,
)
return enc_output
def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
"""For easy load."""
state_dict_ = {}
state_dict_[self._model_key] = self.model.state_dict_for_save_checkpoint(destination, prefix, keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
# Encoder.
if self._model_key in state_dict:
state_dict_ = state_dict[self._model_key]
# for backward compatibility.
elif 'transformer' in state_dict:
state_dict_ = state_dict['transformer']
else:
# for backward compatibility.
state_dict_ = {}
for key in state_dict.keys():
if 'transformer.' in key:
state_dict_[key.split('transformer.')[1]] = state_dict[key]
# for backward compatibility.
state_dict_self_attention = {}
for key in state_dict_.keys():
if '.attention.' in key:
state_dict_self_attention[key.replace(".attention.", ".self_attention.")] = state_dict_[key]
else:
state_dict_self_attention[key] = state_dict_[key]
state_dict_ = state_dict_self_attention
self.model.load_state_dict(state_dict_, strict=strict)
| NeMo-main | nemo/collections/nlp/modules/common/megatron/megatron_transformer_encoder.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.modules.common.megatron.megatron_utils import (
get_megatron_checkpoint,
get_megatron_lm_models_list,
)
| NeMo-main | nemo/collections/nlp/modules/common/megatron/__init__.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
try:
from apex._autocast_utils import _cast_if_autocast_enabled
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
###### BIAS GELU FUSION/ NO AUTOGRAD ################
# 1/sqrt(2*pi)-> 0.3989423
# 1/sqrt(2) -> 0.70710678
# sqrt(2/pi) -> 0.79788456
# this function is tanh approximation of gelu
# actual gelu is:
# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
@torch.jit.script
def bias_gelu(bias, y):
x = bias + y
return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
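# Sanity-check sketch (assumes PyTorch >= 1.12, where F.gelu exposes approximate='tanh'):
#   x, b = torch.randn(4, 8), torch.randn(8)
#   torch.allclose(bias_gelu(b, x), torch.nn.functional.gelu(x + b, approximate='tanh'), atol=1e-6)
# should hold, since bias_gelu is exactly this tanh approximation applied to (x + b).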
# gradient of tanh approximation of gelu
# gradient of actual gelu is:
# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
@torch.jit.script
def bias_gelu_back(g, bias, y):
x = bias + y
tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
# sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)
return ff * g
class GeLUFunction(torch.autograd.Function):
@staticmethod
# bias is an optional argument
def forward(ctx, input, bias):
ctx.save_for_backward(input, bias)
return bias_gelu(bias, input)
@staticmethod
def backward(ctx, grad_output):
input, bias = ctx.saved_tensors
tmp = bias_gelu_back(grad_output, bias, input)
return tmp, tmp
@staticmethod
def symbolic(g: torch.Graph, input: torch.Value, bias: torch.Value):
# define constants and variables
x = g.op("Add", input, bias)
const_1 = g.op("Constant", value_t=torch.tensor(0.5, dtype=torch.float16))
const_2 = g.op("Constant", value_t=torch.tensor(1.0, dtype=torch.float16))
const_3 = g.op("Constant", value_t=torch.tensor(0.79788456, dtype=torch.float16))
const_4 = g.op("Constant", value_t=torch.tensor(0.044715, dtype=torch.float16))
# calculates (1 + 0.044715 * x * x)
p_1 = g.op("Add", const_2, g.op("Mul", x, g.op("Mul", const_4, x)))
# calculates torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
p_2 = g.op("Tanh", g.op("Mul", const_3, g.op("Mul", x, p_1)))
# calculates x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
return g.op("Mul", const_1, g.op("Mul", x, g.op("Add", const_2, p_2)))
def fused_bias_gelu(input, bias):
args = _cast_if_autocast_enabled(input, bias)
with torch.cuda.amp.autocast(enabled=False):
return GeLUFunction.apply(*args)
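# The wrapper above follows the usual AMP pattern for custom autograd functions:
# _cast_if_autocast_enabled first casts (input, bias) to the active autocast dtype,
# then autocast is disabled so the jit-scripted kernel runs directly in that dtype
# without additional per-op casting inside GeLUFunction.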
| NeMo-main | nemo/collections/nlp/modules/common/megatron/fused_bias_gelu.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from typing import Dict, List, Optional
from nemo.core.classes import NeuralModule
from nemo.core.neural_types import ChannelType, MaskType, NeuralType
__all__ = ['MegatronDecoderModule']
class MegatronDecoderModule(NeuralModule, ABC):
""" Base class for encoder neural module to be used in NLP models. """
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"dec_input": NeuralType(('B', 'T', 'D'), ChannelType()),
"dec_attn_mask": NeuralType(('B', 'T'), MaskType()),
"enc_output": NeuralType(('B', 'T', 'D'), ChannelType()),
"enc_attn_mask": NeuralType(('B', 'T'), MaskType()),
}
@property
def input_names(self) -> List[str]:
return ['dec_input', 'dec_attn_mask', 'enc_output', 'enc_attn_mask']
@property
def output_names(self) -> List[str]:
        return ['dec_output']
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"dec_output": NeuralType(('B', 'T', 'D'), ChannelType())}
| NeMo-main | nemo/collections/nlp/modules/common/megatron/megatron_decoder_module.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer based language model."""
import torch
from nemo.collections.nlp.modules.common.megatron.hiddens import MegatronHiddensModule
from nemo.collections.nlp.modules.common.megatron.megatron_perceiver_encoders import MegatronPerceiverEncoderModule
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults
try:
from apex.transformer.enums import AttnMaskType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
AttnMaskType = ApexGuardDefaults()
try:
from megatron.core import ModelParallelConfig
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
__all__ = ["MegatronTransformerEncoderDecoderModule"]
class MegatronTransformerEncoderDecoderModule(MegatronModule):
"""Transformer encoder-decoder model.
"""
def __init__(
self,
config: ModelParallelConfig,
encoder,
decoder,
        # AttnMaskType enum mask type (e.g., padding, causal)
encoder_attn_mask_type: AttnMaskType = None,
decoder_attn_mask_type: AttnMaskType = None,
hidden_steps: int = None,
hiddens_module: MegatronHiddensModule = None, # allows for hidden state transformations before the decoder
):
super(MegatronTransformerEncoderDecoderModule, self).__init__(config=config)
self.encoder = encoder
self.decoder = decoder
self.hidden_steps = hidden_steps
if isinstance(encoder, MegatronPerceiverEncoderModule) and hidden_steps is None:
raise ValueError(
f"hidden_steps cannot be None for perceiver encoders. It is needed to compute the encoder-decoder cross attention mask."
)
self.hiddens_module = hiddens_module
if self.hiddens_module is not None and not isinstance(self.hiddens_module, MegatronHiddensModule):
raise TypeError(
f"hiddens_module must be of type MegatronHiddensModule, but got {type(self.hiddens_module)} instead."
)
# try to infer mask_type if not given
if encoder_attn_mask_type is None:
if encoder is None:
encoder_attn_mask_type = None
# Perceiver does not have a `.model` attribute, assume it always uses padding mask.
elif isinstance(encoder, MegatronPerceiverEncoderModule):
encoder_attn_mask_type = AttnMaskType.padding
elif hasattr(encoder.model, 'self_attn_mask_type'):
encoder_attn_mask_type = encoder.model.self_attn_mask_type
else:
raise AttributeError(
"Could not find an attribute for encoder self_attn_mask_type, make sure it is set when instatiating the encoder or pass it to the constructor of this class."
)
if decoder_attn_mask_type is None:
if decoder is None:
decoder_attn_mask_type = None
elif hasattr(decoder.model, 'self_attn_mask_type'):
decoder_attn_mask_type = decoder.model.self_attn_mask_type
else:
raise AttributeError(
"Could not find an attribute for decoder self_attn_mask_type, make sure it is set when instatiating the decoder or pass it to the constructor of this class."
)
self.encoder_attn_mask_type = encoder_attn_mask_type
self.decoder_attn_mask_type = decoder_attn_mask_type
self._encoder_key = "encoder"
self._decoder_key = "decoder"
self._hiddens_module = "hiddens_module"
def get_hiddens_mask(self, enc_attn_mask):
"""
Returns the attention mask for the output of the encoder.
Required for fixed-size bottleneck models.
"""
if self.encoder is not None and isinstance(self.encoder, MegatronPerceiverEncoderModule):
# Attention mask is expected to be of shape [B x S]
hiddens_mask = torch.ones(enc_attn_mask.size(0), self.hidden_steps).to(enc_attn_mask.device)
else:
hiddens_mask = enc_attn_mask
return hiddens_mask
def encode(
self,
enc_input,
enc_attn_mask,
enc_layer_past=None,
enc_get_key_value=False,
enc_self_attention_relative_position_bias=None,
batch_data=None,
):
"""Encodes embedder input using encoder"""
if self.encoder is None:
raise ValueError(f"Cannot call .encode(...) when self.encoder is None.")
enc_output = self.encoder(
enc_input=enc_input,
enc_attn_mask=enc_attn_mask,
layer_past=enc_layer_past,
get_key_value=enc_get_key_value,
enc_self_attention_relative_position_bias=enc_self_attention_relative_position_bias,
)
# apply hidden transformations if needed
if self.hiddens_module is not None:
enc_output = self.hiddens_module.apply_hidden_transforms(
{"hiddens": enc_output, "hiddens_mask": self.get_hiddens_mask(enc_attn_mask),}, batch_data=batch_data,
)
return enc_output
def decode(
self,
dec_input,
dec_attn_mask,
enc_output,
enc_attn_mask,
dec_layer_past=None,
dec_get_key_value=False,
dec_self_attention_relative_position_bias=None,
dec_cross_attention_relative_position_bias=None,
):
        """Decodes embedder input using decoder and encoder input"""
        if self.decoder is None:
            raise ValueError("Cannot call .decode(...) when self.decoder is None.")
dec_output = self.decoder(
dec_input=dec_input,
dec_attn_mask=dec_attn_mask,
layer_past=dec_layer_past,
get_key_value=dec_get_key_value,
enc_output=enc_output,
enc_attn_mask=enc_attn_mask,
dec_self_attention_relative_position_bias=dec_self_attention_relative_position_bias,
dec_cross_attention_relative_position_bias=dec_cross_attention_relative_position_bias,
)
return dec_output
def forward(
self,
enc_input,
enc_attn_mask,
dec_input,
dec_attn_mask,
enc_layer_past=None,
enc_get_key_value=False,
enc_output=None,
enc_output_attn_mask=None,
dec_layer_past=None,
dec_get_key_value=False,
output_enc_hidden_only=False,
enc_self_attention_relative_position_bias=None,
dec_self_attention_relative_position_bias=None,
dec_cross_attention_relative_position_bias=None,
batch_data=None,
):
# encoder
if enc_output is None:
if self.encoder is not None:
enc_output = self.encode(
enc_input=enc_input,
enc_attn_mask=enc_attn_mask,
enc_layer_past=enc_layer_past,
enc_get_key_value=enc_get_key_value,
enc_self_attention_relative_position_bias=enc_self_attention_relative_position_bias,
batch_data=batch_data,
)
else:
assert self.encoder_hidden_state is not None
enc_output = self.encoder_hidden_state
else:
enc_attn_mask = enc_output_attn_mask.to(enc_attn_mask)
if self.decoder is None or output_enc_hidden_only:
return enc_output
# decoder
dec_output = self.decode(
dec_input=dec_input,
dec_attn_mask=dec_attn_mask,
enc_output=enc_output["enc_output"] # enc_output is a dict if we used hidden transformations
if self.hiddens_module is not None
else enc_output,
# Adjust encoder attention mask if encoder is a perceiver.
enc_attn_mask=self.get_hiddens_mask(enc_attn_mask),
dec_layer_past=dec_layer_past,
dec_get_key_value=dec_get_key_value,
dec_self_attention_relative_position_bias=dec_self_attention_relative_position_bias,
dec_cross_attention_relative_position_bias=dec_cross_attention_relative_position_bias,
)
# if self.hiddens_module is not None enc_output is a dict, else it is a torch.tensor
return dec_output, enc_output
def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
"""For easy load."""
state_dict_ = {}
state_dict_[self._encoder_key] = self.encoder.state_dict_for_save_checkpoint(destination, prefix, keep_vars)
state_dict_[self._decoder_key] = self.decoder.state_dict_for_save_checkpoint(destination, prefix, keep_vars)
if self.hiddens_module is not None:
state_dict_[self._hiddens_module] = self.hiddens_module.state_dict(destination, prefix, keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
self.encoder.load_state_dict(state_dict[self._encoder_key], strict=strict)
self.decoder.load_state_dict(state_dict[self._decoder_key], strict=strict)
if self.hiddens_module is not None:
self.hiddens_module.load_state_dict(state_dict[self._hiddens_module], strict=strict)
| NeMo-main | nemo/collections/nlp/modules/common/megatron/megatron_encoder_decoder.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from nemo.collections.nlp.modules.common.megatron.adapters.parallel_adapters import (
AdapterName,
MLPInfusedAdapterConfig,
)
from nemo.collections.nlp.modules.common.megatron.fused_bias_geglu import fused_bias_geglu
from nemo.collections.nlp.modules.common.megatron.fused_bias_gelu import fused_bias_gelu
from nemo.collections.nlp.modules.common.megatron.fused_layer_norm import get_layer_norm
from nemo.collections.nlp.modules.common.megatron.layer_norm_1p import LayerNorm1P
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults, erf_gelu
from nemo.collections.nlp.modules.common.megatron.utils import openai_gelu as openai_gelu_func
from nemo.collections.nlp.modules.common.megatron.utils import squared_relu
from nemo.core import adapter_mixins
try:
from apex.normalization import MixedFusedRMSNorm
from apex.transformer import parallel_state, tensor_parallel
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
ModelType = AttnMaskType = AttnType = LayerType = ApexGuardDefaults()
try:
from megatron.core import ModelParallelConfig, parallel_state, tensor_parallel
from megatron.core.parallel_state import get_tensor_model_parallel_world_size
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
class ParallelMLP(MegatronModule, adapter_mixins.AdapterModuleMixin):
"""MLP.
MLP will take the input with h hidden state, project it to 4*h
hidden dimension, perform nonlinear transformation, and project the
state back into h hidden dimension.
"""
def __init__(
self,
config: ModelParallelConfig,
init_method,
output_layer_init_method,
hidden_size,
ffn_hidden_size,
dtype=torch.float32,
bias_activation_fusion=True,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
bias=True,
transformer_block_type='pre_ln',
normalization='layernorm',
layernorm_epsilon=1e-5,
persist_layer_norm=False,
dropout=0.0,
):
super(ParallelMLP, self).__init__(config=config)
self.activation = activation
self.bias = bias
self.transformer_block_type = transformer_block_type
self.normalization = normalization
self.layernorm_epsilon = layernorm_epsilon
self.persist_layer_norm = persist_layer_norm
self.dropout = dropout
self.dtype = dtype
self.set_accepted_adapter_types([MLPInfusedAdapterConfig._target_])
supported_activations = [
'gelu',
'geglu',
'reglu',
'swiglu',
'squared-relu',
'fast-geglu',
'fast-swiglu',
'fast-reglu',
]
if activation not in supported_activations:
raise ValueError(
f"Activation {activation} not supported. Supported activations are {supported_activations}"
)
self.fast_glu_activation = activation in ['fast-geglu', 'fast-swiglu', 'fast-reglu']
# Project to 4h.
self.dense_h_to_4h = tensor_parallel.ColumnParallelLinear(
hidden_size,
ffn_hidden_size * 2
if self.fast_glu_activation
else ffn_hidden_size, # NOTE: When using geglu, divide ffn dim by 2/3 to keep overall params the same.
config=config,
gather_output=False,
init_method=init_method,
skip_bias_add=True,
bias=bias,
)
if activation in ['geglu', 'reglu', 'swiglu']:
# Separate linear layer for *GLU activations.
# Source: https://github.com/huggingface/transformers/blob/bee361c6f1f7704f8c688895f2f86f6e5ff84727/src/transformers/models/t5/modeling_t5.py#L292
self.dense_h_to_4h_2 = tensor_parallel.ColumnParallelLinear(
hidden_size,
ffn_hidden_size, # NOTE: When using *glu, divide ffn dim by 2/3 to keep overall params the same.
config=config,
gather_output=False,
init_method=init_method,
skip_bias_add=True,
bias=bias,
)
self.glu_activation_family = activation in [
'geglu',
'reglu',
'swiglu',
'fast-geglu',
'fast-reglu',
'fast-swiglu',
]
bias_activation_fusion_unavailable = activation in ['reglu', 'swiglu']
if bias_activation_fusion_unavailable and bias_activation_fusion:
raise ValueError(
f"Cannot use bias_activation_fusion with {activation} activation. Please turn bias gelu fusion off."
)
        if self.glu_activation_family and onnx_safe and bias_activation_fusion:
            raise ValueError(
                f"Cannot use onnx_safe with the specified activation function ({activation}) and bias_activation_fusion. Please turn onnx_safe off."
            )
if bias_activation_fusion and not bias:
raise ValueError(
f"Cannot use bias_activation_fusion without bias terms. Please set bias=True or bias_activation_fusion=False."
)
self.bias_activation_fusion = bias_activation_fusion
# Give openai_gelu precedence over other activations if set, for HF compatibility. Normally this is off and shouldn't affect regular model training.
if openai_gelu:
self.activation_func = openai_gelu_func
elif activation in ["gelu", "geglu", "fast-geglu"]:
self.activation_func = F.gelu
elif onnx_safe:
self.activation_func = erf_gelu
elif activation in ["reglu", "fast-reglu"]:
self.activation_func = F.relu
elif activation in ["swiglu", "fast-swiglu"]:
# SiLU or sigmoid linear unit is the same as swish with beta = 1 (which is what https://arxiv.org/pdf/2002.05202.pdf uses.)
self.activation_func = F.silu
elif activation == 'squared-relu':
self.activation_func = squared_relu
# Project back to h.
self.dense_4h_to_h = tensor_parallel.RowParallelLinear(
ffn_hidden_size,
hidden_size,
config=config,
input_is_parallel=True,
init_method=output_layer_init_method,
skip_bias_add=True,
bias=bias,
)
# Normformer normalization
if transformer_block_type == 'normformer':
if normalization == 'layernorm':
self.normalization = get_layer_norm(
ffn_hidden_size // get_tensor_model_parallel_world_size(), layernorm_epsilon, persist_layer_norm
)
elif normalization == 'layernorm1p':
self.normalization = LayerNorm1P(
ffn_hidden_size // get_tensor_model_parallel_world_size(),
layernorm_epsilon,
sequence_parallel_enabled=config.sequence_parallel,
)
else:
self.normalization = MixedFusedRMSNorm(
ffn_hidden_size // get_tensor_model_parallel_world_size(), layernorm_epsilon
)
def forward(self, hidden_states):
# [s, b, 4hp]
intermediate_parallel, bias_parallel = self.dense_h_to_4h(hidden_states)
if self.fast_glu_activation:
intermediate_parallel, intermediate_parallel_2 = torch.chunk(intermediate_parallel, 2, dim=-1)
if bias_parallel is not None:
bias_parallel, bias_parallel_2 = torch.chunk(bias_parallel, 2, dim=-1)
elif self.glu_activation_family and not self.fast_glu_activation:
intermediate_parallel_2, bias_parallel_2 = self.dense_h_to_4h_2(hidden_states)
if self.bias_activation_fusion:
if self.activation == 'gelu':
intermediate_parallel = fused_bias_gelu(intermediate_parallel, bias_parallel)
elif self.activation in ['geglu', 'fast-geglu']:
intermediate_parallel = fused_bias_geglu(
intermediate_parallel, bias_parallel, intermediate_parallel_2, bias_parallel_2
)
elif self.glu_activation_family and not self.bias_activation_fusion:
if bias_parallel is not None:
intermediate_parallel = self.activation_func(intermediate_parallel + bias_parallel) * (
intermediate_parallel_2 + bias_parallel_2
)
else:
intermediate_parallel = self.activation_func(intermediate_parallel) * intermediate_parallel_2
else:
if bias_parallel is not None:
intermediate_parallel = self.activation_func(intermediate_parallel + bias_parallel)
else:
intermediate_parallel = self.activation_func(intermediate_parallel)
if self.dropout > 0:
intermediate_parallel = F.dropout(intermediate_parallel, p=self.dropout, training=self.training)
infused_adapter = self.get_adapter_module(AdapterName.MLP_INFUSED)
if infused_adapter:
intermediate_parallel = infused_adapter(intermediate_parallel)
# Normformer normalization
if self.transformer_block_type == 'normformer':
intermediate_parallel = self.normalization(intermediate_parallel)
# [s, b, h]
output, output_bias = self.dense_4h_to_h(intermediate_parallel)
return output, output_bias
class SwitchMLP(MegatronModule):
"""Top-1 MoE
    Currently supports Sinkhorn-based expert routing."""
def __init__(
self,
config: ModelParallelConfig,
num_experts,
init_method,
output_layer_init_method,
hidden_size,
ffn_hidden_size,
dtype=torch.float32,
bias_activation_fusion=True,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
bias=True,
transformer_block_type='pre_ln',
normalization='layernorm',
layernorm_epsilon=1e-5,
persist_layer_norm=False,
sequence_parallel=False,
dropout=0.0,
):
super(SwitchMLP, self).__init__(config=config)
self.num_experts = num_experts
self.route_algo = SwitchMLP.sinkhorn
self.router = tensor_parallel.RowParallelLinear(
hidden_size,
num_experts,
config=config,
input_is_parallel=False,
init_method=init_method,
skip_bias_add=False,
bias=bias,
)
mlp_args = {
'config': config,
'init_method': init_method,
'output_layer_init_method': output_layer_init_method,
'hidden_size': hidden_size,
'ffn_hidden_size': ffn_hidden_size,
'dtype': dtype,
'bias_activation_fusion': bias_activation_fusion,
'openai_gelu': openai_gelu,
'onnx_safe': onnx_safe,
'activation': activation,
'bias': bias,
'transformer_block_type': transformer_block_type,
'normalization': normalization,
'layernorm_epsilon': layernorm_epsilon,
'persist_layer_norm': persist_layer_norm,
'dropout': dropout,
}
self.experts = torch.nn.ModuleList([ParallelMLP(**mlp_args) for _ in range(num_experts)])
def forward(self, hidden_states):
hidden_shape = hidden_states.shape
route, _ = self.router(hidden_states)
route = route.view(-1, self.num_experts)
if self.training:
with torch.no_grad():
norm_route = self.route_algo(
route.detach().to(dtype=torch.float32)
) # explicit fp32 conversion for stability
_, max_ind = torch.max(norm_route, dim=1)
route = torch.sigmoid(route)
max_prob = route[torch.arange(route.size(0)), max_ind]
else:
route = torch.sigmoid(route)
max_prob, max_ind = torch.max(route, dim=1)
max_prob = torch.unsqueeze(max_prob, 1)
hidden_states = hidden_states.view(-1, hidden_shape[-1])
local_indices = (max_ind == 0).nonzero()
hidden = hidden_states[local_indices, :]
output, output_bias = self.experts[0](hidden)
output_bias = output_bias.expand_as(output)
output_total = torch.empty_like(hidden_states, dtype=output.dtype)
output_bias_total = torch.empty_like(hidden_states, dtype=output_bias.dtype)
output_total[local_indices, :] = output
output_bias_total[local_indices, :] = output_bias
for expert_num, expert in enumerate(self.experts):
if expert_num == 0:
continue
local_indices = (max_ind == expert_num).nonzero()
hidden = hidden_states[local_indices, :]
output, output_bias = expert(hidden)
output_bias = output_bias.expand_as(output)
output_total[local_indices, :] = output
output_bias_total[local_indices, :] = output_bias
output_total = output_total * max_prob
output_bias_total = output_bias_total * max_prob
output_total = output_total.view(hidden_shape)
output_bias_total = output_bias_total.view(hidden_shape)
return output_total, output_bias_total
@classmethod
def sinkhorn(cls, cost, tol=0.0001):
"Megatron-LMs sinkhorn implementation"
cost = torch.exp(cost)
d0 = torch.ones(cost.size(0), device=cost.device, dtype=cost.dtype)
d1 = torch.ones(cost.size(1), device=cost.device, dtype=cost.dtype)
eps = 0.00000001
error = 1e9
d1_old = d1
while error > tol:
d0 = (1 / d0.size(0)) * 1 / (torch.sum(d1 * cost, 1) + eps)
d1 = (1 / d1.size(0)) * 1 / (torch.sum(d0.unsqueeze(1) * cost, 0) + eps)
error = torch.mean(torch.abs(d1_old - d1))
d1_old = d1
return d1 * cost * d0.unsqueeze(1)
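# Illustrative sketch of the routing math above (hypothetical standalone usage; in
# practice the module is called by the transformer layer): Sinkhorn balances the
# router logits before the training-time top-1 expert assignment is taken.
#   logits = torch.randn(16, 4)                    # [tokens, num_experts]
#   balanced = SwitchMLP.sinkhorn(logits.float())  # balanced routing scores
#   expert_choice = torch.argmax(balanced, dim=1)  # top-1 expert per token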
| NeMo-main | nemo/collections/nlp/modules/common/megatron/mlp.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
import torch
from nemo.utils import AppState, logging
try:
from apex.transformer.log_util import set_logging_level
from apex.transformer.microbatches import ConstantNumMicroBatches
from apex.transformer.pipeline_parallel.utils import setup_microbatch_calculator
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import tensor_parallel
from megatron.core.parallel_state import (
get_pipeline_model_parallel_rank,
set_pipeline_model_parallel_rank,
set_pipeline_model_parallel_split_rank,
set_pipeline_model_parallel_world_size,
set_tensor_model_parallel_rank,
set_tensor_model_parallel_world_size,
set_virtual_pipeline_model_parallel_rank,
    )
    HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
    HAVE_MEGATRON_CORE = False
try:
from apex.transformer.parallel_state import set_virtual_pipeline_model_parallel_world_size
HAVE_INTERLEAVED = True
except (ImportError, ModuleNotFoundError):
HAVE_INTERLEAVED = False
def initialize_model_parallel_for_nemo(
world_size,
global_rank,
local_rank,
tensor_model_parallel_size=1,
pipeline_model_parallel_size=1,
virtual_pipeline_model_parallel_size=None,
pipeline_model_parallel_split_rank=None,
micro_batch_size=None,
global_batch_size=None,
rampup_batch_size=None,
use_fp8=False,
init_mpi_proc_group=False,
seed=1234,
apex_transformer_log_level=30,
):
if virtual_pipeline_model_parallel_size is not None and not HAVE_INTERLEAVED:
raise ValueError("set_virtual_pipeline_model_parallel_world_size is needed in megatron-core for interleaved.")
# updating NeMo globals
app_state = AppState()
app_state.global_rank = global_rank
app_state.world_size = world_size
app_state.local_rank = local_rank
app_state.tensor_model_parallel_size = tensor_model_parallel_size
app_state.pipeline_model_parallel_size = pipeline_model_parallel_size
app_state.virtual_pipeline_model_parallel_size = virtual_pipeline_model_parallel_size
app_state.use_fp8 = use_fp8
app_state.init_mpi_proc_group = init_mpi_proc_group
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=world_size,
rank=global_rank,
tensor_model_parallel_size_=tensor_model_parallel_size,
pipeline_model_parallel_size_=pipeline_model_parallel_size,
virtual_pipeline_model_parallel_size_=virtual_pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=pipeline_model_parallel_split_rank,
)
# update apex.transformer globals
set_tensor_model_parallel_world_size(app_state.tensor_model_parallel_size)
set_tensor_model_parallel_rank(app_state.tensor_model_parallel_rank)
set_pipeline_model_parallel_rank(app_state.pipeline_model_parallel_rank)
if HAVE_INTERLEAVED:
set_virtual_pipeline_model_parallel_world_size(app_state.virtual_pipeline_model_parallel_size)
set_virtual_pipeline_model_parallel_rank(app_state.virtual_pipeline_model_parallel_rank)
set_pipeline_model_parallel_world_size(app_state.pipeline_model_parallel_size)
set_pipeline_model_parallel_split_rank(app_state.pipeline_model_parallel_split_rank)
_set_random_seed(seed)
if global_batch_size and micro_batch_size is not None:
# TODO: add rampup_batch_size here when we have it implemented
from apex.transformer.pipeline_parallel.utils import _GLOBAL_NUM_MICROBATCHES_CALCULATOR
if _GLOBAL_NUM_MICROBATCHES_CALCULATOR is None:
setup_microbatch_calculator(
rank=global_rank,
global_batch_size=global_batch_size,
micro_batch_size=micro_batch_size,
data_parallel_size=app_state.data_parallel_size,
rampup_batch_size=rampup_batch_size,
)
else:
if isinstance(_GLOBAL_NUM_MICROBATCHES_CALCULATOR, ConstantNumMicroBatches):
assert _GLOBAL_NUM_MICROBATCHES_CALCULATOR.current_global_batch_size == global_batch_size
assert _GLOBAL_NUM_MICROBATCHES_CALCULATOR.micro_batch_size == micro_batch_size
assert _GLOBAL_NUM_MICROBATCHES_CALCULATOR.num_micro_batches == global_batch_size // (
micro_batch_size * app_state.data_parallel_size
)
else:
raise Exception("Microbatch calculator already initialized.")
app_state._is_megatron_initialized = True
set_logging_level(apex_transformer_log_level)
def _set_random_seed(seed_):
"""Set random seed for reproducability."""
if seed_ is not None and seed_ > 0:
# Ensure that different pipeline MP stages get different seeds.
seed = seed_ + (100 * get_pipeline_model_parallel_rank())
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.device_count() > 0:
tensor_parallel.model_parallel_cuda_manual_seed(seed)
else:
raise ValueError('Seed ({}) should be a positive integer.'.format(seed_))
def set_jit_fusion_options():
"""Set PyTorch JIT layer fusion options."""
# set flags if we are using the 21.10 container
if torch.__version__ == "1.10.0a0+0aef44c":
# nvfuser
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(True)
torch._C._debug_set_autodiff_subgraph_inlining(False)
def fake_initialize_model_parallel(
world_size,
rank,
tensor_model_parallel_size_,
pipeline_model_parallel_size_,
pipeline_model_parallel_split_rank_=None,
virtual_pipeline_model_parallel_size_=None,
):
"""
Fake initialize model data parallel groups so that we can instantiate model parallel models before DDP is initialized.
This is needed because PTL execution flow is init model, init trainer -> call trainer.fit(model). DDP is initialized during .fit.
This function is taken from megatron.core.parallel_state and modified so that the distributed groups are not created.
We only need the tensor parallel and pipeline parallel ranks to instantiate the model.
Arguments:
tensor_model_parallel_size: number of GPUs used to parallelize model tensor.
pipeline_model_parallel_size: number of GPUs used to parallelize model pipeline.
Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we
use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
the model pipeline. The present function will
create 8 tensor model-parallel groups, 4 pipeline model-parallel groups
and 8 data-parallel groups as:
8 data_parallel groups:
[g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15]
8 tensor model-parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15]
4 pipeline model-parallel groups:
[g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
with a total of 16 GPUs, rank 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
"""
# Get world size and rank. Ensure some consistencies.
tensor_model_parallel_size = min(tensor_model_parallel_size_, world_size)
pipeline_model_parallel_size = min(pipeline_model_parallel_size_, world_size)
model_parallel_size = tensor_model_parallel_size * pipeline_model_parallel_size
assert (
        world_size % (tensor_model_parallel_size * pipeline_model_parallel_size) == 0
), f'world_size: {world_size} must be divisible by tensor_model_parallel_size: {tensor_model_parallel_size} times pipeline_model_parallel_size {pipeline_model_parallel_size}'
data_parallel_size = world_size // (tensor_model_parallel_size * pipeline_model_parallel_size)
num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size
num_pipeline_model_parallel_groups = world_size // pipeline_model_parallel_size
virtual_pipeline_model_parallel_rank = None
if virtual_pipeline_model_parallel_size_ is not None:
virtual_pipeline_model_parallel_rank = 0
# Build the data-parallel groups.
all_data_parallel_group_ranks = []
for i in range(pipeline_model_parallel_size):
start_rank = i * num_pipeline_model_parallel_groups
end_rank = (i + 1) * num_pipeline_model_parallel_groups
for j in range(tensor_model_parallel_size):
ranks = range(start_rank + j, end_rank, tensor_model_parallel_size)
all_data_parallel_group_ranks.append(list(ranks))
if rank in ranks:
data_parallel_group = list(ranks)
logging.info(f'Rank {rank} has data parallel group: {data_parallel_group}')
data_parallel_rank = data_parallel_group.index(rank)
logging.info(f'All data parallel group ranks: {all_data_parallel_group_ranks}')
    logging.info(f'Rank {rank} has data parallel rank: {data_parallel_rank}')
# Build the model-parallel groups.
all_model_parallel_group_ranks = []
for i in range(data_parallel_size):
ranks = [data_parallel_group_ranks[i] for data_parallel_group_ranks in all_data_parallel_group_ranks]
all_model_parallel_group_ranks.append(ranks)
if rank in ranks:
logging.info(f'Rank {rank} has model parallel group: {list(ranks)}')
logging.info(f'All model parallel group ranks: {all_model_parallel_group_ranks}')
# Build the tensor model-parallel groups.
all_tensor_model_parallel_group_ranks = []
tensor_model_parallel_group = None
for i in range(num_tensor_model_parallel_groups):
ranks = range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)
all_tensor_model_parallel_group_ranks.append(list(ranks))
if rank in ranks:
tensor_model_parallel_group = list(ranks)
logging.info(f'Rank {rank} has tensor model parallel group: {tensor_model_parallel_group}')
tensor_model_parallel_rank = tensor_model_parallel_group.index(rank)
logging.info(f'All tensor model parallel group ranks: {all_tensor_model_parallel_group_ranks}')
logging.info(f'Rank {rank} has tensor model parallel rank: {tensor_model_parallel_rank}')
# Build the pipeline model-parallel groups and embedding groups
# (first and last rank in each pipeline model-parallel group).
all_pipeline_model_parallel_group_ranks = []
all_embedding_group_ranks = []
pipeline_model_parallel_group = None
embedding_group = None
embedding_rank = None
for i in range(num_pipeline_model_parallel_groups):
ranks = range(i, world_size, num_pipeline_model_parallel_groups)
all_pipeline_model_parallel_group_ranks.append(list(ranks))
if rank in ranks:
pipeline_model_parallel_group = list(ranks)
logging.info(f'Rank {rank} has pipeline model parallel group: {pipeline_model_parallel_group}')
# Setup embedding group (to exchange gradients between
# first and last stages).
if len(ranks) > 1:
embedding_ranks = [ranks[0], ranks[-1]]
all_embedding_group_ranks.append(embedding_ranks)
else:
embedding_ranks = ranks
all_embedding_group_ranks.append(list(embedding_ranks))
if rank in embedding_ranks:
embedding_group = list(embedding_ranks)
logging.info(f'Rank {rank} has embedding group: {embedding_group}')
pipeline_model_parallel_rank = pipeline_model_parallel_group.index(rank)
if embedding_group is not None:
embedding_rank = embedding_group.index(rank)
logging.info(f'All pipeline model parallel group ranks: {all_pipeline_model_parallel_group_ranks}')
logging.info(f'Rank {rank} has pipeline model parallel rank {pipeline_model_parallel_rank}')
    logging.info(f'All embedding group ranks: {all_embedding_group_ranks}')
logging.info(f'Rank {rank} has embedding rank: {embedding_rank}')
return (
tensor_model_parallel_rank,
pipeline_model_parallel_rank,
model_parallel_size,
data_parallel_size,
pipeline_model_parallel_split_rank_,
virtual_pipeline_model_parallel_rank,
)
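# Worked example (follows the 16-GPU layout in the docstring above): calling
#   fake_initialize_model_parallel(world_size=16, rank=5,
#                                  tensor_model_parallel_size_=2,
#                                  pipeline_model_parallel_size_=4)
# places rank 5 in tensor group [4, 5], pipeline group [1, 5, 9, 13] and data-parallel
# group [5, 7], so it returns tensor rank 1, pipeline rank 1, model_parallel_size 8
# and data_parallel_size 2.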
| NeMo-main | nemo/collections/nlp/modules/common/megatron/megatron_init.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for models."""
import itertools
import math
from typing import Dict, Iterator, List, Tuple, Union
import torch
try:
from apex.normalization import MixedFusedRMSNorm
from apex.normalization.fused_layer_norm import FusedLayerNorm # NOQA
from apex.transformer.enums import AttnMaskType
from apex.transformer.layers.layer_norm import FastLayerNorm
from apex.transformer.pipeline_parallel.schedules.common import listify_model
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import parallel_state, tensor_parallel
from megatron.core.tensor_parallel.layers import linear_with_grad_accumulation_and_async_allreduce
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
class ApexGuardDefaults(object):
"""
This class can be used to replace missing classes when apex is missing.
"""
def __init__(self):
super().__init__()
def __getattr__(self, item):
return None
def parallel_lm_logits(
input_: torch.Tensor,
word_embeddings_weight: torch.Tensor,
parallel_output: bool,
bias: torch.Tensor = None,
async_tensor_model_parallel_allreduce: bool = False,
sequence_parallel: bool = False,
gradient_accumulation_fusion: bool = False,
):
"""Language Model logits using word embedding weights.
Args:
input_ (torch.Tensor): [b, s, h]
word_embeddings_weight (torch.Tensor): [(padded) vocab size, h]
parallel_output (bool): False will gather logits from tensor model parallel region
bias (torch.Tensor, optional): bias tensor. Defaults to None.
async_tensor_model_parallel_allreduce (bool, optional): Defaults to False.
sequence_parallel (bool, optional): If True will use sequence parallelism. Defaults to False.
        gradient_accumulation_fusion (bool, optional): If True fuse gradient accumulation to WGRAD GEMM
Returns:
torch.Tensor: [b, s, (padded) vocab size]
"""
tensor_model_parallel = parallel_state.get_tensor_model_parallel_world_size() > 1
# async grad allreduce can only be used when not using sequence parallelism
async_grad_allreduce = async_tensor_model_parallel_allreduce and tensor_model_parallel and not sequence_parallel
# copy input_ to model parallel region if needed
if async_tensor_model_parallel_allreduce or sequence_parallel:
input_parallel = input_
else:
input_parallel = tensor_parallel.copy_to_tensor_model_parallel_region(input_)
# Matrix multiply.
logits_parallel = linear_with_grad_accumulation_and_async_allreduce(
input=input_parallel,
weight=word_embeddings_weight,
bias=bias,
gradient_accumulation_fusion=gradient_accumulation_fusion,
async_grad_allreduce=async_grad_allreduce,
sequence_parallel=sequence_parallel,
)
# Gather if needed.
if parallel_output:
return logits_parallel
else:
return tensor_parallel.gather_from_tensor_model_parallel_region(logits_parallel)
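# In short, the helper above computes logits = input_ @ word_embeddings_weight.T (+ bias)
# with the vocabulary dimension partitioned across tensor-parallel ranks; when
# parallel_output is False the partial logits are gathered back to the full (padded)
# vocabulary size.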
def init_method_normal(sigma):
"""Init method based on N(0, sigma)."""
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
return init_
def init_method_const(val):
def init_(tensor):
return torch.nn.init.constant_(tensor, val)
return init_
def scaled_init_method_normal(sigma, num_layers):
"""Init method based on N(0, sigma/sqrt(2*num_layers)."""
std = sigma / math.sqrt(2.0 * num_layers)
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_
def attention_mask_func(attention_scores, attention_mask):
attention_scores.masked_fill_(attention_mask, -10000.0)
return attention_scores
def get_linear_layer(rows, columns, init_method):
"""Simple linear layer with weight initialization."""
layer = torch.nn.Linear(rows, columns)
init_method(layer.weight)
with torch.no_grad():
layer.bias.zero_()
return layer
@torch.jit.script
def gelu_impl(x):
"""OpenAI's gelu implementation."""
return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * (1.0 + 0.044715 * x * x)))
def openai_gelu(x):
return gelu_impl(x)
def squared_relu(x):
return torch.pow(torch.nn.functional.relu(x), 2)
# This is actually Python equivalent of torch.nn.functional.gelu(), also with type hints for ONNX exporter
@torch.jit.script
def erf_gelu(x):
return x * 0.5 * (torch.erf(x / 1.41421).to(dtype=x.dtype) + torch.ones_like(x).to(dtype=x.dtype))
def average_losses_across_data_parallel_group(losses):
"""Reduce a tensor of losses across all GPUs."""
averaged_losses = torch.cat([loss.clone().detach().view(1) for loss in losses])
torch.distributed.all_reduce(averaged_losses, group=parallel_state.get_data_parallel_group())
averaged_losses = averaged_losses / torch.distributed.get_world_size(
group=parallel_state.get_data_parallel_group()
)
return averaged_losses
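# Illustrative usage (comment-only sketch; assumes torch.distributed and the
# data-parallel group have already been initialized by the training setup):
#   reduced = average_losses_across_data_parallel_group([lm_loss])
#   # reduced[0] is the loss averaged over all data-parallel ranks, e.g. for logging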
def get_ltor_masks_and_position_ids(
data, eod_token, reset_position_ids, reset_attention_mask, eod_mask_loss, compute_attention_mask=True
):
"""Build masks and position id for left to right model."""
# Extract batch size and sequence length.
micro_batch_size, seq_length = data.size()
# Attention mask (lower triangular).
if reset_attention_mask:
att_mask_batch = micro_batch_size
else:
att_mask_batch = 1
attention_mask = None
if compute_attention_mask:
attention_mask = torch.tril(torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)).view(
att_mask_batch, 1, seq_length, seq_length
)
# Loss mask.
loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device)
if eod_mask_loss:
loss_mask[data == eod_token] = 0.0
# Position ids.
position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
position_ids = position_ids.unsqueeze(0).repeat(micro_batch_size, 1)
# We need to clone as the ids will be modified based on batch index.
if reset_position_ids:
position_ids = position_ids.clone()
if reset_position_ids or reset_attention_mask:
# Loop through the batches:
for b in range(micro_batch_size):
# Find indices where the EOD token is.
eod_index = position_ids[b, data[b] == eod_token]
# Clone the indices so they are detached from the positions we are about to modify.
if reset_position_ids:
eod_index = eod_index.clone()
# Loop through EOD indices:
prev_index = 0
for j in range(eod_index.size()[0]):
i = eod_index[j]
# Mask out attention across the EOD boundary.
if reset_attention_mask:
attention_mask[b, 0, (i + 1) :, : (i + 1)] = 0
# Reset positions.
if reset_position_ids:
position_ids[b, (i + 1) :] -= i + 1 - prev_index
prev_index = i + 1
if compute_attention_mask:
# Convert attention mask to binary:
attention_mask = attention_mask < 0.5
return attention_mask, loss_mask, position_ids
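# Worked example (comment-only): for a single sequence [5, 9, EOD, 7, 3] with
# reset_position_ids=True the position ids become [0, 1, 2, 0, 1] (restarting after the
# EOD token); with reset_attention_mask=True the tokens after the EOD cannot attend to
# the EOD or anything before it; with eod_mask_loss=True the loss mask is 0 at the EOD.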
def attn_mask_postprocess(attn_mask):
# [b, 1, s, s]
# Attention masks for enc-dec attention and decoder attention are None when only the encoder hidden states are needed.
if attn_mask is None:
return None
extended_attention_mask = attn_mask.unsqueeze(1)
return extended_attention_mask
def enc_dec_extended_attention_mask(attention_mask_list):
return [attn_mask_postprocess(attn_mask) for attn_mask in attention_mask_list]
def build_position_ids(token_ids):
# Create position ids
seq_length = token_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=token_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(token_ids).clone()
return position_ids
def make_attention_mask_3d(source_mask, target_mask):
"""
Returns a 3-dimensional (3-D) attention mask
:param source_block: 2-D array
:param target_block: 2-D array
"""
mask = target_mask[:, None, :] * source_mask[:, :, None]
return mask
def make_inference_attention_mask_3d(source_block, target_block, pad_id):
"""
Returns a 3-dimensional (3-D) attention mask
:param source_block: 2-D array
:param target_block: 2-D array
"""
# mask = (target_block[:, None, :] != pad_id) * (source_block[:, :, None] != pad_id)
return make_attention_mask_3d(source_block != pad_id, target_block != pad_id)
def make_inference_history_mask_3d(block):
batch, length = block.shape
arange = torch.arange(length, device=block.device)
history_mask = (arange[None,] <= arange[:, None])[
None,
]
history_mask = history_mask.expand(batch, length, length)
return history_mask
def build_attention_mask_3d_padding(source_mask, target_mask):
"""
Returns a 3D joint attention mask for Megatron given two 2D masks
:param source_mask - True for non-masked, else masked [batch, src length]
:param target_mask - True for non-masked, else masked [batch, tgt length]
"""
mask = make_attention_mask_3d(source_mask, target_mask)
# invert mask for Megatron
return mask < 0.5
def build_attention_mask_3d_causal(source_mask, target_mask):
"""
Returns a 3D joint attention mask for Megatron given two 2D masks
:param source_mask - True for non-masked, else masked [batch, src length]
:param target_mask - True for non-masked, else masked [batch, tgt length]
"""
causal_mask = make_inference_history_mask_3d(target_mask)
mask = make_attention_mask_3d(source_mask, target_mask)
mask = mask * causal_mask
# invert mask for Megatron
return mask < 0.5
def build_attention_mask_3d(source_mask, target_mask, attn_mask_type):
"""
Returns a 3D attention mask for Megatron given two 2D masks
:param source_mask - True for non-masked, else masked [batch, src length]
:param target_mask - True for non-masked, else masked [batch, tgt length]
:param attn_mask_type - AttnMaskType enum
"""
if attn_mask_type == AttnMaskType.padding:
mask = build_attention_mask_3d_padding(source_mask, target_mask)
elif attn_mask_type == AttnMaskType.causal:
mask = build_attention_mask_3d_causal(source_mask, target_mask)
else:
raise ValueError(f"Unsupported attention mask attn_mask_type = {attn_mask_type}")
return mask
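# Illustrative usage (comment-only sketch; per the docstrings above, the input 2-D masks
# use True for real tokens and False for padding):
#   enc_self_mask = build_attention_mask_3d(src_mask, src_mask, AttnMaskType.padding)
#   dec_self_mask = build_attention_mask_3d(tgt_mask, tgt_mask, AttnMaskType.causal)
#   # each result is [batch, len, len] with True marking positions to mask out,
#   # i.e. already inverted for Megatron's fused softmax kernels.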
def get_params_for_weight_decay_optimization(
model: Union[torch.nn.Module, List[torch.nn.Module]],
) -> Tuple[Dict, Dict]:
"""Divide params into with-weight-decay and without-weight-decay groups.
Layernorms and biases will have no weight decay but the rest will.
"""
modules = listify_model(model)
weight_decay_params = {'params': []}
no_weight_decay_params = {'params': [], 'weight_decay': 0.0}
for module in modules:
for module_ in module.modules():
if isinstance(module_, (FusedLayerNorm, FastLayerNorm, MixedFusedRMSNorm)):
no_weight_decay_params['params'].extend(
[p for p in list(module_._parameters.values()) if p is not None]
)
else:
weight_decay_params['params'].extend(
[p for n, p in list(module_._parameters.items()) if p is not None and n != 'bias']
)
no_weight_decay_params['params'].extend(
[p for n, p in list(module_._parameters.items()) if p is not None and n == 'bias']
)
return weight_decay_params, no_weight_decay_params
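# Illustrative usage (comment-only sketch; 'model' is any torch.nn.Module or list of
# modules, and the optimizer hyperparameters are placeholders):
#   wd_group, no_wd_group = get_params_for_weight_decay_optimization(model)
#   optimizer = torch.optim.AdamW([wd_group, no_wd_group], lr=1e-4, weight_decay=0.1)
#   # biases and normalization-layer parameters land in the group with weight_decay=0.0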
def get_all_params_for_weight_decay_optimization(
model: Union[torch.nn.Module, List[torch.nn.Module]],
) -> Tuple[Dict[str, List[torch.nn.Parameter]]]:
"""Use all params for weight decay."""
modules = listify_model(model)
weight_decay_params = [
p for module in modules for module_ in module.modules() for p in module_._parameters.values() if p is not None
]
return ({'params': weight_decay_params},)
def get_iterator_k_split(batch: Union[Dict[str, torch.Tensor], List[torch.Tensor]], num_microbatches: int) -> Iterator:
if isinstance(batch, dict):
items = list(batch.items())
assert items[0][1].shape[0] % num_microbatches == 0, "Batch size must be divisible by num_microbatches"
split_batch = [torch.tensor_split(item[1], num_microbatches, dim=0) for item in items]
microbatches = [[(items[i][0], split_batch[i][j]) for i in range(len(items))] for j in range(num_microbatches)]
microbatches = [dict(elem) for elem in microbatches]
else:
assert batch[0].shape[0] % num_microbatches == 0, "Batch size must be divisible by num_microbatches"
split_batch = [
torch.tensor_split(item, num_microbatches, dim=0) if torch.is_tensor(item) else item for item in batch
]
microbatches = [
[elem[i] if elem is not None else elem for elem in split_batch] for i in range(num_microbatches)
]
return itertools.chain(microbatches)
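# Illustrative usage (comment-only sketch): a global batch of 8 samples split into
# 4 microbatches of 2 for pipeline-parallel schedules:
#   batch = {'tokens': torch.zeros(8, 128, dtype=torch.long),
#            'labels': torch.zeros(8, 128, dtype=torch.long)}
#   for micro in get_iterator_k_split(batch, num_microbatches=4):
#       assert micro['tokens'].shape[0] == 2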
def _cast_if_autocast_enabled(tensor):
if torch.is_autocast_enabled():
if isinstance(tensor, torch.Tensor):
if tensor.device.type == 'cuda':
dtype = torch.get_autocast_gpu_dtype()
elif tensor.device.type == 'cpu':
dtype = torch.get_autocast_cpu_dtype()
else:
raise NotImplementedError()
return tensor.to(dtype=dtype)
return tensor
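# Note: when autocast is active, this helper mirrors autocast's dtype selection for explicit
# casts: CUDA tensors are cast to the autocast GPU dtype (e.g. float16/bfloat16) and CPU
# tensors to the autocast CPU dtype, so custom fused ops see the same dtypes as autocast ops.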
| NeMo-main | nemo/collections/nlp/modules/common/megatron/utils.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer."""
from contextlib import nullcontext
from typing import Any, Callable, Optional
import torch
import torch.nn as nn
from einops import rearrange
from nemo.collections.common.parts.adapter_modules import LinearAdapterConfig
from nemo.collections.nlp.modules.common.megatron.adapters.parallel_adapters import (
AdapterName,
ParallelLinearAdapterConfig,
ParallelLinearAdapterWeightTyingConfig,
)
from nemo.collections.nlp.modules.common.megatron.attention import ParallelAttention, ParallelChunkedCrossAttention
from nemo.collections.nlp.modules.common.megatron.fused_bias_dropout_add import (
bias_dropout_add,
bias_dropout_add_fused_inference,
bias_dropout_add_fused_train,
dropout_add,
)
from nemo.collections.nlp.modules.common.megatron.fused_layer_norm import get_layer_norm
from nemo.collections.nlp.modules.common.megatron.layer_norm_1p import LayerNorm1P, LPLayerNorm
from nemo.collections.nlp.modules.common.megatron.layer_type import LayerType
from nemo.collections.nlp.modules.common.megatron.mlp import ParallelMLP, SwitchMLP
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults
from nemo.collections.nlp.parts import utils_funcs
from nemo.core import adapter_mixins
from nemo.utils import logging
try:
from apex.normalization import MixedFusedRMSNorm
from apex.transformer.enums import AttnMaskType, AttnType, ModelType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
ModelType = AttnMaskType = AttnType = LayerType = ApexGuardDefaults()
try:
from megatron.core import ModelParallelConfig, parallel_state, tensor_parallel
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
try:
from transformer_engine.common import recipe
from transformer_engine.pytorch import TransformerLayer, fp8_autocast
from transformer_engine.pytorch.distributed import checkpoint as te_checkpoint
HAVE_TE = True
except (ImportError, ModuleNotFoundError):
HAVE_TE = False
# fake missing class
class TransformerLayer(ApexGuardDefaults):
def __init__(self):
super().__init__()
logging.warning(
"Transformer Engine was not found. transformer_engine.pytorch.transformer.TransformerLayer will not work. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
""" We use the following notation throughout this file:
h: hidden size
n: number of attention heads
p: number of model parallel partitions
np: n/p
hp: h/p
hn: h/n
b: batch size
s: sequence length
l: number of layers
Transformer takes input of size [s, b, h] and returns a
tensor of the same size. We use the following arguments:
hyperparameters: transformer hyperparameters
"""
def get_bias_dropout_add(training):
def _bias_dropout_add(x, bias, residual, prob):
return bias_dropout_add(x, bias, residual, prob, training)
return _bias_dropout_add
def get_dropout_add(training):
def _dropout_add(x, bias, residual, prob):
assert bias is None
return dropout_add(x, bias, residual, prob, training)
return _dropout_add
def remove_bias_from_layernorm(layer):
for module in layer.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
module.register_parameter('bias', None)
class ParallelTransformerLayer_(MegatronModule, adapter_mixins.AdapterModuleMixin):
"""A single transformer layer.
Transformer layer takes input with size [s, b, h] and returns an
output of the same size.
"""
def __init__(
self,
config: ModelParallelConfig,
init_method,
output_layer_init_method,
layer_number,
hidden_size,
ffn_hidden_size,
num_attention_heads,
layer_type=LayerType.encoder,
self_attn_mask_type=AttnMaskType.padding,
fp32_residual_connection=False,
precision=16,
apply_query_key_layer_scaling=False,
kv_channels=None,
layernorm_epsilon=1e-5,
hidden_dropout=0.1,
persist_layer_norm=False,
megatron_amp_O2=False,
bias_activation_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
openai_gelu=False,
onnx_safe=False,
attention_dropout=0.1,
ffn_dropout=0.0,
activation='gelu',
megatron_legacy=False,
bias=True,
chunk_size=64,
normalization='layernorm',
transformer_block_type='pre_ln',
position_embedding_type='learned_absolute',
multi_query_attention=False,
headscale=False,
activations_checkpoint_granularity=None,
normalize_attention_scores=True,
num_moe_experts=1,
moe_frequency=1,
moe_dropout=0.0,
use_flash_attention=False,
):
super(ParallelTransformerLayer_, self).__init__(config=config)
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
self.layer_number = layer_number
self.layer_type = layer_type
self.bias = bias
self.transformer_block_type = transformer_block_type
self.position_embedding_type = position_embedding_type
self.param_dtype = utils_funcs.torch_dtype_from_precision(precision, megatron_amp_O2)
self.set_accepted_adapter_types(
[
LinearAdapterConfig._target_,
ParallelLinearAdapterConfig._target_,
ParallelLinearAdapterWeightTyingConfig._target_,
]
)
if not bias and bias_dropout_add_fusion:
raise ValueError(
'bias_dropout_add_fusion=True requires bias=True, found bias=False. Either set both to True or both to False.'
)
# the low_precision_layernorm does not require a bias term, whereas layernorm1p from apex
# does require a bias, so it cannot be used for bias-less low precision LN such as in MPT-7B
if normalization not in ['layernorm', 'layernorm1p', 'rmsnorm', 'low_precision_layernorm']:
raise ValueError(f'normalization must be "layernorm", "layernorm1p", "rmsnorm" or "low_precision_layernorm", found {normalization}')
if transformer_block_type not in ['pre_ln', 'post_ln', 'normformer']:
raise ValueError(
f'transformer_block_type must be either "pre_ln" or "post_ln" or "normformer", found {transformer_block_type}'
)
self.fp32_residual_connection = fp32_residual_connection # if true move residual connections to fp32
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.bias_dropout_add_fusion = bias_dropout_add_fusion # if true, enable bias dropout fusion
# Self attention.
# retrieval_decoder_after_self_attn skips the self attention
if self.layer_type != LayerType.retrieval_decoder_after_self_attn:
# Layernorm on the input data.
if normalization == 'layernorm':
self.input_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, config.sequence_parallel
)
elif normalization == 'layernorm1p':
self.input_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=config.sequence_parallel
)
elif normalization == 'low_precision_layernorm':
self.input_layernorm = LPLayerNorm(hidden_size, layernorm_epsilon)
else:
self.input_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
# for architectures such as MPT, there is no bias term even on the layernorms
# this code allows us to remove the bias terms from the layernorm module
# so that we can support MPT. However, certain apex-based LNs don't support
# removing bias, so we also have to check for that
if not bias and normalization not in ['layernorm', 'layernorm1p']:
remove_bias_from_layernorm(self.input_layernorm)
self.self_attention = ParallelAttention(
config=config,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
attention_type=AttnType.self_attn,
attn_mask_type=self_attn_mask_type,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
megatron_amp_O2=megatron_amp_O2,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
multi_query_attention=multi_query_attention,
layer_type=layer_type,
megatron_legacy=megatron_legacy,
bias=bias,
headscale=headscale,
position_embedding_type=position_embedding_type,
normalize_attention_scores=normalize_attention_scores,
use_flash_attention=use_flash_attention,
)
if transformer_block_type == 'normformer':
if normalization == 'layernorm':
self.post_attention_normformer_norm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm
)
else:
self.post_attention_normformer_norm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
if self.layer_type != LayerType.decoder_pre_mlp or self.transformer_block_type != 'post_ln':
# the post_attention_layernorm is used for the layernorm after the MLP
# don't need it for decoder_pre_mlp and post_ln
if normalization == 'layernorm':
self.post_attention_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, config.sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_attention_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=config.sequence_parallel
)
elif normalization == 'low_precision_layernorm':
self.post_attention_layernorm = LPLayerNorm(hidden_size, layernorm_epsilon)
else:
self.post_attention_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
if not bias and normalization not in ['layernorm', 'layernorm1p']:
remove_bias_from_layernorm(self.post_attention_layernorm)
if self.layer_type == LayerType.decoder_pre_mlp:
# skip MLP and cross attention
return
# the post_attention_layernorm is used for the layernorm after the MLP
# it is needed for post_ln
if self.layer_type == LayerType.retrieval_decoder_after_self_attn and self.transformer_block_type == 'post_ln':
# Layernorm on the attention output
if normalization == 'layernorm':
self.post_attention_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, config.sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_attention_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=config.sequence_parallel
)
elif normalization == 'low_precision_layernorm':
self.post_attention_layernorm = LPLayerNorm(hidden_size, layernorm_epsilon)
else:
self.post_attention_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
if not bias and normalization not in ['layernorm', 'layernorm1p']:
remove_bias_from_layernorm(self.post_attention_layernorm)
if self.layer_type == LayerType.decoder or self.layer_type == LayerType.retrieval_encoder:
self.inter_attention = ParallelAttention(
config=config,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
attention_type=AttnType.cross_attn,
attn_mask_type=AttnMaskType.padding,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
multi_query_attention=multi_query_attention,
megatron_amp_O2=megatron_amp_O2,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
megatron_legacy=megatron_legacy,
bias=bias,
headscale=headscale,
normalize_attention_scores=normalize_attention_scores,
)
# Normformer normalization
if transformer_block_type == 'normformer':
if normalization == 'layernorm':
self.post_inter_attention_normformer_norm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, config.sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_inter_attention_normformer_norm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=config.sequence_parallel
)
else:
self.post_inter_attention_normformer_norm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
# Layernorm on the attention output.
if normalization == 'layernorm':
self.post_inter_attention_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, config.sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_inter_attention_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=config.sequence_parallel
)
else:
self.post_inter_attention_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
elif (
self.layer_type == LayerType.retrieval_decoder
or self.layer_type == LayerType.retrieval_decoder_after_self_attn
):
self.inter_attention = ParallelChunkedCrossAttention(
config=config,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
megatron_amp_O2=megatron_amp_O2,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
megatron_legacy=megatron_legacy,
chunk_size=chunk_size,
bias=bias,
headscale=headscale,
)
# Normformer normalization
if transformer_block_type == 'normformer':
if normalization == 'layernorm':
self.post_inter_attention_normformer_norm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, config.sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_inter_attention_normformer_norm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=config.sequence_parallel
)
else:
self.post_inter_attention_normformer_norm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
# Layernorm on the attention output.
if normalization == 'layernorm':
self.post_inter_attention_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, config.sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_inter_attention_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=config.sequence_parallel
)
else:
self.post_inter_attention_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
# MLP
if num_moe_experts > 1 and self.layer_number % moe_frequency == 0:
self.mlp = SwitchMLP(
config=config,
num_experts=num_moe_experts,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
dtype=self.param_dtype,
bias_activation_fusion=bias_activation_fusion,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
bias=bias,
transformer_block_type=transformer_block_type,
normalization=normalization,
layernorm_epsilon=layernorm_epsilon,
persist_layer_norm=persist_layer_norm,
dropout=moe_dropout,
)
else:
self.mlp = ParallelMLP(
config=config,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
dtype=self.param_dtype,
bias_activation_fusion=bias_activation_fusion,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
bias=bias,
transformer_block_type=transformer_block_type,
normalization=normalization,
layernorm_epsilon=layernorm_epsilon,
persist_layer_norm=persist_layer_norm,
dropout=ffn_dropout,
)
def _get_bias_droput_add_func(self, transformer_block_type='pre_ln', position_after='attention'):
"""
Returns a function that potentially fuses the dropout and bias addition.
This function is particularly helpful for the normformer architecture, which cannot use the fused kernel after the attention layers but can after the MLP.
"""
# Normformer activations at this point have no bias vector since they've gone through another normalization layer.
if transformer_block_type == 'normformer' and position_after == 'attention':
bias_dropout_add_func = get_dropout_add(self.training)
# Bias dropout add fused kernel
elif self.bias and self.bias_dropout_add_fusion:
if self.training:
bias_dropout_add_func = bias_dropout_add_fused_train
else:
bias_dropout_add_func = bias_dropout_add_fused_inference
# Bias dropout add non-fused kernel
elif self.bias and not self.bias_dropout_add_fusion:
bias_dropout_add_func = get_bias_dropout_add(self.training)
# Dropout add non-fused kernel for a model without bias terms.
else:
bias_dropout_add_func = get_dropout_add(self.training)
return bias_dropout_add_func
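# The returned callable is always invoked as fn(output, bias, residual, dropout_prob);
# the fused variants expect a real bias tensor, while the plain dropout-add path
# (no bias, or after a normformer norm) expects bias to be None.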
def forward(
self,
hidden_states,
attention_mask,
encoder_output=None,
enc_dec_attn_mask=None,
layer_past=None,
get_key_value=False,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
rotary_pos_emb=None, # list of positional embedding tensors, first one self attention, second one and third one are for cross attention (q, k)
self_attention_relative_position_bias=None,
cross_attention_relative_position_bias=None,
checkpoint_core_attention=False,
):
# Self attention.
if rotary_pos_emb is not None:
# self attention pos_emb is (q, q)
self_attention_pos_emb = (rotary_pos_emb[0], rotary_pos_emb[0])
cross_attention_pos_emb = (rotary_pos_emb[1], rotary_pos_emb[2])
else:
self_attention_pos_emb = None
cross_attention_pos_emb = None
if self.layer_type != LayerType.retrieval_decoder_after_self_attn:
# hidden_states: [b, s, h]
# Pre-LN: x -> LN -> MHA -> Residual -> LN -> MLP -> Residual
# Post-LN: x -> MHA -> Residual -> LN -> MLP -> Residual -> LN
# Normformer: x -> LN -> MHA -> LN -> Residual -> MLP (w/LN) -> Residual
residual = hidden_states
# Layer norm at the beginning of the transformer layer.
if self.transformer_block_type in ['pre_ln', 'normformer']:
hidden_states = self.input_layernorm(hidden_states)
attention_output, attention_bias = self.self_attention(
hidden_states,
attention_mask,
layer_past=layer_past,
get_key_value=get_key_value,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
rotary_pos_emb=self_attention_pos_emb,
relative_position_bias=self_attention_relative_position_bias,
checkpoint_core_attention=checkpoint_core_attention,
)
if get_key_value:
attention_output, presents = attention_output
# If normformer, apply norm on the output of the self attention.
if self.transformer_block_type == 'normformer':
# Normformer normalization
attention_output = (
attention_output + attention_bias if attention_bias is not None else attention_output
)
attention_output = self.post_attention_normformer_norm(attention_output)
attention_bias = None
# jit scripting for a nn.module (with dropout) is not
# triggering the fusion kernel. For now, we use two
# different nn.functional routines to account for varying
# dropout semantics during training and inference phases.
bias_dropout_add_func = self._get_bias_droput_add_func(
transformer_block_type=self.transformer_block_type, position_after='attention'
)
if attention_bias is not None:
attention_bias = attention_bias.expand_as(residual)
if self.is_adapter_available():
adapter_1 = self.get_adapter_module(AdapterName.PRE_ATTN_ADAPTER)
if adapter_1:
attention_output = (
adapter_1(attention_output) + attention_output
) # simple adapter call with residual connection
layernorm_input = bias_dropout_add_func(attention_output, attention_bias, residual, self.hidden_dropout)
# print(f"Layer: {self.layer_number} Attention checksum {layernorm_input.sum()}")
# Post-LN normalization after residual
if self.transformer_block_type == 'post_ln':
normalization_output = self.input_layernorm(layernorm_input)
layernorm_input = normalization_output
elif self.transformer_block_type in ['pre_ln', 'normformer']:
# Layer norm post the self attention.
normalization_output = self.post_attention_layernorm(layernorm_input)
else:
layernorm_input, normalization_output = hidden_states
if self.layer_type == LayerType.decoder_pre_mlp:
return layernorm_input, normalization_output
if (
self.layer_type == LayerType.decoder
or self.layer_type == LayerType.retrieval_decoder
or self.layer_type == LayerType.retrieval_encoder
or self.layer_type == LayerType.retrieval_decoder_after_self_attn
):
if (
self.layer_type == LayerType.retrieval_decoder
or self.layer_type == LayerType.retrieval_decoder_after_self_attn
):
attention_output, attention_bias = self.inter_attention(
normalization_output,
enc_dec_attn_mask,
encoder_output=encoder_output,
rotary_pos_emb=cross_attention_pos_emb,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
checkpoint_core_attention=checkpoint_core_attention,
)
else:
attention_output, attention_bias = self.inter_attention(
normalization_output,
enc_dec_attn_mask,
encoder_output=encoder_output,
rotary_pos_emb=cross_attention_pos_emb,
relative_position_bias=cross_attention_relative_position_bias,
checkpoint_core_attention=checkpoint_core_attention,
)
# If normformer, apply norm on the output of the cross attention.
if self.transformer_block_type == 'normformer':
# Normformer normalization
attention_output = (
attention_output + attention_bias if attention_bias is not None else attention_output
)
attention_output = self.post_inter_attention_normformer_norm(attention_output)
attention_bias = None
residual = layernorm_input
bias_dropout_add_func = self._get_bias_droput_add_func(
transformer_block_type=self.transformer_block_type, position_after='attention'
)
layernorm_input = bias_dropout_add_func(attention_output, attention_bias, residual, self.hidden_dropout)
# print(f"Layer: {self.layer_number} Cross-Attention checksum {layernorm_input.sum()}")
normalization_output = self.post_inter_attention_layernorm(layernorm_input)
# Post-LN normalization after residual
if self.transformer_block_type == 'post_ln':
layernorm_input = normalization_output
# MLP.
mlp_output, mlp_bias = self.mlp(normalization_output)
if self.is_adapter_available():
# TODO: (@adithyre) was able to move adapter_2 back to the end of the transformer after ptl 1.7 update.
adapter_2 = self.get_adapter_module(AdapterName.POST_ATTN_ADAPTER)
if adapter_2:
mlp_output = adapter_2(mlp_output) + mlp_output # simple adapter call with residual connection
residual = layernorm_input
bias_dropout_add_func = self._get_bias_droput_add_func(
transformer_block_type=self.transformer_block_type, position_after='mlp'
)
output = bias_dropout_add_func(mlp_output, mlp_bias, residual, self.hidden_dropout)
# print(f"Layer: {self.layer_number} MLP + Dropout + Residual checksum {output.sum()}")
if self.transformer_block_type == 'post_ln':
output = self.post_attention_layernorm(output)
if get_key_value:
output = [output, presents]
return output
class ParallelTransformerLayer(ParallelTransformerLayer_):
def __init__(
self,
config: ModelParallelConfig,
init_method,
output_layer_init_method,
layer_number,
hidden_size,
ffn_hidden_size,
num_attention_heads,
layer_type=LayerType.encoder,
self_attn_mask_type=AttnMaskType.padding,
fp32_residual_connection=False,
precision=16,
apply_query_key_layer_scaling=False,
kv_channels=None,
layernorm_epsilon=1e-5,
hidden_dropout=0.1,
bias_dropout_add_fusion=True,
persist_layer_norm=False,
megatron_amp_O2=False,
bias_activation_fusion=True,
openai_gelu=False,
onnx_safe=False,
masked_softmax_fusion=True,
attention_dropout=0.1,
ffn_dropout=0.0,
activation='gelu',
megatron_legacy=False,
bias=True,
chunk_size=64,
normalization='layernorm',
transformer_block_type='pre_ln',
position_embedding_type='learned_absolute',
multi_query_attention=False,
headscale=False,
activations_checkpoint_granularity=None,
normalize_attention_scores=True,
num_moe_experts=1,
moe_frequency=1,
moe_dropout=0.0,
use_flash_attention=False,
):
super(ParallelTransformerLayer, self).__init__(
config=config,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
num_attention_heads=num_attention_heads,
layer_type=layer_type,
self_attn_mask_type=self_attn_mask_type,
fp32_residual_connection=fp32_residual_connection,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
layernorm_epsilon=layernorm_epsilon,
hidden_dropout=hidden_dropout,
bias_dropout_add_fusion=bias_dropout_add_fusion,
persist_layer_norm=persist_layer_norm,
megatron_amp_O2=megatron_amp_O2,
bias_activation_fusion=bias_activation_fusion,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
activation=activation,
megatron_legacy=megatron_legacy,
bias=bias,
chunk_size=chunk_size,
normalization=normalization,
transformer_block_type=transformer_block_type,
position_embedding_type=position_embedding_type,
headscale=headscale,
multi_query_attention=multi_query_attention,
activations_checkpoint_granularity=activations_checkpoint_granularity,
normalize_attention_scores=normalize_attention_scores,
num_moe_experts=num_moe_experts,
moe_frequency=moe_frequency,
moe_dropout=moe_dropout,
use_flash_attention=use_flash_attention,
)
# Dtype for forward pass - ignore amp O2
self.dtype = utils_funcs.torch_dtype_from_precision(precision, megatron_amp_O2=None)
def forward(
self,
hidden_states,
attention_mask,
encoder_output=None,
enc_dec_attn_mask=None,
rotary_pos_emb=None,
layer_past=None,
get_key_value=False,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
self_attention_relative_position_bias=None,
cross_attention_relative_position_bias=None,
checkpoint_core_attention=False,
):
if self.dtype == torch.float32:
return super().forward(
hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
layer_past,
get_key_value,
set_inference_key_value_memory,
inference_max_sequence_len,
rotary_pos_emb,
self_attention_relative_position_bias,
cross_attention_relative_position_bias,
checkpoint_core_attention,
)
with torch.autocast(device_type="cuda", dtype=self.dtype):
return super().forward(
hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
layer_past,
get_key_value,
set_inference_key_value_memory,
inference_max_sequence_len,
rotary_pos_emb,
self_attention_relative_position_bias,
cross_attention_relative_position_bias,
checkpoint_core_attention,
)
class AutocastTransformerLayer(TransformerLayer):
def __init__(
self,
hidden_size: int,
ffn_hidden_size: int,
layernorm_epsilon: float,
num_attention_heads: int,
init_method: Callable,
output_layer_init_method: Callable,
hidden_dropout: float,
attention_dropout: float,
layer_number: Optional[int] = None,
kv_channels: Optional[int] = None,
self_attn_mask_type: str = "causal",
tp_group: Optional[Any] = None,
tp_size: int = 1,
params_dtype: torch.dtype = torch.float32,
get_rng_state_tracker: Optional[Callable] = None,
fuse_wgrad_accumulation: bool = False,
apply_query_key_layer_scaling: bool = False,
attention_softmax_in_fp32: bool = False,
seq_length: Optional[int] = None,
micro_batch_size: Optional[int] = None,
sequence_parallel: bool = False,
apply_residual_connection_post_layernorm: bool = False,
output_layernorm: bool = False,
layer_type: str = "encoder",
drop_path_rate: float = 0,
use_emha: bool = False,
ub_tp_comm_overlap: bool = False,
autocast_dtype: Any = 16,
zero_centered_gamma: bool = False,
) -> None:
super().__init__(
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
layernorm_epsilon=layernorm_epsilon,
num_attention_heads=num_attention_heads,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
layer_number=layer_number,
kv_channels=kv_channels,
self_attn_mask_type=self_attn_mask_type,
tp_group=tp_group,
tp_size=tp_size,
params_dtype=params_dtype,
get_rng_state_tracker=get_rng_state_tracker,
fuse_wgrad_accumulation=fuse_wgrad_accumulation,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
attention_softmax_in_fp32=attention_softmax_in_fp32,
seq_length=seq_length,
micro_batch_size=micro_batch_size,
sequence_parallel=sequence_parallel,
apply_residual_connection_post_layernorm=apply_residual_connection_post_layernorm,
output_layernorm=output_layernorm,
layer_type=layer_type,
drop_path_rate=drop_path_rate,
set_parallel_mode=tp_size > 1,
fuse_qkv_params=True,
zero_centered_gamma=zero_centered_gamma,
ub_tp_comm_overlap=ub_tp_comm_overlap,
)
# use_emha=use_emha,
# Dtype for forward pass - ignore amp O2
self.dtype = utils_funcs.torch_dtype_from_precision(autocast_dtype, megatron_amp_O2=None)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
encoder_output: Optional[torch.Tensor] = None,
enc_dec_attn_mask: Optional[torch.Tensor] = None,
inference_params: Optional[Any] = None,
is_first_microbatch: Optional[bool] = None,
checkpoint_core_attention: Optional[bool] = False,
) -> torch.Tensor:
if self.dtype == torch.float32:
return super().forward(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
inference_params=inference_params,
is_first_microbatch=is_first_microbatch,
checkpoint_core_attention=checkpoint_core_attention,
)
with torch.autocast(device_type="cuda", dtype=self.dtype):
return super().forward(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
inference_params=inference_params,
is_first_microbatch=is_first_microbatch,
checkpoint_core_attention=checkpoint_core_attention,
)
class ParallelTransformer(MegatronModule):
"""Transformer class."""
def __init__(
self,
config: ModelParallelConfig,
init_method,
output_layer_init_method,
num_layers,
hidden_size,
ffn_hidden_size,
num_attention_heads,
apply_query_key_layer_scaling=False,
kv_channels=None,
layer_type=LayerType.encoder, # it can be a list of types or single type
self_attn_mask_type=AttnMaskType.padding,
pre_process=True,
post_process=True,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=None,
layernorm_epsilon=1e-5,
hidden_dropout=0.1,
attention_dropout=0.1,
ffn_dropout=0.0,
megatron_amp_O2=False,
bias_activation_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
persist_layer_norm=False,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
model_type=ModelType.encoder_or_decoder,
megatron_legacy=False,
bias=True,
chunk_size=64,
normalization='layernorm',
transformer_block_type='pre_ln',
position_embedding_type='learned_absolute',
headscale=False,
layer_number_offset=0, # this is use only for attention norm_factor scaling
activations_checkpoint_granularity=None,
activations_checkpoint_layers_per_pipeline=None,
transformer_engine=False,
fp8=False,
fp8_e4m3=False,
fp8_hybrid=False,
fp8_margin=0,
fp8_interval=1,
fp8_amax_history_len=1,
fp8_amax_compute_algo='most_recent',
reduce_amax=True,
use_emha=False,
ub_tp_comm_overlap=False,
normalize_attention_scores=True,
multi_query_attention=False,
num_moe_experts=1,
moe_frequency=1,
moe_dropout=0.0,
use_flash_attention=False,
):
super(ParallelTransformer, self).__init__(config=config)
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
self.fp32_residual_connection = fp32_residual_connection
self.pre_process = pre_process
self.post_process = post_process
self.input_tensor = None
self.self_attn_mask_type = self_attn_mask_type
self.model_type = model_type
self.normalization = normalization
self.transformer_block_type = transformer_block_type
self.layer_type = layer_type
self.position_embedding_type = position_embedding_type
self.multi_query_attention = multi_query_attention
self.inference_current_sequence_len = 0
self.inference_params = None
self.activations_checkpoint_method = activations_checkpoint_method
self.activations_checkpoint_num_layers = activations_checkpoint_num_layers
self.activations_checkpoint_granularity = activations_checkpoint_granularity
self.activations_checkpoint_layers_per_pipeline = activations_checkpoint_layers_per_pipeline
if self.activations_checkpoint_granularity:
if self.activations_checkpoint_granularity == 'selective':
if self.activations_checkpoint_method == 'uniform':
logging.info(
(
f'Using uniform activation checkpointing with granularity selective forces all layers to use checkpointing.'
)
)
elif self.activations_checkpoint_method == 'block':
logging.info(
(
f'Using block activation checkpointing with granularity selective forces all layers to use checkpointing.'
)
)
else:
raise ValueError(
f'activations_checkpoint_method should be "uniform" or "block" when using granularity selective.'
)
self.activations_checkpoint_num_layers = num_layers # forcing all layers
elif self.activations_checkpoint_granularity == 'full':
if self.activations_checkpoint_method in ['uniform', 'block']:
if not self.activations_checkpoint_num_layers:
logging.info(
(
f'Using uniform or block activation checkpointing requires activations_checkpoint_num_layers to be set. '
f'Got: {self.activations_checkpoint_num_layers}. Setting to 1 by default.'
)
)
self.activations_checkpoint_num_layers = 1 # keeping the old default
else:
raise ValueError(
f'activations_checkpoint_method should be "uniform" or "block" when using granularity full.'
)
else:
raise ValueError(f'activations_checkpoint_granularity should be "selective" or "full".')
self.sequence_parallel = config.sequence_parallel
self.transformer_engine = transformer_engine
self.fp8 = fp8
self.fp8_e4m3 = fp8_e4m3
self.fp8_hybrid = fp8_hybrid
self.fp8_margin = fp8_margin
self.fp8_interval = fp8_interval
self.fp8_amax_history_len = fp8_amax_history_len
self.fp8_amax_compute_algo = fp8_amax_compute_algo
self.reduce_amax = reduce_amax
self.fp8_recipe = None
if self.fp8:
if self.fp8_e4m3:
fp8_format = recipe.Format.E4M3
elif self.fp8_hybrid:
fp8_format = recipe.Format.HYBRID
self.fp8_recipe = recipe.DelayedScaling(
margin=self.fp8_margin,
interval=self.fp8_interval,
fp8_format=fp8_format,
amax_history_len=self.fp8_amax_history_len,
amax_compute_algo=self.fp8_amax_compute_algo,
reduce_amax=reduce_amax,
)
self.is_first_microbatch = True
self.microbatch_count = 0 # transformer engine forward needs to know if it is working on the first microbatch
self.checkpoint_core_attention = (
activations_checkpoint_granularity == 'selective'
) # transformer engine forward allows for more granular selective checkpointing
if self.model_type == ModelType.encoder_or_decoder:
assert (
num_layers % parallel_state.get_pipeline_model_parallel_world_size() == 0
), 'num_layers must be divisible by pipeline_model_parallel_size'
assert moe_frequency <= num_layers, 'MoE frequency must be <= number of transformer layers'
# TODO: Add similar assert for encoder-decoder.
self.num_layers = self.get_num_layers(num_layers)
if (
self.activations_checkpoint_num_layers is not None
and self.activations_checkpoint_num_layers > self.num_layers
):
self.activations_checkpoint_num_layers = self.num_layers
# Transformer layers.
def build_layer(layer_number):
if isinstance(layer_type, list):
lt = layer_type[layer_number - 1]
else:
lt = layer_type
if self.transformer_engine:
return AutocastTransformerLayer(
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
layernorm_epsilon=layernorm_epsilon,
num_attention_heads=num_attention_heads,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
layer_number=layer_number + layer_number_offset,
kv_channels=kv_channels,
self_attn_mask_type=self_attn_mask_type.name,
tp_size=parallel_state.get_tensor_model_parallel_world_size(),
params_dtype=torch.float32, # dtype params are initialized in
get_rng_state_tracker=tensor_parallel.random.get_cuda_rng_tracker,
fuse_wgrad_accumulation=config.gradient_accumulation_fusion,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
seq_length=None, # used for jit warmup
micro_batch_size=None, # used for jit warmup
sequence_parallel=config.sequence_parallel,
apply_residual_connection_post_layernorm=False,
autocast_dtype=precision,
use_emha=use_emha,
ub_tp_comm_overlap=ub_tp_comm_overlap,
zero_centered_gamma=normalization == 'layernorm1p',
)
else:
return ParallelTransformerLayer(
config=config,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number + layer_number_offset,
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
layer_type=lt,
self_attn_mask_type=self_attn_mask_type,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
layernorm_epsilon=layernorm_epsilon,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
megatron_amp_O2=megatron_amp_O2,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
position_embedding_type=position_embedding_type,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
megatron_legacy=megatron_legacy,
bias=bias,
chunk_size=chunk_size,
normalization=normalization,
transformer_block_type=transformer_block_type,
headscale=headscale,
activations_checkpoint_granularity=activations_checkpoint_granularity,
normalize_attention_scores=normalize_attention_scores,
num_moe_experts=num_moe_experts,
moe_frequency=moe_frequency,
moe_dropout=moe_dropout,
use_flash_attention=use_flash_attention,
)
if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None:
assert num_layers % parallel_state.get_virtual_pipeline_model_parallel_world_size() == 0, (
'num_layers_per_stage must be divisible by ' 'virtual_pipeline_model_parallel_size'
)
assert self.model_type.value != 2, 'virtual pipeline parallel is currently only supported for GPT'
# Number of layers in each model chunk is the number of layers in the stage,
# divided by the number of model chunks in a stage.
self.num_layers = self.num_layers // parallel_state.get_virtual_pipeline_model_parallel_world_size()
# With 8 layers, 2 stages, and 4 model chunks, we want an assignment of
# layers to stages like (each list is a model chunk):
# Stage 0: [0] [2] [4] [6]
# Stage 1: [1] [3] [5] [7]
# With 8 layers, 2 stages, and 2 virtual stages, we want an assignment of
# layers to stages like (each list is a model chunk):
# Stage 0: [0, 1] [4, 5]
# Stage 1: [2, 3] [6, 7]
offset = parallel_state.get_virtual_pipeline_model_parallel_rank() * (
num_layers // parallel_state.get_virtual_pipeline_model_parallel_world_size()
) + (parallel_state.get_pipeline_model_parallel_rank() * self.num_layers)
else:
# Each stage gets a contiguous set of layers.
if (
self.model_type == ModelType.encoder_and_decoder
and parallel_state.get_pipeline_model_parallel_world_size() > 1
):
pipeline_rank = parallel_state.get_pipeline_model_parallel_rank()
if layer_type == LayerType.encoder:
offset = pipeline_rank * self.num_layers
else:
num_ranks_in_enc = parallel_state.get_pipeline_model_parallel_split_rank()
offset = (pipeline_rank - num_ranks_in_enc) * self.num_layers
else:
offset = parallel_state.get_pipeline_model_parallel_rank() * self.num_layers
self.layers = torch.nn.ModuleList([build_layer(i + 1 + offset) for i in range(self.num_layers)])
if self.post_process and self.transformer_block_type != 'post_ln':
# Final layer norm before output.
if normalization == 'layernorm':
self.final_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel=config.sequence_parallel
)
elif normalization == 'layernorm1p':
self.final_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=config.sequence_parallel
)
elif normalization == 'low_precision_layernorm':
self.final_layernorm = LPLayerNorm(hidden_size, layernorm_epsilon)
else:
self.final_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
# for architectures such as MPT, there is no bias term even on the layernorms
# this code allows us to remove the bias terms from the layernorm module
# so that we can support MPT. However, certain apex-based LNs don't support
# removing bias, so we also have to check for that
if not bias and normalization not in ['layernorm', 'layernorm1p']:
remove_bias_from_layernorm(self.final_layernorm)
def _get_layer(self, layer_number):
return self.layers[layer_number]
def get_num_layers(self, num_layers):
"""Compute the number of transformer layers resident on the current rank."""
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
if self.model_type == ModelType.encoder_and_decoder:
assert parallel_state.get_pipeline_model_parallel_split_rank() is not None
num_ranks_in_encoder = parallel_state.get_pipeline_model_parallel_split_rank()
num_ranks_in_decoder = parallel_state.get_pipeline_model_parallel_world_size() - num_ranks_in_encoder
if self.layer_type == LayerType.encoder:
assert (
num_layers % num_ranks_in_encoder == 0
), 'num_layers must be divisible by number of ranks given to encoder'
elif self.layer_type == LayerType.decoder:
assert (
num_layers % num_ranks_in_decoder == 0
), 'num_layers must be divisible by number of ranks given to decoder'
else:
raise ValueError(f"Unknown layer type {self.layer_type}")
if parallel_state.is_pipeline_stage_before_split():
num_layers = num_layers // num_ranks_in_encoder
else:
num_layers = num_layers // num_ranks_in_decoder
elif self.model_type == ModelType.encoder_or_decoder:
assert (
num_layers % parallel_state.get_pipeline_model_parallel_world_size() == 0
), 'num_layers must be divisible by pipeline_model_parallel_size'
num_layers = num_layers // parallel_state.get_pipeline_model_parallel_world_size()
return num_layers
def _checkpointed_forward(
self,
hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
rotary_pos_emb,
self_attention_relative_position_bias,
cross_attention_relative_position_bias,
checkpoint_activations_all_layers,
):
"""Forward method with activation checkpointing."""
def custom(start, end):
if self.transformer_engine:
def custom_forward(*inputs):
hidden_states = inputs[0]
attention_mask = inputs[1]
encoder_output = inputs[2]
enc_dec_attn_mask = inputs[3]
for index in range(start, end):
layer = self._get_layer(index)
hidden_states = layer(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
inference_params=None,
is_first_microbatch=self.is_first_microbatch,
checkpoint_core_attention=False,
)
return hidden_states
else:
def custom_forward(*inputs):
if len(inputs) == 9:
hidden_states = inputs[0]
attention_mask = inputs[1]
encoder_output = inputs[2]
enc_dec_attn_mask = inputs[3]
rotary_pos_emb = (inputs[4], inputs[5], inputs[6])
self_attention_relative_position_bias = inputs[7]
cross_attention_relative_position_bias = inputs[8]
elif len(inputs) == 10:
hidden_states = (inputs[0], inputs[1])
attention_mask = inputs[2]
encoder_output = inputs[3]
enc_dec_attn_mask = inputs[4]
rotary_pos_emb = (inputs[5], inputs[6], inputs[7])
self_attention_relative_position_bias = inputs[8]
cross_attention_relative_position_bias = inputs[9]
else:
hidden_states = inputs[0]
attention_mask = inputs[1]
encoder_output = inputs[2]
enc_dec_attn_mask = inputs[3]
rotary_pos_emb = inputs[4]
self_attention_relative_position_bias = inputs[5]
cross_attention_relative_position_bias = inputs[6]
for index in range(start, end):
layer = self._get_layer(index)
hidden_states = layer(
hidden_states=hidden_states,
attention_mask=attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
rotary_pos_emb=rotary_pos_emb,
self_attention_relative_position_bias=self_attention_relative_position_bias,
cross_attention_relative_position_bias=cross_attention_relative_position_bias,
)
if isinstance(hidden_states, tuple):
pass
else:
hidden_states = hidden_states.contiguous()
return hidden_states
return custom_forward
if self.activations_checkpoint_method == 'uniform':
# Uniformly divide the total number of Transformer layers and checkpoint
# the input activation of each divided chunk.
# This method further reduces memory usage by reducing the number of checkpoints.
l = 0
while l < self.num_layers:
if isinstance(hidden_states, tuple):
hidden_tuple = (hidden_states[0], hidden_states[1])
else:
hidden_tuple = (hidden_states,)
middle_tuple = (
attention_mask,
encoder_output,
enc_dec_attn_mask,
)
if rotary_pos_emb is None:
rot_tuple = (rotary_pos_emb,)
else:
rot_tuple = (rotary_pos_emb[0], rotary_pos_emb[1], rotary_pos_emb[2])
final_tuple = (self_attention_relative_position_bias, cross_attention_relative_position_bias)
arg_tuple = hidden_tuple + middle_tuple + rot_tuple + final_tuple
if self.transformer_engine:
hidden_states = te_checkpoint(
custom(l, l + self.activations_checkpoint_num_layers),
False,
tensor_parallel.random.get_cuda_rng_tracker,
parallel_state.get_tensor_model_parallel_group(),
*arg_tuple,
)
else:
hidden_states = tensor_parallel.checkpoint(
custom(l, l + self.activations_checkpoint_num_layers), False, *arg_tuple
)
l += self.activations_checkpoint_num_layers
elif self.activations_checkpoint_method == 'block':
# When pipeline-parallel size > 1 and 'num_micro_batches_with_partial_activation_checkpoints' = int,
# pipeline scheduling can force checkpointing of all layers or only some layers in a micro-batch.
if checkpoint_activations_all_layers:
activations_checkpoint_num_layers = self.num_layers
else:
activations_checkpoint_num_layers = self.activations_checkpoint_num_layers
if (
parallel_state.get_pipeline_model_parallel_world_size() > 0
and self.activations_checkpoint_layers_per_pipeline is not None
):
# Decrease the number of layers to checkpoint at later pipeline stages
activations_checkpoint_num_layers -= int(
parallel_state.get_pipeline_model_parallel_rank()
* self.activations_checkpoint_layers_per_pipeline
)
# Checkpoint the input activation of only a set number of individual
# Transformer layers and skip the rest.
# This method makes fuller use of device memory by removing redundant re-computation.
for l in range(self.num_layers):
if isinstance(hidden_states, tuple):
hidden_tuple = (hidden_states[0], hidden_states[1])
else:
hidden_tuple = (hidden_states,)
middle_tuple = (
attention_mask,
encoder_output,
enc_dec_attn_mask,
)
if rotary_pos_emb is None:
rot_tuple = (rotary_pos_emb,)
else:
rot_tuple = (rotary_pos_emb[0], rotary_pos_emb[1], rotary_pos_emb[2])
final_tuple = (self_attention_relative_position_bias, cross_attention_relative_position_bias)
arg_tuple = hidden_tuple + middle_tuple + rot_tuple + final_tuple
if l < activations_checkpoint_num_layers:
if self.transformer_engine:
hidden_states = te_checkpoint(
custom(l, l + 1),
False,
tensor_parallel.random.get_cuda_rng_tracker,
parallel_state.get_tensor_model_parallel_group(),
*arg_tuple,
)
else:
hidden_states = tensor_parallel.checkpoint(custom(l, l + 1), False, *arg_tuple)
else:
hidden_states = custom(l, l + 1)(*arg_tuple)
else:
raise ValueError("Invalid activation checkpoint method.")
return hidden_states
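# Example of the two schemes above (illustrative numbers only): with num_layers=12 and
# activations_checkpoint_num_layers=3, 'uniform' checkpoints the input of each chunk of
# 3 consecutive layers ([0-2], [3-5], [6-8], [9-11]), while 'block' checkpoints each of
# the first 3 layers individually and runs the remaining 9 without checkpointing.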
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
self.input_tensor = input_tensor
def forward(
self,
hidden_states,
attention_mask,
layer_past=None,
get_key_value=False,
encoder_output=None,
enc_dec_attn_mask=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
rotary_pos_emb=None, # list of positional embedding tensors, first one self attention, second one and third one are for cross attention (q, k)
retrieved_emb=None, # tensor of retrieved embedding of shape [b, k, r, n, d]
self_attention_relative_position_bias=None,
cross_attention_relative_position_bias=None,
checkpoint_activations_all_layers=None,
):
# Checks.
if inference_max_sequence_len:
assert self.activations_checkpoint_method is None, 'inference does not work with activation checkpointing'
if layer_past is not None:
assert get_key_value, 'when layer_past is not None, ' 'expected get_key_value to be set'
if get_key_value:
assert self.activations_checkpoint_method is None, (
'get_key_value does not work with ' 'activation checkpointing'
)
if not self.pre_process:
# See set_input_tensor()
hidden_states = self.input_tensor
# TODO: @Yi Dong, what should this be?
if retrieved_emb is not None:
assert len(retrieved_emb.shape) == 5
# this is retrieval decoder, need special transpose
encoder_output = rearrange(retrieved_emb, 'b k r n d -> k r n b d').contiguous()
"""
is_first_microbatch is an optimization parameter for transformer engine.
It indicates if the current step in the forward pass is the first in a gradient accumulation cycle.
If set, FP8 weights are cached and some minor optimizations are applied to fuse_wgrad_accumulation
"""
from apex.transformer.pipeline_parallel.utils import _GLOBAL_NUM_MICROBATCHES_CALCULATOR
num_micro_batches = getattr(_GLOBAL_NUM_MICROBATCHES_CALCULATOR, 'num_micro_batches', 1)
if self.sequence_parallel:
rng_context = tensor_parallel.random.get_cuda_rng_tracker().fork()
else:
rng_context = nullcontext()
with rng_context:
# fp8_autocast will not do anything if TE or FP8 isn't used
fp8_group = None
if self.fp8 and parallel_state.model_parallel_is_initialized():
fp8_group = parallel_state.get_amax_reduction_group()
if HAVE_TE:
# if TE is installed but fp8 is not available then this will do nothing
fp8_context = fp8_autocast(enabled=self.fp8, fp8_recipe=self.fp8_recipe, fp8_group=fp8_group)
else:
fp8_context = nullcontext()
with fp8_context:
if self.activations_checkpoint_granularity == 'full' and self.activations_checkpoint_num_layers > 0:
hidden_states = self._checkpointed_forward(
hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
rotary_pos_emb,
self_attention_relative_position_bias,
cross_attention_relative_position_bias,
checkpoint_activations_all_layers,
)
else:
if get_key_value:
presents = []
if self.transformer_engine:
# Pass key value information to TE through inference_params to pre-allocate memory
if set_inference_key_value_memory:
self.inference_params = type('', (), {})()
self.inference_params.max_sequence_len = inference_max_sequence_len
self.inference_params.max_batch_size = hidden_states.size(1)
self.inference_params.batch_size_offset = 0
self.inference_params.key_value_memory_dict = {}
self.inference_params.sequence_len_offset = 0
self.inference_current_sequence_len = 0
                        if self.inference_params is not None:
self.inference_params.sequence_len_offset = self.inference_current_sequence_len
for index in range(self.num_layers):
layer = self._get_layer(index)
past = None
if layer_past is not None:
past = layer_past[index]
if self.activations_checkpoint_granularity == 'selective':
                            # When pipeline-parallel size > 1 and 'num_micro_batches_with_partial_activation_checkpoints' is an int,
                            # pipeline scheduling can force checkpointing of either all layers or only some layers in a micro-batch.
if (
checkpoint_activations_all_layers == True
or self.activations_checkpoint_method == 'uniform'
):
checkpoint_core_attention = True
elif self.activations_checkpoint_method == 'block':
activations_checkpoint_num_layers = self.activations_checkpoint_num_layers
# Decrease the number of layers to checkpoint at later pipeline stages
if self.activations_checkpoint_layers_per_pipeline is not None:
activations_checkpoint_num_layers -= int(
parallel_state.get_pipeline_model_parallel_rank()
* self.activations_checkpoint_layers_per_pipeline
)
checkpoint_core_attention = index < activations_checkpoint_num_layers
else:
checkpoint_core_attention = False
if self.transformer_engine:
hidden_states = layer(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
inference_params=self.inference_params,
is_first_microbatch=self.is_first_microbatch,
checkpoint_core_attention=checkpoint_core_attention,
)
else:
hidden_states = layer(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
layer_past=past,
get_key_value=get_key_value,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
rotary_pos_emb=rotary_pos_emb,
self_attention_relative_position_bias=self_attention_relative_position_bias,
cross_attention_relative_position_bias=cross_attention_relative_position_bias,
checkpoint_core_attention=checkpoint_core_attention,
)
# Update current sequence length outside of the loops
if self.transformer_engine:
self.inference_current_sequence_len += hidden_states.size(0)
# Skip counter update for eval and activation checkpointing
if torch.is_grad_enabled() and self.training:
self.microbatch_count += 1
if self.microbatch_count % num_micro_batches == 0:
self.microbatch_count = 0
self.is_first_microbatch = True
else:
self.is_first_microbatch = False
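                # Hedged worked example (added note): with num_micro_batches = 4, the
                # counter goes 1, 2, 3 (leaving is_first_microbatch False for the next
                # forward) and then 4, which resets it to 0 and flips
                # is_first_microbatch back to True, so the first micro-batch of the
                # next gradient-accumulation window re-caches FP8 weights.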
output = hidden_states
# Final layer norm.
if self.post_process:
# only apply the final_layernorm for pre-ln
if self.transformer_block_type != 'post_ln':
output = self.final_layernorm(hidden_states)
if get_key_value:
output = [output, presents]
return output
| NeMo-main | nemo/collections/nlp/modules/common/megatron/transformer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer based language model."""
import torch
from nemo.collections.nlp.modules.common.megatron.fused_layer_norm import get_layer_norm
from nemo.collections.nlp.modules.common.megatron.layer_type import LayerType
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.transformer import ParallelTransformer
from nemo.collections.nlp.modules.common.megatron.utils import (
ApexGuardDefaults,
attn_mask_postprocess,
build_attention_mask_3d,
)
try:
from apex.normalization import MixedFusedRMSNorm
from apex.transformer.enums import AttnMaskType, ModelType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
AttnMaskType = ApexGuardDefaults()
ModelType = ApexGuardDefaults()
try:
from megatron.core import ModelParallelConfig
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
__all__ = ["MegatronPerceiverEncoderModule"]
class MegatronPerceiverEncoderModule(MegatronModule):
"""Transformer encoder model.
"""
def __init__(
self,
config: ModelParallelConfig,
init_method,
output_layer_init_method,
hidden_size,
ffn_hidden_size,
num_layers,
num_attention_heads,
apply_query_key_layer_scaling=True,
kv_channels=None,
pre_process=True,
post_process=True,
megatron_amp_O2=False,
encoder_attn_mask_type=AttnMaskType.padding,
hidden_dropout=0.1,
attention_dropout=0.1,
ffn_dropout=0.0,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=1,
activations_checkpoint_granularity=None,
layernorm_epsilon=1e-5,
bias_activation_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
persist_layer_norm=False,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
bias=True,
normalization='layernorm',
transformer_block_type='pre_ln',
headscale=False,
parent_model_type=ModelType.encoder_or_decoder,
hidden_steps=32,
num_self_attention_per_cross_attention=1,
normalize_attention_scores=True,
megatron_legacy=False,
):
super(MegatronPerceiverEncoderModule, self).__init__(config=config)
self.config = config
self.pre_process = pre_process
self.post_process = post_process
self.hidden_size = hidden_size
self.num_layers = num_layers
self.init_method = init_method
self.model_attn_mask_type = encoder_attn_mask_type
self.hidden_dropout = hidden_dropout
self.output_layer_init_method = output_layer_init_method
self.parent_model_type = parent_model_type
self.normalization = normalization
self.transformer_block_type = transformer_block_type
self.hidden_steps = hidden_steps
self.num_self_attention_per_cross_attention = num_self_attention_per_cross_attention
self.num_attention_heads = num_attention_heads
self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
self.kv_channels = kv_channels
self.ffn_hidden_size = ffn_hidden_size
self.precision = precision
self.fp32_residual_connection = fp32_residual_connection
self.activations_checkpoint_method = activations_checkpoint_method
self.activations_checkpoint_num_layers = activations_checkpoint_num_layers
self.activations_checkpoint_granularity = activations_checkpoint_granularity
self.layernorm_epsilon = layernorm_epsilon
self.bias_activation_fusion = bias_activation_fusion
self.bias_dropout_add_fusion = bias_dropout_add_fusion
self.masked_softmax_fusion = masked_softmax_fusion
self.persist_layer_norm = persist_layer_norm
self.openai_gelu = openai_gelu
self.onnx_safe = onnx_safe
self.activation = activation
self.bias = bias
self.headscale = headscale
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.normalization = normalization
self.parent_model_type = parent_model_type
self.transformer_block_type = transformer_block_type
self.ffn_dropout = ffn_dropout
self.normalize_attention_scores = normalize_attention_scores
self.megatron_legacy = megatron_legacy
self.megatron_amp_O2 = megatron_amp_O2
assert self.num_self_attention_per_cross_attention >= 1
assert self.hidden_steps >= 1
self.init_hidden = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(hidden_steps, hidden_size)))
self.cross_attn_layers = torch.nn.ModuleList([self._build_cross_attn_layer() for _ in range(self.num_layers)])
self.self_attn_layers = torch.nn.ModuleList(
[
self._build_self_attn_layer()
for _ in range(self.num_layers * self.num_self_attention_per_cross_attention)
]
)
if normalization == 'layernorm':
self.final_layernorm = get_layer_norm(hidden_size, layernorm_epsilon, persist_layer_norm)
else:
self.final_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
def _build_cross_attn_layer(self):
return ParallelTransformer(
config=self.config,
layer_type=LayerType.decoder,
init_method=self.init_method,
output_layer_init_method=self.output_layer_init_method,
num_layers=1,
hidden_size=self.hidden_size,
num_attention_heads=self.num_attention_heads,
apply_query_key_layer_scaling=self.apply_query_key_layer_scaling,
kv_channels=self.kv_channels,
ffn_hidden_size=self.ffn_hidden_size,
self_attn_mask_type=self.model_attn_mask_type,
pre_process=self.pre_process,
post_process=False, # This is to avoid the final layernorm and transpose.
precision=self.precision,
fp32_residual_connection=self.fp32_residual_connection,
activations_checkpoint_method=self.activations_checkpoint_method,
activations_checkpoint_num_layers=self.activations_checkpoint_num_layers,
activations_checkpoint_granularity=self.activations_checkpoint_granularity,
layernorm_epsilon=self.layernorm_epsilon,
hidden_dropout=self.hidden_dropout,
attention_dropout=self.attention_dropout,
ffn_dropout=self.ffn_dropout,
megatron_amp_O2=self.megatron_amp_O2,
bias_activation_fusion=self.bias_activation_fusion,
bias_dropout_add_fusion=self.bias_dropout_add_fusion,
masked_softmax_fusion=self.masked_softmax_fusion,
persist_layer_norm=self.persist_layer_norm,
openai_gelu=self.openai_gelu,
onnx_safe=self.onnx_safe,
activation=self.activation,
bias=self.bias,
normalization=self.normalization,
model_type=self.parent_model_type,
transformer_block_type=self.transformer_block_type,
headscale=self.headscale,
normalize_attention_scores=self.normalize_attention_scores,
megatron_legacy=self.megatron_legacy,
)
def _build_self_attn_layer(self):
return ParallelTransformer(
config=self.config,
layer_type=LayerType.encoder,
init_method=self.init_method,
output_layer_init_method=self.output_layer_init_method,
num_layers=1,
hidden_size=self.hidden_size,
num_attention_heads=self.num_attention_heads,
apply_query_key_layer_scaling=self.apply_query_key_layer_scaling,
kv_channels=self.kv_channels,
ffn_hidden_size=self.ffn_hidden_size,
self_attn_mask_type=self.model_attn_mask_type,
pre_process=self.pre_process,
post_process=False, # This is to avoid the final layernorm and transpose.
precision=self.precision,
fp32_residual_connection=self.fp32_residual_connection,
activations_checkpoint_method=self.activations_checkpoint_method,
activations_checkpoint_num_layers=self.activations_checkpoint_num_layers,
activations_checkpoint_granularity=self.activations_checkpoint_granularity,
layernorm_epsilon=self.layernorm_epsilon,
hidden_dropout=self.hidden_dropout,
attention_dropout=self.attention_dropout,
ffn_dropout=self.ffn_dropout,
megatron_amp_O2=self.megatron_amp_O2,
bias_activation_fusion=self.bias_activation_fusion,
bias_dropout_add_fusion=self.bias_dropout_add_fusion,
masked_softmax_fusion=self.masked_softmax_fusion,
persist_layer_norm=self.persist_layer_norm,
openai_gelu=self.openai_gelu,
onnx_safe=self.onnx_safe,
activation=self.activation,
bias=self.bias,
normalization=self.normalization,
model_type=self.parent_model_type,
transformer_block_type=self.transformer_block_type,
headscale=self.headscale,
normalize_attention_scores=self.normalize_attention_scores,
megatron_legacy=self.megatron_legacy,
)
def set_input_tensor(self, input_tensor):
""" See megatron.model.transformer.set_input_tensor()"""
# TODO: Fix this when adding support for Pipeline Parallel.
pass
def forward(
self,
enc_input,
enc_attn_mask,
layer_past=None,
get_key_value=False,
enc_self_attention_relative_position_bias=None,
):
if enc_self_attention_relative_position_bias is not None:
raise ValueError(
f"enc_self_attention_relative_position_bias is not supported for Megatron Perceiver Encoders."
)
# convert to Megatron mask
latent_attention_mask = torch.ones(enc_input.size(1), self.hidden_steps).to(enc_input.device)
# First convert from 2D (B x T) to 3D (B x T x T)
# Next convert to 4D (B x 1 x T x T) - unsqueeze(1) is for the head dim.
latent_attention_mask_4d = attn_mask_postprocess(
build_attention_mask_3d(
source_mask=latent_attention_mask,
target_mask=latent_attention_mask,
attn_mask_type=AttnMaskType.padding,
)
)
enc_dec_attn_mask_4d = attn_mask_postprocess(
build_attention_mask_3d(
source_mask=latent_attention_mask, target_mask=enc_attn_mask, attn_mask_type=AttnMaskType.padding,
)
)
# 1. Expand latent hidden states to B x S_perceiver x H
# 2. Transpose to S_perceiver x B x H
hidden_states = self.init_hidden.unsqueeze(0).expand(enc_input.size(1), -1, -1).transpose(1, 0)
for i in range(self.num_layers):
residual = hidden_states
hidden_states = self.cross_attn_layers[i](
hidden_states=hidden_states,
attention_mask=latent_attention_mask_4d,
enc_dec_attn_mask=enc_dec_attn_mask_4d,
encoder_output=enc_input,
)
for j in range(self.num_self_attention_per_cross_attention):
hidden_states = self.self_attn_layers[i * self.num_self_attention_per_cross_attention + j](
hidden_states=hidden_states, attention_mask=latent_attention_mask_4d,
)
hidden_states += residual
        return self.final_layernorm(hidden_states)  # Need to transpose at the end because pre-process is False
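    # Hedged shape walkthrough (added note), assuming enc_input is [T, B, H] in the
    # usual Megatron layout:
    #
    #   latent_attention_mask    : [B, hidden_steps]
    #   latent_attention_mask_4d : [B, 1, hidden_steps, hidden_steps]
    #   enc_dec_attn_mask_4d     : [B, 1, hidden_steps, T]
    #   hidden_states (latents)  : [hidden_steps, B, H], refined by num_layers blocks of
    #                              one cross-attention over enc_input followed by
    #                              num_self_attention_per_cross_attention self-attention
    #                              layers, with a residual added per block.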
| NeMo-main | nemo/collections/nlp/modules/common/megatron/megatron_perceiver_encoders.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Dict, List, Optional
import torch
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults
from nemo.utils import logging
try:
from megatron.core import parallel_state
from megatron.core.enums import ModelType
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelType = ApexGuardDefaults()
HAVE_MEGATRON_CORE = False
try:
from apex.transformer.tensor_parallel.layers import set_defaults_if_not_set_tensor_model_parallel_attributes
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# Apex's `build_model` refactored to call Megatron-Core classes
def build_model(
model_provider_func: Callable[[Any, Dict[str, Any]], torch.nn.Module],
wrap_with_ddp: bool = True,
virtual_pipeline_model_parallel_size: Optional[int] = None,
model_type: ModelType = ModelType.encoder_or_decoder,
on_cpu: bool = False,
*args: Any,
**kwargs: Any,
) -> List[torch.nn.Module]:
"""Build the model satisfying pipeline model parallel requirements.
    This function sets `pre_process` and `post_process` in `**kwargs` and passes `*args` and `**kwargs` to
`model_provider_func`.
Args:
model_provider_func: A function which takes `*args` and `**kwargs` and returns a `nn.Module`.
wrap_with_ddp: If :obj:`True`, wrap the instantiated model
with `torch.nn.parallel.distributed.DistributedDataParallel`, a.k.a. `DDP`.
        virtual_pipeline_model_parallel_size: Specify when using the interleaved pipeline model parallel schedule.
        model_type: `ModelType.encoder_or_decoder` or `ModelType.encoder_and_decoder`.
        *args: Positional arguments for `model_provider_func`.
        **kwargs: Keyword arguments for `model_provider_func`.
Returns:
a list of `nn.Module`(s). If `virtual_pipeline_model_parallel_size` is not None,
the list has multiple models, otherwise one.
"""
if model_type is None:
model_type = ModelType.encoder_or_decoder
if (
parallel_state.get_pipeline_model_parallel_world_size() > 1
and virtual_pipeline_model_parallel_size is not None
):
model = []
parallel_state.set_virtual_pipeline_model_parallel_world_size(virtual_pipeline_model_parallel_size)
for i in range(virtual_pipeline_model_parallel_size):
parallel_state.set_virtual_pipeline_model_parallel_rank(i)
model.append(
model_provider_func(
*args,
**kwargs,
pre_process=parallel_state.is_pipeline_first_stage(),
post_process=parallel_state.is_pipeline_last_stage(),
)
)
else:
if model_type == ModelType.encoder_or_decoder:
model = model_provider_func(
*args,
**kwargs,
pre_process=parallel_state.is_pipeline_first_stage(),
post_process=parallel_state.is_pipeline_last_stage(),
)
elif model_type == ModelType.encoder_and_decoder:
pre_process = parallel_state.is_pipeline_first_stage()
post_process = parallel_state.is_pipeline_last_stage()
# `add_encoder` & `add_decoder` logic.
add_encoder, add_decoder = True, True
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
split_rank = parallel_state.get_pipeline_model_parallel_split_rank()
if split_rank is None:
raise RuntimeError("Split rank needs to be specified for model with both encoder and decoder.")
rank = parallel_state.get_pipeline_model_parallel_rank()
world_size = parallel_state.get_pipeline_model_parallel_world_size()
pre_process = rank == 0 or rank == split_rank
post_process = rank == (split_rank - 1) or rank == (world_size - 1)
add_encoder = parallel_state.is_pipeline_stage_before_split()
add_decoder = parallel_state.is_pipeline_stage_after_split()
model = model_provider_func(
*args,
**kwargs,
pre_process=pre_process,
post_process=post_process,
add_encoder=add_encoder,
add_decoder=add_decoder,
)
else:
raise ValueError(f"Unrecognized ModelType '{model_type}'")
if not isinstance(model, list):
model = [model]
for model_module in model:
model_module.model_type = model_type
# Set tensor model parallel attributes if not set.
# Only parameters that are already tensor model parallel have these
# attributes set for them. We should make sure the default attributes
# are set for all params so the optimizer can use them.
for model_module in model:
for param in model_module.parameters():
set_defaults_if_not_set_tensor_model_parallel_attributes(param)
# Print number of parameters.
if parallel_state.model_parallel_is_initialized() and parallel_state.get_data_parallel_rank() == 0:
msg = " > number of parameters on (tensor, pipeline) model parallel rank ({}, {}): {}".format(
parallel_state.get_tensor_model_parallel_rank(),
parallel_state.get_pipeline_model_parallel_rank(),
_calc_number_of_params(model),
)
logging.info(msg)
# GPU allocation.
if not on_cpu:
for model_module in model:
model_module.cuda(torch.cuda.current_device())
if wrap_with_ddp:
i = torch.cuda.current_device()
model = [
torch.nn.parallel.distributed.DistributedDataParallel(
model_module, device_ids=[i], output_device=i, process_group=parallel_state.get_data_parallel_group(),
)
for model_module in model
]
return model
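# Hedged usage sketch (added note, not from the original file); the provider name is
# hypothetical and tensor/pipeline parallel state must already be initialized:
#
#   def provider(pre_process=True, post_process=True):
#       return MyMegatronGPT(pre_process=pre_process, post_process=post_process)
#
#   models = build_model(provider, wrap_with_ddp=True)  # list with one DDP-wrapped module
#
# For ModelType.encoder_and_decoder with pipeline world size 4 and split rank 2, the
# logic above gives: ranks 0-1 build the encoder, ranks 2-3 build the decoder,
# pre_process is True on ranks 0 and 2, and post_process is True on ranks 1 and 3.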
def _calc_number_of_params(model: List[torch.nn.Module]) -> int:
assert isinstance(model, list)
return sum([sum([p.nelement() for p in model_module.parameters()]) for model_module in model])
| NeMo-main | nemo/collections/nlp/modules/common/megatron/build_model.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron Module"""
import torch
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults
from nemo.utils import logging
try:
from megatron.core import ModelParallelConfig, parallel_state, tensor_parallel
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
_FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)
_HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)
_BF16_TYPES = (torch.BFloat16Tensor, torch.cuda.BFloat16Tensor)
def param_is_not_shared(param):
return not hasattr(param, 'shared') or not param.shared
class MegatronModule(torch.nn.Module):
"""Megatron specific extensions of torch Module with support
for pipelining."""
def __init__(self, config: ModelParallelConfig = None, share_token_embeddings=True):
if not HAVE_MEGATRON_CORE:
raise ImportError(
"megatron-core was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
super(MegatronModule, self).__init__()
self.config = config
self.share_token_embeddings = share_token_embeddings
def word_embeddings_weight(self):
if self.pre_process:
if hasattr(self, 'language_model'):
return self.language_model.embedding.word_embeddings.weight
elif hasattr(self, 'encoder_embedding'):
return self.encoder_embedding.word_embeddings.weight
elif hasattr(self, 'decoder_embedding'):
return self.decoder_embedding.word_embeddings.weight
else:
raise ValueError(
f"Pre_process is True, but no embedding is found on this rank. Looked for language_model.embedding, encoder_embedding, and decoder_embedding"
)
else:
# This is the pipeline parallel last stage.
if not self.share_token_embeddings:
raise Exception(
'word_embeddings_weight() called for last ' 'stage, but share_token_embeddings is false'
)
return self.word_embeddings.weight
def position_embeddings_weight(self):
if self.pre_process:
if hasattr(self, 'language_model'):
return self.language_model.embedding.position_embeddings.weight
elif hasattr(self, 'encoder_embedding'):
return self.encoder_embedding.position_embeddings.weight
elif hasattr(self, 'decoder_embedding'):
return self.decoder_embedding.position_embeddings.weight
else:
raise ValueError(
f"Pre_process is True, but no embedding is found on this rank. Looked for language_model.embedding, encoder_embedding, and decoder_embedding"
)
else:
# We only need position embeddings on the encoder and decoder first stages where pre_process=True
raise ValueError(f"Pre_process is False, there is no position embedding on this rank.")
def encoder_relative_position_embeddings_weight(self):
if hasattr(self, 'encoder_relative_position_embedding'):
return self.encoder_relative_position_embedding.relative_position_embedding.weight
else:
raise ValueError(
f"No encoder_relative_position_embedding found on this rank. Looking for encoder_relative_position_embedding.relative_position_embedding.weight"
)
def decoder_relative_position_embeddings_weight(self):
if hasattr(self, 'decoder_relative_position_embedding'):
return self.decoder_relative_position_embedding.relative_position_embedding.weight
else:
raise ValueError(
f"No decoder_relative_position_embedding found on this rank. Looking for decoder_relative_position_embedding.relative_position_embedding.weight"
)
def decoder_cross_attention_relative_position_embeddings_weight(self):
if hasattr(self, 'decoder_cross_attention_relative_position_embedding'):
return self.decoder_cross_attention_relative_position_embedding.relative_position_embedding.weight
else:
raise ValueError(
f"No decoder_cross_attention_relative_position_embedding found on this rank. Looking for decoder_cross_attention_relative_position_embedding.relative_position_embedding.weight"
)
def initialize_word_embeddings(self, init_method, vocab_size, hidden_size, param_dtype=torch.float32):
if not self.share_token_embeddings:
raise Exception('initialize_word_embeddings() was called but ' 'share_token_embeddings is false')
# This function just initializes the word embeddings in the final stage
# when we are using pipeline parallelism. If we aren't using pipeline
# parallelism there is nothing to do.
if parallel_state.get_pipeline_model_parallel_world_size() == 1:
return
# Parameters are shared between the word embeddings layer, and the
# heads at the end of the model. In a pipelined setup with more than
# one stage, the initial embedding layer and the head are on different
# workers, so we do the following:
# 1. Create a second copy of word_embeddings on the last stage, with
# initial parameters of 0.0.
# 2. Do an all-reduce between the first and last stage to ensure that
# the two copies of word_embeddings start off with the same
# parameter values.
        # 3. In the training loop, do an all-reduce between the grads of
# the two word_embeddings layers to ensure that every applied weight
# update is the same on both stages.
if parallel_state.is_pipeline_last_stage() and not self.pre_process:
# This is relevant for T5 when the decoder is only on a single rank. It is the last stage of the pipeline and also has embeddings on this rank already.
assert not parallel_state.is_pipeline_first_stage()
self._word_embeddings_for_head_key = 'word_embeddings_for_head'
# set word_embeddings weights to 0 here, then copy first
# stage's weights using all_reduce below.
self.word_embeddings = tensor_parallel.VocabParallelEmbedding(
vocab_size, hidden_size, init_method=init_method, config=self.config,
)
self.word_embeddings.weight.data.fill_(0)
self.word_embeddings.weight.shared = True
# Zero out initial weights for decoder embedding.
# NOTE: We don't currently support T5 with the interleaved schedule.
# This is the case where PP > 1 and we're on the decoder first stage.
if not parallel_state.is_pipeline_first_stage(ignore_virtual=True) and self.pre_process:
if hasattr(self, 'language_model'):
# Zero params for GPT
self.language_model.embedding.zero_parameters()
else:
# Zero decoder embeddings for T5
assert hasattr(self, 'decoder_embedding')
self.decoder_embedding.zero_parameters()
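    # Hedged sketch (added note) of the intended call order on every pipeline rank:
    #
    #   model.initialize_word_embeddings(init_method, vocab_size, hidden_size)
    #   model.sync_initial_word_embeddings()  # all-reduce so first/last stage copies match
    #
    # after which the training loop is expected to all-reduce the embedding grads
    # between the two stages each step, as described in the comment above.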
def sync_initial_word_embeddings(self):
if torch.distributed.is_initialized():
if parallel_state.is_rank_in_embedding_group() and self.share_token_embeddings:
torch.distributed.all_reduce(
self.word_embeddings_weight().data, group=parallel_state.get_embedding_group()
)
else:
logging.warning(
"WARNING! Distributed processes aren't initialized, so "
"word embeddings in the last layer are not synchronized. "
"If you are just manipulating a model this is fine, but "
"this needs to be handled manually. If you are training "
"something is definitely wrong."
)
def sync_initial_position_embeddings(self):
# Ensure that the encoder first stage and decoder first have the same
# initial position embedding parameter values.
# NOTE: We don't currently support T5 with the interleaved schedule.
if (
parallel_state.is_rank_in_position_embedding_group()
and parallel_state.get_pipeline_model_parallel_split_rank() is not None
):
# TODO: Support tokentype embedding.
# self.language_model.embedding.cuda()
position_embeddings = self.position_embeddings_weight()
torch.distributed.all_reduce(position_embeddings.data, group=parallel_state.get_position_embedding_group())
def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
"""Use this function to override the state dict for
saving checkpoints."""
return self.state_dict(destination, prefix, keep_vars)
def sync_initial_encoder_relative_position_embeddings(self):
# Ensure that all encoder RPE stages have the same weights.
if parallel_state.is_rank_in_encoder_relative_position_embedding_group():
position_embeddings = self.encoder_relative_position_embeddings_weight()
torch.distributed.all_reduce(
position_embeddings.data, group=parallel_state.get_encoder_relative_position_embedding_group()
)
def sync_initial_decoder_relative_position_embeddings(self):
if parallel_state.is_rank_in_decoder_relative_position_embedding_group():
position_embeddings = self.decoder_relative_position_embeddings_weight()
torch.distributed.all_reduce(
position_embeddings.data, group=parallel_state.get_decoder_relative_position_embedding_group()
)
def sync_initial_decoder_cross_attention_relative_position_embeddings(self):
if parallel_state.is_rank_in_decoder_relative_position_embedding_group():
position_embeddings = self.decoder_cross_attention_relative_position_embeddings_weight()
torch.distributed.all_reduce(
position_embeddings.data, group=parallel_state.get_decoder_relative_position_embedding_group()
)
def conversion_helper(val, conversion):
"""Apply conversion to val. Recursively apply conversion if `val`
#is a nested tuple/list structure."""
if not isinstance(val, (tuple, list)):
return conversion(val)
rtn = [conversion_helper(v, conversion) for v in val]
if isinstance(val, tuple):
rtn = tuple(rtn)
return rtn
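# Hedged example (added note) of conversion_helper on a nested structure:
#
#   conversion_helper((1.0, [2.0, 3.0]), lambda v: v * 2)   # -> (2.0, [4.0, 6.0])
#
# tuples stay tuples and lists stay lists while the leaves are converted.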
def fp32_to_float16(val, float16_converter):
"""Convert fp32 `val` to fp16/bf16"""
def half_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, _FLOAT_TYPES):
val = float16_converter(val)
return val
return conversion_helper(val, half_conversion)
def float16_to_fp32(val):
"""Convert fp16/bf16 `val` to fp32"""
def float_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, (_BF16_TYPES, _HALF_TYPES)):
val = val.float()
return val
return conversion_helper(val, float_conversion)
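# Hedged round-trip example (added note), assuming a CPU fp32 tensor:
#
#   t = torch.randn(2, 3)                                # torch.float32
#   h = fp32_to_float16((t, [t]), lambda v: v.half())    # leaves become torch.float16
#   f = float16_to_fp32(h)                               # leaves are back to torch.float32
#
# non-float leaves (e.g. integer token ids) pass through unchanged.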
class Float16Module(MegatronModule):
def __init__(self, config: ModelParallelConfig, module, precision, share_token_embeddings=True):
if not HAVE_MEGATRON_CORE:
raise ImportError(
"Megatron-core was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
super().__init__(config=config, share_token_embeddings=share_token_embeddings)
self.precision = precision
if precision in ['bf16', 'bf16-mixed']:
self.add_module('module', module.bfloat16())
def float16_converter(val):
return val.bfloat16()
elif precision in [16, '16', '16-mixed']:
self.add_module('module', module.half())
def float16_converter(val):
return val.half()
else:
raise Exception(
f'precision {precision} is not supported. Float16Module (megatron_amp_O2) supports '
'only fp16 and bf16.'
)
self.float16_converter = float16_converter
def set_input_tensor(self, input_tensor):
return self.module.set_input_tensor(input_tensor)
def forward(self, *inputs, **kwargs):
# Note: Legacy checkpoints didn't have pre-process.
if getattr(self.module, 'pre_process', True):
inputs = fp32_to_float16(inputs, self.float16_converter)
outputs = self.module(*inputs, **kwargs)
if parallel_state.is_pipeline_last_stage() and self.training:
outputs = float16_to_fp32(outputs)
return outputs
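    # Hedged usage sketch (added note); the wrapped module and inputs are hypothetical:
    #
    #   wrapped = Float16Module(config=mp_config, module=gpt_model, precision='bf16')
    #   out = wrapped(tokens, attention_mask=mask)
    #
    # inputs are cast to bf16/fp16 only on the first pipeline stage, and outputs are
    # cast back to fp32 on the last stage during training, per forward() above.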
def state_dict(self, destination=None, prefix='', keep_vars=False):
return self.module.state_dict(destination, prefix, keep_vars)
def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
return self.module.state_dict_for_save_checkpoint(destination, prefix, keep_vars)
def word_embeddings_weight(self):
if self.module.pre_process:
if hasattr(self.module, 'language_model'):
return self.module.language_model.embedding.word_embeddings.weight
elif hasattr(self.module, 'encoder_embedding'):
return self.module.encoder_embedding.word_embeddings.weight
elif hasattr(self.module, 'decoder_embedding'):
return self.module.decoder_embedding.word_embeddings.weight
else:
raise ValueError(
f"Pre_process is True, but no embedding is found on this rank. Looked for language_model.embedding, encoder_embedding, and decoder_embedding"
)
else:
# This is the pipeline parallel last stage.
if not self.share_token_embeddings:
raise Exception(
'word_embeddings_weight() called for last ' 'stage, but share_token_embeddings is false'
)
return self.module.word_embeddings.weight
def position_embeddings_weight(self):
if self.module.pre_process:
if hasattr(self.module, 'language_model'):
return self.module.language_model.embedding.position_embeddings.weight
elif hasattr(self.module, 'encoder_embedding'):
return self.module.encoder_embedding.position_embeddings.weight
elif hasattr(self.module, 'decoder_embedding'):
return self.module.decoder_embedding.position_embeddings.weight
else:
raise ValueError(
f"Pre_process is True, but no embedding is found on this rank. Looked for language_model.position_embeddings, encoder_embedding.position_embedding_weight, and decoder_embedding.position_embedding_weight"
)
else:
# We only need position embeddings on the encoder and decoder first stages where pre_process=True
raise ValueError(f"Pre_process is False, there is no position embedding on this rank.")
def encoder_relative_position_embeddings_weight(self):
if hasattr(self.module, 'encoder_relative_position_embedding'):
return self.module.encoder_relative_position_embedding.relative_position_embedding.weight
else:
raise ValueError(
f"No encoder_relative_position_embedding found on this rank. Looking for encoder_relative_position_embedding.relative_position_embedding.weight"
)
def decoder_relative_position_embeddings_weight(self):
if hasattr(self.module, 'decoder_relative_position_embedding'):
return self.module.decoder_relative_position_embedding.relative_position_embedding.weight
else:
raise ValueError(
f"No decoder_relative_position_embedding found on this rank. Looking for decoder_relative_position_embedding.relative_position_embedding.weight"
)
def decoder_cross_attention_relative_position_embeddings_weight(self):
if hasattr(self.module, 'decoder_cross_attention_relative_position_embedding'):
return self.module.decoder_cross_attention_relative_position_embedding.relative_position_embedding.weight
else:
raise ValueError(
f"No decoder_cross_attention_relative_position_embedding found on this rank. Looking for decoder_cross_attention_relative_position_embedding.relative_position_embedding.weight"
)
| NeMo-main | nemo/collections/nlp/modules/common/megatron/module.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer."""
import enum
class LayerType(enum.Enum):
encoder = 1
decoder = 2
retrieval_encoder = (
        3  # retrieval model encoder; it uses cross-attention to condition on the pre-decoder output
)
retrieval_decoder = (
4 # retrieval model decoder, it uses chunked cross attention to be conditioned on the retrieved information
)
decoder_pre_mlp = 5 # decoder that skips the computation after the self-attention
retrieval_decoder_after_self_attn = 6 # retrieval decoder that skips the self-attention
| NeMo-main | nemo/collections/nlp/modules/common/megatron/layer_type.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from typing import Dict, List, Optional
from nemo.core.classes import NeuralModule
from nemo.core.neural_types import ChannelType, MaskType, NeuralType
__all__ = ['MegatronTokensHeadModule']
class MegatronTokensHeadModule(NeuralModule, ABC):
""" Base class for encoder neural module to be used in NLP models. """
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"dec_output": NeuralType(('B', 'T', 'D'), ChannelType()),
"embeddings_weights": NeuralType(('T', 'D'), MaskType()),
}
@property
def input_names(self) -> List[str]:
return ['dec_output', 'embeddings_weights']
@property
def output_names(self) -> List[str]:
return ['logits']
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"logits": NeuralType(('B', 'T', 'D'), ChannelType())}
| NeMo-main | nemo/collections/nlp/modules/common/megatron/megatron_tokens_head_module.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.collections.nlp.modules.common.megatron.fused_bias_gelu import bias_gelu, bias_gelu_back
try:
from apex._autocast_utils import _cast_if_autocast_enabled
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
@torch.jit.script
def bias_geglu(bias, y, bias_2, y_2):
x_2 = bias_2 + y_2
return bias_gelu(bias, y) * x_2
@torch.jit.script
def bias_geglu_back(g, bias, y, bias_2, y_2):
x_2 = bias_2 + y_2
return bias_gelu_back(g, bias, y) * x_2, bias_gelu(bias, y) * g
class GeGLUFunction(torch.autograd.Function):
@staticmethod
# bias and bias_2 are optional arguments
def forward(ctx, input, bias, input_2, bias_2):
ctx.save_for_backward(input, bias, input_2, bias_2)
return bias_geglu(bias, input, bias_2, input_2)
@staticmethod
def backward(ctx, grad_output):
input, bias, input_2, bias_2 = ctx.saved_tensors
tmp, tmp2 = bias_geglu_back(grad_output, bias, input, bias_2, input_2)
return tmp, tmp, tmp2, tmp2
def fused_bias_geglu(input, bias, input_2, bias_2):
args = _cast_if_autocast_enabled(input, bias, input_2, bias_2)
with torch.cuda.amp.autocast(enabled=False):
return GeGLUFunction.apply(*args)
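# Hedged reference check (added note), assuming Apex is available: the fused path above
# should match the unfused GeGLU, up to numerical tolerance, since bias_gelu uses the
# tanh approximation:
#
#   import torch.nn.functional as F
#   ref = F.gelu(y + bias, approximate='tanh') * (y_2 + bias_2)
#   out = fused_bias_geglu(y, bias, y_2, bias_2)
#   torch.testing.assert_close(out, ref)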
| NeMo-main | nemo/collections/nlp/modules/common/megatron/fused_bias_geglu.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from omegaconf import DictConfig
from nemo.collections.nlp.modules.common.megatron.hiddens import get_hiddens_module
from nemo.collections.nlp.modules.common.megatron.language_model import Embedding
from nemo.collections.nlp.modules.common.megatron.layer_type import LayerType
from nemo.collections.nlp.modules.common.megatron.megatron_decoders import get_decoder_model
from nemo.collections.nlp.modules.common.megatron.megatron_encoder_decoder import (
MegatronTransformerEncoderDecoderModule,
)
from nemo.collections.nlp.modules.common.megatron.megatron_encoders import get_encoder_model
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.position_embedding import (
ALiBiRelativePositionEmbedding,
KERPLERelativePositionEmbedding,
T5RelativePositionEmbedding,
)
from nemo.collections.nlp.modules.common.megatron.utils import (
ApexGuardDefaults,
build_position_ids,
init_method_normal,
parallel_lm_logits,
scaled_init_method_normal,
)
from nemo.collections.nlp.modules.common.megatron.vocab_parallel_cross_entropy import vocab_parallel_cross_entropy
from nemo.collections.nlp.parts import utils_funcs
try:
from apex.transformer.enums import AttnMaskType, ModelType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
# fake missing classes with None attributes
AttnMaskType = ApexGuardDefaults()
ModelType = ApexGuardDefaults()
HAVE_APEX = False
try:
from megatron.core import ModelParallelConfig, parallel_state, tensor_parallel
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
__all__ = ["MegatronTokenLevelHead", "MegatronTokenLevelEncoderDecoderModule"]
class MegatronTokenLevelHead(MegatronModule):
"""Masked LM head for token-based encoder-decoder models (e.g., T5)
Arguments:
mpu_vocab_size: model parallel size of vocabulary.
        parallel_output: whether the output logits remain distributed (not gathered) across tensor model parallel ranks.
"""
def __init__(self, mpu_vocab_size, parallel_output, bias=True):
super(MegatronTokenLevelHead, self).__init__()
if bias:
self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))
self.bias.model_parallel = True
self.bias.partition_dim = 0
self.bias.stride = 1
else:
self.bias = None
self.parallel_output = parallel_output
def forward(self, hidden_states, word_embeddings_weight):
async_tensor_model_parallel_allreduce = parallel_state.get_tensor_model_parallel_world_size() > 1
output = parallel_lm_logits(
hidden_states,
word_embeddings_weight,
self.parallel_output,
bias=self.bias,
async_tensor_model_parallel_allreduce=async_tensor_model_parallel_allreduce,
)
return output
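    # Added note (hedged): with the head's weight tied to the token embeddings, the
    # logits are effectively
    #
    #   logits = hidden_states @ word_embeddings_weight.T + bias
    #
    # computed via parallel_lm_logits with the vocabulary dimension sharded across
    # tensor-parallel ranks and gathered only when parallel_output is False.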
# TODO: add soft prompts as an Embedding sub-class
class MegatronTokenLevelEncoderDecoderModule(MegatronModule):
"""Token-based (input/output is tokens) encoder-decoder model (e.g. T5 Language model.)"""
def __init__(
self,
config: ModelParallelConfig,
encoder_cfg: DictConfig,
decoder_cfg: DictConfig,
vocab_size: int, # TODO: This should eventually go inside encoder_cfg and decoder_cfg when separate enc/dec tokenizers are supported.
max_position_embeddings,
num_tokentypes=0,
parallel_output=True,
pre_process=True,
post_process=True,
fp16_cross_entropy=False,
megatron_amp_O2=False,
precision=16,
embedding_init_method_std=0.02,
embedding_dropout=0.1,
label_smoothing=0.0,
add_encoder=True,
add_decoder=True,
share_token_embeddings=True,
share_decoder_tokens_head_embeddings=True,
tokens_head_bias=True,
hiddens_cfg: DictConfig = None, # allows for hidden state transformations before the decoder
):
super(MegatronTokenLevelEncoderDecoderModule, self).__init__(config=config)
self.encoder_cfg = encoder_cfg
self.decoder_cfg = decoder_cfg
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
self.fp16_cross_entropy = fp16_cross_entropy
self.precision = precision
self.add_encoder = add_encoder
self.add_decoder = add_decoder
self.label_smoothing = label_smoothing
self.share_token_embeddings = share_token_embeddings
self.share_decoder_tokens_head_embeddings = share_decoder_tokens_head_embeddings
self.tokens_head_bias = tokens_head_bias
self.hiddens_cfg = hiddens_cfg
encoder_kv_channels, decoder_kv_channels = self._validate_config()
self.dtype = utils_funcs.torch_dtype_from_precision(precision, megatron_amp_O2)
encoder, decoder = None, None
if add_encoder:
if pre_process:
self.encoder_embedding = Embedding(
config=self.config,
hidden_size=encoder_cfg.hidden_size,
vocab_size=vocab_size,
max_sequence_length=max_position_embeddings,
init_method=init_method_normal(embedding_init_method_std),
num_tokentypes=num_tokentypes,
dtype=self.dtype,
embedding_dropout_prob=embedding_dropout,
position_embedding_type=encoder_cfg.get('position_embedding_type', 'learned_absolute'),
)
self._encoder_embedding_key = "encoder_embedding"
if self.encoder_cfg.get('position_embedding_type', 'learned_absolute') == 'relative':
self.encoder_relative_position_embedding = T5RelativePositionEmbedding(
init_method=init_method_normal(embedding_init_method_std),
num_attention_heads=encoder_cfg.num_attention_heads,
relative_position_num_buckets=encoder_cfg.relative_attention_num_buckets,
relative_position_max_distance=encoder_cfg.relative_attention_max_distance,
bidirectional=True,
layer_type=LayerType.encoder,
)
self._encoder_relative_position_embedding_key = "encoder_relative_position_embedding"
# Pipeline model parallel rank 0 will have the actual RPE weights. We zero it out on all other ranks and then sync them on setup.
if parallel_state.get_pipeline_model_parallel_rank() != 0:
self.encoder_relative_position_embeddings_weight().data.fill_(0)
self.encoder_relative_position_embeddings_weight().shared = True
elif self.encoder_cfg.get('position_embedding_type', 'learned_absolute') == 'alibi':
self.encoder_relative_position_embedding = ALiBiRelativePositionEmbedding(
bidirectional=True,
num_attention_heads=encoder_cfg.num_attention_heads,
layer_type=LayerType.encoder,
num_attention_heads_alibi=None,
max_seq_len=max_position_embeddings,
)
self._encoder_relative_position_embedding_key = "encoder_alibi_position_embedding"
elif self.encoder_cfg.get('position_embedding_type', 'learned_absolute') == 'kerple':
self.encoder_relative_position_embedding = KERPLERelativePositionEmbedding(
bidirectional=True,
num_attention_heads=encoder_cfg.num_attention_heads,
layer_type=LayerType.encoder,
num_attention_heads_kerple=None,
max_seq_len=max_position_embeddings,
)
self._encoder_relative_position_embedding_key = "encoder_kerple_position_embedding"
else:
self.encoder_relative_position_embedding = None
if encoder_cfg.get('use_flash_attention', False) and encoder_cfg.get(
'position_embedding_type', 'learned_absolute'
) in ['relative', 'kerple']:
raise ValueError('flash-attention not supported with relative or kerple at this point')
encoder = get_encoder_model(
config=config,
arch=encoder_cfg.arch,
hidden_size=encoder_cfg.hidden_size,
ffn_hidden_size=encoder_cfg.ffn_hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
apply_query_key_layer_scaling=encoder_cfg.get('apply_query_key_layer_scaling', True),
kv_channels=encoder_kv_channels,
init_method=init_method_normal(encoder_cfg.get('init_method_std', 0.02)),
scaled_init_method=scaled_init_method_normal(
encoder_cfg.get('init_method_std', 0.02), encoder_cfg.num_layers
),
encoder_attn_mask_type=AttnMaskType.padding,
pre_process=pre_process,
post_process=post_process,
init_method_std=encoder_cfg.get('init_method_std', 0.02),
megatron_amp_O2=megatron_amp_O2,
hidden_dropout=encoder_cfg.get('hidden_dropout', 0.1),
attention_dropout=encoder_cfg.get('attention_dropout', 0.1),
ffn_dropout=encoder_cfg.get('ffn_dropout', 0.0),
precision=precision,
fp32_residual_connection=encoder_cfg.get('fp32_residual_connection', False),
activations_checkpoint_method=encoder_cfg.get('activations_checkpoint_method', None),
activations_checkpoint_num_layers=encoder_cfg.get('activations_checkpoint_num_layers', 1),
activations_checkpoint_granularity=encoder_cfg.get('activations_checkpoint_granularity', None),
layernorm_epsilon=encoder_cfg.get('layernorm_epsilon', 1e-5),
bias_activation_fusion=encoder_cfg.get('bias_activation_fusion', True),
bias_dropout_add_fusion=encoder_cfg.get('bias_dropout_add_fusion', True),
masked_softmax_fusion=encoder_cfg.get('masked_softmax_fusion', True),
persist_layer_norm=encoder_cfg.get('persist_layer_norm', True),
openai_gelu=encoder_cfg.get('openai_gelu', False),
onnx_safe=encoder_cfg.get('onnx_safe', False),
hidden_steps=encoder_cfg.get('hidden_steps', -1),
activation=encoder_cfg.get('activation', 'gelu'),
bias=encoder_cfg.get('bias', True),
normalization=encoder_cfg.get('normalization', 'layernorm'),
transformer_block_type=encoder_cfg.get('transformer_block_type', 'pre_ln'),
headscale=encoder_cfg.get('headscale', False),
parent_model_type=ModelType.encoder_and_decoder,
num_self_attention_per_cross_attention=encoder_cfg.get('num_self_attention_per_cross_attention', 1),
megatron_legacy=encoder_cfg.get('megatron_legacy', False),
normalize_attention_scores=encoder_cfg.get('normalize_attention_scores', True),
num_moe_experts=encoder_cfg.get('num_moe_experts', 1),
moe_frequency=encoder_cfg.get('moe_frequency', 1),
moe_dropout=encoder_cfg.get('moe_dropout', 0.0),
position_embedding_type=encoder_cfg.get('position_embedding_type', 'learned_absolute'),
use_flash_attention=encoder_cfg.get('use_flash_attention', False),
)
if add_decoder:
# If this is the decoder first stage
if pre_process:
# If the encoder also lies on this rank (PP = 1), then just assign embeddings directly.
if hasattr(self, 'encoder_embedding') and share_token_embeddings:
self.decoder_embedding = self.encoder_embedding
else:
                    # This is the case where PP > 1 and this is the decoder's first stage, or when embeddings are not shared with the encoder.
self.decoder_embedding = Embedding(
config=self.config,
hidden_size=decoder_cfg.hidden_size,
vocab_size=vocab_size,
max_sequence_length=max_position_embeddings,
init_method=init_method_normal(embedding_init_method_std),
num_tokentypes=num_tokentypes,
dtype=self.dtype,
embedding_dropout_prob=embedding_dropout,
position_embedding_type=decoder_cfg.get('position_embedding_type', 'learned_absolute'),
)
                    # We initialize decoder embeddings, but set them to zero since they're tied with the encoder embeddings.
# A later initialize_embedding call will synchronize the embeddings.
if share_token_embeddings:
self.decoder_embedding.zero_parameters()
self._decoder_embedding_key = "decoder_embedding"
if self.decoder_cfg.get('position_embedding_type', 'learned_absolute') == 'relative':
self.decoder_relative_position_embedding = T5RelativePositionEmbedding(
init_method=init_method_normal(embedding_init_method_std),
num_attention_heads=decoder_cfg.num_attention_heads,
relative_position_num_buckets=decoder_cfg.relative_attention_num_buckets,
relative_position_max_distance=decoder_cfg.relative_attention_max_distance,
bidirectional=False,
layer_type=LayerType.decoder,
)
self._decoder_relative_position_embedding_key = "decoder_relative_position_embedding"
# Pipeline model parallel rank == split_rank will have the actual RPE weights. We zero it out on all other ranks and then sync them on setup.
if (
parallel_state.get_pipeline_model_parallel_rank()
!= parallel_state.get_pipeline_model_parallel_split_rank()
):
self.decoder_relative_position_embeddings_weight().data.fill_(0)
self.decoder_relative_position_embeddings_weight().shared = True
if not self.decoder_cfg.relative_position_bias_self_attention_only:
self.decoder_cross_attention_relative_position_embedding = T5RelativePositionEmbedding(
init_method=init_method_normal(embedding_init_method_std),
num_attention_heads=decoder_cfg.num_attention_heads,
relative_position_num_buckets=decoder_cfg.relative_attention_num_buckets,
relative_position_max_distance=decoder_cfg.relative_attention_max_distance,
bidirectional=True,
layer_type=LayerType.decoder,
)
self._decoder_cross_attention_relative_position_embedding_key = (
"decoder_cross_attention_relative_position_embedding"
)
if (
parallel_state.get_pipeline_model_parallel_rank()
!= parallel_state.get_pipeline_model_parallel_split_rank()
):
self.decoder_cross_attention_relative_position_embeddings_weight().data.fill_(0)
self.decoder_cross_attention_relative_position_embeddings_weight().shared = True
elif self.decoder_cfg.get('position_embedding_type', 'learned_absolute') == 'alibi':
self.decoder_relative_position_embedding = ALiBiRelativePositionEmbedding(
bidirectional=False,
num_attention_heads=decoder_cfg.num_attention_heads,
layer_type=LayerType.decoder,
num_attention_heads_alibi=None,
max_seq_len=max_position_embeddings,
)
self._decoder_relative_position_embedding_key = "decoder_alibi_position_embedding"
elif self.decoder_cfg.get('position_embedding_type', 'learned_absolute') == 'kerple':
self.decoder_relative_position_embedding = KERPLERelativePositionEmbedding(
bidirectional=False,
num_attention_heads=decoder_cfg.num_attention_heads,
layer_type=LayerType.decoder,
num_attention_heads_kerple=None,
max_seq_len=max_position_embeddings,
)
self._decoder_relative_position_embedding_key = "decoder_kerple_position_embedding"
else:
self.decoder_relative_position_embedding = None
if decoder_cfg.get('use_flash_attention', False) and decoder_cfg.get(
'position_embedding_type', 'learned_absolute'
) in ['relative', 'kerple']:
raise ValueError('flash-attention not supported with relative or kerple at this point')
decoder = get_decoder_model(
config=config,
arch=decoder_cfg.arch,
hidden_size=decoder_cfg.hidden_size,
ffn_hidden_size=decoder_cfg.ffn_hidden_size,
num_layers=decoder_cfg.num_layers,
num_attention_heads=decoder_cfg.num_attention_heads,
apply_query_key_layer_scaling=decoder_cfg.get('apply_query_key_layer_scaling', True),
kv_channels=decoder_kv_channels,
init_method=init_method_normal(decoder_cfg.get('init_method_std', 0.02)),
scaled_init_method=scaled_init_method_normal(
decoder_cfg.get('init_method_std', 0.02), decoder_cfg.num_layers
),
decoder_attn_mask_type=AttnMaskType.causal,
pre_process=pre_process,
post_process=post_process,
init_method_std=decoder_cfg.get('init_method_std', 0.02),
megatron_amp_O2=megatron_amp_O2,
hidden_dropout=decoder_cfg.get('hidden_dropout', 0.1),
attention_dropout=decoder_cfg.get('attention_dropout', 0.1),
ffn_dropout=decoder_cfg.get('ffn_dropout', 0.0),
precision=precision,
fp32_residual_connection=decoder_cfg.get('fp32_residual_connection', False),
activations_checkpoint_method=decoder_cfg.get('activations_checkpoint_method', None),
activations_checkpoint_num_layers=decoder_cfg.get('activations_checkpoint_num_layers', 1),
activations_checkpoint_granularity=decoder_cfg.get('activations_checkpoint_granularity', None),
layernorm_epsilon=decoder_cfg.get('layernorm_epsilon', 1e-5),
bias_activation_fusion=decoder_cfg.get('bias_activation_fusion', True),
bias_dropout_add_fusion=decoder_cfg.get('bias_dropout_add_fusion', True),
masked_softmax_fusion=decoder_cfg.get('masked_softmax_fusion', True),
persist_layer_norm=decoder_cfg.get('persist_layer_norm', True),
openai_gelu=decoder_cfg.get('openai_gelu', False),
onnx_safe=decoder_cfg.get('onnx_safe', False),
hidden_steps=decoder_cfg.get('hidden_steps', -1),
activation=decoder_cfg.get('activation', 'gelu'),
bias=decoder_cfg.get('bias', True),
normalization=decoder_cfg.get('normalization', 'layernorm'),
transformer_block_type=decoder_cfg.get('transformer_block_type', 'pre_ln'),
headscale=decoder_cfg.get('headscale', False),
parent_model_type=ModelType.encoder_and_decoder,
megatron_legacy=decoder_cfg.get('megatron_legacy', False),
normalize_attention_scores=decoder_cfg.get('normalize_attention_scores', True),
num_moe_experts=decoder_cfg.get('num_moe_experts', 1),
moe_frequency=decoder_cfg.get('moe_frequency', 1),
moe_dropout=decoder_cfg.get('moe_dropout', 0.0),
position_embedding_type=decoder_cfg.get('position_embedding_type', 'learned_absolute'),
use_flash_attention=decoder_cfg.get('use_flash_attention', False),
)
hiddens_module = get_hiddens_module(hiddens_cfg, model_parallel_cfg=config)
self.enc_dec_model = MegatronTransformerEncoderDecoderModule(
config=config,
encoder=encoder,
decoder=decoder,
hidden_steps=encoder_cfg.get('hidden_steps', -1),
hiddens_module=hiddens_module,
)
self._enc_dec_model_key = "enc_dec_model"
if self.share_token_embeddings:
# This is only relevant for PP > 1.
self.initialize_word_embeddings(
init_method=init_method_normal(embedding_init_method_std),
vocab_size=vocab_size,
hidden_size=encoder_cfg.hidden_size,
)
if add_decoder and post_process:
if share_decoder_tokens_head_embeddings:
self.tokens_head = MegatronTokenLevelHead(
self.word_embeddings_weight().size(0), parallel_output, bias=tokens_head_bias
)
else:
self.tokens_head = tensor_parallel.ColumnParallelLinear(
input_size=decoder_cfg.hidden_size,
output_size=vocab_size,
config=config,
bias=tokens_head_bias,
gather_output=not self.parallel_output,
init_method=init_method_normal(decoder_cfg.init_method_std),
)
self._tokens_head_key = 'tokens_head'
def _validate_kv_channels(self, cfg):
kv_channels = cfg.kv_channels
if cfg.kv_channels is None:
assert (
cfg.hidden_size % cfg.num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = cfg.hidden_size // cfg.num_attention_heads
return kv_channels
def _validate_enc_dec_hidden_size(self, encoder_cfg, decoder_cfg):
if encoder_cfg.hidden_size != decoder_cfg.hidden_size:
raise ValueError(
f"Encoder and decoder hidden_size must be equal, but got encoder: {encoder_cfg.hidden_size} and decoder: {decoder_cfg.hidden_size}"
)
def _validate_perceiver_config(self, cfg):
if (
cfg.get("position_embedding_type", "learned_absolute") == "relative"
and cfg.get("arch", "transformer") == "perceiver"
):
raise ValueError(f"Perceivers with relative position embeddings are not supported")
def _validate_config(self):
encoder_kv_channels = self._validate_kv_channels(self.encoder_cfg)
decoder_kv_channels = self._validate_kv_channels(self.decoder_cfg)
self._validate_enc_dec_hidden_size(self.encoder_cfg, self.decoder_cfg)
self._validate_perceiver_config(self.encoder_cfg)
self._validate_perceiver_config(self.decoder_cfg)
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
assert (
self.share_token_embeddings
), "Token embeddings must be shared when using pipeline model parallel size > 1"
assert (
self.share_decoder_tokens_head_embeddings
), "Decoder token embeddings and the outputlayer must be shared when using pipeline model parallel size > 1"
assert (
self.hiddens_cfg is None
), "Hiddens module must not be enabled when using pipeline model parallel size > 1"
return encoder_kv_channels, decoder_kv_channels
def set_input_tensor(self, input_tensor):
""" See megatron.model.transformer.set_input_tensor()"""
# This is usually handled in schedules.py but some inference code still
# gives us non-lists or None
if not isinstance(input_tensor, list):
input_tensor = [input_tensor]
if self.add_encoder and self.add_decoder:
assert (
len(input_tensor) == 1
), 'input_tensor should only be length 1 for stage with both encoder and decoder'
self.enc_dec_model.encoder.set_input_tensor(input_tensor[0])
elif self.add_encoder:
assert len(input_tensor) == 1, 'input_tensor should only be length 1 for stage with only encoder'
self.enc_dec_model.encoder.set_input_tensor(input_tensor[0])
elif self.add_decoder:
if len(input_tensor) == 2:
self.enc_dec_model.decoder.set_input_tensor(input_tensor[0])
self.enc_dec_model.encoder_hidden_state = input_tensor[1]
elif len(input_tensor) == 1:
self.enc_dec_model.decoder.set_input_tensor(None)
self.enc_dec_model.encoder_hidden_state = input_tensor[0]
else:
raise Exception('input_tensor must have either length 1 or 2')
else:
raise Exception('Stage must have at least either encoder or decoder')
def forward(
self,
enc_input_ids=None,
enc_attn_mask=None,
dec_input_ids=None,
dec_attn_mask=None,
token_type_ids=None,
labels=None,
batch_data=None, # additional data to be passed to hiddens module
enc_output=None, # Result of running the entire encoder
enc_output_attn_mask=None,
enc_input=None, # Result of running encoder embedding only
output_enc_hidden_only=False,
):
"""
        Return value is per token / per dimension (i.e., a non-collapsed loss value)
"""
(
encoder_self_attention_relative_position_bias,
decoder_self_attention_relative_position_bias,
decoder_cross_attention_relative_position_bias,
) = (None, None, None)
if enc_input is not None and enc_output is not None:
raise ValueError(
"""Both enc_input and enc_output are not None.
You should only be passing one of them.
enc_input is the result of the encoder embedding layer
enc_output is the result of running the entire transformer encoder."""
)
# In order of precedence, we use enc_output, enc_input, and then enc_input_ids to determine the encoder sequence length.
if enc_output is not None:
# If enc_output is provided in `batch_for_pipeline`, we need to transpose it from [B x S x H] -> [S x B x H].
enc_output = enc_output.transpose(0, 1)
enc_seq_length = enc_output.size(0)
elif enc_input is not None:
# If enc_input is provided, we need to transpose it from [B x S x H] -> [S x B x H].
enc_input = enc_input.transpose(0, 1)
enc_seq_length = enc_input.size(0)
# Only need to run encoder embedding and position ids if enc_input or enc_output is not provided.
elif enc_input_ids is not None:
enc_seq_length = enc_input_ids.size(1)
if self.pre_process and self.add_encoder:
# We don't need position ids for RPE, because the embedding layer does not have position embeddings.
if self.encoder_relative_position_embedding is None:
enc_position_ids = build_position_ids(enc_input_ids)
else:
enc_position_ids = None
enc_input = self.encoder_embedding(enc_input_ids, enc_position_ids, token_type_ids=token_type_ids)
else:
enc_input = None
else:
# This should only happen with PP > 1 for enc-dec prompt learning models
enc_seq_length = enc_attn_mask.size(1)
if self.add_encoder and self.encoder_relative_position_embedding is not None:
encoder_self_attention_relative_position_bias = self.encoder_relative_position_embedding(
query_seq_length=enc_seq_length, key_seq_length=enc_seq_length,
)
if output_enc_hidden_only:
            # When pipeline parallel > 1 we need to make sure the encoder exists (it will be missing on decoder-only stages)
if enc_output is None and self.enc_dec_model.encoder is not None:
enc_output = self.enc_dec_model.encode(
enc_input=enc_input,
enc_attn_mask=enc_attn_mask,
enc_layer_past=None,
enc_get_key_value=False,
enc_self_attention_relative_position_bias=encoder_self_attention_relative_position_bias,
batch_data=batch_data,
)
else:
enc_output = self.enc_dec_model.encoder_hidden_state
return enc_output
else:
if enc_output_attn_mask is None:
enc_output_attn_mask = enc_attn_mask
if self.pre_process and self.add_decoder:
# We don't need position ids for RPE, because the embedding layer does not have position embeddings.
if self.decoder_relative_position_embedding is None:
dec_position_ids = build_position_ids(dec_input_ids)
else:
dec_position_ids = None
dec_input = self.decoder_embedding(dec_input_ids, dec_position_ids, token_type_ids=token_type_ids)
else:
# Note: This is when the decoder itself is split across PP ranks.
dec_input = None
if self.add_decoder and self.decoder_relative_position_embedding is not None:
decoder_self_attention_relative_position_bias = self.decoder_relative_position_embedding(
query_seq_length=dec_input_ids.size(1), key_seq_length=dec_input_ids.size(1)
)
if not self.decoder_cfg.relative_position_bias_self_attention_only:
decoder_cross_attention_relative_position_bias = self.decoder_cross_attention_relative_position_embedding(
query_seq_length=dec_input_ids.size(1), key_seq_length=enc_seq_length,
)
else:
decoder_cross_attention_relative_position_bias = None
output = self.enc_dec_model(
enc_input=enc_input,
enc_attn_mask=enc_attn_mask,
dec_input=dec_input,
dec_attn_mask=dec_attn_mask,
enc_layer_past=None,
enc_get_key_value=False,
enc_output=enc_output,
enc_output_attn_mask=enc_output_attn_mask,
dec_layer_past=None,
dec_get_key_value=False,
enc_self_attention_relative_position_bias=encoder_self_attention_relative_position_bias,
dec_self_attention_relative_position_bias=decoder_self_attention_relative_position_bias,
dec_cross_attention_relative_position_bias=decoder_cross_attention_relative_position_bias,
batch_data=batch_data,
)
if self.post_process and self.add_decoder:
dec_output, enc_output = output # [s, b, h], enc_output might be a dict if hiddens_module is used
# project decoder output to vocabulary-size dimensions
if self.share_decoder_tokens_head_embeddings:
token_logits = self.tokens_head(dec_output, self.word_embeddings_weight())
else:
token_logits = self.tokens_head(dec_output)[0]
if labels is not None:
# compute loss here
# [b, s] -> [s, b]
labels = labels.transpose(0, 1).contiguous()
# Set label smoothing to 0 if in eval mode.
label_smoothing = self.label_smoothing if self.training else 0.0
                    # tensor_parallel.vocab_parallel_cross_entropy performs log_softmax and returns log p(x_i|z) per token i
if self.fp16_cross_entropy:
assert token_logits.dtype == torch.half
tokens_loss = vocab_parallel_cross_entropy(token_logits, labels, label_smoothing)
else:
tokens_loss = vocab_parallel_cross_entropy(token_logits.float(), labels, label_smoothing)
# [s, b] -> [b, s]
tokens_loss = tokens_loss.transpose(0, 1).contiguous()
# check if hiddens is used
if self.hiddens_cfg is not None:
loss_dict = self.enc_dec_model.hiddens_module.apply_loss_transforms(
outputs=enc_output, batch_data=batch_data,
)
loss_dict["tokens_loss"] = tokens_loss
# We need to store default output in a known key, so that we can mimic default behaviour
loss_dict["output"] = tokens_loss
return loss_dict
else:
return tokens_loss
else:
# else return token logits (and hiddens if needed)
# [s, b, h] -> [b, s, h]
token_logits = token_logits.transpose(0, 1).contiguous()
if self.hiddens_cfg is not None:
# return all hiddens and token logits
hiddens_dict = enc_output
hiddens_dict["token_logits"] = token_logits
# We need to store default output in a known key, so that we can mimic default behaviour
hiddens_dict["output"] = token_logits
return hiddens_dict
else:
return token_logits
elif self.add_decoder and not self.add_encoder:
decoder_output, _ = output
return decoder_output
else:
encoder_output = output
return encoder_output
def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._encoder_embedding_key] = self.encoder_embedding.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
state_dict_[self._decoder_embedding_key] = self.decoder_embedding.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
state_dict_[self._enc_dec_model_key] = self.enc_dec_model.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
state_dict_[self._tokens_head_key] = self.tokens_head.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
        self.encoder_embedding.load_state_dict(state_dict[self._encoder_embedding_key], strict=strict)
self.decoder_embedding.load_state_dict(state_dict[self._decoder_embedding_key], strict=strict)
self.enc_dec_model.load_state_dict(state_dict[self._enc_dec_model_key], strict=strict)
self.tokens_head.load_state_dict(state_dict[self._tokens_head_key], strict=strict)
| NeMo-main | nemo/collections/nlp/modules/common/megatron/token_level_encoder_decoder.py |
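A brief illustrative sketch, not part of the NeMo sources above: the kv_channels fallback enforced by `_validate_kv_channels` derives `hidden_size // num_attention_heads` when `kv_channels` is unset, guarded by the divisibility assertion. The `ToyCfg` name is a hypothetical stand-in for the OmegaConf encoder/decoder config.

from dataclasses import dataclass
from typing import Optional


@dataclass
class ToyCfg:  # hypothetical stand-in for the encoder/decoder config
    hidden_size: int = 1024
    num_attention_heads: int = 16
    kv_channels: Optional[int] = None


def derive_kv_channels(cfg: ToyCfg) -> int:
    # mirrors the fallback in _validate_kv_channels above
    if cfg.kv_channels is None:
        assert (
            cfg.hidden_size % cfg.num_attention_heads == 0
        ), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
        return cfg.hidden_size // cfg.num_attention_heads
    return cfg.kv_channels


print(derive_kv_channels(ToyCfg()))  # 64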
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
import pickle
import threading
import time
from typing import List, Union
import faiss
import numpy as np
import torch
from flask import Flask, jsonify, request
from flask_restful import Api, Resource
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.language_modeling.megatron.indexed_retrieval_dataset import MMapRetrievalIndexedDataset
from nemo.collections.nlp.modules.common.megatron.retrieval_services.util import lock, request_data
class FaissRetrievalResource(Resource):
"""
Static Faiss Retrieval Flask resource.
The PUT method is to get KNN tokens.
"""
def __init__(
self, index, tokenizer, ds, query_bert_ip, query_bert_port,
):
# server
self.index = index
self.tokenizer = tokenizer
self.ds = ds
self.query_bert_ip = query_bert_ip
self.query_bert_port = query_bert_port
self.chunk_size = ds.chunk_size
pad_id = self.tokenizer.pad_id
self.no_retrieval = np.ones((1, 1, 2 * self.chunk_size), dtype=ds._index.dtype) * pad_id
def put(self):
data = request.get_json()
sentences = data['sentences']
num_neighbors = data['neighbors']
with lock: # Need to get lock to keep multiple threads from hitting code
neighbors = self.get_knn(sentences, num_neighbors)
return jsonify(neighbors.tolist())
# check keys
def get_knn(self, query: Union[List[str], str, torch.Tensor], neighbors: int):
if neighbors == 0:
# use padding
return np.repeat(self.no_retrieval, len(query), 0).astype(np.int64)
single_sentence = False
if isinstance(query, str):
single_sentence = True
query = [query]
elif isinstance(query, torch.Tensor):
sentence_list = []
for q in query:
text = self.tokenizer.ids_to_text(q)
sentence_list.append(text)
query = sentence_list
emb = request_data(query, self.query_bert_ip, self.query_bert_port)
emb_data = base64.b64decode(emb.encode())
emb = pickle.loads(emb_data)
if self.index.ntotal == 0:
# A workaround to fix searching an empty Faiss index
knn = [[-1] * neighbors for i in range(len(emb))]
else:
_, knn = self.index.search(emb, neighbors)
results = []
for sentence_neighbors in knn:
chunks = []
for neighbor_chunk_id in sentence_neighbors:
chunk_id = self.ds.get_chunk(neighbor_chunk_id)
chunks.append(chunk_id)
chunks = np.stack(chunks, axis=0).astype(np.int64)
results.append(chunks)
if single_sentence:
# unpack the single sentence input
return results[0]
return np.stack(results, axis=0).astype(np.int64)
class RetrievalServer(object):
"""
Flask Retrieval server, which helps to get the KNN tokens given the query chunk
"""
def __init__(
self,
faiss_index: str,
faiss_devices: str,
nprobe: int,
retrieval_index: str,
tokenizer: TokenizerSpec,
query_bert_ip: str,
query_bert_port: int = None,
):
self.app = Flask(__name__, static_url_path='')
# server
has_gpu = torch.cuda.is_available() and hasattr(faiss, "index_gpu_to_cpu")
if faiss_devices is None or not torch.cuda.is_available():
device_list = None
else:
device_list = ['cuda:' + str(device) for device in faiss_devices.split(',')]
self.index = faiss.read_index(faiss_index)
if has_gpu and device_list is not None:
beg = time.time()
co = faiss.GpuMultipleClonerOptions()
co.useFloat16 = True
co.usePrecomputed = False
co.shard = True
self.index = faiss.index_cpu_to_all_gpus(self.index, co, ngpu=len(device_list))
end = time.time()
            logging.info(f'converting the Faiss index to GPU took {end - beg} s')
self.index.nprobe = nprobe
self.tokenizer = tokenizer
self.ds = MMapRetrievalIndexedDataset(retrieval_index)
api = Api(self.app)
api.add_resource(
FaissRetrievalResource,
'/knn',
resource_class_args=[self.index, self.tokenizer, self.ds, query_bert_ip, query_bert_port],
)
def run(self, url, port=None):
threading.Thread(target=lambda: self.app.run(host=url, threaded=True, port=port)).start()
| NeMo-main | nemo/collections/nlp/modules/common/megatron/retrieval_services/static_retrieval_server.py |
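A hedged usage sketch (host and port are assumptions, not values from the file above): once `RetrievalServer.run(...)` is serving, the `/knn` endpoint accepts a PUT with `sentences` and `neighbors` and returns the retrieved chunk token ids.

import json

import requests

headers = {"Content-Type": "application/json"}
payload = {'sentences': ['retrieval augmented language models'], 'neighbors': 2}
resp = requests.put('http://localhost:17179/knn', data=json.dumps(payload), headers=headers)
# nested list shaped roughly [num_sentences, num_neighbors, 2 * chunk_size] of token ids
knn_chunks = resp.json()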
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import threading
import requests
headers = {"Content-Type": "application/json"}
lock = threading.Lock()
__all__ = ["request_data", "lock"]
def request_data(data, ip='localhost', port=None):
resp = requests.put(f'http://{ip}:{port}/knn', data=json.dumps(data), headers=headers)
return resp.json()
def text_generation(data, ip='localhost', port=None):
resp = requests.put(f'http://{ip}:{port}/generate', data=json.dumps(data), headers=headers)
return resp.json()
def convert_retrieved_to_md(retrieved):
output_str = '<table><tr><th>Query</th><th>Retrieved Doc</th></tr>'
for item in retrieved:
output_str += f'<tr><td rowspan="{len(item["neighbors"])}">{item["query"]}</td>'
for i, neighbor in enumerate(item['neighbors']):
if i == 0:
output_str += f"<td>{neighbor}</td></tr>"
else:
output_str += f"<tr><td>{neighbor}</td></tr>"
output_str += '</table>'
return output_str
| NeMo-main | nemo/collections/nlp/modules/common/megatron/retrieval_services/util.py |
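A minimal sketch of the input `convert_retrieved_to_md` expects: a list of dicts with `query` and `neighbors` keys, rendered as one HTML row span per query. The import path follows the file path above and assumes the NeMo package is installed.

from nemo.collections.nlp.modules.common.megatron.retrieval_services.util import convert_retrieved_to_md

retrieved = [
    {'query': 'what is Faiss?', 'neighbors': ['Faiss is a library for similarity search.', 'It supports GPU indexes.']},
]
print(convert_retrieved_to_md(retrieved))  # '<table><tr><th>Query</th><th>Retrieved Doc</th></tr>...'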
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | nemo/collections/nlp/modules/common/megatron/retrieval_services/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
import pickle
import threading
import time
from collections import namedtuple
from typing import List
import faiss
import numpy as np
import torch
from flask import Flask, jsonify, request
from flask_restful import Api
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.modules.common.megatron.retrieval_services.static_retrieval_server import (
FaissRetrievalResource,
)
from nemo.collections.nlp.modules.common.megatron.retrieval_services.util import lock, request_data
# define this type to mimic the indexed dataset
DType = namedtuple('DType', ['dtype'])
class ChunkStore:
"""
    ChunkStore maps a chunk id to its tokens. It is used as in-memory storage for the dynamic retrieval DB.
"""
def __init__(self, chunk_size, pad_id):
self.store = {}
self._count = 0
self.no_retrieval = np.ones(2 * chunk_size, dtype=np.int64) * pad_id
self.chunk_size = chunk_size
self.store[-1] = self.no_retrieval
field = DType(dtype=np.int64)
self._index = field
def add(self, chunk):
self.store[self._count] = chunk
self._count += 1
def get_chunk(self, neighbor_id):
return self.store[neighbor_id]
def reset(self):
self._count = 0
self.store = {}
self.store[-1] = self.no_retrieval
class DynamicRetrievalResource(FaissRetrievalResource):
"""
Dynamic Faiss Retrieval Flask resource.
The PUT method is to get KNN tokens, add new chunks, reset index.
"""
def __init__(
self,
index,
tokenizer,
chunk_size,
stride,
store,
ctx_bert_ip,
ctx_bert_port,
query_bert_ip,
query_bert_port,
output_filename,
):
super().__init__(index, tokenizer, store, query_bert_ip, query_bert_port)
self.chunk_size = chunk_size
self.stride = stride
self.pad_id = self.tokenizer.pad_id
self.ctx_bert_ip = ctx_bert_ip
self.ctx_bert_port = ctx_bert_port
self.output_filename = output_filename
def put(self):
data = request.get_json()
if 'neighbors' in data:
sentences = data['sentences']
# do knn query
num_neighbors = data['neighbors']
with lock: # Need to get lock to keep multiple threads from hitting code
neighbors = self.get_knn(sentences, num_neighbors)
return jsonify(neighbors.tolist())
elif 'reset' in data:
with lock: # Need to get lock to keep multiple threads from hitting code
self.reset()
return "success"
elif 'index_name' in data:
with lock:
# serialize the index
index = self.index
if hasattr(faiss, 'index_gpu_to_cpu'):
index = faiss.index_gpu_to_cpu(index)
faiss.write_index(index, data['index_name'] + '_' + self.output_filename + '.index')
# save the data
with open(self.output_filename + '.pkl', 'bw') as f:
pickle.dump(self.ds, f)
return "success"
else:
sentences = data['sentences']
add_eos = data['add_eos']
# update the index
with lock: # Need to get lock to keep multiple threads from hitting code
self.add_docs_to_index(sentences, add_eos)
return "success"
def reset(self):
self.index.reset()
self.ds.reset()
def add_docs_to_index(self, docs: List[str], add_eos: bool = True):
"""
Add documents to the Faiss index
Args:
            docs: List[str], list of documents that are going to be added to the index
            add_eos: bool, whether to append the EOS token at the end
"""
for doc in docs:
token_ids = self.tokenizer.text_to_ids(doc)
# append eos in the end
if add_eos:
token_ids.append(self.tokenizer.eos_id)
np_array = np.array(token_ids, dtype=np.int32)
padded_size = self.chunk_size - (len(np_array) % self.chunk_size)
# for retrieval database, added one more chunk in the end as padding
padded_size += self.chunk_size
np_array = np.pad(np_array, (0, padded_size), 'constant', constant_values=self.pad_id)
chunk_texts = []
for i in range(0, len(np_array), self.stride):
if i + 2 * self.chunk_size <= len(np_array):
chunk = np_array[i : i + 2 * self.chunk_size]
self.ds.add(chunk)
chunk_texts.append(self.tokenizer.ids_to_text(chunk))
emb = request_data(chunk_texts, self.ctx_bert_ip, self.ctx_bert_port)
emb_data = base64.b64decode(emb.encode())
emb = pickle.loads(emb_data)
self.index.add(emb) # add vectors to the index
class DynamicRetrievalServer(object):
"""
Flask Dynamic Retrieval server, which helps to build dynamic retrieval index.
"""
def __init__(
self,
faiss_devices: str,
tokenizer: TokenizerSpec,
chunk_size: int = 64,
stride: int = 32,
faiss_index: str = None,
store_file: str = None,
ctx_bert_ip: str = None,
ctx_bert_port: int = 0,
query_bert_ip: str = None,
query_bert_port: int = 0,
output_filename: str = 'dynamic_db',
):
self.app = Flask(__name__, static_url_path='')
has_gpu = torch.cuda.is_available() and hasattr(faiss, "index_gpu_to_cpu")
embedding_dim = request_data({}, ctx_bert_ip, ctx_bert_port)['dim']
if faiss_index is not None:
self.index = faiss.read_index(faiss_index)
else:
self.index = faiss.IndexFlatL2(embedding_dim) # build the index
self.pad_id = tokenizer.pad_id
self.chunk_size = chunk_size
self.stride = stride
if store_file is not None:
with open(store_file, 'rb') as f:
self.store = pickle.load(f)
else:
self.store = ChunkStore(chunk_size, self.pad_id)
if faiss_devices is None or not torch.cuda.is_available():
device_list = None
else:
device_list = ['cuda:' + str(device) for device in faiss_devices.split(',')]
if has_gpu and device_list is not None:
beg = time.time()
co = faiss.GpuMultipleClonerOptions()
co.useFloat16 = True
co.usePrecomputed = False
co.shard = True
self.index = faiss.index_cpu_to_all_gpus(self.index, co, ngpu=len(device_list))
end = time.time()
            logging.info(f'converting the Faiss index to GPU took {end - beg} s')
self.tokenizer = tokenizer
api = Api(self.app)
api.add_resource(
DynamicRetrievalResource,
'/knn',
resource_class_args=[
self.index,
self.tokenizer,
self.chunk_size,
self.stride,
self.store,
ctx_bert_ip,
ctx_bert_port,
query_bert_ip,
query_bert_port,
output_filename,
],
)
def run(self, url, port=None):
threading.Thread(target=lambda: self.app.run(host=url, threaded=True, port=port)).start()
| NeMo-main | nemo/collections/nlp/modules/common/megatron/retrieval_services/dynamic_retrieval_server.py |
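A hedged client sketch for the dynamic server above (address and port are assumptions): the same `/knn` endpoint is used to add documents, query neighbors, and reset the index, with the request body deciding the action.

import json

import requests

headers = {"Content-Type": "application/json"}
url = 'http://localhost:17180/knn'  # assumed address of a running DynamicRetrievalServer

# add documents to the in-memory index
requests.put(url, data=json.dumps({'sentences': ['NeMo is a conversational AI toolkit.'], 'add_eos': True}), headers=headers)
# query the two nearest chunks for a new sentence
resp = requests.put(url, data=json.dumps({'sentences': ['what is NeMo?'], 'neighbors': 2}), headers=headers)
print(resp.json())
# clear everything that was added
requests.put(url, data=json.dumps({'reset': None}), headers=headers)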
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import pickle
import threading
import time
from typing import List, Union
import torch
from flask import Flask, jsonify, request
from flask_restful import Api, Resource
from sentence_transformers import SentenceTransformer
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
BERT_RETRIEVER_PORT_NUM = 17190
class SentenceBertResource(Resource):
"""
SentenceBERT Flask resource.
The PUT method is to get token/str embedding.
"""
def __init__(
self, bert_model, tokenizer, pool, sentence_bert_batch,
):
# server
self.bert_model = bert_model
self.tokenizer = tokenizer
self.pool = pool
self.sentence_bert_batch = sentence_bert_batch
self.embedding_dim = self.bert_model.get_sentence_embedding_dimension()
def put(self):
data = request.get_json()
if isinstance(data, dict):
return jsonify({'dim': self.embedding_dim})
sentences = data
emb = self.get_emb(sentences)
str_emb = base64.b64encode(pickle.dumps(emb))
return str_emb.decode('ascii')
def get_emb(self, query: Union[List[str], str, torch.Tensor]):
if isinstance(query, str):
query = [query]
elif isinstance(query, torch.Tensor):
sentence_list = []
for q in query:
text = self.tokenizer.ids_to_text(q)
sentence_list.append(text)
query = sentence_list
emb = self.bert_model.encode_multi_process(
sentences=query, pool=self.pool, batch_size=self.sentence_bert_batch
)
return emb
class SentenceBertServer(object):
"""
Flask SentenceBERT server, which helps to calculate str/token embeddings
"""
def __init__(
self,
name: str,
devices: str,
tokenizer: TokenizerSpec,
sentence_bert: str = 'all-mpnet-base-v2',
sentence_bert_batch: int = 4,
):
self.app = Flask(__name__, static_url_path='')
if devices is None or not torch.cuda.is_available():
device_list = None
else:
device_list = ['cuda:' + str(device) for device in devices.split(',')]
self.bert_model = SentenceTransformer(sentence_bert)
self.name = name
self.tokenizer = tokenizer
self.pool = self.bert_model.start_multi_process_pool(device_list)
self.sentence_bert_batch = sentence_bert_batch
api = Api(self.app)
api.add_resource(
SentenceBertResource,
'/knn',
resource_class_args=[self.bert_model, self.tokenizer, self.pool, self.sentence_bert_batch,],
)
def run(self, url, port=None):
if port is None:
port = BERT_RETRIEVER_PORT_NUM
threading.Thread(target=lambda: self.app.run(host=url, threaded=True, port=port)).start()
def start_sentence_bert_server(
name: str,
devices: str,
tokenizer: TokenizerSpec,
sentence_bert: str = 'all-mpnet-base-v2',
sentence_bert_batch: int = 4,
port: int = None,
):
"""
Start the sentence bert server method.
It only starts the server at rank 0 worker.
Doesn't support multiple nodes yet.
"""
# register the bert model port number
server = SentenceBertServer(name, devices, tokenizer, sentence_bert, sentence_bert_batch,)
server.run("0.0.0.0", port=port)
    # sleep to make sure the sentence bert server is fully started.
time.sleep(2)
| NeMo-main | nemo/collections/nlp/modules/common/megatron/retrieval_services/bert_service.py |
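A hedged sketch of the embedding round-trip (server address assumed): `SentenceBertResource.put` returns a base64-encoded pickle of the embedding matrix, so a client decodes it the same way the retrieval resources above do.

import base64
import json
import pickle

import requests

resp = requests.put(
    'http://localhost:17190/knn',  # BERT_RETRIEVER_PORT_NUM, assuming the default port was used
    data=json.dumps(['a sentence to embed']),
    headers={"Content-Type": "application/json"},
)
emb = pickle.loads(base64.b64decode(resp.json().encode()))
print(emb.shape)  # (1, embedding_dim), e.g. (1, 768) for all-mpnet-base-v2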
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import threading
from typing import List, Union
import faiss
import numpy as np
import torch
from flask import Flask, jsonify, request
from flask_restful import Api, Resource
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.modules.common.megatron.retrieval_services.retrieval_service import (
DynamicFaissRetrievalService,
FaissRetrievalService,
)
from nemo.collections.nlp.modules.common.megatron.retrieval_services.util import lock
weights = None
class ComboRetrievalResource(Resource):
"""
Combo Faiss Retrieval Flask resource.
The PUT method is to get KNN tokens, add new chunks, reset index.
"""
def __init__(self, retrieval_services, weight_container):
self.retrieval_services = retrieval_services
self.updatable = any([service.updatable for service in retrieval_services])
self.weight_container = weight_container
weights = np.array(weight_container[0])
# normalize the weights
weights = weights / weights.sum()
self.weight_container[0] = weights
def put(self):
data = request.get_json()
if 'neighbors' in data:
sentences = data['sentences']
# do knn query
num_neighbors = data['neighbors']
with lock: # Need to get lock to keep multiple threads from hitting code
neighbors = self.get_knn(sentences, num_neighbors)
return jsonify(neighbors.tolist())
elif 'reset' in data:
with lock: # Need to get lock to keep multiple threads from hitting code
self.reset()
return "success"
elif 'update_weight' in data:
with lock:
self.update_weights(data['update_weight'])
return "success"
        elif 'index_name' in data:
            with lock:
                # delegate index serialization to the updatable child services
                self.write_index(data['index_name'])
            return "success"
else:
sentences = data['sentences']
add_eos = data['add_eos']
# update the index
with lock: # Need to get lock to keep multiple threads from hitting code
self.add_docs_to_index(sentences, add_eos)
return "success"
def reset(self):
output = 'success'
if not self.updatable:
return 'no dynamic service, no action is performed'
for i, service in enumerate(self.retrieval_services):
if service.updatable:
service.reset()
return output
def update_weights(self, weights):
weights = np.array(weights)
# normalize the weights
weights = weights / weights.sum()
self.weight_container[0] = weights
def get_knn(self, query: Union[List[str], str, torch.Tensor], neighbors):
weights = self.weight_container[0]
if neighbors == 0:
return self.retrieval_services[0].get_knn(query, 0)
total_neighbors = 0
results = []
for i, service in enumerate(self.retrieval_services):
k = int(neighbors * weights[i])
if i == len(self.retrieval_services) - 1:
k = neighbors - total_neighbors
total_neighbors += k
if k == 0:
# empty, skip it
continue
result = service.get_knn(query, k)
results.append(result)
return np.concatenate(results, axis=1)
def add_docs_to_index(self, query: List[str], add_eos: bool = True):
"""
Add documents to the Faiss index
Args:
            query: List[str], list of documents that are going to be added to the index
            add_eos: bool, whether to append the EOS token at the end
"""
output = 'success'
if not self.updatable:
            return 'no dynamic service, no action is performed'
for i, service in enumerate(self.retrieval_services):
if service.updatable:
service.add_docs_to_index(query, add_eos)
return output
def write_index(self, index_name: str):
"""
write the dynamic index into a file
Args:
index_name: str, index name
"""
output = 'success'
if not self.updatable:
            return 'no dynamic service, no action is performed'
for i, service in enumerate(self.retrieval_services):
if service.updatable:
service.write_index(index_name)
return output
class ComboRetrievalServer(object):
"""
Flask Combo Retrieval server, which helps to aggregate different retrieval services
"""
def __init__(
self, tokenizer: TokenizerSpec, services_cfg: list,
):
self.app = Flask(__name__, static_url_path='')
services = []
weights = []
for service_cfg in services_cfg:
weights.append(service_cfg.weight)
if service_cfg.type == 'FaissRetrievalService':
service = FaissRetrievalService(
tokenizer=tokenizer, service_ip=service_cfg.service_ip, service_port=service_cfg.service_port
)
elif service_cfg.type == 'DynamicFaissRetrievalService':
service = DynamicFaissRetrievalService(
tokenizer=tokenizer, service_ip=service_cfg.service_ip, service_port=service_cfg.service_port
)
else:
raise ValueError(f'Unsupported retrieval service {service_cfg.type}')
services.append(service)
self.weight_container = [weights]
self.tokenizer = tokenizer
api = Api(self.app)
api.add_resource(
ComboRetrievalResource, '/knn', resource_class_args=[services, self.weight_container,],
)
def run(self, url, port=None):
threading.Thread(target=lambda: self.app.run(host=url, threaded=True, port=port)).start()
| NeMo-main | nemo/collections/nlp/modules/common/megatron/retrieval_services/combo_retrieval_server.py |
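A hedged sketch of re-weighting a running combo server (the address, port, and two-service setup are assumptions): a PUT with `update_weight` changes how neighbors are split across the child services; the server normalizes the weights before use.

import json

import requests

headers = {"Content-Type": "application/json"}
url = 'http://localhost:17181/knn'  # assumed address of a running ComboRetrievalServer

# give 3x more neighbors to the first child service than to the second
requests.put(url, data=json.dumps({'update_weight': [3.0, 1.0]}), headers=headers)
# subsequent KNN queries now split `neighbors` roughly 75% / 25% between the two services
resp = requests.put(url, data=json.dumps({'sentences': ['query text'], 'neighbors': 4}), headers=headers)
print(resp.json())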
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
import threading
from typing import List, Union
import numpy as np
import torch
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.modules.common.megatron.retrieval_services.util import request_data
log = logging.getLogger('retrieval')
log.setLevel(logging.ERROR)
lock = threading.Lock()
PORT_NUM = 17179
PORT_NUM_DYN = 17180
class RetrievalService:
"""
Abstract class for Retrieval Service.
"""
@abc.abstractmethod
def get_knn(self, query: Union[List[str], str, torch.Tensor], neighbors: int):
"""Get K-nearest neighbor chunks based on the input query
Args:
query (Union[List[str], str, torch.Tensor]): query str, list of str or token ids in torch.Tensor type
neighbors (int): number of neighbors to query
"""
pass
@abc.abstractmethod
def add_docs_to_index(self, docs: List[str], add_eos: bool = True):
"""
Add documents to the Faiss index
Args:
            docs: List[str], list of documents that are going to be added to the index
            add_eos: bool, whether to append the EOS token at the end
"""
raise NotImplementedError()
class FaissRetrievalService(RetrievalService):
"""
Static retrieval service client class.
It implements the retrieval services interface, has a simple client to do KNN queries.
"""
def __init__(
self, tokenizer: TokenizerSpec, service_ip: str = None, service_port: int = None, updatable: bool = False,
):
self.updatable = updatable
self.tokenizer = tokenizer
self.service_ip = service_ip
self.service_port = service_port
def get_knn(self, query: Union[List[str], str, torch.Tensor], neighbors):
"""Get K-nearest neighbor chunks based on the input query
Args:
query (Union[List[str], str, torch.Tensor]): query str, list of str or token ids in torch.Tensor type
neighbors (int): number of neighbors to query
"""
if isinstance(query, torch.Tensor):
sentence_list = []
for q in query:
text = self.tokenizer.ids_to_text(q)
sentence_list.append(text)
query = sentence_list
data = {'sentences': query}
data['neighbors'] = neighbors
result = request_data(data, self.service_ip, self.service_port)
result = np.array(result)
return result
class DynamicFaissRetrievalService(FaissRetrievalService):
"""
Dynamic retrieval service client class.
It implements the retrieval services interface, has a simple client to add, reset and query
the dynamic retrieval index.
"""
def __init__(
self, tokenizer: TokenizerSpec, service_ip: str = None, service_port: int = None,
):
super().__init__(tokenizer=tokenizer, service_ip=service_ip, service_port=service_port, updatable=True)
def add_docs_to_index(self, query: List[str], add_eos: bool = True):
"""
Add documents to the Faiss index
Args:
            query: List[str], list of documents that are going to be added to the index
            add_eos: bool, whether to append the EOS token at the end
"""
if isinstance(query, torch.Tensor):
sentence_list = []
for q in query:
text = self.tokenizer.ids_to_text(q)
sentence_list.append(text)
query = sentence_list
data = {'sentences': query, 'add_eos': add_eos}
return request_data(data, self.service_ip, self.service_port)
def write_index(self, index_name: str):
"""
Write the dynamic index and document storage into file
Args:
index_name: str, the index name used for the file name
"""
data = {'index_name': index_name}
return request_data(data, self.service_ip, self.service_port)
def reset(self):
"""
        Reset the dynamic index and clear the document storage.
"""
data = {'reset': None}
return request_data(data, self.service_ip, self.service_port)
class ComboRetrievalService(DynamicFaissRetrievalService):
"""
Combo retrieval service client class.
It implements the retrieval services interface, has a simple client to add, reset, query, update weights
"""
def __init__(
self, tokenizer: TokenizerSpec, service_ip: str = None, service_port: int = None,
):
super().__init__(tokenizer=tokenizer, service_ip=service_ip, service_port=service_port)
def update_weights(self, weights: List[float]):
""" update the weights between the children services
Args:
weights (List[float]): weights for children services
"""
data = {"update_weight": weights}
return request_data(data, self.service_ip, self.service_port)
| NeMo-main | nemo/collections/nlp/modules/common/megatron/retrieval_services/retrieval_service.py |
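A hedged sketch of the client classes above (service addresses and ports are assumptions): for plain-string queries the tokenizer is only used to detokenize `torch.Tensor` input, so passing `tokenizer=None` is acceptable in this minimal example.

from nemo.collections.nlp.modules.common.megatron.retrieval_services.retrieval_service import (
    ComboRetrievalService,
    FaissRetrievalService,
)

static_service = FaissRetrievalService(tokenizer=None, service_ip='localhost', service_port=17179)
neighbors = static_service.get_knn(['retrieval augmented generation'], neighbors=2)
print(neighbors.shape)  # (num_queries, num_neighbors, 2 * chunk_size)

combo = ComboRetrievalService(tokenizer=None, service_ip='localhost', service_port=17181)
combo.update_weights([0.7, 0.3])  # forwarded to the combo server's 'update_weight' handler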
# coding=utf-8
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) Microsoft Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
# Most of the code here has been copied from:
# https://github.com/microsoft/mup
| NeMo-main | nemo/collections/nlp/modules/common/megatron/mup/__init__.py |
# coding=utf-8
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) Microsoft Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
from copy import copy
class InfDim:
'''A dimension with a base dimension, used for calculating μP scaling.
An `InfDim` object is made up of 2 numbers: a dimension and a base
dimension. If the base dimension is None, then this object represents a
"finite", or "non-width" dimension. Otherwise, it represents an "infinite",
or "width" dimension.
'''
def __init__(self, base_dim, dim):
self.base_dim = base_dim
self.dim = dim
def isinf(self):
return self.base_dim is not None
def width_mult(self):
'''Width multiplier used for calculating μP scaling.
If finite, return 1.
If infinite, return dim / base_dim.
'''
if self.isinf():
return self.dim / self.base_dim
return 1
def __repr__(self):
return f'InfDim({self.base_dim}, {self.dim})'
def __str__(self):
if self.isinf():
return repr(self)
return f'FinDim({self.dim})'
def __eq__(self, other: object) -> bool:
if not isinstance(other, InfDim):
return False
return self.base_dim == other.base_dim and self.dim == other.dim
class InfShape(tuple):
'''A tuple of `InfDim`s.
This is intended to be attached to each parameter tensor `p` as `p.infshape`.
'''
def __init__(self, *args, **kwargs):
tuple.__init__(*args, **kwargs)
for dim in self:
if not isinstance(dim, InfDim):
raise ValueError('Elements of InfShape needs to be of class InfDim')
# set main to be the last dimension that is infinite
# for inf x inf this is fanin
# for inf x fin or fin x inf it's the unique inf dim
# user can set this manually if necessary
self.main_idx = self.main = None
for i, dim in list(enumerate(self))[::-1]:
if dim.isinf():
self.main_idx = i
self.main = dim
break
def fanin_fanout(self):
assert len(self) >= 2, 'fanin, fanout undefined for 1-dimensional weights'
return self[1], self[0]
def fanin_fanout_mult_ratio(self):
fanin, fanout = self.fanin_fanout()
return fanin.width_mult() / fanout.width_mult()
def ninf(self):
return sum(1 for dim in self if dim.isinf())
def width_mult(self):
if self.main is not None:
return self.main.width_mult()
return 1
def base_shape(self):
return [d.base_dim for d in self]
def shape(self):
return [d.dim for d in self]
def __repr__(self):
r = tuple.__repr__(self)[1:-1]
return f'InfShape([{r}])'
def serialize(self):
d = {'base_shape': [], 'shape': []}
for infdim in self:
d['shape'].append(infdim.dim)
d['base_shape'].append(infdim.base_dim)
return d
def __eq__(self, other: object) -> bool:
if not isinstance(other, InfShape):
return False
return all(d == dd for d, dd in zip(self, other))
@classmethod
def deserialize(cls, d):
infshape = []
for base_dim, dim in zip(d['base_shape'], d['shape']):
infshape.append(InfDim(base_dim, dim))
return InfShape(infshape)
@classmethod
def from_base_shape(cls, bsh):
return InfShape([InfDim(bd, None) for bd in bsh])
def zip_infshape(base_dims, dims, fin_if_same=True):
infshape = []
for bd, d in zip(base_dims, dims):
if isinstance(bd, InfDim):
# retain bd's base_dim but overwrite dim
infdim = copy(bd)
infdim.dim = d
infshape.append(infdim)
elif isinstance(bd, int):
if bd == d and fin_if_same:
infshape.append(InfDim(None, d))
else:
infshape.append(InfDim(bd, d))
else:
raise ValueError(f'unhandled base_dim type: {type(bd)}')
return InfShape(infshape)
if __name__ == '__main__':
infshape = InfShape([InfDim(None, 100), InfDim(128, 1024), InfDim(128, 128)])
print(infshape)
print(f'{infshape.ninf()} dims are inf')
print(f'width_mult {infshape.width_mult()}')
print(zip_infshape([64, 128, 1024], [32, 128, 2048]))
| NeMo-main | nemo/collections/nlp/modules/common/megatron/mup/infshape.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) Microsoft Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
# Most of the code here has been copied from:
# https://github.com/microsoft/mup
import torch
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.utils import parallel_lm_logits
from nemo.utils import logging
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
class MuReadout(MegatronModule):
"""Drop-in replacement for all output linear layers.
An "output" linear layer is one that maps from a width dimension (e.g.,
`d_model` in a Transformer) to a non-width dimension (e.g., vocab size).
This layer implements the version of μP with a 1/width multiplier and a
constant variance initialization for both weights and biases.
Arguments:
mpu_vocab_size: model parallel size of vocabulary.
        parallel_output: whether the output logits are kept distributed across model-parallel ranks or gathered.
"""
def __init__(self, mpu_vocab_size, parallel_output):
super(MuReadout, self).__init__()
self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))
self.bias.model_parallel = True
self.bias.partition_dim = 0
self.bias.stride = 1
self.parallel_output = parallel_output
self.warn_once = False
def forward(self, hidden_states, word_embeddings_weight):
if hasattr(word_embeddings_weight, 'infshape'):
width_mult = word_embeddings_weight.infshape.width_mult()
else:
width_mult = 1.0
if not self.warn_once:
logging.warning("need to set_shape before use mu-Transfer readout layer")
self.warn_once = True
async_tensor_model_parallel_allreduce = parallel_state.get_tensor_model_parallel_world_size() > 1
output = parallel_lm_logits(
hidden_states / width_mult,
word_embeddings_weight,
self.parallel_output,
bias=self.bias,
async_tensor_model_parallel_allreduce=async_tensor_model_parallel_allreduce,
)
return output
def rescale_linear_bias(linear):
'''Rescale bias in nn.Linear layers to convert SP initialization to μP initialization.
Warning: This method is NOT idempotent and should be called only once
unless you know what you are doing.
'''
if hasattr(linear, '_has_rescaled_params') and linear._has_rescaled_params:
raise RuntimeError(
"`rescale_linear_bias` has been called once before already. Unless you know what you are doing, usually you should not be calling `rescale_linear_bias` more than once.\n"
"If you called `set_base_shapes` on a model loaded from a checkpoint, or just want to re-set the base shapes of an existing model, make sure to set the flag `rescale_params=False`.\n"
"To bypass this error and *still rescale biases*, set `linear._has_rescaled_params=False` before this call."
)
if linear.bias is None:
return
fanin_mult = linear.weight.infshape[1].width_mult()
linear.bias.data *= fanin_mult ** 0.5
linear._has_rescaled_params = True
| NeMo-main | nemo/collections/nlp/modules/common/megatron/mup/layer.py |
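A hedged sketch of `rescale_linear_bias` in isolation: the `infshape` is attached by hand here, whereas `set_base_shapes` (in the next file) normally does it; the bias is scaled by the square root of the fan-in width multiplier. The import paths follow the file paths in this package and assume its optional dependencies are available.

import torch
import torch.nn as nn

from nemo.collections.nlp.modules.common.megatron.mup.infshape import InfDim, InfShape
from nemo.collections.nlp.modules.common.megatron.mup.layer import rescale_linear_bias

linear = nn.Linear(1024, 1024)
# fan-out dim first, fan-in dim second, matching InfShape.fanin_fanout()
linear.weight.infshape = InfShape([InfDim(256, 1024), InfDim(256, 1024)])
before = linear.bias.data.clone()
rescale_linear_bias(linear)  # multiplies the bias by sqrt(fan-in width_mult) = sqrt(1024 / 256) = 2.0
assert torch.allclose(linear.bias.data, before * 2.0)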
# coding=utf-8
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) Microsoft Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
# Most of the code here has been copied from:
# https://github.com/microsoft/mup
from copy import deepcopy
import yaml
from torch import nn
from torch.nn import Linear
from torch.nn.modules.conv import _ConvNd
from nemo.collections.nlp.modules.common.megatron.mup.infshape import InfShape, zip_infshape
from nemo.collections.nlp.modules.common.megatron.mup.layer import MuReadout, rescale_linear_bias
__BSH_COMMENT__ = '''\
# This is a base shape file encoded in yaml
# - `null` indicates a dimension is "finite", i.e. a non-"width" dimension
# - a number indicates the base dimension of an "infinite" dimension, i.e. some notion of "width"
'''
def get_shapes(model):
return {name: param.shape for name, param in model.named_parameters()}
def get_infshapes(model):
return {name: param.infshape for name, param in model.named_parameters()}
def save_base_shapes(model_or_shapes, file):
if isinstance(model_or_shapes, nn.Module):
sh = get_infshapes(model_or_shapes)
elif isinstance(model_or_shapes, dict):
sh = deepcopy(model_or_shapes)
else:
raise ValueError()
sh = {k: s.base_shape() for k, s in sh.items()}
s = yaml.dump(sh, None, indent=4)
s = __BSH_COMMENT__ + s
with open(file, 'w') as f:
f.write(s)
def load_base_shapes(filename):
'''Get a dict of `InfShape` from a filename.'''
with open(filename, 'r') as f:
d = yaml.safe_load(f)
return {k: InfShape.from_base_shape(v) for k, v in d.items()}
def _dataparallel_hack(base_shapes, shapes):
'''Fix module name discrepancy caused by (Distributed)DataParallel module.
The parameters of a (Distributed)DataParallel module all have names that
start with 'module'. This causes a mismatch from non-DataParallel modules.
This function tries to match `base_shapes` to `shapes`: if the latter starts
with 'module', then make the former too; likewise if not.
'''
if all(k.startswith('module.') for k in shapes) and all(not k.startswith('module.') for k in base_shapes):
return {'module.' + k: v for k, v in base_shapes.items()}, shapes
if all(not k.startswith('module.') for k in shapes) and all(k.startswith('module.') for k in base_shapes):
        return {k[len('module.'):]: v for k, v in base_shapes.items()}, shapes
return base_shapes, shapes
def _extract_shapes(x):
'''
Input:
x: can be any of the following:
- `nn.Module`
- dict of shapes
- dict of `InfShape`
- str of path to a base shapes (.bsh) file
Output:
If `x` is dict of `InfShape`, then output itself.
If `x` is path, then output a dict of `InfShapes` loaded from `x`.
Else, output the shapes (not `InfShape`) associated to `x`
'''
if isinstance(x, nn.Module):
x_shapes = get_shapes(x)
elif isinstance(x, dict):
x_shapes = deepcopy(x)
elif isinstance(x, str):
# x is file name
x_shapes = load_base_shapes(x)
else:
raise ValueError(f'unhandled x type: {type(x)}')
return x_shapes
def _zip_infshape_dict(base_shapes, shapes):
'''make a dict of `InfShape` from two dicts of shapes.
Inputs:
base_shapes: dict of base shapes or InfShape objects
shapes: dict of shapes
Output:
dict of `InfShape` using `zip_infshape`
'''
base_shapes, shapes = _dataparallel_hack(base_shapes, shapes)
basenames = set(base_shapes.keys())
names = set(shapes.keys())
assert basenames == names, (
f'`base_shapes` has extra names {basenames - names}. ' f'`shapes` has extra names {names - basenames}.'
)
infshapes = {}
for name, bsh in base_shapes.items():
infshapes[name] = zip_infshape(bsh, shapes[name])
return infshapes
def zip_infshapes(base, target):
'''make a dict of `InfShape` from models or dicts.
Inputs:
base: a base `nn.Module` or a dict of shapes
target: a target `nn.Module` or a dict of shapes
Output:
dict of `InfShape` using `zip_infshape`
'''
base_shapes = _extract_shapes(base)
target_shapes = _extract_shapes(target)
return _zip_infshape_dict(base_shapes, target_shapes)
def clear_dims(infshape_dict):
'''
Input:
infshape_dict: dict of `InfShape`
Output:
the same dict but where all `InfDim` in all `InfShape`
have their `dim` attribute set to None
'''
d = deepcopy(infshape_dict)
for _, v in d.items():
for infdim in v:
infdim.dim = None
return d
def make_base_shapes(base_shapes, delta_shapes, savefile=None):
'''Make a base shape object from a base model/shapes and a delta model/shapes.
Inputs:
base:
a base `nn.Module` or a dict of shapes
delta:
a "delta" model or a dict of shapes, for the sole purpose of
determining which dimensions are "width" and will be scaled up and
down in the target model.
savefile:
if a string, then the resulting base shape object is serialized to
this location via yaml encoding.
Outputs:
base infshapes
'''
bsh = clear_dims(zip_infshapes(base_shapes, delta_shapes))
if savefile is not None:
save_base_shapes(bsh, savefile)
return bsh
def apply_infshapes(model, infshapes):
for name, p in model.named_parameters():
p.infshape = infshapes[name]
def set_base_shapes(model, base, rescale_params=True, delta=None, savefile=None, do_assert=True):
'''Sets the `p.infshape` attribute for each parameter `p` of `model`.
Inputs:
model: nn.Module instance
base: The base model.
Can be nn.Module, a dict of shapes, a str, or None.
If None, then defaults to `model`
If str, then treated as filename for yaml encoding of a dict of base shapes.
rescale_params:
assuming the model is initialized using the default pytorch init (or
He initialization etc that scale the same way with fanin): If True
(default), rescales parameters to have the correct (μP) variances.
delta:
a model or dict of shapes used only to determine which dimensions are
"width" dimensions (optional, analogous to `make_base_shapes`).
savefile:
if a string, the resulting infshapes are serialized to this location via yaml encoding.
do_assert:
if True (default), run `assert_hidden_size_inf` to check that no plain
`nn.Linear` maps an infinite fan-in to a finite fan-out (such layers should be `MuReadout`).
Output:
same object as `model`, after setting the `infshape` attribute of each parameter.
'''
if base is None:
base = model
base_shapes = _extract_shapes(base)
if delta is not None:
delta_shapes = _extract_shapes(delta)
base_shapes = _zip_infshape_dict(base_shapes, delta_shapes)
shapes = get_shapes(model)
infshapes = _zip_infshape_dict(base_shapes, shapes)
if savefile is not None:
save_base_shapes(infshapes, savefile)
apply_infshapes(model, infshapes)
if do_assert:
assert_hidden_size_inf(model)
if rescale_params:
for name, module in model.named_modules():
if isinstance(module, MuReadout):
module._rescale_parameters()
elif isinstance(module, (Linear, _ConvNd)):
rescale_linear_bias(module)
return model
def assert_hidden_size_inf(model):
'''
This tests for any `nn.Linear` whose output dimension is finite but input
dimension is infinite and is not of type `MuReadout`. Such `nn.Linear`
modules should not exist in a correctly parametrized model.
'''
for name, module in model.named_modules():
if isinstance(module, Linear) and not isinstance(module, MuReadout):
if not module.weight.infshape[0].isinf() and module.weight.infshape[1].isinf():
assert False, (
f'{name} has infinite fan-in and finite fan-out dimensions but is not type `MuReadout`. '
'To resolve this, either change the module to `MuReadout` or change the fan-out to an infinite dimension.'
)
| NeMo-main | nemo/collections/nlp/modules/common/megatron/mup/shape.py |
# coding=utf-8
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) Microsoft Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
# Most of the code here has been copied from:
# https://github.com/microsoft/mup
'''
Initializer functions mirroring those of `torch.nn.init`. They serve as
drop-in replacements after the user has called `set_base_shapes` on their
model.
All of the initializers here are designed to 1) behave exactly the same
as the torch versions when the model shapes are equal to their base shapes,
and 2) to scale with width correctly (according to μP), when the model shapes
differ from the base shapes. In general, this means deviating from the
torch version behaviors.
'''
import math
import warnings
import torch
from torch.nn.init import (
_calculate_correct_fan,
_calculate_fan_in_and_fan_out,
_no_grad_fill_,
_no_grad_normal_,
_no_grad_uniform_,
calculate_gain,
)
def constant_std_init_(tensor, sampler_):
assert hasattr(tensor, 'infshape'), 'Please call set_base_shapes(...)'
if tensor.infshape.ninf() <= 1:
sampler_(tensor)
elif tensor.infshape.ninf() == 2:
sampler_(tensor, scale=tensor.infshape.width_mult() ** -0.5)
else:
raise NotImplementedError()
return tensor
def uniform_(tensor, a=0, b=1):
'''Drop-in replacement of `torch.nn.init.uniform_`.
Note:
- if using this function, ensure `a` and `b` do not depend on fan-in,
fan-out, or other notions of width, e.g. if a = 0, b = 1.
- `tensor` should have `infshape` attribute set by `set_base_shapes`.
'''
assert hasattr(tensor, 'infshape'), 'Please call set_base_shapes(...)'
if a != -b:
assert tensor.infshape.ninf() == 1, 'Sampler for (inf, inf) tensors should have mean 0'
def sampler_(tensor, scale=1):
_no_grad_uniform_(tensor, a * scale, b * scale)
return constant_std_init_(tensor, sampler_)
def normal_(tensor, mean=0, std=1):
'''Drop-in replacement of `torch.nn.init.normal_`.
Note:
- if using this function, ensure `mean` and `std` do not depend on
fan-in, fan-out, or other notions of width, e.g. if mean = 0, std =
1.
- `tensor` should have `infshape` attribute set by `set_base_shapes`.
'''
if mean != 0:
assert tensor.infshape.ninf() == 1, 'Sampler for (inf, inf) tensors should have mean 0'
def sampler_(tensor, scale=1):
_no_grad_normal_(tensor, mean=mean * scale, std=std * scale)
return constant_std_init_(tensor, sampler_)
def ones_(tensor):
'''Same as `torch.nn.init.ones_`.
Note:
- `tensor` should have `infshape` attribute set by `set_base_shapes`.
'''
assert tensor.infshape.ninf() == 1, 'Sampler for (inf, inf) tensors should have mean 0'
def sampler_(tensor, scale=1):
_no_grad_fill_(tensor, scale)
return constant_std_init_(tensor, sampler_)
def eye_(tensor):
'''Same as `torch.nn.init.eye_`.
Note:
- `tensor` should have `infshape` attribute set by `set_base_shapes`.
'''
assert tensor.infshape.ninf() == 1, 'Sampler for (inf, inf) tensors should have mean 0'
return torch.nn.init.eye_(tensor)
def _inf_fan_adjust_xavier(scale, tensor):
fan_out, fan_in = tensor.infshape[:2]
# the following is needed to accommodate SP models, where all infshapes are finite and base_dims are None
fan_out_base_dim = fan_out.base_dim or fan_out.dim
fan_in_base_dim = fan_in.base_dim or fan_in.dim
scale *= math.sqrt((fan_out.dim + fan_in.dim) / (fan_out_base_dim + fan_in_base_dim))
if tensor.infshape.ninf() <= 1:
# should have fixed scale
pass
elif tensor.infshape.ninf() == 2:
# should scale like fanin
assert fan_out.isinf() and fan_in.isinf()
scale /= math.sqrt(fan_in.width_mult())
else:
raise NotImplementedError('can only handle 2 inf dimensions currently')
return scale
def xavier_uniform_(tensor, gain=1.0):
'''Drop-in replacement of `torch.nn.init.xavier_uniform_`.
Note:
- if using this function, ensure `gain` does not depend on fan-in,
fan-out, or other notions of width, e.g. if gain = 1.
- `tensor` should have `infshape` attribute set by `set_base_shapes`.
'''
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
std = _inf_fan_adjust_xavier(std, tensor)
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
return _no_grad_uniform_(tensor, -a, a)
def xavier_normal_(tensor, gain=1.0):
'''Drop-in replacement of `torch.nn.init.xavier_normal_`.
Note:
- if using this function, ensure `gain` does not depend on fan-in,
fan-out, or other notions of width, e.g. if gain = 1.
- `tensor` should have `infshape` attribute set by `set_base_shapes`.
'''
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
std = _inf_fan_adjust_xavier(std, tensor)
return _no_grad_normal_(tensor, 0.0, std)
def _inf_fan_adjust_kaiming(scale, tensor, mode):
fan_out, fan_in = tensor.infshape[:2]
if tensor.infshape.ninf() == 0:
return scale
elif tensor.infshape.ninf() == 1:
# should have fixed scale
if mode == 'fan_in' and fan_in.isinf():
scale *= fan_in.width_mult() ** 0.5
elif mode == 'fan_out' and fan_out.isinf():
scale *= fan_out.width_mult() ** 0.5
elif tensor.infshape.ninf() == 2:
# should scale like fanin
assert fan_out.isinf() and fan_in.isinf()
if mode == 'fan_out':
scale *= math.sqrt(fan_out.width_mult() / fan_in.width_mult())
else:
raise NotImplementedError('can only handle <=2 inf dimensions currently')
return scale
def kaiming_normal_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
'''Drop-in replacement of `torch.nn.init.kaiming_normal_`.
Note:
- if using this function, ensure `a` does not depend on fan-in,
fan-out, or other notions of width, e.g. if a = 0.
- `tensor` should have `infshape` attribute set by `set_base_shapes`.
'''
if 0 in tensor.shape:
warnings.warn("Initializing zero-element tensors is a no-op")
return tensor
fan = _calculate_correct_fan(tensor, mode)
gain = calculate_gain(nonlinearity, a)
std = _inf_fan_adjust_kaiming(gain / math.sqrt(fan), tensor, mode)
with torch.no_grad():
return tensor.normal_(0, std)
def kaiming_uniform_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
'''Drop-in replacement of `torch.nn.init.kaiming_uniform_`.
Note:
- if using this function, ensure `a` does not depend on fan-in,
fan-out, or other notions of width, e.g. if a = 0.
- `tensor` should have `infshape` attribute set by `set_base_shapes`.
'''
if 0 in tensor.shape:
warnings.warn("Initializing zero-element tensors is a no-op")
return tensor
fan = _calculate_correct_fan(tensor, mode)
gain = calculate_gain(nonlinearity, a)
std = _inf_fan_adjust_kaiming(gain / math.sqrt(fan), tensor, mode)
bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
with torch.no_grad():
return tensor.uniform_(-bound, bound)
try:
from torch.nn.init import _no_grad_trunc_normal_
def trunc_normal_(tensor, mean=0, std=1, a=-2, b=2):
'''Drop-in replacement of `torch.nn.init.trunc_normal_`.
Note:
- if using this function, ensure `mean`, `std`, `a`, `b` do not
depend on fan-in, fan-out, or other notions of width, e.g. if
mean = 0, std = 1, a = -2, b = 2.
- `tensor` should have `infshape` attribute set by
`set_base_shapes`.
'''
if mean != 0 or a != -b:
assert tensor.infshape.ninf() == 1, 'Sampler for (inf, inf) tensors should have mean 0'
def sampler_(tensor, scale=1):
_no_grad_trunc_normal_(tensor, mean=mean * scale, std=std * scale, a=a * scale, b=b * scale)
return constant_std_init_(tensor, sampler_)
except (ImportError, ModuleNotFoundError):
warnings.warn(
'Failed to import _no_grad_trunc_normal_ from torch.nn.init; '
'you might be running an older version of torch. trunc_normal_ will not work.'
)
def trunc_normal_(tensor, mean=0, std=1, a=-2, b=2):
warnings.warn('Please upgrade your Pytorch version before using truncated normal.')
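# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): re-initializing
# a widened layer with the width-aware drop-in initializers above. A minimal
# setup is assumed in which `set_base_shapes` from the sibling `shape` module
# has attached `infshape` attributes to the parameters; the layer sizes are
# arbitrary example values.
if __name__ == "__main__":
    import torch.nn as nn
    from nemo.collections.nlp.modules.common.megatron.mup.shape import set_base_shapes

    base, target = nn.Linear(64, 64), nn.Linear(1024, 1024)
    set_base_shapes(target, base)                          # parameters now carry `infshape`
    kaiming_normal_(target.weight, nonlinearity='relu')    # std scales like 1/sqrt(fan_in)
    normal_(target.bias, mean=0.0, std=0.02)               # fixed std for the vector-like bias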
| NeMo-main | nemo/collections/nlp/modules/common/megatron/mup/init.py |
# coding=utf-8
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) Microsoft Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
# Most of the code here has been copied from:
# https://github.com/microsoft/mup
'''
Optimizers with μP scaling.
Here we provide 3 ready-to-go optimizers MuAdam, MuAdamW, and MuSGD.
However, the user can easily convert their own optimizer to a μP
optimizer: if your `optimizer` is "Adam-like" (e.g. RMSprop or Adagrad),
i.e. it normalizes the gradient entrywise, then the following creates
the desired μP optimizer:
def MuOptimizer(params, **kwargs):
return MuAdam(params, impl=optimizer, **kwargs)
On the other hand, if your `optimizer` is "SGD-like", such as ASGD, then
the following creates the desired μP optimizer:
def MuOptimizer(params, **kwargs):
return MuSGD(params, impl=optimizer, **kwargs)
See Appendix B in our paper for discussions of other optimizers.
'''
from collections import defaultdict
from torch.optim import SGD, Adam, AdamW
def process_param_groups(params, **kwargs):
param_groups = list(params)
if not isinstance(param_groups[0], dict):
param_groups = [{'params': param_groups}]
for param_group in param_groups:
if 'lr' not in param_group:
param_group['lr'] = kwargs['lr']
if 'weight_decay' not in param_group:
param_group['weight_decay'] = kwargs.get('weight_decay', 0.0)
return param_groups
def MuAdam(params, impl=Adam, decoupled_wd=True, **kwargs):
'''Adam with μP scaling.
Note for this to work properly, your model needs to have its base shapes set
already using `mup.set_base_shapes`.
Inputs:
impl: the specific Adam-like optimizer implementation from torch.optim or
elsewhere
decoupled_wd: if True, skips the mup scaling for weight decay, which should
be used for optimizer implementations that decouple weight decay from
learning rate. See https://github.com/microsoft/mup/issues/1 for a use case.
Outputs:
An instance of `impl` with refined parameter groups, each of which has the correctly
scaled learning rate according to mup.
'''
new_param_groups = []
for param_group in process_param_groups(params, **kwargs):
# For every existing param group, we split into several new groups
def new_group():
new_g = {k: v for k, v in param_group.items() if k != 'params'}
new_g['params'] = []
return new_g
# The matrix-like weights might need multiple groups since weights
# might have different width multipliers
matrix_like_p = defaultdict(new_group) # key is width_mult
vector_like_p = new_group()
for p in param_group['params']:
assert hasattr(p, 'infshape'), (
f'A parameter with shape {p.shape} does not have `infshape` attribute. '
'Did you forget to call `mup.set_base_shapes` on the model?'
)
if p.infshape.ninf() == 2:
matrix_like_p[p.infshape.width_mult()]['params'].append(p)
elif p.infshape.ninf() > 2:
raise NotImplementedError('more than 2 inf dimensions')
else:
vector_like_p['params'].append(p)
for width_mult, group in matrix_like_p.items():
# Scale learning rate and weight decay accordingly
group['lr'] /= width_mult
if not decoupled_wd:
group['weight_decay'] *= width_mult
new_param_groups.extend(list(matrix_like_p.values()) + [vector_like_p])
return impl(new_param_groups, **kwargs)
def MuAdamW(params, **kwargs):
'''AdamW with μP scaling.
Note for this to work properly, your model needs to have its base shapes set
already using `mup.set_base_shapes`.
'''
return MuAdam(params, impl=AdamW, **kwargs)
def MuSGD(params, impl=SGD, decoupled_wd=False, **kwargs):
'''SGD with μP scaling.
Note for this to work properly, your model needs to have its base shapes set
already using `mup.set_base_shapes`.
Inputs:
impl: the specific SGD-like optimizer implementation from torch.optim or
elsewhere
decoupled_wd: if True, skips the mup scaling for weight decay, which should
be used for optimizer implementations that decouple weight decay from
learning rate. See https://github.com/microsoft/mup/issues/1 for a use case.
Outputs:
An instance of `impl` with refined parameter groups, each of which has the correctly
scaled learning rate according to mup.
'''
new_param_groups = []
for param_group in process_param_groups(params, **kwargs):
# For every existing param group, we split into several new groups
def new_group():
new_g = {k: v for k, v in param_group.items() if k != 'params'}
new_g['params'] = []
return new_g
# The matrix-like weights might need multiple groups since weights
# might have different width multipliers
vector_like_p = defaultdict(new_group) # key is width mult
matrix_like_p = defaultdict(new_group) # key is fan_in/out ratio
fixed_p = new_group()
for p in param_group['params']:
assert hasattr(p, 'infshape'), (
f'A parameter with shape {p.shape} does not have `infshape` attribute. '
'Did you forget to call `mup.set_base_shapes` on the model?'
)
if p.infshape.ninf() == 1:
vector_like_p[p.infshape.width_mult()]['params'].append(p)
elif p.infshape.ninf() == 2:
matrix_like_p[p.infshape.fanin_fanout_mult_ratio()]['params'].append(p)
elif p.infshape.ninf() > 2:
raise NotImplementedError('more than 2 inf dimensions')
else:
fixed_p['params'].append(p)
for width_mult, group in vector_like_p.items():
# Scale learning rate and weight decay accordingly
group['lr'] *= width_mult
if not decoupled_wd:
group['weight_decay'] /= width_mult
for shape_ratio, group in matrix_like_p.items():
group['lr'] /= shape_ratio
if not decoupled_wd:
group['weight_decay'] *= shape_ratio
new_param_groups.extend(list(matrix_like_p.values()) + list(vector_like_p.values()) + [fixed_p])
return impl(new_param_groups, **kwargs)
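# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): building μP
# optimizers for a model whose base shapes were set with `set_base_shapes`
# from the sibling `shape` module. `MuRMSprop` is a hypothetical wrapper that
# follows the recipe in the module docstring above; layer sizes and
# hyperparameters are arbitrary example values.
if __name__ == "__main__":
    import torch
    import torch.nn as nn
    from nemo.collections.nlp.modules.common.megatron.mup.shape import set_base_shapes

    base, model = nn.Linear(64, 64), nn.Linear(1024, 1024)
    set_base_shapes(model, base)  # attaches `infshape` to every parameter

    opt_adam = MuAdam(model.parameters(), lr=1e-3, weight_decay=0.01)
    opt_sgd = MuSGD(model.parameters(), lr=0.1, momentum=0.9)

    # Wrapping a different Adam-like optimizer, per the module docstring.
    def MuRMSprop(params, **kwargs):
        return MuAdam(params, impl=torch.optim.RMSprop, **kwargs)

    opt_rms = MuRMSprop(model.parameters(), lr=1e-3)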
| NeMo-main | nemo/collections/nlp/modules/common/megatron/mup/optim.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | nemo/collections/nlp/modules/common/megatron/adapters/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from megatron.core.models.gpt.gpt_embedding import GPTEmbedding
from megatron.core.transformer.attention import SelfAttention
from megatron.core.transformer.transformer_layer import TransformerLayer
from megatron.core.utils import make_viewless_tensor
from nemo.collections.nlp.modules.common.megatron.adapters.parallel_adapters import (
AdapterName,
LoraKQVAdapterConfig,
ParallelLinearAdapterConfig,
PromptEncoderAdapterConfig,
)
from nemo.core import adapter_mixins
def swap_mcore_mixin(module, mcore_mixin):
"""
Casts module to mcore_mixin and register corresponding adapters.
"""
module.__class__ = mcore_mixin
module.mcore_register_adapters()
class MCoreAdapterModuleMixin(adapter_mixins.AdapterModuleMixin):
def mcore_register_adapters(self):
"""
Performs any necessary setup after swapping class.
Must use self.set_accepted_adapter_types([<NeMo adapter config>_target_]) to register adapter.
"""
raise NotImplementedError("Mcore mixins should implement setup_adapters on a subclass of MyBase")
class MCoreSelfAttentionMixin(SelfAttention, MCoreAdapterModuleMixin):
def mcore_register_adapters(self):
"""
Setup NeMo LoRA adapter to this MCore layer.
"""
self.set_accepted_adapter_types([LoraKQVAdapterConfig._target_]) # only self attn (packed qkv) for now
self.linear_qkv.return_layernorm_output = True # need layernorm output for lora mlp
def get_query_key_value_tensors(self, hidden_states, key_value_states=None):
"""
Derives `query`, `key` and `value` tensors from `hidden_states`.
"""
# Attention heads [sq, b, h] --> [sq, b, ng * (np/ng + 2) * hn]
(mixed_qkv, layernorm_output), _ = self.linear_qkv(hidden_states)
# LoRA logic
if self.is_adapter_available():
lora_kqv_adapter = self.get_adapter_module(AdapterName.LORA_KQV_ADAPTER)
if lora_kqv_adapter:
lora_mixed_qkv = lora_kqv_adapter(layernorm_output)
mixed_qkv = mixed_qkv + lora_mixed_qkv
# [sq, b, hp] --> [sq, b, ng, (np/ng + 2) * hn]
new_tensor_shape = mixed_qkv.size()[:-1] + (
self.num_query_groups_per_partition,
(
(self.num_attention_heads_per_partition // self.num_query_groups_per_partition + 2)
* self.hidden_size_per_attention_head
),
)
mixed_qkv = mixed_qkv.view(*new_tensor_shape)
# [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn]
(query, key, value) = torch.split(
mixed_qkv,
[
(
self.num_attention_heads_per_partition
// self.num_query_groups_per_partition
* self.hidden_size_per_attention_head
),
self.hidden_size_per_attention_head,
self.hidden_size_per_attention_head,
],
dim=3,
)
# [sq, b, ng, np/ng * hn] -> [sq, b, np, hn]
query = query.reshape(query.size(0), query.size(1), -1, self.hidden_size_per_attention_head)
return query, key, value
class MCoreGPTEmbeddingMixin(GPTEmbedding, MCoreAdapterModuleMixin):
def mcore_register_adapters(self):
"""
Setup NeMo ptuning adapter to this MCore layer.
"""
self.set_accepted_adapter_types([PromptEncoderAdapterConfig._target_])
def forward(self, input_ids, position_ids):
encoder_input = super().forward(input_ids, position_ids)
if self.is_adapter_available():
_sq, _bs, _hs = encoder_input.size()
ptuning_adapter = self.get_adapter_module(AdapterName.PTUNING_ADAPTER)
v = ptuning_adapter.virtual_tokens
if ptuning_adapter and _sq >= v:  # The sequence should be longer than v to insert virtual embeddings.
virtual_embeddings = ptuning_adapter(_bs)
encoder_input = encoder_input[
v:, :, :
] # the first v tokens are pads so that they can be swapped out with virtual embeddings.
encoder_input = torch.concat([virtual_embeddings, encoder_input], dim=0)
return encoder_input
class MCoreTransformerLayerMixin(TransformerLayer, MCoreAdapterModuleMixin):
def mcore_register_adapters(self):
self.set_accepted_adapter_types([ParallelLinearAdapterConfig._target_])
def forward(
self,
hidden_states,
attention_mask,
encoder_output=None,
enc_dec_attn_mask=None,
inference_params=None,
rotary_pos_emb=None,
):
# hidden_states: [s, b, h]
# Layer norm at the beginning of the transformer layer.
layernorm_output = self.input_layernorm(hidden_states)
# Self attention.
attention_output_with_bias = self.self_attention(
layernorm_output, attention_mask, inference_params=inference_params, rotary_pos_emb=rotary_pos_emb,
)
# adapter logic
if self.is_adapter_available():
adapter_1 = self.get_adapter_module(AdapterName.PRE_ATTN_ADAPTER)
if adapter_1:
attention_output, bias = attention_output_with_bias
attention_output = (
adapter_1(attention_output) + attention_output
) # simple adapter call with residual connection
attention_output_with_bias = (attention_output, bias)
# Residual connection.
if self.config.apply_residual_connection_post_layernorm:
residual = layernorm_output
else:
residual = hidden_states
# bias_dropout_add fusion returning fp32 instead of bf16
with self.bias_dropout_add_exec_handler():
layernorm_input = self.bias_dropout_add_func(
attention_output_with_bias, residual, self.config.hidden_dropout
)
# Layer norm post the self attention.
layernorm_output = self.post_self_attn_layernorm(layernorm_input)
# MLP.
mlp_output_with_bias = self.mlp(layernorm_output)
# adapter logic
if self.is_adapter_available():
adapter_2 = self.get_adapter_module(AdapterName.POST_ATTN_ADAPTER)
if adapter_2:
mlp_output, bias = mlp_output_with_bias
mlp_output = adapter_2(mlp_output) + mlp_output # simple adapter call with residual connection
mlp_output_with_bias = (mlp_output, bias)
# Second residual connection.
if self.config.apply_residual_connection_post_layernorm:
residual = layernorm_output
else:
residual = layernorm_input
with self.bias_dropout_add_exec_handler():
output = self.bias_dropout_add_func(mlp_output_with_bias, residual, self.config.hidden_dropout)
# Jit compiled function creates 'view' tensor. This tensor
# potentially gets saved in the MPU checkpoint function context,
# which rejects view tensors. While making a viewless tensor here
# won't result in memory savings (like the data loader, or
# p2p_communication), it serves to document the origin of this
# 'view' tensor.
output = make_viewless_tensor(inp=output, requires_grad=output.requires_grad, keep_graph=True)
return output
| NeMo-main | nemo/collections/nlp/modules/common/megatron/adapters/mcore_mixins.py |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import logging
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.init as init
from nemo.collections.common.parts.adapter_modules import AdapterModuleUtil
from nemo.collections.common.parts.utils import activation_registry
from nemo.collections.nlp.modules.common.megatron.fused_bias_gelu import fused_bias_gelu
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults, init_method_const, init_method_normal
from nemo.core.classes.mixins import adapter_mixin_strategies
try:
from apex.normalization.fused_layer_norm import MixedFusedLayerNorm
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import ModelParallelConfig
from megatron.core.tensor_parallel import ColumnParallelLinear, RowParallelLinear
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
class AdapterName(str, enum.Enum):
"""
Names for adapters used in NLP Adapters and IA3. Note: changing this will break backward compatibility.
"""
MLP_INFUSED = "mlp_infused_adapter"
KEY_INFUSED = "key_infused_adapter"
VALUE_INFUSED = "value_infused_adapter"
PRE_ATTN_ADAPTER = 'adapter_1'
POST_ATTN_ADAPTER = 'adapter_2'
PTUNING_ADAPTER = "ptuning_adapter"
LORA_KQV_ADAPTER = "lora_kqv_adapter"
LORA_KV_ADAPTER = "lora_kv_adapter"
LORA_Q_ADAPTER = "lora_q_adapter"
class InfusedAdapter(nn.Module, AdapterModuleUtil):
def __init__(
self, in_features: int, model_parallel_config: Optional[ModelParallelConfig] = None, **kwargs
) -> None:
super().__init__()
if model_parallel_config is None:
model_parallel_config = ModelParallelConfig()
self.scalers = nn.Parameter(torch.ones(in_features))
# cast all parameters when using amp O2 training
if model_parallel_config.bf16:
self.bfloat16()
elif model_parallel_config.fp16:
self.half()
# Setup adapter strategy
self.setup_adapter_strategy(adapter_mixin_strategies.ReturnResultAdapterStrategy())
def forward(self, x):
x = x * self.scalers[None, None, :]
return x
class MLPInfusedAdapter(InfusedAdapter):
"""
MLPInfusedAdapter is basically a clone of InfusedAdapter. We do this to make the adapter_mixin agnostic to adapter names
and only check adapter class types.
"""
pass
@dataclass
class InfusedAdapterConfig:
in_features: int
_target_: str = "{0}.{1}".format(InfusedAdapter.__module__, InfusedAdapter.__name__)
@dataclass
class MLPInfusedAdapterConfig(InfusedAdapterConfig):
_target_: str = "{0}.{1}".format(MLPInfusedAdapter.__module__, MLPInfusedAdapter.__name__)
class ParallelLinearAdapter(nn.Module, AdapterModuleUtil):
def __init__(
self,
in_features: int,
out_features: int,
dim: int,
activation: str = 'swish',
norm_position: Optional[str] = 'post',
norm_type: Optional[str] = 'mixedfusedlayernorm',
column_init_method: str = 'xavier', # TODO: (@adithyare) should rename this to input_init_method to be more precise.
row_init_method: str = 'zero', # TODO: (@adithyare) should rename this to output_init_method to be more precise.
gather_output: bool = True,
dropout: float = 0.0,
model_parallel_config: Optional[ModelParallelConfig] = None,
**kwargs,
):
super().__init__()
if not HAVE_APEX:
logging.info("Apex is required to use ParallelLinearAdapters.")
raise RuntimeError("ParallelLinearAdapter can not run without Apex.")
if not HAVE_MEGATRON_CORE:
logging.info("Megatron-core is required to use ParallelLinearAdapters.")
raise RuntimeError("ParallelLinearAdapter can not run without Megatron-core.")
self.activation = activation_registry[activation]()
self.norm_position = norm_position
# megatron_gpt_peft_models will provide this arg, but deprecated ones do not.
# in case this arg is not provided, use the dummy default config.
if model_parallel_config is None:
model_parallel_config = ModelParallelConfig()
self.linear_in = ColumnParallelLinear(
in_features,
dim,
config=model_parallel_config,
bias=False,
gather_output=True,
init_method=self._get_init_fn(column_init_method),
)
if gather_output:
self.linear_out = RowParallelLinear(
dim,
out_features,
config=model_parallel_config,
bias=False,
init_method=self._get_init_fn(row_init_method),
)
else:
# (@adithyare) we use this option to mirror the behavior of a column parallel layer with two low-rank column parallel layers
# if the original column parallel layer uses gather_output=False, then we will use the self.linear_out layer defined below.
self.linear_out = ColumnParallelLinear(
dim,
out_features,
config=model_parallel_config,
bias=False,
gather_output=False,
init_method=self._get_init_fn(row_init_method),
)
if self.norm_position in ["pre", "post"]:
ln_features = in_features if self.norm_position == "pre" else out_features
if norm_type == 'mixedfusedlayernorm':
self.layer_norm = MixedFusedLayerNorm(ln_features, 1e-5, sequence_parallel_enbaled=False)
elif norm_type == 'layernorm':
self.layer_norm = nn.LayerNorm(ln_features)
else:
raise NotImplementedError("norm_type should be either mixedfusedlayernorm or layernorm")
else:
self.layer_norm = None
if dropout > 0.0:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
# cast all parameters when using amp O2 training
if model_parallel_config.bf16:
self.bfloat16()
elif model_parallel_config.fp16:
self.half()
# Setup adapter strategy
self.setup_adapter_strategy(adapter_mixin_strategies.ReturnResultAdapterStrategy())
def _get_init_fn(self, init_method: str):
if init_method == 'xavier':
init_fn = init.xavier_normal_
elif init_method == 'normal':
init_fn = init_method_normal(0.2)
elif init_method == "zero":
init_fn = init_method_const(0.0)
else:
raise NotImplementedError("out_init_method should be zero, normal or xavier")
return init_fn
def adapter_unfreeze(self,):
"""
Can be customized to allow for selective training of only some params in the PEFT.
"""
super().adapter_unfreeze()
def forward(self, x):
if self.norm_position == 'pre':
x = self.layer_norm(x)
x, _ = self.linear_in(x) # (@adithyare) ColumnLinear returns output and bias, we are ignoring the bias term.
x = self.activation(x)
x, _ = self.linear_out(x)
if self.norm_position == 'post':
x = self.layer_norm(x)
# Add dropout if available
if self.dropout is not None:
x = self.dropout(x)
return x
@dataclass
class ParallelLinearAdapterConfig:
in_features: int
out_features: int
dim: int
activation: str = 'swish'
norm_position: Optional[str] = 'post'
norm_type: Optional[str] = 'mixedfusedlayernorm'
column_init_method: str = 'xavier'
row_init_method: str = 'zero'
gather_output: bool = True
dropout: float = 0.0
_target_: str = "{0}.{1}".format(ParallelLinearAdapter.__module__, ParallelLinearAdapter.__name__)
class LoraKQVAdapter(ParallelLinearAdapter):
"""
LoRA adapters share the same architecture as regular adapters but may have different input and output
feature sizes, and they do not use a bottleneck activation function.
"""
pass
class LoraKVAdapter(ParallelLinearAdapter):
"""
LoRA adapters share the same architecture as regular adapters but may have different input and output
feature sizes, and they do not use a bottleneck activation function.
"""
pass
class LoraQAdapter(ParallelLinearAdapter):
"""
LoRA adapters share the same architecture as regular adapters but may have different input and output
feature sizes, and they do not use a bottleneck activation function.
"""
pass
@dataclass
class LoraKQVAdapterConfig(ParallelLinearAdapterConfig):
_target_: str = "{0}.{1}".format(LoraKQVAdapter.__module__, LoraKQVAdapter.__name__)
@dataclass
class LoraQAdapterConfig(ParallelLinearAdapterConfig):
_target_: str = "{0}.{1}".format(LoraQAdapter.__module__, LoraQAdapter.__name__)
@dataclass
class LoraKVAdapterConfig(ParallelLinearAdapterConfig):
_target_: str = "{0}.{1}".format(LoraKVAdapter.__module__, LoraKVAdapter.__name__)
class PromptEncoderAdapter(nn.Module, AdapterModuleUtil):
"""
The Tensor Parallel MLP prompt encoder network that is used to generate the virtual
token embeddings for p-tuning. It only has two layers.
TODO: (@adithyare) Need to add all the functionality from the PromptEncoder class
"""
def __init__(
self,
virtual_tokens: int,
bottleneck_dim: int,
embedding_dim: int,
init_std: float,
output_dim: int,
model_parallel_config: Optional[ModelParallelConfig] = None,
**kwargs,
):
"""
Initializes the Tensor Model parallel MLP PromptEncoderMLP module.
Args:
virtual_tokens: the number of virtual tokens
bottleneck_dim: the hidden dimension of the two-layer MLP
embedding_dim: the dimension of the virtual token embeddings
output_dim: the output dimension
init_std: the MLP init std value
"""
super().__init__()
self.bottleneck_dim = bottleneck_dim
self.embedding_dim = embedding_dim
self.output_dim = output_dim
self.virtual_tokens = virtual_tokens
self.activation = "gelu"
if model_parallel_config is None:
model_parallel_config = ModelParallelConfig()
sequence_parallel = False
gradient_accumulation_fusion = False
# (@adithyare) persistent=False keeps `indices` out of this module's state_dict.
self.register_buffer("indices", torch.LongTensor(list(range(self.virtual_tokens))), persistent=False)
self.embedding = torch.nn.Embedding(self.virtual_tokens, self.embedding_dim)
self.register_buffer("inference_table", torch.Tensor(self.virtual_tokens, self.output_dim), persistent=True)
self.is_inference_ready = False
self.first = ColumnParallelLinear(
self.embedding_dim,
self.bottleneck_dim,
config=model_parallel_config,
gather_output=False,
init_method=init_method_normal(init_std),
skip_bias_add=True,
bias=True,
)
self.second = RowParallelLinear(
self.bottleneck_dim,
self.output_dim,
config=model_parallel_config,
input_is_parallel=True,
init_method=init_method_normal(init_std),
skip_bias_add=True,
bias=True,
)
# cast all parameters when using amp O2 training
if model_parallel_config.bf16:
self.bfloat16()
elif model_parallel_config.fp16:
self.half()
# Setup adapter strategy
self.setup_adapter_strategy(adapter_mixin_strategies.ReturnResultAdapterStrategy())
def set_inference_table(self, prompt_representation: torch.Tensor):
"""
This method caches the output representation from the Encoder and saves it inside `self.inference_table`.
"""
prompt_representation = prompt_representation.detach().clone()
self.inference_table.data = prompt_representation
self.is_inference_ready = True
return True
def clear_inference_table(self,):
self.inference_table.fill_(0.0)
self.is_inference_ready = False
def get_inference_table(self,):
return self.inference_table.data
def inner_forward(self,):
input_embeds = self.embedding(self.indices).unsqueeze(0)
intermediate_parallel, bias_parallel = self.first(input_embeds)
intermediate_parallel = fused_bias_gelu(intermediate_parallel, bias_parallel)
output_embeds, bias_parallel = self.second(intermediate_parallel)
output_embeds = output_embeds + bias_parallel
output_embeds = output_embeds.transpose(0, 1)
return output_embeds
def forward(self, batch_size: int, use_cached_reps: bool = False) -> torch.Tensor:
"""
Forward pass through the encoder with caching of prompt representations
"""
if use_cached_reps:
output_embeds = self.get_inference_table().unsqueeze(1)
else:
if self.training:
if self.is_inference_ready:
self.clear_inference_table()
output_embeds = self.inner_forward()
else:
output_embeds = self.inner_forward()
if not self.is_inference_ready:
output_embeds = self.inner_forward()
self.set_inference_table(output_embeds.squeeze(1))
output_embeds = self.get_inference_table().unsqueeze(1)
output_embeds = output_embeds.expand(self.virtual_tokens, batch_size, self.output_dim)
return output_embeds
@dataclass
class PromptEncoderAdapterConfig:
virtual_tokens: int
bottleneck_dim: int
embedding_dim: int
init_std: float
output_dim: int
_target_: str = "{0}.{1}".format(PromptEncoderAdapter.__module__, PromptEncoderAdapter.__name__)
class ParallelLinearAdapterWeightTying(ParallelLinearAdapter):
"""
Extends parallel linear adapter for weight tying by providing a position embedding and convenience methods for tying weights
"""
def __init__(
self,
in_features: int,
out_features: int,
dim: int,
activation: str = 'swish',
norm_position: Optional[str] = 'post',
norm_type: Optional[str] = 'mixedfusedlayernorm',
column_init_method: str = 'xavier', # TODO: (@adithyare) should rename this to input_init_method to be more precise.
row_init_method: str = 'zero', # TODO: (@adithyare) should rename this to output_init_method to be more precise.
gather_output: bool = True,
dropout: float = 0.0,
num_position_embeddings: int = 1,
dim_position_embeddings: int = 1024,
position_embedding_strategy: Optional[str] = "add",
model_parallel_config: Optional[ModelParallelConfig] = None,
**kwargs,
):
self.position_embeddings = None
self.mlp = None
self.position_embedding_strategy = position_embedding_strategy
assert self.position_embedding_strategy in ["add", "concat", "mlpconcat", "biasadd", None]
if self.position_embedding_strategy == "concat":
in_features += dim_position_embeddings
elif self.position_embedding_strategy == "mlpconcat":
in_features += dim_position_embeddings
elif self.position_embedding_strategy == "biasadd":
assert (
out_features == dim_position_embeddings
), "adapter output feature size should match position emb size to bias add"
elif self.position_embedding_strategy == "add":
assert (
in_features == dim_position_embeddings
), "adapter input feature size should match position emb size to add"
super().__init__(
in_features,
out_features,
dim,
activation,
norm_position,
norm_type,
column_init_method,
row_init_method,
gather_output,
dropout,
model_parallel_config,
**kwargs,
)
if self.position_embedding_strategy:
self.position_embeddings = torch.nn.Embedding(num_position_embeddings, dim_position_embeddings)
self.position_embeddings.weight.data.fill_(0.0)
if self.position_embedding_strategy == "mlpconcat":
self.mlp = torch.nn.Sequential(
torch.nn.Linear(dim_position_embeddings, dim_position_embeddings, bias=False),
torch.nn.GELU(),
torch.nn.Linear(dim_position_embeddings, dim_position_embeddings, bias=False),
)
self.register_buffer("position_id", torch.LongTensor([1]), persistent=False)
def set_position(self, position_id):
self.position_id *= position_id
def tie_weights(self, position_id, adapter):
self.set_position(position_id)
if self.linear_in:
self.linear_in.weight = adapter.linear_in.weight
if self.linear_out:
self.linear_out.weight = adapter.linear_out.weight
if self.layer_norm:
self.layer_norm.weight = adapter.layer_norm.weight
self.layer_norm.bias = adapter.layer_norm.bias
if self.mlp:
self.mlp[0].weight = adapter.mlp[0].weight
self.mlp[2].weight = adapter.mlp[2].weight
if self.position_embeddings:
self.position_embeddings.weight = adapter.position_embeddings.weight
return True
def forward(self, x):
if self.position_embedding_strategy:
pos = self.position_embeddings(self.position_id).unsqueeze(0)
if self.position_embedding_strategy == "add":
pos = pos.expand_as(x)
x = x + pos
elif self.position_embedding_strategy == "concat":
pos = pos.expand(x.shape[0], x.shape[1], pos.shape[2])
x = torch.cat((x, pos), dim=2)
elif self.position_embedding_strategy == "mlpconcat":
pos = pos.expand(x.shape[0], x.shape[1], pos.shape[2])
pos = self.mlp(pos)
x = torch.cat((x, pos), dim=2)
if self.norm_position == 'pre':
x = self.layer_norm(x)
x, _ = self.linear_in(x) # (@adithyare) ColumnLinear returns output and bias, we are ignoring the bias term.
x = self.activation(x)
x, _ = self.linear_out(x)
if self.norm_position == 'post':
x = self.layer_norm(x)
if self.position_embedding_strategy == "biasadd":
pos = pos.expand_as(x)
x = x + pos
# Add dropout if available
if self.dropout is not None:
x = self.dropout(x)
return x
@dataclass
class ParallelLinearAdapterWeightTyingConfig:
in_features: int
out_features: int
dim: int
activation: str = 'swish'
norm_position: Optional[str] = 'post'
norm_type: Optional[str] = 'mixedfusedlayernorm'
column_init_method: str = 'xavier'
row_init_method: str = 'zero'
gather_output: bool = True
dropout: float = 0.0
num_position_embeddings: int = 1
dim_position_embeddings: int = 1024
position_embedding_strategy: Optional[str] = "concat"
_target_: str = "{0}.{1}".format(
ParallelLinearAdapterWeightTying.__module__, ParallelLinearAdapterWeightTying.__name__
)
class LoraKQVAdapterWeightTying(ParallelLinearAdapterWeightTying):
"""
TODO
"""
pass
@dataclass
class LoraKQVAdapterWeightTyingConfig(ParallelLinearAdapterWeightTyingConfig):
_target_: str = "{0}.{1}".format(LoraKQVAdapterWeightTying.__module__, LoraKQVAdapterWeightTying.__name__)
| NeMo-main | nemo/collections/nlp/modules/common/megatron/adapters/parallel_adapters.py |
# coding=utf-8
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from einops import rearrange
from torch import einsum, nn
__all__ = ['RotaryEmbedding', 'apply_rotary_pos_emb']
class RotaryEmbedding(nn.Module):
"""
Implements Rotary Position Embedding from https://arxiv.org/abs/2104.09864.
"""
def __init__(self, dim: int, seq_len_interpolation_factor: int = None):
"""
Args:
dim (int): rotary embedding dimension
seq_len_interpolation_factor (int): if not None, discrete positions will be interpolated
by this factor via the trick in https://arxiv.org/abs/2306.15595.
"""
super().__init__()
self.seq_len_interpolation_factor = seq_len_interpolation_factor
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, max_seq_len, offset=0):
seq = torch.arange(max_seq_len, device=self.inv_freq.device) + offset
if self.seq_len_interpolation_factor is not None:
seq = seq.type_as(self.inv_freq)
seq *= 1 / self.seq_len_interpolation_factor
freqs = einsum('i , j -> i j', seq.type_as(self.inv_freq), self.inv_freq)
# first part even vector components, second part odd vector components,
# 2 * dim in dimension size
emb = torch.cat((freqs, freqs), dim=-1)
# emb [seq_length, .., dim]
return rearrange(emb, 'n d -> n 1 1 d')
def _rotate_half(x):
"""
change sign so the last dimension
[A, B, C, D] -> [-C, -D, A, B]
"""
x = rearrange(x, '... (j d) -> ... j d', j=2)
x1, x2 = x.unbind(dim=-2)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(t, freqs):
"""
input tensor t is of shape [seq_length, ..., dim]
rotary positional embedding tensor freqs is of shape [seq_length, ..., dim]
check https://kexue.fm/archives/8265 for detailed formulas
"""
rot_dim = freqs.shape[-1]
# ideally t_pass is empty so rotary pos embedding is applied to all tensor t
t, t_pass = t[..., :rot_dim], t[..., rot_dim:]
# first part is cosine component
# second part is sine component, need to change signs with _rotate_half method
t = (t * freqs.cos()) + (_rotate_half(t) * freqs.sin())
return torch.cat((t, t_pass), dim=-1)
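# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): applying rotary
# embeddings to a query tensor laid out as [seq, batch, heads, head_dim],
# which is the layout assumed by `apply_rotary_pos_emb` above. The sizes are
# arbitrary example values.
if __name__ == "__main__":
    head_dim, seq_len, batch, heads = 64, 16, 2, 8

    rope = RotaryEmbedding(dim=head_dim)
    freqs = rope(max_seq_len=seq_len)          # [seq_len, 1, 1, head_dim]

    q = torch.randn(seq_len, batch, heads, head_dim)
    q_rot = apply_rotary_pos_emb(q, freqs)     # same shape as q
    assert q_rot.shape == q.shape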
| NeMo-main | nemo/collections/nlp/modules/common/megatron/position_embedding/rotary_position_embedding.py |
# coding=utf-8
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
__all__ = ['ALiBiRelativePositionEmbedding']
def get_slopes(n):
def get_slopes_power_of_2(n):
start = 2 ** (-(2 ** -(math.log2(n) - 3)))
ratio = start
return [start * ratio ** i for i in range(n)]
if math.log2(n).is_integer():
slopes = get_slopes_power_of_2(n)
else:
closest_power_of_2 = 2 ** math.floor(math.log2(n))
slopes = (
get_slopes_power_of_2(closest_power_of_2)
+ get_slopes(2 * closest_power_of_2)[0::2][: n - closest_power_of_2]
)
return slopes
def build_slopes(num_attention_heads, num_attention_heads_alibi):
"""
Builds a slopes tensor.
"""
slopes = (
torch.Tensor(get_slopes(num_attention_heads_alibi) + [0] * (num_attention_heads - num_attention_heads_alibi))
.unsqueeze(-1)
.unsqueeze(-1)
)
if torch.cuda.is_available():
slopes = slopes.to(torch.cuda.current_device())
return slopes
def build_relative_position(max_seq_len, full=True):
"""
full=True: shape (max_seq_len, max_seq_len)
full=False: shape (max_seq_len)
"""
relative_position = torch.arange(1 - max_seq_len, 1)[None, :].mul(-1) # (1, max_seq_len)
if full:
memory_position = torch.arange(1 - max_seq_len, 1)[:, None].mul(-1)
relative_position = torch.abs(memory_position - relative_position) # (max_seq_len, max_seq_len)
if torch.cuda.is_available():
relative_position = relative_position.to(torch.cuda.current_device())
return relative_position
class ALiBiRelativePositionEmbedding(torch.nn.Module):
"""
ALiBi (Attention with Linear Biases) relative position embedding for auto-regressive decoder
and joint encoder (symmetric for forward and backward distance).
Based on https://arxiv.org/abs/2108.12409
"""
def __init__(
self, bidirectional, num_attention_heads, layer_type, num_attention_heads_alibi=None, max_seq_len=512,
):
"""
Args:
bidirectional: Whether to use bidirectional relative position embedding
num_attention_heads: Number of attention heads
layer_type: Layer type. Can be one of [LayerType.encoder or LayerType.decoder]. Will determine the bias construction.
num_attention_heads_alibi: Number of attention heads for which alibi bias will be used
max_seq_len: Maximum sequence length for the precomputed relative positions. Sequences longer than this fall back to computing the ALiBi bias on the fly, which uses more memory.
"""
super().__init__()
if (num_attention_heads_alibi is None) or (num_attention_heads_alibi <= 0):
num_attention_heads_alibi = num_attention_heads
if num_attention_heads_alibi > num_attention_heads:
raise ValueError(
f"num_attention_heads_alibi ({num_attention_heads_alibi}) cannot be larger than num_attention_heads ({num_attention_heads})"
)
self.bidirectional = bidirectional
self.num_attention_heads = num_attention_heads
# LayerType.encoder or LayerType.decoder. Is only needed to determine the group for the all_reduce
self.layer_type = layer_type
# define the size of pre-computed relative position slopes.
# define the number of attention heads for which alibi mask will be pre-computed (the rest are disabled).
self.num_attention_heads_alibi = num_attention_heads_alibi
# Larger sizes will result in more memory usage by computing alibi mask on-the-fly.
self.max_seq_len = max_seq_len
# cache the slopes
self.slopes = build_slopes(num_attention_heads, num_attention_heads_alibi)
# cache the relative position bias. shape (num_attention_heads, max_seq_len, max_seq_len)
# if we use causal attention (not bidirectional), we can use singleton relative position
self.relative_position = (
build_relative_position(max_seq_len, full=bidirectional).unsqueeze(0).expand(num_attention_heads, -1, -1)
)
def forward(self, query_seq_length, key_seq_length):
# used cached relative position if possible
max_seq_len = max(query_seq_length, key_seq_length)
if max_seq_len > self.max_seq_len:
relative_position = (
build_relative_position(max_seq_len, full=self.bidirectional)
.unsqueeze(0)
.expand(self.num_attention_heads, -1, -1)
)
else:
relative_position = self.relative_position
# shape (num_attention_heads, query_seq_length, key_seq_length)
relative_position = relative_position[:, -query_seq_length:, -key_seq_length:]
# if not bidirectional, mask out the future positions
# shape (1, num_heads, query_length, key_length)
return -relative_position.unsqueeze(0) * self.slopes
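# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): building an
# ALiBi bias and adding it to raw attention scores. `layer_type` is only
# stored by the module, so None is passed here; the head count and sequence
# lengths are arbitrary example values.
if __name__ == "__main__":
    heads, q_len, k_len = 8, 32, 32

    alibi = ALiBiRelativePositionEmbedding(
        bidirectional=True, num_attention_heads=heads, layer_type=None, max_seq_len=512,
    )
    bias = alibi(query_seq_length=q_len, key_seq_length=k_len)  # [1, heads, q_len, k_len]

    scores = torch.randn(2, heads, q_len, k_len, device=bias.device)
    scores = scores + bias  # bias broadcasts over the batch dimension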
| NeMo-main | nemo/collections/nlp/modules/common/megatron/position_embedding/alibi_relative_position_embedding.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from einops import rearrange
from nemo.utils.decorators import experimental
def fixed_pos_embedding(x):
seq_len, dim = x.shape
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim) / dim))
sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(0, seq_len, dtype=torch.float), inv_freq).to(x)
return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)
def rotate_every_two(x):
x1 = x[:, :, ::2]
x2 = x[:, :, 1::2]
x = torch.stack((-x2, x1), dim=-1)
return x.flatten(-2)  # in einsum notation: rearrange(x, '... d j -> ... (d j)')
def duplicate_interleave(m):
"""
A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy.
"""
dim0 = m.shape[0]
m = m.view(-1, 1) # flatten the matrix
m = m.repeat(1, 2) # repeat all elements into the 2nd dimension
m = m.view(dim0, -1) # reshape into a matrix, interleaving the copy
return m
def apply_rotary_pos_emb(x, sin, cos, scale=1):
sin, cos = map(lambda t: duplicate_interleave(t * scale), (sin, cos))
# einsum notation for lambda t: repeat(t[offset:x.shape[1]+offset,:], "n d -> () n () (d j)", j=2)
return (x * cos) + (rotate_every_two(x) * sin)
@experimental
class XPOSPositionEmbedding(nn.Module):
def __init__(self, head_dim, scale_base=2048):
super().__init__()
self.head_dim = head_dim
self.scale_base = scale_base
self.register_buffer("scale", (torch.arange(0, head_dim, 2) + 0.4 * head_dim) / (1.4 * head_dim))
def forward(self, x, offset=0, downscale=False):
length, b = x.shape[0], x.shape[1]
x = rearrange(x, 's b np hn -> (b np) s hn')
min_pos = -(length + offset) // 2
max_pos = length + offset + min_pos
scale = self.scale ** torch.arange(min_pos, max_pos, 1).to(self.scale).div(self.scale_base)[:, None]
sin, cos = fixed_pos_embedding(scale)
if scale.shape[0] > length:
scale = scale[-length:]
sin = sin[-length:]
cos = cos[-length:]
if downscale:
scale = 1 / scale
x = apply_rotary_pos_emb(x, sin, cos, scale)
x = rearrange(x, '(b np) s hn -> s b np hn', b=b)
return x
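# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): XPOS rotation of
# query/key tensors laid out as [seq, batch, heads, head_dim], the layout the
# forward pass above rearranges internally. Keys are typically rotated with
# downscale=True so that the relative scaling cancels in the dot product; the
# sizes are arbitrary example values.
if __name__ == "__main__":
    seq_len, batch, heads, head_dim = 16, 2, 8, 64

    xpos = XPOSPositionEmbedding(head_dim=head_dim)
    q = torch.randn(seq_len, batch, heads, head_dim)
    k = torch.randn(seq_len, batch, heads, head_dim)

    q_rot = xpos(q)                   # scaled rotation for queries
    k_rot = xpos(k, downscale=True)   # inverse scaling for keys
    assert q_rot.shape == q.shape and k_rot.shape == k.shape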
| NeMo-main | nemo/collections/nlp/modules/common/megatron/position_embedding/xpos_position_embedding.py |
# coding=utf-8
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.modules.common.megatron.position_embedding.alibi_relative_position_embedding import (
ALiBiRelativePositionEmbedding,
)
from nemo.collections.nlp.modules.common.megatron.position_embedding.kerple_relative_position_embedding import (
KERPLERelativePositionEmbedding,
)
from nemo.collections.nlp.modules.common.megatron.position_embedding.rotary_position_embedding import RotaryEmbedding
from nemo.collections.nlp.modules.common.megatron.position_embedding.sandwich_relative_position_embedding import (
SandwichRelativePositionEmbedding,
)
from nemo.collections.nlp.modules.common.megatron.position_embedding.t5_relative_position_embedding import (
T5RelativePositionEmbedding,
)
from nemo.collections.nlp.modules.common.megatron.position_embedding.xpos_position_embedding import (
XPOSPositionEmbedding,
)
| NeMo-main | nemo/collections/nlp/modules/common/megatron/position_embedding/__init__.py |
# coding=utf-8
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from nemo.collections.nlp.modules.common.megatron.position_embedding.alibi_relative_position_embedding import (
build_relative_position,
build_slopes,
)
__all__ = ['KERPLERelativePositionEmbedding']
class KERPLERelativePositionEmbedding(torch.nn.Module):
"""
KERPLE (Kernelized Relative Positional Embedding) for auto-regressive decoder
and joint encoder (symmetric for forward and backward distance).
Based on https://arxiv.org/abs/2205.09921
"""
def __init__(
self, bidirectional, num_attention_heads, layer_type, num_attention_heads_kerple=None, max_seq_len=512,
):
"""
Args:
bidirectional: Whether to use bidirectional relative position embedding
num_attention_heads: Number of attention heads
layer_type: Layer type. Can be one of [LayerType.encoder or LayerType.decoder]. Will determine the bias construction.
num_attention_heads_kerple: Number of attention heads for which kerple bias will be used
max_seq_len: Maximum sequence length for the precomputed relative positions. Sequences longer than this fall back to computing the KERPLE bias on the fly, which uses more memory.
"""
super().__init__()
if (num_attention_heads_kerple is None) or (num_attention_heads_kerple <= 0):
num_attention_heads_kerple = num_attention_heads
if num_attention_heads_kerple > num_attention_heads:
raise ValueError(
f"num_attention_heads_kerple ({num_attention_heads_kerple}) cannot be larger than num_attention_heads ({num_attention_heads})"
)
self.bidirectional = bidirectional
self.num_attention_heads = num_attention_heads
# LayerType.encoder or LayerType.decoder. Is only needed to determine the group for the all_reduce
self.layer_type = layer_type
# define the size of pre-computed relative position slopes.
# define the number of attention heads for which kerple mask will be pre-computed (the rest are disabled).
self.num_attention_heads_kerple = num_attention_heads_kerple
# Larger sizes will result in more memory usage by computing kerple mask on-the-fly.
self.max_seq_len = max_seq_len
# initialize the slopes
self.kerple_b = torch.nn.Parameter(build_slopes(num_attention_heads, num_attention_heads_kerple))
self.kerple_a = torch.nn.Parameter(torch.ones_like(self.kerple_b))
self.kerple_p = torch.nn.Parameter(torch.ones_like(self.kerple_b))
# cache the relative position bias. shape (num_attention_heads, max_seq_len, max_seq_len)
        # if we use causal attention (not bidirectional), we can use singleton relative position
self.relative_position = (
build_relative_position(max_seq_len, full=True).unsqueeze(0).expand(num_attention_heads, -1, -1)
)
def forward(self, query_seq_length, key_seq_length):
        # use the cached relative position if possible
max_seq_len = max(query_seq_length, key_seq_length)
if max_seq_len > self.max_seq_len:
relative_position = (
build_relative_position(max_seq_len, full=True).unsqueeze(0).expand(self.num_attention_heads, -1, -1)
)
else:
relative_position = self.relative_position
# shape (num_attention_heads, query_seq_length, key_seq_length)
relative_position = relative_position[:, -query_seq_length:, -key_seq_length:]
# if not bidirectional, mask out the future positions
if not self.bidirectional:
relative_position = torch.tril(relative_position)
# shape (1, num_heads, query_length, key_length)
return -self.kerple_b * torch.log(1 + self.kerple_a * relative_position.unsqueeze(0).pow(self.kerple_p))
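# Illustrative usage sketch (not part of the NeMo API; shapes and values are placeholders).
# The returned bias broadcasts against attention scores of shape (batch, heads, query, key).
def _kerple_usage_example():
    kerple_bias = KERPLERelativePositionEmbedding(
        bidirectional=False,
        num_attention_heads=8,
        layer_type=None,  # in NeMo this is a LayerType enum; it is unused in this standalone sketch
        max_seq_len=128,
    )
    bias = kerple_bias(query_seq_length=32, key_seq_length=32)  # shape (1, 8, 32, 32)
    scores = torch.zeros(2, 8, 32, 32, device=bias.device)  # placeholder attention scores
    return scores + bias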
| NeMo-main | nemo/collections/nlp/modules/common/megatron/position_embedding/kerple_relative_position_embedding.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
class T5RelativePositionEmbedding(torch.nn.Module):
"""Relative Position Embedding implementation from the T5 paper : https://arxiv.org/abs/1910.10683"""
def __init__(
self,
init_method,
bidirectional,
num_attention_heads,
layer_type,
relative_position_num_buckets=32,
relative_position_max_distance=128,
):
super(T5RelativePositionEmbedding, self).__init__()
self.relative_position_num_buckets = relative_position_num_buckets
self.relative_position_max_distance = relative_position_max_distance
self.self_attention_relative_position_bucket = None
self.inter_attention_relative_position_bucket = None
self.self_attention_relative_position_bias = None
self.inter_attention_relative_position_bias = None
self.bidirectional = bidirectional
# LayerType.encoder or LayerType.decoder. Is only needed to determine the group for the all_reduce
self.layer_type = layer_type
# Relative position Embedding
# Relative Position embedding (all attention layers).
self.relative_position_embedding = torch.nn.Embedding(self.relative_position_num_buckets, num_attention_heads)
self._relative_position_embedding_key = 'relative_position_embedding'
init_method(self.relative_position_embedding.weight)
def _relative_position_bucket(self, relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from HuggingFace T5 Model:
https://github.com/huggingface/transformers/blob/b5e2b183af5e40e33a4dc7659e697d137259d56e
/src/transformers/models/t5/modeling_t5.py#L354
Translate relative position to a bucket number for relative attention. The relative position
is defined as memory_position - query_position, i.e. the distance in tokens from the attending
position to the attended-to position. If bidirectional=False, then positive relative positions
are invalid. We use smaller buckets for small absolute relative_position and larger buckets
for larger absolute relative_positions. All relative positions >=max_distance map to the same
bucket. All relative positions <=-max_distance map to the same bucket. This should allow for
more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position,
containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        relative_position_if_large = torch.min(
            relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
        )
        relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
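    # Worked example (bidirectional=True, num_buckets=32, max_distance=128): after halving,
    # 16 buckets serve each direction. A relative position of +1 is "small" and maps to
    # bucket 16 + 1 = 17, while -1 maps to bucket 1; distances at or beyond max_distance
    # saturate at buckets 31 (positive) and 15 (negative).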
def _compute_relative_position_bucket(self, query_length, key_length):
"""
Adapted from HuggingFace T5 Model
https://github.com/huggingface/transformers/blob/b5e2b183af5e40e33a4dc7659e697d137259d56e/
src/transformers/models/t5/modeling_t5.py#L401
"""
"""Compute binned relative position bias"""
device = self.relative_position_embedding.weight.device
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
relative_position = memory_position - context_position # shape (query_length, key_length)
relative_position_bucket_tensor = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=self.bidirectional,
num_buckets=self.relative_position_num_buckets,
max_distance=self.relative_position_max_distance,
)
return relative_position_bucket_tensor
def _compute_relative_position_bias(self, relative_position_bucket):
# shape (query_length, key_length, num_heads)
values = self.relative_position_embedding(relative_position_bucket)
# shape (1, num_heads, query_length, key_length)
values = values.permute([2, 0, 1]).unsqueeze(0)
return values
def forward(self, query_seq_length, key_seq_length):
self_attention_relative_position_bucket = self._compute_relative_position_bucket(
query_seq_length, key_seq_length
)
return self._compute_relative_position_bias(self_attention_relative_position_bucket)
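# Illustrative usage sketch (not part of the NeMo API; values are placeholders).
def _t5_relative_position_bias_example():
    import functools
    rpe = T5RelativePositionEmbedding(
        init_method=functools.partial(torch.nn.init.normal_, mean=0.0, std=0.02),
        bidirectional=True,
        num_attention_heads=8,
        layer_type=None,  # in NeMo this is a LayerType enum; it is unused in this standalone sketch
        relative_position_num_buckets=32,
        relative_position_max_distance=128,
    )
    # bias of shape (1, num_heads, query_len, key_len), added to raw attention scores before softmax
    return rpe(query_seq_length=16, key_seq_length=16)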
| NeMo-main | nemo/collections/nlp/modules/common/megatron/position_embedding/t5_relative_position_embedding.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.collections.nlp.modules.common.megatron.position_embedding.alibi_relative_position_embedding import (
build_relative_position,
)
from nemo.utils.decorators import experimental
__all__ = ['SandwichRelativePositionEmbedding']
@experimental
class SandwichRelativePositionEmbedding(torch.nn.Module):
"""
Dissecting Transformer Length Extrapolation via the Lens of Receptive Field Analysis
Based on https://arxiv.org/abs/2212.10356
"""
def __init__(
self, bidirectional, num_attention_heads, layer_type, hidden_size, max_seq_len=512,
):
"""
Args:
num_attention_heads: Number of attention heads
hidden_size: Hidden size per attention head
"""
super().__init__()
self.bidirectional = bidirectional
self.layer_type = layer_type
self.num_attention_heads = num_attention_heads
self.hidden_size = hidden_size
self.max_seq_len = max_seq_len
self.relative_position = build_relative_position(max_seq_len, full=True)
def forward(self, query_seq_length, key_seq_length):
        # use the cached relative position if possible
max_seq_len = max(query_seq_length, key_seq_length)
if max_seq_len > self.max_seq_len:
relative_position = build_relative_position(max_seq_len, full=True)
else:
relative_position = self.relative_position
# shape (query_seq_length, key_seq_length)
relative_position = relative_position[-query_seq_length:, -key_seq_length:]
# if not bidirectional, mask out the future positions
if not self.bidirectional:
relative_position = torch.tril(relative_position)
inv_freq = 1.0 / (
10000
** (2 * torch.arange(1, self.hidden_size / 2 + 1, device=relative_position.device) / self.hidden_size)
)
_bias = torch.sum((relative_position[:, :, None].repeat(1, 1, len(inv_freq)) * inv_freq).cos(), axis=2)
bias = _bias.repeat(self.num_attention_heads, 1, 1)
_bias_scales = torch.arange(1, self.num_attention_heads + 1, 1, device=relative_position.device)
bias_scales = _bias_scales[:, None, None]
scaled_bias = (bias - self.hidden_size / 2) / (bias_scales * 8 / self.num_attention_heads).unsqueeze(0)
return scaled_bias
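# Illustrative usage sketch (not part of the NeMo API; values are placeholders). The class is
# marked @experimental, so its behavior may change.
def _sandwich_bias_example():
    rpe = SandwichRelativePositionEmbedding(
        bidirectional=False,
        num_attention_heads=8,
        layer_type=None,  # in NeMo this is a LayerType enum; it is unused in this standalone sketch
        hidden_size=64,  # per-head hidden size
        max_seq_len=128,
    )
    # bias that broadcasts against attention scores of shape (batch, heads, query_len, key_len)
    return rpe(query_seq_length=32, key_seq_length=32)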
| NeMo-main | nemo/collections/nlp/modules/common/megatron/position_embedding/sandwich_relative_position_embedding.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults, init_method_normal
try:
from megatron.core import ModelParallelConfig, tensor_parallel
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
# fake missing classes with None attributes
ModelParallelConfig = ApexGuardDefaults()
tensor_parallel = ApexGuardDefaults()
HAVE_MEGATRON_CORE = False
__all__ = ["MegatronBaseHiddenTransform", "MegatronGaussianHiddenTransform"]
class MegatronBaseHiddenTransform(torch.nn.Module):
"""Base class to apply hidden state transformations"""
def __init__(self, name: str = "", model_parallel_cfg: ModelParallelConfig = None):
super().__init__()
self.name = name
self.model_parallel_cfg = model_parallel_cfg
def __str__(self):
return super().__str__() + f"(name={self.name})"
@property
def input_names(self):
"""
Provide here all required inputs
"""
return []
@property
def output_names(self):
"""
Provide here all generated outputs
"""
return []
def _validate_inputs(self, inputs):
"""Validate inputs"""
# validate inputs
if not set(self.input_names).issubset(set(inputs.keys())):
raise ValueError(f"Inputs should contain {self.input_names}, but got {inputs.keys()}")
def _transform(self, inputs, batch_data=None):
"""
Implement your own transformations.
We expect here shapes to be [S x B x H] for Sequence, Batch, Hidden sizes (due to tensor parallel support).
"""
# by default we pass inputs.
outputs = inputs.copy()
return outputs
def transform(self, inputs, batch_data=None):
"""Apply a transformations on the inputs (hiddens is always assumed)"""
# validate inputs
self._validate_inputs(inputs)
outputs = self._transform(inputs, batch_data=batch_data)
return outputs
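# Minimal subclassing sketch (illustrative only, not part of NeMo): a custom transform that exposes
# a scaled copy of the hiddens under a new output name. It could then be registered via
# register_hidden_transform in megatron_hiddens.py and chained by MegatronHiddensModule.
class _ScaledHiddenTransformExample(MegatronBaseHiddenTransform):
    """Example transform: computes "hiddens_scaled" from "hiddens"."""
    @property
    def input_names(self):
        return ["hiddens"]
    @property
    def output_names(self):
        return ["hiddens_scaled"]
    def _transform(self, inputs, batch_data=None):
        # inputs["hiddens"] is expected to be [S x B x H]; only the new outputs need to be returned,
        # since the caller merges them into the running outputs dictionary
        return {"hiddens_scaled": 0.5 * inputs["hiddens"]}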
class MegatronGaussianHiddenTransform(MegatronBaseHiddenTransform):
"""
    Constructs a diagonal Gaussian distribution from the hidden states and samples from it using reparametrization.
"""
def __init__(
self,
hidden_size,
ffn_hidden_size=None,
min_logvar=-6,
init_method_std=0.02,
name="cond_gaussian",
model_parallel_cfg: ModelParallelConfig = None,
):
super().__init__(name=name, model_parallel_cfg=model_parallel_cfg)
# limit smaller allowed variance (for numerical stability)
self.min_logvar = min_logvar
self.hidden_size = hidden_size
if ffn_hidden_size is None:
ffn_hidden_size = hidden_size * 2
self.ffn_hidden_size = ffn_hidden_size
# project hiddens to mean and log variance (support tensor parallelism)
self.hiddens_to_mean_logvar = tensor_parallel.ColumnParallelLinear(
hidden_size,
ffn_hidden_size, # NOTE: When using *glu, divide ffn dim by 2/3 to keep overall params the same.
gather_output=True,
init_method=init_method_normal(init_method_std),
skip_bias_add=False,
bias=True,
config=self.model_parallel_cfg,
)
@property
def input_names(self):
"""
Provide here all required inputs
"""
return ["hiddens", "hiddens_mask"]
@property
def output_names(self):
"""
Provide here all generated outputs
"""
return ["z_mean", "z_logvar", "z", "z_log_prob"]
def _transform(self, inputs, batch_data=None):
"""
We expect here shapes to be [S x B x H] for Sequence, Batch, Hidden sizes (due to tensor parallel support).
inputs:
hiddens: accepts a tensor of shape [S x B x H]
outputs:
z: a sample from Gaussian a tensor of shape [S x B x H]
z_mean: mean of Gaussian a tensor of shape [S x B x H]
z_logvar: log variance of Gaussian a tensor of shape [S x B x H]
z_log_prob: log probability of z over posterior log q(z|x) a tensor of shape [S x B x H]
"""
hiddens = inputs["hiddens"]
# compute distribution's parameters (or use cached ones)
if "z_mean" in inputs and "z_logvar" in inputs:
z_mean = inputs["z_mean"]
z_logvar = inputs["z_logvar"]
else:
# ColumnLinear returns output and bias, we ignore bias here (already added to hiddens)
z_mean, z_logvar = self.hiddens_to_mean_logvar(hiddens)[0].chunk(2, dim=-1)
# clamp logvar
z_logvar = z_logvar.clamp(min=self.min_logvar)
# sample z with reparametrization (or use cached one)
if "z" in inputs:
z = inputs["z"]
z_log_prob = inputs.get("z_log_prob", None)
else:
e = torch.randn_like(hiddens)
z = (z_logvar * 0.5).exp() * e + z_mean
z_log_prob = None
if z_log_prob is None:
# compute log probability of z under a diagonal Gaussian distribution
z_log_prob = -0.5 * (math.log(2 * math.pi) + z_logvar + (z - z_mean).pow(2) / z_logvar.exp())
# sum over the last dimension (hidden_size)
z_log_prob = z_log_prob.sum(dim=-1)
return {
"z": z, # [S x B x H]
"z_mean": z_mean, # [S x B x H]
"z_logvar": z_logvar, # [S x B x H]
"z_log_prob": z_log_prob, # [S x B]
}
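# Usage note (illustrative): this transform is normally constructed from a config through
# get_hiddens_module (see megatron_hiddens.py) rather than instantiated directly, because
# ColumnParallelLinear requires Megatron-core model parallelism to be initialized. Given inputs
# {"hiddens": [S x B x H], "hiddens_mask": [B x S]}, transform() returns "z", "z_mean",
# "z_logvar" of shape [S x B x H] and "z_log_prob" of shape [S x B]; MegatronHiddensModule
# merges these outputs into the running outputs dictionary.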
| NeMo-main | nemo/collections/nlp/modules/common/megatron/hiddens/megatron_hidden_transform.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.modules.common.megatron.hiddens.megatron_hidden_loss import *
from nemo.collections.nlp.modules.common.megatron.hiddens.megatron_hidden_transform import *
from nemo.collections.nlp.modules.common.megatron.hiddens.megatron_hiddens import *
| NeMo-main | nemo/collections/nlp/modules/common/megatron/hiddens/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
__all__ = ["MegatronBaseHiddenLoss", "MegatronAMIMHiddenLoss", "MegatronVAEHiddenLoss"]
class MegatronBaseHiddenLoss(torch.nn.Module):
"""
Base class to calculate hidden state loss.
Returned dict includes a loss value and additional outputs.
"""
def __init__(self, loss_weight=1.0, name=""):
super().__init__()
self.name = name
self.loss_weight = float(loss_weight)
def __str__(self):
return super().__str__() + f"(name={self.name})"
def _validate_inputs(self, inputs):
"""Validate inputs"""
# validate inputs
if not set(self.input_names).issubset(set(inputs.keys())):
raise ValueError(f"Inputs should contain {self.input_names}, but got {inputs.keys()}")
@property
def input_names(self):
"""Returns and caches input names"""
# we always expect hiddens_mask to be used to mask out loss of padded elements
return self._input_names() + ["hiddens_mask"]
def _input_names(self):
"""Add here all required inputs"""
return []
def _loss(self, inputs, batch_data=None):
"""
We expect input shapes to be [S x B x H] for Sequence, Batch, Hidden sizes (due to tensor parallel support).
We return a dictionary with dimensions [B x S x H], [B x S], [B], or [].
Implement your own loss calculations. Must return "loss" key.
loss shape - [B x S] for Batch, Sequence sizes
batch_data - a dictionary of additional data that can be used to calculate loss
Returns:
dict: a dictionary with loss and additional outputs (must include "loss" key)
example: {"loss": 0.0}
"""
raise NotImplementedError("Please implement loss calculations in child class")
def loss(self, inputs, batch_data=None):
"""A wrapper around custom _loss that adds a weighted loss and name to the output dict"""
self._validate_inputs(inputs)
loss_dict = self._loss(inputs, batch_data=batch_data)
if "loss" not in loss_dict:
raise KeyError("Loss dict must contain 'loss' key")
# average loss over active steps only. loss [B x S]
loss = loss_dict["loss"]
# hiddens_mask has shape of [B x S]
hiddens_mask = inputs["hiddens_mask"].to(loss)
loss = loss * hiddens_mask
# sequence level loss [B x S] -> batch level loss [B]
loss = loss.sum(dim=1) / hiddens_mask.sum(dim=1).clamp(min=1.0)
# compute batch level weighted loss (scalar)
weighted_loss = loss.sum() * self.loss_weight
# store updated losses
loss_dict["loss"] = loss
loss_dict["weighted_loss"] = weighted_loss
loss_dict["weight_loss"] = torch.tensor(self.loss_weight).to(weighted_loss)
return loss_dict
class MegatronAMIMHiddenLoss(MegatronBaseHiddenLoss):
"""
Based on <https://arxiv.org/abs/2003.02645>
Implements A-MIM loss with a unit Normal anchor.
A-MIM - asymmetric MIM (without sampling)
"""
def __init__(self, loss_weight=1.0, hidden_aggregation_method="sum", name="mim"):
super().__init__(
name=name, loss_weight=loss_weight,
)
# allows to determine how to aggregate hidden loss over hidden dimension
self.hidden_aggregation_method = hidden_aggregation_method
def _input_names(self):
"""Add here all required inputs"""
return ["z", "z_log_prob"]
def _loss(self, inputs, batch_data=None):
"""
We expect input shapes to be [S x B x H] for Sequence, Batch, Hidden sizes (due to tensor parallel support).
We return a dictionary with dimensions [B x S x H], [B x S], [B], or [].
Implement your own loss calculations. Must return "loss" key.
loss shape - [B x S] for Batch, Sequence sizes
batch_data - a dictionary of additional data that can be used to calculate loss
"""
z = inputs["z"]
# get posterior
log_prob_q_z_given_x = inputs["z_log_prob"]
        # compute log prob of the anchor (a unit Normal distribution)
log_prob_P_z = -0.5 * (math.log(2 * math.pi) + z.pow(2))
# aggregate over hidden dimension, default is sum
log_prob_P_z = getattr(log_prob_P_z, self.hidden_aggregation_method)(dim=-1)
# A-MIM loss = log_p_x_given_z - 0.5 * (log_prob_P_z + log_prob_q_z_given_x)
# here we return only the hidden loss part
loss = -0.5 * (log_prob_P_z + log_prob_q_z_given_x)
# return losses shaped [B x S]
return {
"loss": loss.transpose(0, 1),
"log_prob_P_z": log_prob_P_z.transpose(0, 1),
"log_prob_q_z_given_x": log_prob_q_z_given_x.transpose(0, 1),
}
class MegatronVAEHiddenLoss(MegatronBaseHiddenLoss):
"""
Based on <https://arxiv.org/abs/1312.6114>
Implements VAE loss with a unit Normal anchor.
"""
def __init__(self, loss_weight=1.0, min_kl_value=None, name="vae"):
super().__init__(
name=name, loss_weight=loss_weight,
)
# minimum value for KL divergence
if min_kl_value is None:
self.min_kl_value = min_kl_value
else:
self.min_kl_value = float(min_kl_value)
def _input_names(self):
"""Add here all required inputs"""
return ["z", "z_log_prob"]
def _loss(self, inputs, batch_data=None):
"""
We expect input shapes to be [S x B x H] for Sequence, Batch, Hidden sizes (due to tensor parallel support).
We return a dictionary with dimensions [B x S x H], [B x S], [B], or [].
Implement your own loss calculations. Must return "loss" key.
loss shape - [B x S] for Batch, Sequence sizes
batch_data - a dictionary of additional data that can be used to calculate loss
"""
z = inputs["z"]
# get posterior
log_prob_q_z_given_x = inputs["z_log_prob"]
        # compute log prob of the anchor (a unit Normal distribution)
log_prob_p_z = -0.5 * (math.log(2 * math.pi) + z.pow(2)).sum(dim=-1)
# VAE loss = log_p_x_given_z - KL(q(z|x) || p(z))
kl_div = log_prob_q_z_given_x - log_prob_p_z
# here we return only the hidden loss part
loss = -kl_div
# return losses shaped [B x S]
return {
"loss": loss.transpose(0, 1),
"kl_div": kl_div.transpose(0, 1),
"log_prob_p_z": log_prob_p_z.transpose(0, 1),
"log_prob_q_z_given_x": log_prob_q_z_given_x.transpose(0, 1),
}
| NeMo-main | nemo/collections/nlp/modules/common/megatron/hiddens/megatron_hidden_loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In order to register external hidden transforms and losses please use the following methods:
* register_hidden_loss(cls_name: str, class_path: str)
* register_hidden_transform(cls_name: str, class_path: str)
See example config in: examples/nlp/language_modeling/conf/megatron_hiddens_base_config.yaml
"""
import functools
import itertools
from typing import List
import torch
from omegaconf.dictconfig import DictConfig
from omegaconf.omegaconf import OmegaConf
from nemo.collections.nlp.modules.common.megatron.hiddens.megatron_hidden_loss import MegatronBaseHiddenLoss
from nemo.collections.nlp.modules.common.megatron.hiddens.megatron_hidden_transform import MegatronBaseHiddenTransform
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults
from nemo.utils import logging
from nemo.utils.model_utils import import_class_by_path
try:
from megatron.core import ModelParallelConfig
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
# fake missing classes with None attributes
ModelParallelConfig = ApexGuardDefaults()
HAVE_MEGATRON_CORE = False
__all__ = [
"MegatronHiddensModule",
"get_registered_hiddens",
"register_hidden_loss",
"register_hidden_transform",
"get_hiddens_module",
]
# a registry of all hidden losses (maps name to class path)
_LOSS_CLASS_REGISTRY = {
"a_mim": "nemo.collections.nlp.modules.common.megatron.hiddens.megatron_hidden_loss.MegatronAMIMHiddenLoss",
"vae": "nemo.collections.nlp.modules.common.megatron.hiddens.megatron_hidden_loss.MegatronVAEHiddenLoss",
}
# a registry of all hidden transforms (maps name to class path)
_TRANSFORM_CLASS_REGISTRY = {
"cond_gaussian": "nemo.collections.nlp.modules.common.megatron.hiddens.megatron_hidden_transform.MegatronGaussianHiddenTransform",
}
def get_registered_hiddens():
"""
Return:
A dictionary with all registered hidden transforms and losses.
Example:
{
"loss": ["a_mim", "vae"],
"transform": ["cond_gaussian"],
}
"""
return {
"loss": list(_LOSS_CLASS_REGISTRY.keys()),
"transform": list(_TRANSFORM_CLASS_REGISTRY.keys()),
}
def register_hidden_loss(cls_name: str, class_path: str):
"""
Register a hidden loss.
Args:
cls_name: name of the class
class_path: path to the class (e.g., "nemo.collections.nlp.modules.common.megatron.hiddens.megatron_hidden_transform.MegatronGaussianHiddenTransform")
"""
if cls_name in _LOSS_CLASS_REGISTRY:
raise ValueError(f"Cannot register duplicate hidden loss ({cls_name})")
_LOSS_CLASS_REGISTRY[cls_name] = class_path
logging.info(f"Registered hidden loss {cls_name} at {class_path}")
def register_hidden_transform(cls_name: str, class_path: str):
"""
Register a hidden transform.
Args:
cls_name: name of the class
class_path: path to the class (e.g., "nemo.collections.nlp.modules.common.megatron.hiddens.megatron_hidden_transform.MegatronGaussianHiddenTransform")
"""
if cls_name in _TRANSFORM_CLASS_REGISTRY:
raise ValueError(f"Cannot register duplicate hidden transform ({cls_name})")
_TRANSFORM_CLASS_REGISTRY[cls_name] = class_path
logging.info(f"Registered hidden transform {cls_name} at {class_path}")
def get_hiddens_module(cfg=None, model_parallel_cfg: ModelParallelConfig = None):
"""Build a MegatronHiddensModule from a configuration cfg"""
# Build a hiddens module if config is provided.
if cfg is None:
return None
logging.info(f"NOTE: Adding hiddens transforms and losses")
# build all hidden transforms. We support a list or a dictionary of transforms (list enforces order)
transform_cfg = cfg.get("transform", [])
if isinstance(transform_cfg, (DictConfig, dict)):
transform_cfg = [transform_cfg]
hidden_transforms = []
# here we expect transform_cfg to be a list of dictionaries
for cur_list_cfg in transform_cfg:
for name, cur_cfg in cur_list_cfg.items():
cls_kwargs = OmegaConf.to_container(cur_cfg)
cls_kwargs["model_parallel_cfg"] = model_parallel_cfg
if not "cls_name" in cls_kwargs:
raise KeyError(f"Missing 'cls_name' in hidden transform {name}")
cls_name = cls_kwargs.pop("cls_name")
# add name based on dictionary if not given in conf
if "name" not in cls_kwargs:
cls_kwargs["name"] = name
if cls_name not in _TRANSFORM_CLASS_REGISTRY:
raise KeyError(f"Unknown hidden transform {cls_name}, available: {_TRANSFORM_CLASS_REGISTRY.keys()}")
try:
cur_transform = import_class_by_path(_TRANSFORM_CLASS_REGISTRY[cls_name])(**cls_kwargs)
except Exception as e:
logging.error(f"Failed to build hidden transform {name} with cfg={cur_cfg}")
raise e
hidden_transforms.append(cur_transform)
logging.info(f"Added transform {name} with cfg={cur_cfg}")
# build all hidden losses
loss_cfg = cfg.get("loss", [])
if isinstance(loss_cfg, (DictConfig, dict)):
loss_cfg = [loss_cfg]
hidden_loss_transforms = []
# here we expect loss_cfg to be a list of dictionaries
for cur_list_cfg in loss_cfg:
for name, cur_cfg in cur_list_cfg.items():
cls_kwargs = OmegaConf.to_container(cur_cfg)
if not "cls_name" in cls_kwargs:
raise KeyError(f"Missing 'cls_name' in hidden loss {name}")
cls_name = cls_kwargs.pop("cls_name")
# add name based on dictionary if not given in conf
if "name" not in cls_kwargs:
cls_kwargs["name"] = name
if cls_name not in _LOSS_CLASS_REGISTRY:
raise KeyError(f"Unknown hidden loss {cls_name}, available: {_LOSS_CLASS_REGISTRY.keys()}")
try:
cur_loss = import_class_by_path(_LOSS_CLASS_REGISTRY[cls_name])(**cls_kwargs)
except Exception as e:
logging.error(f"Failed to build hidden loss {name} with cfg={cur_cfg}")
raise e
hidden_loss_transforms.append(cur_loss)
logging.info(f"Added loss {name} with cfg={cur_cfg}")
enc_output_name = cfg.get("enc_output_name", "hiddens")
return MegatronHiddensModule(
hidden_transforms=hidden_transforms,
hidden_loss_transforms=hidden_loss_transforms,
enc_output_name=enc_output_name,
)
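# Illustrative config sketch (values are placeholders): get_hiddens_module expects the "hiddens"
# sub-config of the model, built from YAML similar to
# examples/nlp/language_modeling/conf/megatron_hiddens_base_config.yaml:
#
#   transform:
#     q_z_given_x:
#       cls_name: cond_gaussian
#       hidden_size: 512
#   loss:
#     mim:
#       cls_name: a_mim
#       loss_weight: 1.0
#   enc_output_name: z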
class MegatronHiddensModule(torch.nn.Module):
"""
This class jointly handles the hidden transforms and hidden loss transforms.
    It helps in validating and applying the transforms.
"""
def __init__(
self,
        hidden_transforms: List[MegatronBaseHiddenTransform] = [],
        hidden_loss_transforms: List[MegatronBaseHiddenLoss] = [],
enc_output_name: str = "hiddens", # name (key) of the encoder output
tokens_loss_weight: float = 1.0, # weight of the tokens loss
loss_prefix: str = "hiddens_", # if not None or "", add this prefix to all loss names
):
super().__init__()
self.hidden_transforms = hidden_transforms
self.hidden_loss_transforms = hidden_loss_transforms
self.enc_output_name = enc_output_name
self.tokens_loss_weight = tokens_loss_weight
self.loss_prefix = loss_prefix
# register all hidden / loss transforms as submodules to support learned parameters
if not all([isinstance(ht, MegatronBaseHiddenLoss) for ht in self.hidden_loss_transforms]):
raise TypeError(
f"hidden_loss_transforms should be a list of MegatronBaseHiddenLoss, but got {hidden_loss_transforms}"
)
self.hidden_loss_transforms = torch.nn.ModuleList(self.hidden_loss_transforms)
if not all([isinstance(ht, MegatronBaseHiddenTransform) for ht in self.hidden_transforms]):
raise TypeError(
f"hidden_transforms should be a list of MegatronBaseHiddenTransform, but got {hidden_transforms}"
)
self.hidden_transforms = torch.nn.ModuleList(self.hidden_transforms)
# validate the inputs and outputs of all hidden transforms (make sure there are no duplicate output names)
duplicate_names = {}
# initialize with available outputs from hidden transforms with hiddens and mask as default
hidden_outputs = set(["hiddens", "hiddens_mask", "enc_output"])
for ht in self.hidden_transforms:
# validate that all required inputs are available by order of hidden transforms
cur_input_names = set(ht.input_names)
if not cur_input_names.issubset(hidden_outputs):
raise ValueError(
f"Hidden transform {ht.name} requires inputs {cur_input_names - hidden_outputs} that are not available"
)
# collect all duplicate output names
cur_hidden_outputs = set(ht.output_names)
if not cur_hidden_outputs.isdisjoint(hidden_outputs):
duplicate_names[ht.name] = list(cur_hidden_outputs.intersection(hidden_outputs))
hidden_outputs.update(cur_hidden_outputs)
# fail here reporting all duplicate output names
if duplicate_names:
raise ValueError(
f"Hidden transforms have duplicate outputs {{name: [duplicate outputs]}} = {duplicate_names}"
)
# validate that all loss transforms are supported by output of hidden transforms ("hiddens" is given by default)
loss_inputs = set(itertools.chain(*[lt.input_names for lt in self.hidden_loss_transforms]))
if not loss_inputs.issubset(hidden_outputs):
loss_inputs_dict = {lt.name: lt.input_names for lt in self.hidden_loss_transforms}
raise ValueError(
f"Loss transforms inputs = {loss_inputs - hidden_outputs} are not supported by hidden transforms with hidden_outputs = {hidden_outputs}, expected inputs per loss = {loss_inputs_dict}"
)
@functools.cached_property
def hidden_outputs(self):
"""Get the hidden outputs from all the hidden transforms"""
all_output_names = [ht.output_names for ht in self.hidden_transforms] + [["hiddens", "hiddens_mask"]]
output_names = set().union(*all_output_names)
return list(output_names)
@functools.cached_property
def loss_inputs(self):
"""Get the loss inputs from all the loss transforms"""
loss_inputs = set().union(*[lt.input_names for lt in self.hidden_loss_transforms])
return list(loss_inputs)
def apply_hidden_transforms(self, inputs, batch_data=None):
"""
Apply hidden transforms
Args:
inputs: a dictionary of inputs, with "hiddens" as the default key for hidden states
batch_data: a dictionary of batch data (e.g. "input_features"), optional
Returns:
            outputs: a dictionary of outputs, collecting the results of all hidden transforms (including the final "enc_output" key)
"""
outputs = inputs.copy()
for hidden_transform in self.hidden_transforms:
# make sure to collect all outputs from hidden transforms
outputs.update(hidden_transform.transform(outputs, batch_data=batch_data))
# update final encoder output
outputs["enc_output"] = outputs[self.enc_output_name]
return outputs
def apply_loss_transforms(self, outputs, batch_data=None):
"""
Apply loss transforms
Args:
outputs: a dictionary of outputs (after hidden transforms)
batch_data: a dictionary of batch data (e.g. "target_ids"), optional
Returns:
loss_dict: a dictionary of all losses,
{
loss: joint loss (float),
<name>_*: loss values from loss transforms, could be loss, or loss elements
}
"""
loss_dict = {}
joint_loss = 0.0
for i, loss_transform in enumerate(self.hidden_loss_transforms):
cur_loss_dict = loss_transform.loss(outputs, batch_data=batch_data)
joint_loss = joint_loss + cur_loss_dict["weighted_loss"]
cur_loss_dict.pop("weighted_loss")
# add name to loss values
if loss_transform.name:
cur_loss_dict = {f"{loss_transform.name}_{k}": v for k, v in cur_loss_dict.items()}
# check if cur_loss keys are unique - we do not allow to override keys
dup_keys = set(cur_loss_dict.keys()).intersection(set(loss_dict.keys()))
if len(dup_keys):
raise ValueError(
f"Loss transform ({i}) {loss_transform} is trying to override the following loss keys {list(dup_keys)}"
)
# update loss dict
loss_dict.update(cur_loss_dict)
# joint weighted loss (float)
loss_dict["loss"] = joint_loss
# add prefix to all loss keys (default to 'hiddens_')
if self.loss_prefix:
loss_dict = {f"{self.loss_prefix}{k}": v for k, v in loss_dict.items()}
# add tokens loss weight (to be used by caller, or be ignored)
loss_dict["tokens_loss_weight"] = torch.tensor(self.tokens_loss_weight).to(joint_loss)
return loss_dict
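# Illustrative call-flow sketch (names are placeholders for tensors produced by the host model):
#   hiddens_module = get_hiddens_module(cfg.hiddens, model_parallel_cfg=model_parallel_cfg)
#   outputs = hiddens_module.apply_hidden_transforms(
#       {"hiddens": enc_hiddens, "hiddens_mask": enc_mask}, batch_data=batch
#   )
#   loss_dict = hiddens_module.apply_loss_transforms(outputs, batch_data=batch)
# With the default loss_prefix, the joint hidden loss is loss_dict["hiddens_loss"], and the caller
# is expected to scale its token loss by loss_dict["tokens_loss_weight"].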
| NeMo-main | nemo/collections/nlp/modules/common/megatron/hiddens/megatron_hiddens.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | nemo/collections/nlp/modules/dialogue_state_tracking/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
from torch import nn as nn
from nemo.collections.nlp.modules.common.classifier import Classifier
from nemo.core.classes import typecheck
from nemo.core.neural_types import ChannelType, LogitsType, NeuralType
__all__ = ['SGDEncoder']
ACT2FN = {"tanh": nn.functional.tanh, "relu": nn.functional.relu}
class SGDEncoder(Classifier):
"""
Neural module which encodes BERT hidden states
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""
Returns definitions of module output ports.
"""
return {
"logits": NeuralType(('B', 'T'), LogitsType()),
'hidden_states': NeuralType(('B', 'T', 'C'), ChannelType()),
}
def __init__(
self, hidden_size: int, activation: str = 'tanh', dropout: float = 0.0, use_transformer_init: bool = True,
) -> None:
"""
Args:
hidden_size: hidden size of the BERT model
activation: activation function applied
dropout: dropout ratio
use_transformer_init: use transformer initialization
"""
super().__init__(hidden_size=hidden_size, dropout=dropout)
self.fc = nn.Linear(hidden_size, hidden_size)
if activation not in ACT2FN:
            raise ValueError(f"{activation} is not a supported activation. Supported activations: {list(ACT2FN.keys())}")
self.activation = ACT2FN[activation]
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.post_init(use_transformer_init=use_transformer_init)
@typecheck()
def forward(self, hidden_states):
"""
Args:
hidden_states: bert output hidden states
"""
first_token_hidden_states = hidden_states[:, 0]
logits = self.fc(first_token_hidden_states)
logits = self.activation(logits)
logits = self.dropout1(logits)
return logits, self.dropout2(hidden_states)
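# Illustrative usage sketch (not part of the NeMo API; shapes are placeholders).
def _sgd_encoder_example():
    import torch
    encoder = SGDEncoder(hidden_size=768, activation='tanh', dropout=0.1)
    bert_hidden_states = torch.randn(2, 16, 768)  # (batch, seq_len, hidden) from a BERT encoder
    logits, hidden_states = encoder(hidden_states=bert_hidden_states)
    return logits.shape, hidden_states.shape  # (2, 768) and (2, 16, 768)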
| NeMo-main | nemo/collections/nlp/modules/dialogue_state_tracking/sgd_encoder.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
import torch
from torch import nn as nn
from nemo.core.classes import NeuralModule, typecheck
from nemo.core.neural_types import LogitsType, NeuralType
__all__ = ['SGDDecoder']
class LogitsQA(nn.Module):
def __init__(self, num_classes: int, embedding_dim: int):
"""Get logits for elements by conditioning on input embedding.
Args:
num_classes: An int containing the number of classes for which logits are to be generated.
embedding_dim: hidden size of the BERT
Returns:
A tensor of shape (batch_size, num_classes) containing the logits.
"""
super().__init__()
self.num_classes = num_classes
self.utterance_proj = nn.Linear(embedding_dim, embedding_dim)
self.activation = nn.functional.gelu
self.layer1 = nn.Linear(embedding_dim, num_classes)
def forward(self, encoded_utterance):
"""
Args:
encoded_utterance: [CLS] token hidden state from BERT encoding of the utterance
"""
# Project the utterance embeddings.
utterance_embedding = self.utterance_proj(encoded_utterance)
utterance_embedding = self.activation(utterance_embedding)
logits = self.layer1(utterance_embedding)
return logits
class SGDDecoder(NeuralModule):
"""
SGDDecoder
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""
Returns definitions of module output ports.
"""
return {
"logit_intent_status": NeuralType(('B', 'T'), LogitsType()), #'B'
"logit_req_slot_status": NeuralType(('B', 'T'), LogitsType()), #'B'
"logit_cat_slot_status": NeuralType(('B', 'T'), LogitsType()),
"logit_cat_slot_value_status": NeuralType(('B', 'T'), LogitsType()), #'B'
"logit_noncat_slot_status": NeuralType(('B', 'T'), LogitsType()),
"logit_spans": NeuralType(('B', 'T', 'D'), LogitsType()),
}
def __init__(self, embedding_dim: int) -> None:
"""Get logits for elements by conditioning on utterance embedding.
Args:
embedding_dim: hidden size of the BERT
"""
super().__init__()
projection_module = LogitsQA
self.intent_layer = projection_module(1, embedding_dim)
self.requested_slots_layer = projection_module(1, embedding_dim)
self.cat_slot_value_layer = projection_module(1, embedding_dim)
# Slot status values: none, dontcare, active.
self.slot_status_layer = projection_module(3, embedding_dim)
# dim 2 for non_categorical slot - to represent start and end position
self.noncat_layer1 = nn.Linear(embedding_dim, embedding_dim)
self.noncat_activation = nn.functional.gelu
self.noncat_layer2 = nn.Linear(embedding_dim, 2)
@typecheck()
def forward(self, encoded_utterance, token_embeddings, utterance_mask):
"""
Args:
encoded_utterance: [CLS] token hidden state from BERT encoding of the utterance
token_embeddings: token embeddings from BERT encoding of the utterance
            utterance_mask: utterance mask with 0 for padding
"""
_, _ = encoded_utterance.size()
logit_intent_status = self._get_intents(encoded_utterance)
logit_req_slot_status = self._get_requested_slots(encoded_utterance)
logit_cat_slot_status, logit_cat_slot_value_status = self._get_categorical_slot_goals(encoded_utterance)
(logit_noncat_slot_status, logit_spans) = self._get_noncategorical_slot_goals(
encoded_utterance=encoded_utterance, utterance_mask=utterance_mask, token_embeddings=token_embeddings
)
return (
logit_intent_status,
logit_req_slot_status,
logit_cat_slot_status,
logit_cat_slot_value_status,
logit_noncat_slot_status,
logit_spans,
)
def _get_intents(self, encoded_utterance):
"""Obtain logits for intents.
Args:
encoded_utterance: representation of utterance
"""
logits = self.intent_layer(encoded_utterance=encoded_utterance,)
return logits
def _get_requested_slots(self, encoded_utterance):
"""Obtain logits for requested slots.
Args:
encoded_utterance: representation of utterance
"""
logits = self.requested_slots_layer(encoded_utterance=encoded_utterance)
return logits
def _get_categorical_slot_goals(self, encoded_utterance):
"""
Obtain logits for status and values for categorical slots
Slot status values: none, dontcare, active
Args:
encoded_utterance: representation of utterance
"""
# Predict the status of all categorical slots.
status_logits = self.slot_status_layer(encoded_utterance=encoded_utterance)
value_status_logits = self.cat_slot_value_layer(encoded_utterance=encoded_utterance)
return status_logits, value_status_logits
def _get_noncategorical_slot_goals(self, encoded_utterance, utterance_mask, token_embeddings):
"""
Obtain logits for status and slot spans for non-categorical slots.
Slot status values: none, dontcare, active
Args:
encoded_utterance: [CLS] token hidden state from BERT encoding of the utterance
            utterance_mask: utterance mask with 0 for padding
token_embeddings: token embeddings from BERT encoding of the utterance
"""
status_logits = self.slot_status_layer(encoded_utterance=encoded_utterance)
# Project the combined embeddings to obtain logits, Shape: (batch_size, max_num_slots, max_num_tokens, 2)
span_logits = self.noncat_layer1(token_embeddings)
span_logits = self.noncat_activation(span_logits)
span_logits = self.noncat_layer2(span_logits)
# Mask out invalid logits for padded tokens.
utterance_mask = utterance_mask.to(bool) # Shape: (batch_size, max_num_tokens).
repeated_utterance_mask = utterance_mask.unsqueeze(-1)
negative_logits = (torch.finfo(span_logits.dtype).max * -0.7) * torch.ones(
            span_logits.size(), device=span_logits.device, dtype=span_logits.dtype
)
span_logits = torch.where(repeated_utterance_mask, span_logits, negative_logits)
return status_logits, span_logits
def _get_negative_logits(self, logits):
"""Returns tensor with negative logits that will be used to mask out unused values for a particular service
Args:
logits: logits whose shape and type will be used to create negative tensor
"""
negative_logits = (torch.finfo(logits.dtype).max * -0.7) * torch.ones(
            logits.size(), dtype=logits.dtype, device=logits.device
)
return negative_logits
| NeMo-main | nemo/collections/nlp/modules/dialogue_state_tracking/sgd_decoder.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.data_utils import *
from nemo.collections.nlp.data.entity_linking.entity_linking_dataset import EntityLinkingDataset
from nemo.collections.nlp.data.information_retrieval.information_retrieval_dataset import (
BertInformationRetrievalDataset,
)
from nemo.collections.nlp.data.language_modeling.l2r_lm_dataset import (
L2RLanguageModelingDataset,
TarredL2RLanguageModelingDataset,
)
from nemo.collections.nlp.data.language_modeling.lm_bert_dataset import (
BertPretrainingDataset,
BertPretrainingPreprocessedDataloader,
)
from nemo.collections.nlp.data.language_modeling.sentence_dataset import SentenceDataset, TarredSentenceDataset
from nemo.collections.nlp.data.machine_translation.machine_translation_dataset import (
TarredTranslationDataset,
TranslationDataset,
)
from nemo.collections.nlp.data.question_answering_squad.qa_dataset import SquadDataset
from nemo.collections.nlp.data.text2sparql.text2sparql_dataset import Text2SparqlDataset
from nemo.collections.nlp.data.text_normalization.decoder_dataset import TextNormalizationDecoderDataset
from nemo.collections.nlp.data.text_normalization.tagger_dataset import TextNormalizationTaggerDataset
from nemo.collections.nlp.data.text_normalization.test_dataset import TextNormalizationTestDataset
from nemo.collections.nlp.data.token_classification.token_classification_dataset import (
BertTokenClassificationDataset,
BertTokenClassificationInferDataset,
)
from nemo.collections.nlp.data.zero_shot_intent_recognition.zero_shot_intent_dataset import (
ZeroShotIntentDataset,
ZeroShotIntentInferenceDataset,
)
| NeMo-main | nemo/collections/nlp/data/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.zero_shot_intent_recognition.zero_shot_intent_dataset import (
ZeroShotIntentInferenceDataset,
calc_class_weights_from_dataloader,
)
| NeMo-main | nemo/collections/nlp/data/zero_shot_intent_recognition/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from typing import Dict, List, Optional
import torch
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.data_utils.data_preprocessing import (
DataProcessor,
fill_class_weights,
get_freq_weights,
get_label_stats,
)
from nemo.collections.nlp.data.glue_benchmark.data_processors import InputExample
from nemo.collections.nlp.data.glue_benchmark.glue_benchmark_dataset import GLUEDataset
from nemo.collections.nlp.parts.utils_funcs import tensor2list
from nemo.core.neural_types import CategoricalValuesType, ChannelType, MaskType, NeuralType
from nemo.utils import logging
__all__ = ['ZeroShotIntentProcessor', 'ZeroShotIntentDataset', 'ZeroShotIntentInferenceDataset']
class ZeroShotIntentProcessor(DataProcessor):
"""
Processor for entailment data sets used to train NLI models for zero shot intent classification.
"""
def __init__(self, sent1_col: int, sent2_col: int, label_col: int, num_classes: int):
"""
Args:
sent1_col: the index of the column containing the premise (or sentence 1)
sent2_col: the index of the column containing the hypothesis (or sentence 2)
label_col: the index of the column containing the label
num_classes: number of classes in the data (should be either 2 or 3, corresponding to
labels ['entailment', 'not_entailment'] or ["contradiction", "entailment", "neutral"])
"""
self.sent1_col = sent1_col
self.sent2_col = sent2_col
self.label_col = label_col
self.num_classes = num_classes
def get_train_examples(self, file_path: str):
"""Gets a collection of `InputExample`s for the train set."""
return self._create_examples(self._read_tsv(file_path), "train")
def get_dev_examples(self, file_path: str):
"""Gets a collection of `InputExample`s for the dev set."""
return self._create_examples(self._read_tsv(file_path), "dev")
def get_labels(self):
"""Gets the list of labels for this data set."""
if self.num_classes == 2:
return ['not_entailment', 'entailment']
elif self.num_classes == 3:
return ["contradiction", "entailment", "neutral"]
else:
raise ValueError("num_classes must be either 2 or 3!")
def _create_examples(self, lines: List[str], set_type: str):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[self.sent1_col]
text_b = line[self.sent2_col]
label = line[self.label_col]
if label == "-":
continue
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ZeroShotIntentDataset(GLUEDataset):
"""
Dataset for training a NLI model for zero shot intent recognition. Similar to GLUE/MNLI
dataset, but allows the user to specify which columns in the data files contain the
premise, hypothesis, and gold label.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'labels': NeuralType(tuple('B'), CategoricalValuesType()),
}
def __init__(
self,
file_path: str,
tokenizer: TokenizerSpec,
max_seq_length: str,
sent1_col: int,
sent2_col: int,
label_col: int,
num_classes: int,
use_cache: bool = True,
):
"""
Args:
file_path: path to file
tokenizer: such as AutoTokenizer
max_seq_length: max sequence length including [CLS] and [SEP]
sent1_col: the index of the column containing the premise (or sentence 1)
sent2_col: the index of the column containing the hypothesis (or sentence 2)
label_col: the index of the column containing the label
num_classes: number of classes in the data (should be either 2 or 3, corresponding to
labels ['entailment', 'not_entailment'] or ["contradiction", "entailment", "neutral"])
use_cache: whether to use data cache
"""
self.task_name = "mnli" # for compatibility with parent class
data_dir, file_name = os.path.split(file_path)
logging.info(f'Processing {file_name}')
self.tokenizer = tokenizer
        evaluate = 'train' not in file_name
processor = ZeroShotIntentProcessor(sent1_col, sent2_col, label_col, num_classes)
self.label_list = processor.get_labels()
if not evaluate:
self.examples = processor.get_train_examples(file_path)
# check the labels found in the training set
all_train_labels = [example.label for example in self.examples]
unique_labels = set(all_train_labels)
if len(unique_labels) != num_classes:
raise ValueError(
"Number of classes specified in config doesn't match the number found in the training data!"
)
elif len(unique_labels) == 2:
if not unique_labels == set(self.label_list):
raise ValueError(
f"Found unexpected labels! For a two-class model, labels are expected to be {self.label_list}"
)
elif len(unique_labels) == 3:
if not unique_labels == set(self.label_list):
raise ValueError(
f"Found unexpected labels! For a three-class model, labels are expected to be {self.label_list}"
)
# save the label map for reference
label_file = os.path.join(data_dir, "label_ids.csv")
with open(label_file, "w") as out:
out.write('\n'.join(self.label_list))
logging.info(f'Labels: {self.label_list}')
            logging.info(f'Label mapping saved to: {label_file}')
else:
self.examples = processor.get_dev_examples(file_path)
processor_name = type(processor).__name__
vocab_size = getattr(tokenizer, "vocab_size", 0)
cached_features_file = os.path.join(
data_dir,
"cached_{}_{}_{}_{}_{}".format(
processor_name, file_name, tokenizer.name, str(max_seq_length), str(vocab_size)
),
)
if use_cache and os.path.exists(cached_features_file):
logging.info(f"loading from {cached_features_file}")
with open(cached_features_file, "rb") as reader:
self.features = pickle.load(reader)
else:
token_params = {
'bos_token': None,
'eos_token': tokenizer.eos_token,
'pad_token': tokenizer.pad_token,
'cls_token': tokenizer.cls_token,
'sep_token_extra': tokenizer.eos_token if 'roberta' in tokenizer.name.lower() else None,
}
self.features = self.convert_examples_to_features(
self.examples, self.label_list, max_seq_length, tokenizer, output_mode="classification", **token_params
)
master_device = not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
if master_device:
logging.info(f'Saving train features into {cached_features_file}')
with open(cached_features_file, "wb") as writer:
pickle.dump(self.features, writer)
class ZeroShotIntentInferenceDataset(GLUEDataset):
"""
Similar to ZeroShotIntentDataset, but gets utterances and candidate labels from lists
rather than sentence pairs and labels from a file.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'labels': NeuralType(tuple('B'), CategoricalValuesType()),
}
def __init__(
self,
queries: List[str],
candidate_labels: List[str],
tokenizer: TokenizerSpec,
max_seq_length: int,
hypothesis_template: str,
):
"""
Args:
queries: list of utterances to classify
candidate_labels: list of candidate labels
tokenizer: such as AutoTokenizer
max_seq_length: max sequence length including [CLS] and [SEP]
hypothesis_template: template used to turn each candidate label into a NLI-style hypothesis
"""
logging.info(f'Processing queries for inference')
self.tokenizer = tokenizer
token_params = {
'bos_token': None,
'eos_token': tokenizer.eos_token,
'pad_token': tokenizer.pad_token,
'cls_token': tokenizer.cls_token,
'sep_token_extra': tokenizer.eos_token if 'roberta' in tokenizer.name.lower() else None,
}
self.examples = []
for i, query in enumerate(queries):
for j, candidate_label in enumerate(candidate_labels):
guid = "query-%s-label-%s" % (i, j)
text_a = query
text_b = hypothesis_template.format(candidate_label)
label = 3 # dummy label for inference; training labels are 0, 1, 2 or 0, 1
self.examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
self.features = self.convert_examples_to_features(
self.examples, [0, 1, 2, 3], max_seq_length, tokenizer, output_mode="classification", **token_params
)
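# Illustrative example (hypothetical template and labels): with
# hypothesis_template="This example is about {}.", the query "set an alarm for 7am" and
# candidate_labels=["alarm", "weather"] yield the premise/hypothesis pairs
#   ("set an alarm for 7am", "This example is about alarm.")
#   ("set an alarm for 7am", "This example is about weather.")
# and the model's entailment scores for these pairs are used to rank the candidate intents.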
def calc_class_weights_from_dataloader(
dataloader: 'torch.utils.data.DataLoader', num_classes: int, data_dir: str
) -> List[float]:
"""
Calculate the weights of each class to be used for weighted loss. This is similar to the function calc_class_weights
in text_classification_dataset, but it gets the labels from a dataloader rather than from a file.
Args:
dataloader: the dataloader for the training set
        num_classes: number of classes in the dataset
        data_dir: directory where the label-frequency statistics file (sentence_stats.tsv) will be written
    """
labels = []
for batch in dataloader:
labels.extend(tensor2list(batch[-1]))
logging.info(f'Calculating label frequency stats...')
total_sents, sent_label_freq, max_id = get_label_stats(
labels, os.path.join(data_dir, 'sentence_stats.tsv'), verbose=False
)
if max_id >= num_classes:
raise ValueError(f'Found an invalid label! Labels should be from [0, num_classes-1].')
class_weights_dict = get_freq_weights(sent_label_freq)
logging.info(f'Total Sentence Pairs: {total_sents}')
logging.info(f'Class Frequencies: {sent_label_freq}')
logging.info(f'Class Weights: {class_weights_dict}')
class_weights = fill_class_weights(weights=class_weights_dict, max_id=num_classes - 1)
return class_weights
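# Illustrative sketch of the weighting idea behind calc_class_weights_from_dataloader:
# rarer classes receive larger loss weights. This uses a simple inverse-frequency formula
# and a toy label list; it is not the exact formula implemented by get_freq_weights and
# fill_class_weights in NeMo.
from collections import Counter

def sketch_class_weights(labels, num_classes):
    counts = Counter(labels)
    total = len(labels)
    # weight_c ~ total / (num_classes * count_c); classes never seen keep a weight of 1.0
    return [total / (num_classes * counts[c]) if counts[c] else 1.0 for c in range(num_classes)]

print(sketch_class_weights([0, 0, 0, 1, 1, 2], num_classes=3))  # roughly [0.67, 1.0, 2.0]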
| NeMo-main | nemo/collections/nlp/data/zero_shot_intent_recognition/zero_shot_intent_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.entity_linking.entity_linking_dataset import EntityLinkingDataset
| NeMo-main | nemo/collections/nlp/data/entity_linking/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import pickle as pkl
from typing import Optional
import torch
from nemo.collections.nlp.data.data_utils.data_preprocessing import find_newlines, load_data_indices
from nemo.core.classes import Dataset
from nemo.utils import logging
__all__ = ['EntityLinkingDataset']
class EntityLinkingDataset(Dataset):
"""
Parent class for entity linking encoder training and index
datasets
Args:
tokenizer (obj): huggingface tokenizer,
data_file (str): path to tab separated column file where data
pairs appear in the format
concept_ID\tconcept_synonym1\tconcept_synonym2\n
newline_idx_file (str): path to pickle file containing location
of data_file newline characters
max_seq_length (int): maximum length of a concept in tokens
is_index_data (bool): Whether dataset will be used for building
a nearest neighbors index
"""
def __init__(
self,
tokenizer: object,
data_file: str,
newline_idx_file: Optional[str] = None,
max_seq_length: Optional[int] = 512,
is_index_data: bool = False,
):
self.tokenizer = tokenizer
# Try to load the pair indices file if it already exists
newline_indices, newline_idx_file, _ = load_data_indices(newline_idx_file, data_file, "newline_indices")
# If the pair indices file doesn't exist, generate and store the indices
if newline_indices is None:
logging.info("Getting datafile newline indices")
with open(data_file, "rb") as f:
contents = f.read()
newline_indices = find_newlines(contents)
newline_indices = array.array("I", newline_indices)
# Store data file indices to avoid generating them again
with open(newline_idx_file, "wb") as f:
pkl.dump(newline_indices, f)
self.newline_indices = newline_indices
self.data_file = data_file
self.num_lines = len(newline_indices)
self.max_seq_length = max_seq_length
self.is_index_data = is_index_data
logging.info(f"Loaded dataset with {self.num_lines} examples")
def __len__(self):
return self.num_lines
def __getitem__(self, idx):
concept_offset = self.newline_indices[idx]
with open(self.data_file, "r", encoding='utf-8-sig') as f:
# Find data pair within datafile using byte offset
f.seek(concept_offset)
concept = f.readline()[:-1]
concept = concept.strip().split("\t")
if self.is_index_data:
concept_id, concept = concept
return (int(concept_id), concept)
else:
concept_id, concept1, concept2 = concept
return (int(concept_id), concept1, concept2)
def _collate_fn(self, batch):
"""collate batch of input_ids, segment_ids, input_mask, and label
Args:
batch: A list of tuples of format (concept_ID, concept_synonym1, concept_synonym2).
"""
if self.is_index_data:
concept_ids, concepts = zip(*batch)
concept_ids = list(concept_ids)
concepts = list(concepts)
else:
concept_ids, concepts1, concepts2 = zip(*batch)
concept_ids = list(concept_ids)
concept_ids.extend(concept_ids) # Need to double label list to match each concept
concepts = list(concepts1)
concepts.extend(concepts2)
batch = self.tokenizer(
concepts,
add_special_tokens=True,
padding=True,
truncation=True,
max_length=self.max_seq_length,
return_token_type_ids=True,
return_attention_mask=True,
return_length=True,
)
return (
torch.LongTensor(batch["input_ids"]),
torch.LongTensor(batch["token_type_ids"]),
torch.LongTensor(batch["attention_mask"]),
torch.LongTensor(concept_ids),
)
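# Self-contained sketch of the newline byte-offset pattern EntityLinkingDataset relies on:
# compute the byte position where each line starts once, then seek() straight to any record
# instead of reading the whole file. The temporary file and its contents are illustrative only.
import tempfile

def sketch_offset_index():
    records = ["C0001\taspirin\tacetylsalicylic acid", "C0002\tibuprofen\tadvil"]
    with tempfile.NamedTemporaryFile("w", delete=False, suffix=".tsv") as tmp:
        tmp.write("\n".join(records) + "\n")
        path = tmp.name
    with open(path, "rb") as f:
        contents = f.read()
    # line-start offsets: 0, then one byte past every newline (dropping the one at EOF)
    newline_positions = [i for i, byte in enumerate(contents) if byte == ord("\n")]
    line_starts = [0] + [pos + 1 for pos in newline_positions[:-1]]
    # jump directly to the second record, much like __getitem__ does with f.seek()
    with open(path, "r", encoding="utf-8") as f:
        f.seek(line_starts[1])
        concept_id, synonym1, synonym2 = f.readline().rstrip("\n").split("\t")
    return concept_id, synonym1, synonym2

print(sketch_offset_index())  # ('C0002', 'ibuprofen', 'advil')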
| NeMo-main | nemo/collections/nlp/data/entity_linking/entity_linking_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.question_answering.data_processor.qa_processing import QAProcessor
from nemo.collections.nlp.data.question_answering.dataset import BERTQADataset, GPTQADataset, QADataset, S2SQADataset
from nemo.collections.nlp.data.question_answering.input_example import (
BERTQAInputExample,
GPTQAInputExample,
QAExample,
S2SQAInputExample,
)
| NeMo-main | nemo/collections/nlp/data/question_answering/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from tqdm import trange
from nemo.collections.nlp.data.question_answering.data_processor.qa_processing import INFERENCE_MODE, TRAINING_MODE
from nemo.collections.nlp.data.question_answering.dataset.qa_dataset import QADataset
from nemo.collections.nlp.data.question_answering.input_example.qa_bert_input_example import BERTQAInputExample
from nemo.utils import logging
class BERTQADataset(QADataset):
""" Creates a Dataset for BERT architecture based Exractive QA """
def __init__(
self,
data_file: str,
processor: object,
tokenizer: object,
keep_doc_spans: str = False,
doc_stride: int = 128,
max_query_length: int = 64,
max_seq_length: int = 512,
version_2_with_negative: bool = False,
num_samples: int = -1,
mode: str = TRAINING_MODE,
use_cache: bool = False,
):
super().__init__(
data_file=data_file, processor=processor, tokenizer=tokenizer, mode=mode, num_samples=num_samples
)
self.keep_doc_spans = keep_doc_spans
self.doc_stride = doc_stride
self.max_query_length = max_query_length
self.max_seq_length = max_seq_length
self.version_2_with_negative = version_2_with_negative
self.num_samples = num_samples
self.mode = mode
self.use_cache = use_cache
# structures for hashing to reduce memory use
self.input_mask_id = 0
self.input_mask_id_to_input_mask = {}
self.input_mask_to_input_mask_id = {}
self.segment_mask_id = 0
self.segment_mask_id_to_segment_mask = {}
self.segment_mask_to_segment_mask_id = {}
self._set_cached_features_filename()
if use_cache and os.path.exists(self.cached_features_file):
if self.mode == TRAINING_MODE:
del self.examples
del self.processor
(
self.features,
self.input_mask_id_to_input_mask,
self.input_mask_to_input_mask_id,
self.segment_mask_id_to_segment_mask,
self.segment_mask_to_segment_mask_id,
) = QADataset.load_features_from_cache(self.cached_features_file)
else:
self._convert_examples_to_features()
if use_cache:
items_to_pickle = [
self.features,
self.input_mask_id_to_input_mask,
self.input_mask_to_input_mask_id,
self.segment_mask_id_to_segment_mask,
self.segment_mask_to_segment_mask_id,
]
QADataset.dump_features_to_cache(self.cached_features_file, items_to_pickle)
logging.info("Converting dict features into object features")
for i in trange(len(self.features)):
self.features[i] = BERTQAInputExample(**self.features[i])
def _set_cached_features_filename(self):
""" Creates cache filename using dataset config parameters """
vocab_size = getattr(self.tokenizer, "vocab_size", 0)
self.cached_features_file = (
self.data_file
+ '_cache'
+ '_{}_{}_{}_{}_{}_{}_{}'.format(
self.mode,
self.tokenizer.name,
str(vocab_size),
str(self.max_seq_length),
str(self.doc_stride),
str(self.max_query_length),
str(self.num_samples),
)
)
def _convert_examples_to_features(self):
""" Converts loaded examples to features """
logging.info(f"Preprocessing data into features.")
has_groundtruth = self.mode != INFERENCE_MODE
unique_id = 1000000000
text_to_tokens_dict = {}
self.features = []
for example_index in trange(len(self.examples)):
if example_index % 1000 == 0:
QADataset.check_if_sufficient_memory()
example = self.examples[example_index]
if example.question_text not in text_to_tokens_dict:
text_to_tokens_dict[example.question_text] = self.tokenizer.text_to_tokens(example.question_text)[
: self.max_query_length
]
query_tokens = text_to_tokens_dict[example.question_text]
# context: index of token -> index of word
tok_to_orig_index = []
# context: index of word -> index of first token in token list
orig_to_tok_index = []
# context without white spaces after tokenization
all_doc_tokens = []
# doc tokens is word separated context
(
doc_tokens,
char_to_word_offset,
start_position,
end_position,
context_text,
) = QADataset.get_doc_tokens_and_offset_from_context_id(
example.context_id,
example.start_position_character,
example.is_impossible,
example.answer_text,
self.processor.doc_id_to_context_text,
)
example.start_position = start_position
example.end_position = end_position
if self.mode != TRAINING_MODE:
example.doc_tokens = doc_tokens
# the text to tokens step is the slowest step
for (i, token) in enumerate(doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
if token not in text_to_tokens_dict:
text_to_tokens_dict[token] = self.tokenizer.text_to_tokens(token)
sub_tokens = text_to_tokens_dict[token]
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
# idx of query token start and end in context
tok_start_position = None
tok_end_position = None
if has_groundtruth and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if has_groundtruth and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = QADataset.improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, self.tokenizer, example.answer_text
)
# The -3 accounts for tokenizer.cls_token, tokenizer.sep_token and tokenizer.sep_token
# doc_spans contains all possible contexts options of given length
max_tokens_for_doc = self.max_seq_length - len(query_tokens) - 3
doc_spans = QADataset.get_docspans(all_doc_tokens, max_tokens_for_doc, self.doc_stride)
doc_spans = QADataset.keep_relevant_docspans(
doc_spans, tok_start_position, tok_end_position, self.keep_doc_spans
)
# make compatible for hashing
doc_spans = tuple(doc_spans)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = [self.tokenizer.cls_token] + query_tokens + [self.tokenizer.sep_token]
segment_ids = [0 for i in range(len(tokens))]
token_is_max_context = {}
# maps context tokens idx in final input -> word idx in context
token_to_orig_map = {}
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = QADataset.check_is_max_context(doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append(self.tokenizer.sep_token)
segment_ids.append(1)
input_ids = self.tokenizer.tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens.
# Only real tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < self.max_seq_length:
input_ids.append(self.tokenizer.pad_id)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == self.max_seq_length
assert len(input_mask) == self.max_seq_length
assert len(segment_ids) == self.max_seq_length
# calculate start and end position in final array
# of tokens in answer if no answer,
# 0 for both pointing to tokenizer.cls_token
start_position = 0
end_position = 0
if has_groundtruth and not example.is_impossible:
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if has_groundtruth and example.is_impossible:
# if our document chunk does not contain
# an annotation we throw it out, since there is nothing
# to predict.
start_position = 0
end_position = 0
if example_index < 1:
logging.info("*** Example ***")
logging.info("unique_id: %s" % (unique_id))
logging.info("example_index: %s" % (example_index))
logging.info("doc_span_index: %s" % (doc_span_index))
logging.info("tokens: %s" % " ".join(tokens))
logging.info(
"token_to_orig_map: %s" % " ".join(["%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()])
)
logging.info(
"token_is_max_context: %s"
% " ".join(["%d:%s" % (x, y) for (x, y) in token_is_max_context.items()])
)
logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if has_groundtruth and example.is_impossible:
logging.info("impossible example")
if has_groundtruth and not example.is_impossible:
answer_text = " ".join(tokens[start_position : (end_position + 1)])
logging.info("start_position: %d" % (start_position))
logging.info("end_position: %d" % (end_position))
logging.info("answer: %s" % (answer_text))
# memoization to save CPU memory for large datasets
input_mask = tuple(input_mask)
if input_mask in self.input_mask_to_input_mask_id:
feature_input_mask_id = self.input_mask_to_input_mask_id[input_mask]
else:
self.input_mask_id_to_input_mask[self.input_mask_id] = input_mask
self.input_mask_to_input_mask_id[input_mask] = self.input_mask_id
feature_input_mask_id = self.input_mask_id
self.input_mask_id += 1
segment_mask = tuple(segment_ids)
if segment_mask in self.segment_mask_to_segment_mask_id:
feature_segment_mask_id = self.segment_mask_to_segment_mask_id[segment_mask]
else:
self.segment_mask_id_to_segment_mask[self.segment_mask_id] = segment_mask
self.segment_mask_to_segment_mask_id[segment_mask] = self.segment_mask_id
feature_segment_mask_id = self.segment_mask_id
self.segment_mask_id += 1
if self.mode == TRAINING_MODE:
input_feature = {
"unique_id": unique_id,
"input_ids": input_ids,
"input_mask": feature_input_mask_id,
"segment_ids": feature_segment_mask_id,
"start_position": start_position,
"end_position": end_position,
}
else:
input_feature = {
"unique_id": unique_id,
"input_ids": input_ids,
"input_mask": feature_input_mask_id,
"segment_ids": feature_segment_mask_id,
"start_position": start_position,
"end_position": end_position,
"example_index": example_index,
"doc_span_index": doc_span_index,
"tokens": tokens,
"token_to_orig_map": token_to_orig_map,
"token_is_max_context": token_is_max_context,
"is_impossible": example.is_impossible,
}
self.features.append(input_feature)
unique_id += 1
# delete self.examples during training mode to save memory
if self.mode == TRAINING_MODE:
self.examples = []
del self.processor
def __getitem__(self, idx: int):
feature = self.features[idx]
if self.mode == INFERENCE_MODE:
return (
np.array(feature.input_ids),
np.array(self.segment_mask_id_to_segment_mask[feature.segment_ids]),
np.array(self.input_mask_id_to_input_mask[feature.input_mask]),
np.array(feature.unique_id),
)
else:
return (
np.array(feature.input_ids),
np.array(self.segment_mask_id_to_segment_mask[feature.segment_ids]),
np.array(self.input_mask_id_to_input_mask[feature.input_mask]),
np.array(feature.unique_id),
np.array(feature.start_position),
np.array(feature.end_position),
)
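# A small, self-contained sketch of the feature layout built in _convert_examples_to_features:
# [CLS] query [SEP] context-span [SEP], with segment id 0 for the query part and 1 for the
# context part, and answer positions shifted by doc_offset = len(query_tokens) + 2.
# The token strings below are illustrative placeholders; real runs use subword tokens.
query_tokens = ["who", "wrote", "it", "?"]
span_tokens = ["the", "book", "was", "written", "by", "alice"]
tokens = ["[CLS]"] + query_tokens + ["[SEP]"] + span_tokens + ["[SEP]"]
segment_ids = [0] * (len(query_tokens) + 2) + [1] * (len(span_tokens) + 1)

# the answer "alice" is token 5 of the span; its position in the final input:
doc_offset = len(query_tokens) + 2
start_position = end_position = 5 + doc_offset
assert tokens[start_position] == "alice"
assert len(tokens) == len(segment_ids)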
| NeMo-main | nemo/collections/nlp/data/question_answering/dataset/qa_bert_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.question_answering.dataset.qa_bert_dataset import BERTQADataset
from nemo.collections.nlp.data.question_answering.dataset.qa_dataset import QADataset
from nemo.collections.nlp.data.question_answering.dataset.qa_gpt_dataset import GPTQADataset
from nemo.collections.nlp.data.question_answering.dataset.qa_s2s_dataset import S2SQADataset
| NeMo-main | nemo/collections/nlp/data/question_answering/dataset/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
from tqdm import trange
from nemo.collections.nlp.data.question_answering.data_processor.qa_processing import INFERENCE_MODE, TRAINING_MODE
from nemo.collections.nlp.data.question_answering.dataset.qa_dataset import QADataset
from nemo.collections.nlp.data.question_answering.input_example.qa_s2s_input_example import S2SQAInputExample
from nemo.utils import logging
class S2SQADataset(QADataset):
""" Creates a Dataset for T5/BART architecture based Generative QA """
def __init__(
self,
data_file: str,
processor: object,
tokenizer: object,
keep_doc_spans: str = False,
doc_stride: int = 128,
max_query_length: int = 64,
max_seq_length: int = 512,
max_answer_length: int = 64,
check_if_answer_in_context: bool = False,
num_samples: int = -1,
mode: str = TRAINING_MODE,
use_cache: bool = False,
):
super().__init__(
data_file=data_file, processor=processor, tokenizer=tokenizer, mode=mode, num_samples=num_samples
)
self.keep_doc_spans = keep_doc_spans
self.doc_stride = doc_stride
self.max_query_length = max_query_length
self.max_seq_length = max_seq_length
self.max_answer_length = max_answer_length
self.check_if_answer_in_context = check_if_answer_in_context
self.num_samples = num_samples
self.mode = mode
self.use_cache = use_cache
self._set_cached_features_filename()
if use_cache and os.path.exists(self.cached_features_file):
# delete self.examples during training mode to save memory
if self.mode == TRAINING_MODE:
del self.examples
del self.processor
self.features = QADataset.load_features_from_cache(self.cached_features_file)
else:
self._convert_examples_to_features()
if use_cache:
QADataset.dump_features_to_cache(self.cached_features_file, self.features)
logging.info("Converting dict features into object features")
for i in trange(len(self.features)):
self.features[i] = S2SQAInputExample(**self.features[i])
def _set_cached_features_filename(self):
""" Creates cache filename using dataset config parameters """
vocab_size = getattr(self.tokenizer, "vocab_size", 0)
self.cached_features_file = (
self.data_file
+ '_cache'
+ '_{}_{}_{}_{}_{}_{}_{}'.format(
self.mode,
self.tokenizer.name,
str(vocab_size),
str(self.max_query_length),
str(self.max_seq_length),
str(self.max_answer_length),
str(self.num_samples),
)
)
def _convert_examples_to_features(self):
"""
Iterates through each QA example, formats into input and output template,
and encodes the input and output template
Input template: `context: <context text> question: <question text>`
Output template: `<answer text>`
"""
logging.info(f"Preprocessing data into features.")
unique_id = 1000000000
self.features = []
context_prefix = "context: "
context_prefix_tokens = self.tokenizer.tokenizer.tokenize(context_prefix)
for example_index in trange(len(self.examples)):
if example_index % 1000 == 0:
S2SQADataset.check_if_sufficient_memory()
example = self.examples[example_index]
query_tokens, formatted_query = self._prep_query(example)
context_tokens, context_spans = self._prep_context(example, query_tokens, context_prefix_tokens)
unique_id = self._encode_all_context_spans(
unique_id, context_spans, context_tokens, formatted_query, example, example_index,
)
# delete self.examples during training mode to save memory
if self.mode == TRAINING_MODE:
self.examples = []
del self.processor
def _prep_query(self, example):
"""
Formats a question into input format: ` question: <question text>`
The space at the start allows concatenation with the context for input
"""
formatted_query = f" question: {example.question_text}"
query_tokens = self.tokenizer.tokenizer.tokenize(formatted_query)[: self.max_query_length]
return query_tokens, formatted_query
def _prep_context(self, example, query_tokens, context_prefix_tokens):
"""
Calculates the maximum possible length for a given context given a question
as inputs are of fixed length
Divides the context into multiple spans based on the calculated max length
"""
context_tokens = self.tokenizer.tokenizer.tokenize(example.context_text)
max_context_length = (
self.max_seq_length
- len(query_tokens)
- len(context_prefix_tokens)
- 1 # -1 accounts for </s> token in T5/BART
)
context_spans = S2SQADataset.get_docspans(context_tokens, max_context_length, self.doc_stride)
context_spans = tuple(context_spans)
return context_tokens, context_spans
def _encode_all_context_spans(
self, unique_id, context_spans, context_tokens, formatted_query, example, example_index,
):
"""
Formats all spans extracted from a single context as:
`context: <context span text> question: <question text>` and encodes it as the model input;
the answer text is encoded separately as the label
If the answer text (example.answer_text) is not present in a given context span,
the answer is converted to a blank answer
"""
for context_span_idx, context_span in enumerate(context_spans):
# format query and context span text
context_span_tokens = context_tokens[context_span.start : context_span.start + context_span.length]
context_span_text = self.tokenizer.tokenizer.convert_tokens_to_string(context_span_tokens)
source = f"context: {context_span_text}{formatted_query}"
# encode input
encoded_input_dict = self.tokenizer.tokenizer(
source, truncation=True, max_length=self.max_seq_length, padding="max_length", return_tensors="pt",
)
input_ids = torch.squeeze(encoded_input_dict["input_ids"])
input_attn_mask = torch.squeeze(encoded_input_dict["attention_mask"])
# encode output based on mode and is question answerable given context
labels = self._encode_answer(example, context_span_text)
# create dictionary features
feature = {
"unique_id": unique_id,
"input_ids": input_ids,
"input_attn_mask": input_attn_mask,
"labels": labels,
"example_index": example_index,
"context_span_index": context_span_idx,
"is_impossible": example.is_impossible,
}
self.features.append(feature)
unique_id += 1
return unique_id
def _encode_answer(self, example, context_span_text):
"""
Answer is set and encoded as:
- blank if in inference mode, else
- blank if question is unanswerable given context, else
- blank if answer text is not present in context span
and the check flag is set to true, else
- formatted answer
"""
is_answer_in_context_check = (
self.check_if_answer_in_context # checks if the flag for this check is set
and example.answer_text # checks if answer text is valid, i.e. question is not unanswerable
and example.answer_text not in context_span_text # checks if answer text is a substring of context
)
if (
self.mode == INFERENCE_MODE
or example.is_impossible # question not answerable given context
or is_answer_in_context_check
):
target = ""
else:
target = example.answer_text
encoded_output_dict = self.tokenizer.tokenizer(
target, truncation=True, max_length=self.max_answer_length, padding="max_length", return_tensors="pt",
)
labels = torch.squeeze(encoded_output_dict["input_ids"])
labels[labels == self.tokenizer.tokenizer.pad_token_id] = -100
return labels
def __getitem__(self, idx: int):
feature = self.features[idx]
if self.mode == INFERENCE_MODE:
return (
np.array(feature.input_ids),
np.array(feature.input_attn_mask),
np.array(feature.unique_id),
)
else:
return (
np.array(feature.input_ids),
np.array(feature.input_attn_mask),
np.array(feature.unique_id),
np.array(feature.labels),
)
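# A minimal sketch of the label masking done in _encode_answer: pad positions in the encoded
# answer are set to -100 so the seq2seq cross-entropy loss ignores them. The token ids and
# pad id below are illustrative, not produced by a real tokenizer.
import torch

pad_token_id = 0
encoded_answer = torch.tensor([284, 1175, 9, 1, pad_token_id, pad_token_id])  # answer + </s> + padding
labels = encoded_answer.clone()
labels[labels == pad_token_id] = -100
print(labels)  # tensor([ 284, 1175,    9,    1, -100, -100])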
| NeMo-main | nemo/collections/nlp/data/question_answering/dataset/qa_s2s_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import numpy as np
import torch
from tqdm import trange
from nemo.collections.nlp.data.question_answering.data_processor.qa_processing import INFERENCE_MODE, TRAINING_MODE
from nemo.collections.nlp.data.question_answering.dataset.qa_dataset import QADataset
from nemo.collections.nlp.data.question_answering.input_example.qa_gpt_input_example import GPTQAInputExample
from nemo.utils import logging
class GPTQADataset(QADataset):
""" Creates a Dataset for GPT architecture based Generative QA """
def __init__(
self,
data_file: str,
processor: object,
tokenizer: object,
keep_doc_spans: str = False,
doc_stride: int = 128,
max_query_length: int = 64,
max_seq_length: int = 512,
max_answer_length: int = 64,
check_if_answer_in_context: bool = False,
num_samples: int = -1,
mode: str = TRAINING_MODE,
use_cache: bool = False,
):
super().__init__(
data_file=data_file, processor=processor, tokenizer=tokenizer, mode=mode, num_samples=num_samples
)
self.keep_doc_spans = keep_doc_spans
self.doc_stride = doc_stride
self.max_query_length = max_query_length
self.max_seq_length = max_seq_length
self.max_answer_length = max_answer_length
self.check_if_answer_in_context = check_if_answer_in_context
self.num_samples = num_samples
self.mode = mode
self.use_cache = use_cache
self._set_cached_features_filename()
if use_cache and os.path.exists(self.cached_features_file):
# delete self.examples during training mode to save memory
if self.mode == TRAINING_MODE:
del self.examples
del self.processor
self.features = QADataset.load_features_from_cache(self.cached_features_file)
else:
self._convert_examples_to_features()
if use_cache:
QADataset.dump_features_to_cache(self.cached_features_file, self.features)
logging.info("Converting dict features into object features")
for i in trange(len(self.features)):
self.features[i] = GPTQAInputExample(**self.features[i])
def _set_cached_features_filename(self):
""" Creates cache filename using dataset config parameters """
vocab_size = getattr(self.tokenizer, "vocab_size", 0)
self.cached_features_file = (
self.data_file
+ '_cache'
+ '_{}_{}_{}_{}_{}_{}_{}'.format(
self.mode,
self.tokenizer.name,
str(vocab_size),
str(self.max_query_length),
str(self.max_seq_length),
str(self.max_answer_length),
str(self.num_samples),
)
)
def _convert_examples_to_features(self):
"""
Iterates through each QA example, formats into template and encodes
Template: `context: <context text> question: <question text> answer:<answer text>`
"""
logging.info(f"Preprocessing data into features.")
unique_id = 1000000000
self.features = []
context_prefix = "context: "
query_prefix = " question: "
answer_prefix = " answer:"
context_prefix_tokens = self.tokenizer.tokenizer.tokenize(context_prefix)
answer_prefix_tokens = self.tokenizer.tokenizer.tokenize(answer_prefix)
for example_index in trange(len(self.examples)):
if example_index % 1000 == 0:
GPTQADataset.check_if_sufficient_memory()
example = self.examples[example_index]
formatted_query, query_tokens_length = self._prep_query(query_prefix, example)
formatted_answer, answer_tokens_length = self._prep_answer(example)
context_tokens, context_spans = self._prep_context(
example, query_tokens_length, answer_tokens_length, context_prefix_tokens, answer_prefix_tokens,
)
unique_id = self._encode_all_context_spans(
unique_id,
context_spans,
context_tokens,
context_prefix,
formatted_query,
answer_prefix,
formatted_answer,
example,
example_index,
)
# delete self.examples during training mode to save memory
if self.mode == TRAINING_MODE:
self.examples = []
del self.processor
def _prep_query(self, query_prefix, example):
"""
Formats a question into input format: ` question: <question text>`
The space at the start allows concatenation with the context and answer for input
Returns the formatted query and the length of its tokens
"""
formatted_query = f"{query_prefix}{example.question_text}"
return self._get_truncated_sentence_and_len(formatted_query, self.max_query_length)
def _prep_answer(self, example):
"""
Formats an answer into suitable model input:
- In inference mode, answer is returned as an empty string, else
- Sets EOS token as answer if question is impossible to answer, else
- Appends answer with EOS token as the final answer
Returns the formatted answer string and the length of its tokens
"""
if self.mode == INFERENCE_MODE:
target = ""
elif example.is_impossible: # example is impossible to answer given context
target = self.tokenizer.tokenizer.eos_token
else:
target = f"{example.answer_text}{self.tokenizer.tokenizer.eos_token}"
return self._get_truncated_sentence_and_len(target, self.max_answer_length)
def _prep_context(
self, example, query_tokens_length, answer_tokens_length, context_prefix_tokens, answer_prefix_tokens,
):
"""
Calculates the maximum possible length for a given context given a question
as inputs are fixed length
Divides the context into multiple spans based on the calculated max length
"""
context_tokens = self.tokenizer.tokenizer.tokenize(example.context_text)
max_context_length = (
self.max_seq_length
- query_tokens_length
- answer_tokens_length
- len(context_prefix_tokens)
- len(answer_prefix_tokens)
- 1 # -1 accounts for EOS token
)
context_spans = GPTQADataset.get_docspans(context_tokens, max_context_length, self.doc_stride)
context_spans = tuple(context_spans)
return context_tokens, context_spans
def _encode_all_context_spans(
self,
unique_id,
context_spans,
context_tokens,
context_prefix,
formatted_query,
answer_prefix,
formatted_answer,
example,
example_index,
):
"""
Formats all spans extracted from a single context as:
`context: <context span text> question: <question text> answer:<answer text>`
<answer text> is set as:
- blank if in inference mode, else
- EOS token if answer text is not present in context span
and the check flag is set to true, else
- formatted answer
"""
for context_span_idx, context_span in enumerate(context_spans):
context_span_tokens = context_tokens[context_span.start : context_span.start + context_span.length]
context_span_text = self.tokenizer.tokenizer.convert_tokens_to_string(context_span_tokens)
input_without_answer = f"{context_prefix}{context_span_text}{formatted_query}{answer_prefix}"
_, training_mask_end = self._get_truncated_sentence_and_len(input_without_answer, self.max_seq_length)
is_answer_in_context_check = (
self.check_if_answer_in_context # checks if the flag for this check is set
and example.answer_text # checks if answer text is valid, i.e. question is not unanswerable
and example.answer_text not in context_span_text # checks if answer text is a substring of context
)
if self.mode == INFERENCE_MODE:
input_to_encode = input_without_answer
elif is_answer_in_context_check:
input_to_encode = f"{input_without_answer}{self.tokenizer.tokenizer.eos_token}"
else:
input_to_encode = f"{input_without_answer}{formatted_answer}"
encoded_input_dict = self.tokenizer.tokenizer(
input_to_encode,
truncation=True,
max_length=self.max_seq_length,
padding="max_length",
return_tensors="pt",
)
input_ids = torch.squeeze(encoded_input_dict["input_ids"])
input_attn_mask = torch.squeeze(encoded_input_dict["attention_mask"])
labels = GPTQADataset.update_labels_for_no_pad_loss(input_ids, training_mask_end, input_attn_mask)
# create dictionary features
feature = {
"unique_id": unique_id,
"input_ids": input_ids,
"input_attn_mask": input_attn_mask,
"training_mask_end": training_mask_end,
"labels": labels,
"example_index": example_index,
"context_span_index": context_span_idx,
"is_impossible": example.is_impossible,
}
self.features.append(feature)
unique_id += 1
return unique_id
def _get_truncated_sentence_and_len(self, sentence, max_length):
if not sentence:
return "", 0
tokens = self.tokenizer.tokenizer.tokenize(sentence)[:max_length]
trunc_sentence = self.tokenizer.tokenizer.convert_tokens_to_string(tokens)
seq_length = len(tokens)
return trunc_sentence, seq_length
@classmethod
def update_labels_for_no_pad_loss(cls, input_ids, training_mask_end, input_attn_mask):
"""
Loss mask for GPT is constructed to ignore loss for padding tokens
The GPT EOS token is the same as the pad token and needs to be excluded from the loss mask
This is done using the attention mask inversion as described in:
https://github.com/huggingface/transformers/issues/7135#issuecomment-1172962080
"""
labels = copy.copy(torch.squeeze(input_ids))
inv_bool_attn_mask = torch.eq(torch.squeeze(input_attn_mask), 0)
labels.data = torch.tensor(
[
-100 if ((i < training_mask_end) or (inv_bool_attn_mask[i])) else labels.data[i]
for i in range(len(labels.data))
]
)
return labels
def __getitem__(self, idx: int):
feature = self.features[idx]
if self.mode == INFERENCE_MODE:
return (
np.array(feature.input_ids),
np.array(feature.input_attn_mask),
np.array(feature.unique_id),
np.array(feature.training_mask_end),
)
else:
return (
np.array(feature.input_ids),
np.array(feature.input_attn_mask),
np.array(feature.unique_id),
np.array(feature.training_mask_end),
np.array(feature.labels),
)
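# Stand-alone sketch of the masking rule in update_labels_for_no_pad_loss, re-expressed in
# vectorized form: positions before the "answer:" marker (the prompt) and positions where the
# attention mask is 0 (padding / EOS-used-as-pad) are set to -100 so the loss ignores them.
# Tensor values are illustrative.
import torch

input_ids = torch.tensor([50, 51, 52, 53, 0, 0])   # prompt tokens, answer tokens, padding
input_attn_mask = torch.tensor([1, 1, 1, 1, 0, 0])
training_mask_end = 2                              # prompt length: first 2 tokens are prompt

labels = input_ids.clone()
labels[:training_mask_end] = -100                  # mask the prompt
labels[input_attn_mask == 0] = -100                # mask padding / EOS-used-as-pad
print(labels)  # tensor([-100, -100,   52,   53, -100, -100])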
| NeMo-main | nemo/collections/nlp/data/question_answering/dataset/qa_gpt_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import pickle
from functools import lru_cache
from typing import List
import psutil
import torch
from nemo.collections.nlp.data.data_utils import is_whitespace
from nemo.collections.nlp.data.question_answering.data_processor.qa_processing import (
EVALUATION_MODE,
INFERENCE_MODE,
TRAINING_MODE,
)
from nemo.core.classes import Dataset
from nemo.utils import logging
class QADataset(Dataset):
''' Abstract base class for QA Datasets with common utility methods '''
def __init__(
self, data_file: str, processor: object, tokenizer: object, mode: str, num_samples: int, **kwargs,
):
self.mode = mode
self.data_file = data_file
self.processor = processor
self.tokenizer = tokenizer
self.features = None
if self.mode not in [TRAINING_MODE, EVALUATION_MODE, INFERENCE_MODE]:
raise ValueError(
f"mode should be either {TRAINING_MODE}, {EVALUATION_MODE}, {INFERENCE_MODE} but got {self.mode}"
)
# get examples from processor and keep according to limit
self.examples = self.processor.get_examples()
if num_samples == 0:
raise ValueError(
f"num_samples has to be positive or -1 (to use the entire dataset), however got {num_samples}."
)
elif num_samples > 0:
self.examples = self.examples[:num_samples]
def __len__(self):
return len(self.features)
def __getitem__(self, idx: int):
raise NotImplementedError
@staticmethod
def load_features_from_cache(cached_filename):
logging.info(f"loading from {cached_filename}")
with open(cached_filename, "rb") as reader:
features = pickle.load(reader)
return features
@staticmethod
def dump_features_to_cache(cached_filename, features):
master_device = not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
if master_device:
logging.info(f"Saving train features into cached file {cached_filename}")
with open(cached_filename, "wb") as writer:
pickle.dump(features, writer)
@staticmethod
def check_if_sufficient_memory():
"""
Check if there is sufficient memory to prevent the system from becoming unresponsive
Otherwise the system can become unresponsive as memory slowly fills up, possibly leaving it unable to kill the process
Interrupts the run if CPU memory use exceeds 75%, to leave some capacity for model loading
"""
percent_memory = psutil.virtual_memory().percent
if percent_memory > 75:
raise ValueError('Please use a device with more CPU ram or a smaller dataset')
@staticmethod
@lru_cache(maxsize=10000)
def get_best_span_index(doc_spans, position):
"""
For a particular position, identify which doc_span gives the most context around token
Helper function for check_is_max_context; see check_is_max_context for more details
"""
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return best_span_index
@staticmethod
def check_is_max_context(doc_spans, cur_span_index, position):
"""
Check if this is the 'max context' doc span for the token.
Because of the sliding window approach taken to scoring documents,
a single token can appear in multiple documents.
Example:
Doc: the man went to the store and bought a gallon of milk
Span A: the man went to the
Span B: to the store and bought
Span C: and bought a gallon of
...
Now the word 'bought' will have two scores from spans B and C. We only
want to consider the score with "maximum context", which we define as
the *minimum* of its left and right context (the *sum* of left and
right context will always be the same, of course).
In the example the maximum context for 'bought' would be span C since
it has 1 left context and 3 right context, while span B has 4 left context
and 0 right context.
Code adapted from the Google AI and HuggingFace implementations.
"""
best_span_index = QADataset.get_best_span_index(doc_spans, position)
return cur_span_index == best_span_index
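# Numeric sketch of the "max context" score used above, reusing the docstring's example:
# score = min(left context, right context) + 0.01 * span length, and the span with the
# highest score for a token is its max-context span. Spans and tokens are from the docstring.
from collections import namedtuple

ExampleSpan = namedtuple("ExampleSpan", ["start", "length"])
doc = "the man went to the store and bought a gallon of milk".split()
spans = [ExampleSpan(0, 5), ExampleSpan(3, 5), ExampleSpan(6, 5)]  # spans A, B, C
position = doc.index("bought")                                     # token index 7

for idx, span in enumerate(spans):
    end = span.start + span.length - 1
    if not (span.start <= position <= end):
        continue  # span A does not contain "bought"
    score = min(position - span.start, end - position) + 0.01 * span.length
    print(idx, score)  # span B (idx 1): 0.05, span C (idx 2): 1.05 -> C is max context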
@staticmethod
def get_docspans(all_doc_tokens, max_tokens_for_doc, doc_stride):
"""
Get docspans which are sliding window spans from a document
Args:
all_doc_tokens: list of all tokens in document
max_tokens_for_doc: maximum number of tokens in each doc span
doc_stride: stride size which sliding window moves with
Returns:
doc_spans: all possible doc_spans from document
"""
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
return doc_spans
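# Quick illustration of the sliding-window behaviour implemented in get_docspans above:
# with 10 context tokens, a 4-token window and a stride of 2, the overlapping spans come out
# as (start, length) pairs (0, 4), (2, 4), (4, 4), (6, 4). This is a stand-alone re-statement
# of the same loop with placeholder sizes, not a call into the class.
def sketch_docspans(num_tokens=10, max_tokens_for_doc=4, doc_stride=2):
    spans, start = [], 0
    while start < num_tokens:
        length = min(num_tokens - start, max_tokens_for_doc)
        spans.append((start, length))
        if start + length == num_tokens:
            break
        start += min(length, doc_stride)
    return spans

print(sketch_docspans())  # [(0, 4), (2, 4), (4, 4), (6, 4)]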
@staticmethod
def get_average_dist_to_tok_start_and_end(doc_span, tok_start_position, tok_end_position):
"""
Find distance between doc_span and answer_span to determine if doc_span is likely to be useful for the answer
Helper function to filter out doc_spans that may not be helpful
Args:
doc_span
tok_start_position: start position of answer in document
tok_end_position: end position of answer in document
Returns:
average distance of doc_span to answer
"""
center_answer = (tok_start_position + tok_end_position) // 2
dist_to_start = abs(doc_span.start - center_answer)
dist_to_end = abs(doc_span.start + doc_span.length - 1 - center_answer)
return (dist_to_start + dist_to_end) // 2
@staticmethod
def keep_relevant_docspans(doc_spans, tok_start_position, tok_end_position, mode):
"""
Filters out doc_spans, which might not be relevant to answering question,
which can be helpful when document is extremely long leading to many doc_spans with no answers
Args:
doc_spans: all possible doc_spans
tok_start_position: start position of answer in document
tok_end_position: end position of answer in document
mode:
all: do not filter
only_positive: only keep doc_spans containing the answer
limited_negative: only keep 10 doc_spans that are nearest to answer
Returns:
doc_spans: doc_spans after filtering
"""
if mode == 'all':
return doc_spans
elif mode == 'only_positive':
if tok_start_position in [-1, None] or tok_end_position in [-1, None]:
return []
else:
return [
doc_span
for doc_span in doc_spans
if tok_start_position >= doc_span.start
and tok_end_position <= doc_span.start + doc_span.length - 1
]
elif mode == 'limited_negative':
n_candidates = 10
if tok_start_position in [-1, None] or tok_end_position in [-1, None]:
pass
else:
doc_spans.sort(
key=lambda doc_span: QADataset.get_average_dist_to_tok_start_and_end(
doc_span, tok_start_position, tok_end_position
)
)
return doc_spans[:n_candidates]
else:
raise ValueError('mode can only be one of {all, only_positive, limited_negative}')
@staticmethod
def split_into_words(context_text):
"""
Split on whitespace so that different tokens
may be attributed to their original position.
ex: context_text = "hi yo"
char_to_word_offset = [0, 0, 0, 1, 1]
doc_tokens = ["hi", "yo"]
"""
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in context_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
return doc_tokens, char_to_word_offset
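# A small sketch showing how char_to_word_offset is used downstream (see
# get_doc_tokens_and_offset_from_context_id below): a character-level answer start is mapped
# to a word index by a simple lookup. This stand-alone version uses str.isspace() instead of
# the is_whitespace helper, and the string and position are illustrative.
def sketch_char_to_word(context_text):
    doc_tokens, char_to_word_offset = [], []
    prev_is_whitespace = True
    for c in context_text:
        if c.isspace():
            prev_is_whitespace = True
        else:
            if prev_is_whitespace:
                doc_tokens.append(c)
            else:
                doc_tokens[-1] += c
            prev_is_whitespace = False
        char_to_word_offset.append(len(doc_tokens) - 1)
    return doc_tokens, char_to_word_offset

tokens, offsets = sketch_char_to_word("the blue sky")
answer_start_char = 9                      # "sky" starts at character 9
print(tokens[offsets[answer_start_char]])  # "sky"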
@staticmethod
def get_doc_tokens_and_offset_from_context_id(
context_id, start_position_character, is_impossible, answer_text, context_id_to_context_text
):
start_position, end_position = 0, 0
context_text = context_id_to_context_text[context_id]
doc_tokens, char_to_word_offset = QADataset.split_into_words(context_text)
# Start and end positions only have a value during evaluation.
if start_position_character is not None and not is_impossible:
# start_position is index of word, end_position inclusive
start_position = char_to_word_offset[start_position_character]
end_position = char_to_word_offset[
min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)
]
return doc_tokens, char_to_word_offset, start_position, end_position, context_text
@staticmethod
def improve_answer_span(
doc_tokens: List[str], input_start: int, input_end: int, tokenizer: object, orig_answer_text: str,
):
""" Returns tokenized answer spans that better match the annotated answer """
tok_answer_text = " ".join(tokenizer.text_to_tokens(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start : (new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
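# Worked example of improve_answer_span: the annotated word span "(1895-1943)." is wider than
# the actual answer "1895", so the span is narrowed to the sub-span whose tokens exactly match
# the tokenized answer. A simple whitespace tokenizer stands in for the real subword tokenizer.
class _WhitespaceTokenizer:
    def text_to_tokens(self, text):
        return text.split()

doc_tokens = ["(", "1895", "-", "1943", ")", "."]   # tokens of the annotated span
start, end = QADataset.improve_answer_span(
    doc_tokens, input_start=0, input_end=5, tokenizer=_WhitespaceTokenizer(), orig_answer_text="1895",
)
print(start, end)  # 1 1 -> the span is tightened to just the "1895" token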
| NeMo-main | nemo/collections/nlp/data/question_answering/dataset/qa_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, List, Optional
@dataclass
class BERTQAInputExample(object):
""" A single set of features of a QA example for BERT-like model """
unique_id: int
input_ids: List[int]
input_mask: List[int]
segment_ids: List[int]
example_index: int = None
doc_span_index: int = None
tokens: List[str] = None
token_to_orig_map: Dict[int, int] = None
token_is_max_context: Dict[int, bool] = None
start_position: Optional[int] = None
end_position: Optional[int] = None
is_impossible: Optional[int] = None
| NeMo-main | nemo/collections/nlp/data/question_answering/input_example/qa_bert_input_example.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List
@dataclass
class QAExample(object):
""" A single training/test example for a QA dataset, as loaded from disk """
qas_id: str # The example's unique identifier
question_text: str
context_text: str
context_id: int
answer_text: str
start_position_character: int # The character position of the start of the answer, 0 indexed
title: str
answers: List[str] = None  # None by default; used during evaluation. Holds answers as well as their start positions
is_impossible: bool = False # False by default, set to True if the example has no possible answer
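# Hedged example of constructing the dataclass above for a single SQuAD-style record; all
# field values are made up for illustration and are not taken from a real dataset.
example = QAExample(
    qas_id="squad-000001",
    question_text="Who wrote the novel?",
    context_text="The novel was written by Alice Walker in 1982.",
    context_id=0,
    answer_text="Alice Walker",
    start_position_character=25,  # character index where "Alice Walker" starts in the context
    title="Example article",
    answers=["Alice Walker"],
    is_impossible=False,
)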
| NeMo-main | nemo/collections/nlp/data/question_answering/input_example/qa_input_example.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.question_answering.input_example.qa_bert_input_example import BERTQAInputExample
from nemo.collections.nlp.data.question_answering.input_example.qa_gpt_input_example import GPTQAInputExample
from nemo.collections.nlp.data.question_answering.input_example.qa_input_example import QAExample
from nemo.collections.nlp.data.question_answering.input_example.qa_s2s_input_example import S2SQAInputExample
| NeMo-main | nemo/collections/nlp/data/question_answering/input_example/__init__.py |