python_code | repo_name | file_path |
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch import nn
from fairseq.models import FairseqEncoder
class CTCDecoder(FairseqEncoder):
def __init__(self, dictionary, in_dim):
super().__init__(dictionary)
self.proj = nn.Linear(in_dim, len(dictionary))
def forward(self, src_tokens, src_lengths=None, **kwargs):
encoder_out = self.proj(src_tokens)
return {"encoder_out": encoder_out}
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_speech/modules/ctc_decoder.py |
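The `CTCDecoder` above is only a linear projection from encoder features to the vocabulary, registered as a `FairseqEncoder` so it can serve as an auxiliary CTC head. Below is a minimal standalone sketch (plain PyTorch, no fairseq dependency) of the same shape contract feeding a CTC loss; all sizes and the blank index are illustrative assumptions, not values from any fairseq config.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# Standalone sketch of what the CTCDecoder above does: project encoder features
# (T x B x C) to per-frame vocabulary logits and feed them to a CTC loss.
# All sizes and the blank index are illustrative assumptions.
T, B, C, vocab_size = 50, 4, 256, 1000
proj = nn.Linear(C, vocab_size)

encoder_features = torch.randn(T, B, C)        # stand-in for upstream encoder output
logits = proj(encoder_features)                # T x B x vocab ("encoder_out")
log_probs = F.log_softmax(logits, dim=-1)      # what the CTC loss expects

targets = torch.randint(1, vocab_size, (B, 20))          # dummy label sequences
input_lengths = torch.full((B,), T, dtype=torch.long)
target_lengths = torch.full((B,), 20, dtype=torch.long)
loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths, blank=0)
print(loss.item())
```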
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from fairseq.models import FairseqEncoder
from fairseq.modules import LayerNorm, TransformerEncoderLayer
class TransformerEncoderNoEmb(FairseqEncoder):
"""Transformer encoder without token embeddings."""
def __init__(self, args):
super().__init__(None)
self.layers = nn.ModuleList(
[TransformerEncoderLayer(args) for _ in range(args.encoder_layers)]
)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(args.encoder_embed_dim)
else:
self.layer_norm = None
def forward(self, x, encoder_padding_mask, return_all_hiddens=False):
encoder_states = []
for layer in self.layers:
x = layer(x, encoder_padding_mask)
if return_all_hiddens:
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask]
if encoder_padding_mask is not None and encoder_padding_mask.any()
else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = (
[]
if len(encoder_out["encoder_out"]) == 0
else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]]
)
new_encoder_padding_mask = (
[]
if len(encoder_out["encoder_padding_mask"]) == 0
else [
x.index_select(0, new_order)
for x in encoder_out["encoder_padding_mask"]
]
)
new_encoder_embedding = (
[]
if len(encoder_out["encoder_embedding"]) == 0
else [
x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]
]
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [], # B x T
"src_lengths": [], # B x 1
}
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_speech/modules/transformer_encoder.py |
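`reorder_encoder_out` above exists so beam search can duplicate and reorder batch entries: tensors stored as T x B x C are indexed along dim 1, masks stored as B x T along dim 0. A minimal sketch of that reordering, with made-up shapes and a hypothetical beam size:

```python
import torch

# Minimal sketch of the reordering that reorder_encoder_out above performs during
# beam search: T x B x C tensors are indexed on dim 1, B x T masks on dim 0.
# Shapes and the beam size are made up for illustration.
T, B, C, beam_size = 7, 2, 8, 3
encoder_out = torch.randn(T, B, C)
padding_mask = torch.zeros(B, T, dtype=torch.bool)

# e.g. expand each sentence into `beam_size` hypotheses: [0, 0, 0, 1, 1, 1]
new_order = torch.arange(B).repeat_interleave(beam_size)

reordered_out = encoder_out.index_select(1, new_order)    # T x (B*beam) x C
reordered_mask = padding_mask.index_select(0, new_order)  # (B*beam) x T
assert reordered_out.shape == (T, B * beam_size, C)
assert reordered_mask.shape == (B * beam_size, T)
```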
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from fairseq.models.transformer import Linear
class StackedEmbedding(nn.Embedding):
"""Embedding module that supports stacked units -> single embedding"""
def __init__(self, num_embeddings, embed_dim, padding_idx, num_stacked=1):
super().__init__(num_embeddings, embed_dim, padding_idx)
# follow transformer.Embedding
nn.init.normal_(self.weight, mean=0, std=embed_dim**-0.5)
nn.init.constant_(self.weight[padding_idx], 0)
self.offset = (
4 # skip <bos>, <pad>, <eos>, <unk>, specific to fairseq dictionary
)
self.vocab_size = num_embeddings - self.offset
self.num_stacked = num_stacked
if self.num_stacked > 1:
self.project_in_dim = Linear(embed_dim * num_stacked, embed_dim, bias=False)
def forward(self, input):
if self.num_stacked == 1:
return super().forward(input)
# expand input indices
mask = input >= self.offset
stacked_input = []
cum_input = input.new_zeros(input.shape)
for i in range(1, self.num_stacked + 1):
div = pow(self.vocab_size, i)
next_input = torch.remainder(input - self.offset - cum_input, div)
cum_input += next_input
next_input = torch.floor_divide(next_input, div // self.vocab_size)
stacked_input.append((next_input + self.offset) * mask + input * ~mask)
stacked_input = torch.stack(stacked_input[::-1], dim=2)
embed = super().forward(stacked_input).view(input.size(0), input.size(1), -1)
embed = self.project_in_dim(embed)
return embed
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_speech/modules/stacked_embedding.py |
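The remainder / floor-divide loop in `StackedEmbedding.forward` above unpacks one "stacked" token id into `num_stacked` unit ids written in base `vocab_size` and shifted by `offset`. Below is a pure-integer sketch of that packing and unpacking, under the assumption that stacked indices are formed exactly this way; the concrete numbers are illustrative.

```python
# Pure-integer sketch of the index arithmetic StackedEmbedding.forward unrolls:
# a stacked token packs num_stacked unit ids into one index in base vocab_size,
# shifted by offset to skip the special symbols. All concrete values are made up.
offset, vocab_size, num_stacked = 4, 100, 3
units = [7, 42, 99]  # least-significant unit first

# pack: offset + u0 + u1 * V + u2 * V^2
packed = offset + sum(u * vocab_size**i for i, u in enumerate(units))

# unpack, mirroring the remainder / floor-divide loop in forward()
decoded, cum = [], 0
for i in range(1, num_stacked + 1):
    div = vocab_size**i
    rem = (packed - offset - cum) % div
    cum += rem
    decoded.append(rem // (div // vocab_size))

assert decoded == units
```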
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional
from torch import Tensor
from fairseq.models.transformer import Linear
from fairseq.models.transformer.transformer_decoder_aug import AugTransformerDecoder
class AugTransformerUnitDecoder(AugTransformerDecoder):
"""Based on Transformer decoder, with support to decoding stacked units"""
def __init__(
self,
args,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
):
super().__init__(
args, dictionary, embed_tokens, no_encoder_attn, output_projection
)
self.n_frames_per_step = args.n_frames_per_step
self.out_proj_n_frames = (
Linear(
self.output_embed_dim,
self.output_embed_dim * self.n_frames_per_step,
bias=False,
)
if self.n_frames_per_step > 1
else None
)
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
encoder_out_aug: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention, should be of size T x B x C
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
encoder_out_aug=encoder_out_aug,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
bsz, seq_len, d = x.size()
if self.out_proj_n_frames:
x = self.out_proj_n_frames(x)
x = self.output_layer(x.view(bsz, seq_len, self.n_frames_per_step, d))
x = x.view(bsz, seq_len * self.n_frames_per_step, -1)
if (
incremental_state is None and self.n_frames_per_step > 1
): # teacher-forcing mode in training
x = x[
:, : -(self.n_frames_per_step - 1), :
] # remove extra frames after <eos>
return x, extra
def upgrade_state_dict_named(self, state_dict, name):
if self.n_frames_per_step > 1:
move_keys = [
(
f"{name}.project_in_dim.weight",
f"{name}.embed_tokens.project_in_dim.weight",
)
]
for from_k, to_k in move_keys:
if from_k in state_dict and to_k not in state_dict:
state_dict[to_k] = state_dict[from_k]
del state_dict[from_k]
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_speech/modules/transformer_decoder_aug.py |
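The `n_frames_per_step` handling in `AugTransformerUnitDecoder.forward` above projects each decoder state to `n` stacked frames, applies the output layer per frame, and flattens the result; in teacher-forcing mode the extra frames emitted after `<eos>` are dropped. A shape-only sketch with made-up dimensions:

```python
import torch
import torch.nn as nn

# Shape-only sketch of the n_frames_per_step handling above: project each decoder
# state to n stacked frames, apply the output layer per frame, then flatten.
# All dimensions are made-up illustrations.
B, T, D, V, n = 2, 5, 16, 103, 4
out_proj_n_frames = nn.Linear(D, D * n, bias=False)
output_layer = nn.Linear(D, V)

x = torch.randn(B, T, D)               # decoder features
x = out_proj_n_frames(x)               # B x T x (n*D)
x = output_layer(x.view(B, T, n, D))   # B x T x n x V
x = x.view(B, T * n, -1)               # B x (T*n) x V

# teacher-forcing branch: drop the n-1 extra frames emitted after <eos>
x = x[:, : -(n - 1), :]
assert x.shape == (B, T * n - (n - 1), V)
```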
EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_speech/modules/__init__.py |
|
#!/usr/bin/env python3
import math
import torch
import torch.nn as nn
from fairseq.data.data_utils import compute_mask_indices
from fairseq.models import FairseqEncoder
from fairseq.models.wav2vec import ConvFeatureExtractionModel
from fairseq.modules import GradMultiply, LayerNorm, SamePad, TransformerEncoderLayer
# Transformer encoder with waveform input, adapted from the wav2vec 2.0 encoder.
# It consumes raw wav input and uses a learned convolutional positional embedding,
# which makes its output easier to align with text input.
class SpeechWavTransformerEncoder(FairseqEncoder):
# extra parameters for the speech encoder besides those defined in TransformerModel
@staticmethod
def add_args(parser):
parser.add_argument(
"--dropout-input",
type=float,
metavar="D",
help="dropout to apply to the input (after feat extr)",
)
parser.add_argument(
"--dropout-features",
type=float,
metavar="D",
help="dropout to apply to the unmasked features (after feat extr)",
)
parser.add_argument(
"--speech-extractor-mode",
type=str,
default="layer_norm",
choices=["default", "layer_norm"],
help="feature extractor norm",
)
parser.add_argument(
"--speech-conv-bias",
action="store_true",
help="include bias in speech conv encoder",
)
parser.add_argument(
"--conv-feature-layers",
default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]",
help="string describing convolutional feature extraction layers in form of a python list that contains [(dim, kernel_size, stride), ...]",
)
parser.add_argument(
"--speech-mask-length",
type=int,
help="repeat the mask indices multiple times",
)
parser.add_argument(
"--speech-mask-prob",
type=float,
help="probability of replacing a token with mask",
)
parser.add_argument(
"--speech-mask-selection",
type=str,
choices=["static", "uniform", "normal", "poisson"],
help="how to choose masks",
)
parser.add_argument(
"--speech-mask-other",
type=float,
help="stdev of the mask length in case of 'normal' selection strategy",
)
parser.add_argument(
"--speech-no-mask-overlap",
action="store_true",
help="whether to allow masks to overlap",
)
parser.add_argument(
"--speech-mask-min-space",
type=int,
help="min space between spans (if no overlap is enabled)",
)
parser.add_argument(
"--speech-mask-channel-length",
type=int,
help="repeat the mask indices multiple times",
)
parser.add_argument(
"--speech-mask-channel-prob",
type=float,
help="probability of replacing a token with mask",
)
parser.add_argument(
"--speech-mask-channel-selection",
type=str,
choices=["static", "uniform", "normal", "poisson"],
help="how to choose masks",
)
parser.add_argument(
"--speech-mask-channel-other",
type=float,
help="stdev of the mask length in case of 'normal' selection strategy",
)
parser.add_argument(
"--speech-no-mask-channel-overlap",
action="store_true",
help="whether to allow masks to overlap",
)
parser.add_argument(
"--no-scale-feature",
action="store_true",
help="no scale for the calculated features",
)
parser.add_argument(
"--speech-mask-channel-min-space",
type=int,
help="min space between spans (if no overlap is enabled)",
)
parser.add_argument(
"--feature-grad-mult",
type=float,
help="reset feature grad mult in wav2vec 2.0 to this",
)
# positional embeddings
parser.add_argument(
"--conv-pos",
type=int,
default=128,
help="number of filters for convolutional positional embeddings",
)
parser.add_argument(
"--conv-pos-groups",
type=int,
default=16,
help="number of groups for convolutional positional embedding",
)
# model configuration
parser.add_argument(
"--speech-encoder-layers",
type=int,
help="number of speech encoder layers",
)
parser.add_argument(
"--text-encoder-layers",
type=int,
help="number of text encoder layers",
)
def __init__(self, args, alway_mask=False):
super().__init__(args)
self.args = args
self.dropout = args.dropout
self.embedding_dim = args.encoder_embed_dim
self.feat_scale = math.sqrt(args.encoder_embed_dim)
if args.no_scale_feature:
self.feat_scale = 1.0
subsample = ConvFeatureExtractionModel(
conv_layers=eval(args.conv_feature_layers),
dropout=0.0,
mode=args.speech_extractor_mode, # default, layer_norm
conv_bias=args.speech_conv_bias,
)
self.feature_enc_layers = eval(args.conv_feature_layers)
self.subsample = subsample
self.feat_proj = (
nn.Linear(self.feature_enc_layers[-1][0], self.embedding_dim)
if self.feature_enc_layers[-1][0] != self.embedding_dim
else None
)
self.feat_layer_norm = LayerNorm(self.feature_enc_layers[-1][0])
self.embed_positions = nn.Conv1d(
self.embedding_dim,
self.embedding_dim,
kernel_size=args.conv_pos,
padding=args.conv_pos // 2,
groups=args.conv_pos_groups,
)
std = math.sqrt(4 / (args.conv_pos * self.embedding_dim))
nn.init.normal_(self.embed_positions.weight, mean=0, std=std)
nn.init.constant_(self.embed_positions.bias, 0)
self.embed_positions = nn.utils.weight_norm(
self.embed_positions, name="weight", dim=2
)
self.embed_positions = nn.Sequential(
self.embed_positions, SamePad(args.conv_pos), nn.GELU()
)
self.mask_prob = args.speech_mask_prob
self.mask_selection = args.speech_mask_selection
self.mask_other = args.speech_mask_other
self.mask_length = args.speech_mask_length
self.no_mask_overlap = args.speech_no_mask_overlap
self.mask_min_space = args.speech_mask_min_space
self.mask_channel_prob = args.speech_mask_channel_prob
self.mask_channel_selection = args.speech_mask_channel_selection
self.mask_channel_other = args.speech_mask_channel_other
self.mask_channel_length = args.speech_mask_channel_length
self.no_mask_channel_overlap = args.speech_no_mask_channel_overlap
self.mask_channel_min_space = args.speech_mask_channel_min_space
self.dropout_input = nn.Dropout(args.dropout_input)
self.dropout_features = nn.Dropout(args.dropout_features)
self.feature_grad_mult = args.feature_grad_mult
self.mask_emb = nn.Parameter(
torch.FloatTensor(args.encoder_embed_dim).uniform_()
)
self.layers = nn.ModuleList(
[TransformerEncoderLayer(args) for _ in range(args.encoder_layers)]
)
self.layer_norm = LayerNorm(args.encoder_embed_dim)
self.normalize_before = args.encoder_normalize_before
self.alway_mask = alway_mask
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
return torch.floor((input_length - kernel_size) / stride + 1)
for i in range(len(self.feature_enc_layers)):
input_lengths = _conv_out_length(
input_lengths,
self.feature_enc_layers[i][1],
self.feature_enc_layers[i][2],
)
return input_lengths.to(torch.long)
def apply_mask(self, x, padding_mask):
B, T, C = x.shape
if self.mask_prob > 0:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x[mask_indices] = self.mask_emb
else:
mask_indices = None
if self.mask_channel_prob > 0:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
return x, mask_indices
def forward(
self,
src_tokens,
src_lengths,
return_all_hiddens=False,
padding_mask=None,
features_only=True,
):
mask = self.training or self.alway_mask
if self.feature_grad_mult > 0 and self.training:
features = self.subsample(src_tokens)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = self.subsample(src_tokens)
features = features.transpose(1, 2)
features = self.feat_layer_norm(features)
if self.feat_proj is not None:
features = self.feat_proj(features)
if padding_mask is not None:
input_lengths = (1 - padding_mask.long()).sum(-1)
else:
input_lengths = src_lengths
# apply conv formula to get real output_lengths
output_lengths = self._get_feat_extract_output_lengths(input_lengths)
padding_mask = torch.zeros(
features.shape[:2], dtype=features.dtype, device=features.device
)
# these two operations make sure that all values
# before the output length indices are attended to
padding_mask[
(
torch.arange(padding_mask.shape[0], device=padding_mask.device),
output_lengths - 1,
)
] = 1
padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
features = self.feat_scale * features if self.feat_scale != 1.0 else features
unmasked_features = features.clone()
features = self.dropout_input(features)
unmasked_features = self.dropout_features(unmasked_features)
if mask:
x, mask_indices = self.apply_mask(features, padding_mask)
else:
x = features
mask_indices = None
def cal_transformer_layers(x, encoder_padding_mask, return_all_hiddens=False):
# x: B x T x C
positions = self.embed_positions(x.transpose(1, 2)).transpose(1, 2)
x = x + positions
if not self.normalize_before:
x = self.layer_norm(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states = []
for layer in self.layers:
x = layer(x, encoder_padding_mask)
if return_all_hiddens:
encoder_states.append(x)
if self.normalize_before:
x = self.layer_norm(x)
return x, encoder_states
x, encoder_states = cal_transformer_layers(x, padding_mask, return_all_hiddens)
if features_only:
return {
"encoder_out": [x], # [T x B x C]
"encoder_padding_mask": [padding_mask]
if padding_mask is not None
else [], # B x T
"encoder_embedding": [], #
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
"mask_indices": [mask_indices],
}
x_unmasked = x
if self.mask_prob > 0 or self.mask_channel_prob > 0:
x_unmasked, _ = cal_transformer_layers(unmasked_features, padding_mask)
return {
"encoder_out": [x], # [T x B x C]
"encoder_unmasked_out": [x_unmasked], # [T x B x C]
"encoder_padding_mask": [padding_mask]
if padding_mask is not None
else [], # B x T
"encoder_embedding": [], #
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
"mask_indices": [mask_indices] if mask_indices is not None else [], # B X T
}
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = (
[]
if len(encoder_out["encoder_out"]) == 0
else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]]
)
new_encoder_padding_mask = (
[]
if len(encoder_out["encoder_padding_mask"]) == 0
else [
x.index_select(0, new_order)
for x in encoder_out["encoder_padding_mask"]
]
)
new_encoder_embedding = (
[]
if len(encoder_out["encoder_embedding"]) == 0
else [
x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]
]
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [], # B x T
"src_lengths": [], # B x 1
}
class StackedSpeechWavTransformerEncoder(FairseqEncoder):
def __init__(self, speech_enc, text_enc_layers, text_layer_norm):
super().__init__(None)
self.speech_encoder = speech_enc
self.text_encoder_layers = text_enc_layers
self.final_layer_norm = text_layer_norm
def forward(
self,
src_tokens,
src_lengths=None,
return_all_hiddens=False,
padding_mask=None,
features_only=True,
):
out = self.speech_encoder.forward(
src_tokens,
src_lengths,
return_all_hiddens,
padding_mask=padding_mask,
features_only=features_only,
)
x = out["encoder_out"][0]
encoder_padding_mask = None
if len(out["encoder_padding_mask"]) > 0:
encoder_padding_mask = out["encoder_padding_mask"][0]
def cal_text_layers(x, padding_mask, return_all_hiddens=False):
encoder_states = []
for layer in self.text_encoder_layers:
x = layer(x, padding_mask)
if return_all_hiddens:
encoder_states.append(x)
if self.final_layer_norm is not None:
x = self.final_layer_norm(x)
return x, encoder_states
x, encoder_states = cal_text_layers(x, encoder_padding_mask, return_all_hiddens)
if features_only:
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask]
if encoder_padding_mask is not None
else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
x_u = out["encoder_unmasked_out"][0]
x_u, _ = cal_text_layers(x_u, encoder_padding_mask)
return {
"encoder_out": [x], # [T x B x C]
"encoder_unmasked_out": [x_u], # [T x B x C]
"encoder_padding_mask": [encoder_padding_mask]
if encoder_padding_mask is not None
else [], # B x T
"encoder_embedding": [], #
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
"mask_indices": out["mask_indices"], # B X T
}
def reorder_encoder_out(self, encoder_out, new_order):
return self.speech_encoder.reorder_encoder_out(encoder_out, new_order)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_text/s2t_wav_transformer.py |
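`SpeechWavTransformerEncoder._get_feat_extract_output_lengths` above applies floor((L - kernel) / stride) + 1 once per convolutional layer. The sketch below reproduces that arithmetic for the default `--conv-feature-layers` string from `add_args`; the input lengths are arbitrary sample counts, and the "~50 frames per second" figure is only an approximation for 16 kHz audio.

```python
import torch

# Sketch of _get_feat_extract_output_lengths above: each conv layer maps a length
# L to floor((L - kernel) / stride) + 1. The layer spec is the default
# --conv-feature-layers string from add_args; input lengths are arbitrary.
conv_layers = eval("[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]")

def feat_extract_output_lengths(input_lengths: torch.Tensor) -> torch.Tensor:
    lengths = input_lengths.float()
    for _, kernel_size, stride in conv_layers:
        lengths = torch.floor((lengths - kernel_size) / stride + 1)
    return lengths.to(torch.long)

wav_lengths = torch.tensor([16000, 32000])       # e.g. 1 s and 2 s of 16 kHz audio
print(feat_extract_output_lengths(wav_lengths))  # tensor([49, 99]), ~50 frames/s
```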
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from fairseq import checkpoint_utils, utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.speech_to_text.modules.convolution import infer_conv_output_dim
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerEncoderLayer
logger = logging.getLogger(__name__)
@register_model("convtransformer")
class ConvTransformerModel(FairseqEncoderDecoderModel):
"""
Transformer-based Speech translation model from ESPNet-ST
https://arxiv.org/abs/2004.10234
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--decoder-output-dim",
type=int,
metavar="N",
help="decoder output dimension (extra linear layer if different from decoder embed dim)",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--load-pretrained-decoder-from",
type=str,
metavar="STR",
help="model to take decoder weights from (for initialization)",
)
parser.add_argument(
"--conv-out-channels",
type=int,
metavar="INT",
help="the number of output channels of conv layer",
)
@classmethod
def build_encoder(cls, args):
encoder = ConvTransformerEncoder(args)
if getattr(args, "load_pretrained_encoder_from", None) is not None:
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=args.load_pretrained_encoder_from
)
return encoder
@classmethod
def build_decoder(cls, args, task, embed_tokens):
decoder = TransformerDecoderNoExtra(args, task.target_dictionary, embed_tokens)
if getattr(args, "load_pretrained_decoder_from", None) is not None:
decoder = checkpoint_utils.load_pretrained_component_from_model(
component=decoder, checkpoint=args.load_pretrained_decoder_from
)
return decoder
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
return Embedding(num_embeddings, embed_dim, padding_idx)
decoder_embed_tokens = build_embedding(
task.target_dictionary, args.decoder_embed_dim
)
encoder = cls.build_encoder(args)
decoder = cls.build_decoder(args, task, decoder_embed_tokens)
return cls(encoder, decoder)
@staticmethod
@torch.jit.unused
def set_batch_first(lprobs):
lprobs.batch_first = True
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
if self.training:
self.set_batch_first(lprobs)
return lprobs
def output_layout(self):
return "BTD"
"""
The forward method inherited from the base class has a **kwargs argument in
its input, which is not supported in torchscript. This method overwrites the forward
method definition without **kwargs.
"""
def forward(self, src_tokens, src_lengths, prev_output_tokens):
encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths)
decoder_out = self.decoder(
prev_output_tokens=prev_output_tokens, encoder_out=encoder_out
)
return decoder_out
class ConvTransformerEncoder(FairseqEncoder):
"""Conv + Transformer encoder"""
def __init__(self, args):
"""Construct an Encoder object."""
super().__init__(None)
self.dropout = args.dropout
self.embed_scale = (
1.0 if args.no_scale_embedding else math.sqrt(args.encoder_embed_dim)
)
self.padding_idx = 1
self.in_channels = 1
self.input_dim = args.input_feat_per_channel
self.conv = torch.nn.Sequential(
torch.nn.Conv2d(1, args.conv_out_channels, 3, stride=2, padding=3 // 2),
torch.nn.ReLU(),
torch.nn.Conv2d(
args.conv_out_channels,
args.conv_out_channels,
3,
stride=2,
padding=3 // 2,
),
torch.nn.ReLU(),
)
transformer_input_dim = infer_conv_output_dim(
self.in_channels, self.input_dim, args.conv_out_channels
)
self.out = torch.nn.Linear(transformer_input_dim, args.encoder_embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_source_positions,
args.encoder_embed_dim,
self.padding_idx,
learned=False,
)
self.transformer_layers = nn.ModuleList([])
self.transformer_layers.extend(
[TransformerEncoderLayer(args) for i in range(args.encoder_layers)]
)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(args.encoder_embed_dim)
else:
self.layer_norm = None
def pooling_ratio(self):
return 4
def forward(self, src_tokens, src_lengths):
"""Encode input sequence.
:param torch.Tensor xs: input tensor
:param torch.Tensor masks: input mask
:return: position embedded tensor and mask
:rtype Tuple[torch.Tensor, torch.Tensor]:
"""
bsz, max_seq_len, _ = src_tokens.size()
x = (
src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
.transpose(1, 2)
.contiguous()
)
x = self.conv(x)
bsz, _, output_seq_len, _ = x.size()
x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1)
x = self.out(x)
x = self.embed_scale * x
subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5)
input_len_0 = (src_lengths.float() / subsampling_factor).ceil().long()
input_len_1 = x.size(0) * torch.ones([src_lengths.size(0)]).long().to(
input_len_0.device
)
input_lengths = torch.min(input_len_0, input_len_1)
encoder_padding_mask = lengths_to_padding_mask(input_lengths)
positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
for layer in self.transformer_layers:
x = layer(x, encoder_padding_mask)
if not encoder_padding_mask.any():
maybe_encoder_padding_mask = None
else:
maybe_encoder_padding_mask = encoder_padding_mask
return {
"encoder_out": [x],
"encoder_padding_mask": [maybe_encoder_padding_mask]
if maybe_encoder_padding_mask is not None
else [],
"encoder_embedding": [],
"encoder_states": [],
"src_tokens": [],
"src_lengths": [],
}
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
if len(encoder_out["encoder_padding_mask"]) == 0:
new_encoder_padding_mask = []
else:
new_encoder_padding_mask = [
(encoder_out["encoder_padding_mask"][0]).index_select(0, new_order)
]
if len(encoder_out["encoder_embedding"]) == 0:
new_encoder_embedding = []
else:
new_encoder_embedding = [
(encoder_out["encoder_embedding"][0]).index_select(0, new_order)
]
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out,
"encoder_padding_mask": new_encoder_padding_mask,
"encoder_embedding": new_encoder_embedding,
"encoder_states": encoder_states,
"src_tokens": [],
"src_lengths": [],
}
class TransformerDecoderNoExtra(TransformerDecoder):
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
# call scriptable method from parent class
x, _ = self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
return x, None
@register_model_architecture(model_name="convtransformer", arch_name="convtransformer")
def base_architecture(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.max_source_positions = getattr(args, "max_source_positions", 3000)
args.max_target_positions = getattr(args, "max_target_positions", 1024)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.conv_out_channels = getattr(args, "conv_out_channels", args.encoder_embed_dim)
@register_model_architecture("convtransformer", "convtransformer_espnet")
def convtransformer_espnet(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_text/convtransformer.py |
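In `ConvTransformerEncoder.forward` above, two stride-2 convolutions (kernel 3, padding 1) shrink the time axis by roughly a factor of 4 (hence `pooling_ratio()` returning 4), and per-utterance lengths are rescaled by a rounded subsampling factor and clamped to the actual conv output length. A small sketch of that bookkeeping with made-up lengths:

```python
import torch

# Sketch of the length bookkeeping in ConvTransformerEncoder.forward above: two
# stride-2 convolutions (kernel 3, padding 1) shrink the time axis by ~4x, and
# per-utterance lengths are rescaled and clamped. Lengths are made up.
def conv_out_len(length, kernel=3, stride=2, padding=1):
    return (length + 2 * padding - kernel) // stride + 1

max_seq_len = 563
output_seq_len = conv_out_len(conv_out_len(max_seq_len))      # after both convs
subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5)

src_lengths = torch.tensor([563, 420, 97])
input_len_0 = (src_lengths.float() / subsampling_factor).ceil().long()
input_len_1 = torch.full_like(input_len_0, output_seq_len)
input_lengths = torch.min(input_len_0, input_len_1)           # clamp to conv output
print(output_seq_len, subsampling_factor, input_lengths)      # 141 4 tensor([141, 105,  25])
```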
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from fairseq import checkpoint_utils, utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.speech_to_speech.modules.ctc_decoder import CTCDecoder
from fairseq.models.speech_to_text.hub_interface import S2THubInterface
from fairseq.models.transformer import (
Embedding,
TransformerDecoder,
TransformerModelBase,
)
from fairseq.models.wav2vec import Wav2VecEncoder
from fairseq.modules.layer_norm import LayerNorm
logger = logging.getLogger(__name__)
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
return Embedding(num_embeddings, embed_dim, padding_idx)
class Conv1dAdaptor(nn.Module):
def __init__(
self,
in_dim,
out_dim,
n_layers=3,
kernel_size=3,
stride=2,
layerdrop=0.0,
layernorm=False,
proj=False,
):
super().__init__()
self.proj, self.proj_ln = None, None
self.post_proj, self.post_proj_ln = None, None
if proj:
self.proj = nn.Sequential(
nn.Linear(in_dim, in_dim * 4), nn.ReLU(), nn.Linear(in_dim * 4, in_dim)
)
self.proj_ln = LayerNorm(in_dim)
self.post_proj = nn.Sequential(
nn.Linear(out_dim, out_dim * 4),
nn.ReLU(),
nn.Linear(out_dim * 4, out_dim),
)
self.post_proj_ln = LayerNorm(out_dim)
self.layers = nn.ModuleList(
nn.Conv1d(
in_dim if i == 0 else out_dim,
out_dim * 2,
kernel_size,
stride=stride,
padding=kernel_size // 2,
)
for i in range(n_layers)
)
self.stride = stride
self.layerdrop = layerdrop
self.layernorm = LayerNorm(in_dim) if layernorm else None
@classmethod
def add_args(cls, parser):
parser.add_argument("--adaptor-n-layers", type=int)
parser.add_argument("--adaptor-kernel-size", type=int)
parser.add_argument("--adaptor-stride", type=int)
parser.add_argument("--adaptor-layerdrop", type=float)
parser.add_argument("--adaptor-layernorm", action="store_true")
parser.add_argument("--adaptor-proj", action="store_true")
def forward(self, x, padding_mask: Optional[torch.Tensor]):
if self.layernorm is not None:
x = self.layernorm(x)
if self.proj is not None:
x = x + 0.5 * self.proj(x)
x = self.proj_ln(x)
if padding_mask is not None:
x = utils.index_put(x, padding_mask.T, 0)
# T x B x C -> B x C x T
x = x.transpose(0, 1).transpose(1, 2)
out_lens = None
if padding_mask is not None:
out_lens = (~padding_mask).sum(1).float()
for layer in self.layers:
layerdrop_prob = np.random.random()
if not self.training or (layerdrop_prob > self.layerdrop):
x = nn.functional.glu(layer(x), dim=1)
if padding_mask is not None:
out_lens = ((out_lens - 1) / self.stride + 1).floor()
# B x C x T -> T x B x C
x = x.transpose(1, 2).transpose(0, 1)
if self.post_proj is not None:
x = x + 0.5 * self.post_proj(x)
x = self.post_proj_ln(x)
out_padding_mask = None
if padding_mask is not None:
out_padding_mask = lengths_to_padding_mask(out_lens.long())
x = utils.index_put(x, out_padding_mask.T, 0)
return x, out_padding_mask
def add_wav2vec_asr_args(parser):
parser.add_argument("--w2v-path", help="path to wav2vec 2.0 model")
parser.add_argument(
"--no-pretrained-weights",
action="store_true",
help="if true, does not load pretrained weights",
)
parser.add_argument(
"--dropout-input",
type=float,
metavar="D",
help="dropout to apply to the input (after feat extr)",
)
parser.add_argument(
"--final-dropout",
type=float,
metavar="D",
help="dropout after transformer and before final projection",
)
parser.add_argument(
"--apply-mask", action="store_true", help="apply masking during fine-tuning"
)
parser.add_argument(
"--dropout",
type=float,
metavar="D",
help="dropout probability inside wav2vec 2.0 model",
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights inside wav2vec 2.0 model",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN inside wav2vec 2.0 model",
)
parser.add_argument(
"--mask-length", type=int, help="repeat the mask indices multiple times"
)
parser.add_argument(
"--mask-prob", type=float, help="probability of replacing a token with mask"
)
parser.add_argument(
"--mask-selection",
type=str,
choices=["static", "uniform", "normal", "poisson"],
help="how to choose masks",
)
parser.add_argument(
"--mask-other",
type=float,
help="stdev of the mask length in case of 'normal' selection strategy",
)
parser.add_argument(
"--no-mask-overlap",
action="store_true",
help="whether to allow masks to overlap",
)
parser.add_argument(
"--mask-channel-length", type=int, help="repeat the mask indices multiple times"
)
parser.add_argument(
"--mask-channel-prob",
type=float,
help="probability of replacing a token with mask",
)
parser.add_argument(
"--mask-channel-selection",
type=str,
choices=["static", "uniform", "normal", "poisson"],
help="how to choose masks",
)
parser.add_argument(
"--mask-channel-other",
type=float,
help="stdev of the mask length in case of 'normal' selection strategy",
)
parser.add_argument(
"--no-mask-channel-overlap",
action="store_true",
help="whether to allow masks to overlap",
)
parser.add_argument(
"--freeze-finetune-updates",
type=int,
metavar="N",
help="dont finetune wav2vec for this many updates",
)
parser.add_argument(
"--feature-grad-mult",
type=float,
metavar="D",
help="reset feature grad mult in wav2vec 2.0 to this",
)
parser.add_argument(
"--layerdrop",
type=float,
metavar="D",
help="probability of dropping a layer in wav2vec 2.0",
)
parser.add_argument(
"--max-positions",
type=int,
metavar="N",
help="Max input positions to be used in the conformer encoder in wav2vec 2.0",
)
parser.add_argument("--encoder-proj", action="store_true")
parser.add_argument("--w2v-args", default=None)
parser.add_argument(
"--remove-weight-norm",
action="store_true",
help="if set, then the weight-norm (in one pos_conv layer) is removed from the model",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension to be used when w2v_path is None and no encoder_proj is set",
)
def need_finetuning(ft_params, param_name):
if ft_params == "all":
return True
ft_params_list = ft_params.split(",")
for ft_param in ft_params_list:
if ft_param in param_name:
return True
return False
class Wav2VecEncoderWithAdaptor(FairseqEncoder):
def build_adaptor(self, args):
adaptor = None
if args.adaptor_n_layers > 0:
adaptor = Conv1dAdaptor(
args.decoder_embed_dim,
args.decoder_embed_dim,
n_layers=args.adaptor_n_layers,
kernel_size=args.adaptor_kernel_size,
stride=args.adaptor_stride,
layerdrop=args.adaptor_layerdrop,
layernorm=args.adaptor_layernorm,
proj=args.adaptor_proj,
)
return adaptor
def __init__(self, args):
super().__init__(None)
self.w2v_encoder = Wav2VecEncoder(args)
self.is_v0_arch = not args.adaptor_proj
self.w2v_proj_ln = None
if not self.is_v0_arch and self.w2v_encoder.proj is not None:
self.w2v_proj_ln = LayerNorm(args.decoder_embed_dim)
self.adaptor = self.build_adaptor(args)
self.num_updates = 0
self.freezing_updates = args.w2v_freezing_updates
self.finetuning_params = args.finetune_w2v_params
for k, p in self.w2v_encoder.w2v_model.named_parameters():
p.requires_grad = need_finetuning(self.finetuning_params, k)
@classmethod
def add_args(cls, parser):
"""Add model-specific arguments to the parser."""
add_wav2vec_asr_args(parser)
parser.add_argument(
"--normalize",
action="store_true",
help="if set, normalizes input to have 0 mean and unit variance",
)
parser.add_argument(
"--finetune-w2v-params",
type=str,
metavar="STR",
help="comma-separated param strings to finetune.",
)
parser.add_argument("--w2v-freezing-updates", type=int)
parser.add_argument("--load-pretrained-encoder-from", type=str, metavar="STR")
Conv1dAdaptor.add_args(parser)
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(self, src_tokens, src_lengths=None, **kwargs):
if (
self.freezing_updates is not None
and self.num_updates > self.freezing_updates
):
for p in self.w2v_encoder.w2v_model.parameters():
p.requires_grad = True
padding_mask = lengths_to_padding_mask(src_lengths)
out = self.w2v_encoder.forward(src_tokens, padding_mask, tbc=True)
x, padding_mask = out["encoder_out"], out["padding_mask"]
if self.w2v_proj_ln is not None:
x = self.w2v_proj_ln(x)
if self.adaptor is not None:
x, padding_mask = self.adaptor(x, padding_mask)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": []
if padding_mask is None
else [padding_mask], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": [], # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = (
[]
if len(encoder_out["encoder_out"]) == 0
else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]]
)
new_encoder_padding_mask = (
[]
if len(encoder_out["encoder_padding_mask"]) == 0
else [
x.index_select(0, new_order)
for x in encoder_out["encoder_padding_mask"]
]
)
new_encoder_embedding = (
[]
if len(encoder_out["encoder_embedding"]) == 0
else [
x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]
]
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [], # B x T
"src_lengths": [], # B x 1
}
def add_decoder_args(parser):
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--decoder-dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--decoder-attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--decoder-activation-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension"
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--layernorm-embedding", action="store_true", help="add layernorm to embedding"
)
parser.add_argument(
"--decoder-layerdrop",
type=float,
metavar="D",
help="layerdrop probability for decoder",
)
parser.add_argument(
"--decoder-learned-pos",
action="store_true",
help="learn positional embedding in decoder",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-decoder-from",
type=str,
metavar="STR",
help="model to take decoder weights from (for initialization)",
)
parser.add_argument(
"--finetune-decoder-params",
type=str,
metavar="STR",
help="comma-separated param strings to finetune.",
)
def remove_weight_norm_from_model(model):
from functools import reduce
layers_with_wn = []
for param_name, _ in model.named_parameters():
if param_name.endswith("_g"):
# retrieve the module with this param_name
module_names = param_name.split(".")[
:-1
] # exclude the actual parameter name
wn_module = reduce(getattr, module_names, model)
layers_with_wn.append(wn_module)
for wn_module in layers_with_wn:
torch.nn.utils.remove_weight_norm(wn_module)
logger.warning(f"Weight norm removed from module with {wn_module}\n")
@register_model("xm_transformer")
class XMTransformerModel(FairseqEncoderDecoderModel):
@classmethod
def hub_models(cls):
base_url = "http://dl.fbaipublicfiles.com/fairseq/s2t"
model_ids = [
"xm_transformer_600m-es_en-multi_domain",
"xm_transformer_600m-ru_en-multi_domain",
"xm_transformer_600m-fr_en-multi_domain",
"xm_transformer_600m-en_es-multi_domain",
"xm_transformer_600m-en_ru-multi_domain",
"xm_transformer_600m-en_fr-multi_domain",
"xm_transformer_600m-en_zh-multi_domain",
"xm_transformer_600m-en_ar-multi_domain",
"xm_transformer_600m-en_tr-multi_domain",
"xm_transformer_600m-en_vi-multi_domain",
"xm_transformer-21_en-xls_r_300m",
"xm_transformer-en_15-xls_r_300m",
"xm_transformer-21_en-xls_r_1b",
"xm_transformer-en_15-xls_r_1b",
"xm_transformer-21_en-xls_r_2b",
"xm_transformer-en_15-xls_r_2b",
"xm_transformer-22_16-xls_r_2b",
"xm_transformer_s2ut_800m-es-en-st-asr-bt_h1_2022",
"xm_transformer_s2ut_800m-en-es-st_plus_asr",
"xm_transformer_s2ut_800m-hk-en-h1_2022",
"xm_transformer_s2ut_800m-en-hk-h1_2022",
]
return {i: f"{base_url}/{i}.tar.gz" for i in model_ids}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
config_yaml="config.yaml",
task="speech_to_text",
generation_args=None,
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
config_yaml=config_yaml,
task=task,
generation_args=generation_args,
**kwargs,
)
return S2THubInterface(x["args"], x["task"], x["models"][0])
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def add_args(cls, parser):
"""Add model-specific arguments to the parser."""
Wav2VecEncoderWithAdaptor.add_args(parser)
add_decoder_args(parser)
parser.add_argument("--checkpoint-activations", action="store_true")
parser.add_argument("--offload-activations", action="store_true")
parser.add_argument("--min-params-to-wrap", type=int, metavar="N")
@classmethod
def maybe_load_pretrained(cls, component, checkpoint: Optional[str] = None):
if checkpoint is None:
return component
_load = checkpoint_utils.load_pretrained_component_from_model
try:
return _load(component, checkpoint)
except RuntimeError as e:
logger.warning(e)
return _load(component, checkpoint, strict=False)
@classmethod
def build_encoder(cls, args):
_args = copy.deepcopy(args)
if not args.adaptor_proj and not args.encoder_proj: # V0 arch
if args.w2v_path:
state = checkpoint_utils.load_checkpoint_to_cpu(args.w2v_path)
if state.get("cfg") is not None:
encoder_embed_dim = state["cfg"]._content["model"][
"encoder_embed_dim"
]
elif state.get("args") is not None:
encoder_embed_dim = state["args"].encoder_embed_dim
else:
raise ValueError(f"Invalid config in {args.w2v_path}")
_args.decoder_embed_dim = encoder_embed_dim
del state
else:
_args.decoder_embed_dim = args.encoder_embed_dim
encoder = Wav2VecEncoderWithAdaptor(_args)
encoder = cls.maybe_load_pretrained(
encoder, getattr(args, "load_pretrained_encoder_from", None)
)
if args.remove_weight_norm:
# remove the wn for EMA usage
logger.warning("Removing weight norm from wav2vec encoder")
remove_weight_norm_from_model(encoder)
return encoder
@classmethod
def get_decoder_args_from_checkpoint(cls, ckpt_args):
assert "model" in ckpt_args, "Model args not found in checkpoint cfg!"
decoder_args = {}
for k, v in ckpt_args["model"].__dict__.items():
if "decoder" in k:
decoder_args[k] = v
return decoder_args
@classmethod
def override_decoder_args(cls, cli_args, decoder_args_dict):
for k, v in decoder_args_dict.items():
if v != getattr(cli_args, k, None):
logger.warning(
f"Overriding decoder arg {k}: from {getattr(cli_args, k, None)} to {v}"
)
setattr(cli_args, k, v)
return cli_args
@classmethod
def build_decoder(cls, args, task, embed_tokens):
_args = copy.deepcopy(args)
if args.adaptor_proj or args.encoder_proj: # not V0 arch
_args.encoder_embed_dim = _args.decoder_embed_dim
_args.dropout = args.decoder_dropout
_args.attention_dropout = args.decoder_attention_dropout
_args.activation_dropout = args.decoder_activation_dropout
_args.layerdrop = _args.decoder_layerdrop
decoder = TransformerDecoder(_args, task.target_dictionary, embed_tokens)
decoder = cls.maybe_load_pretrained(
decoder, getattr(args, "load_pretrained_decoder_from", None)
)
for k, p in decoder.named_parameters():
p.requires_grad = need_finetuning(args.finetune_decoder_params, k)
return decoder
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if getattr(args, "load_pretrained_decoder_from", None) is not None:
ckpt = torch.load(getattr(args, "load_pretrained_decoder_from", None))
decoder_args_dict = cls.get_decoder_args_from_checkpoint(ckpt["cfg"])
args = cls.override_decoder_args(args, decoder_args_dict)
decoder_embed_tokens = build_embedding(
task.target_dictionary, args.decoder_embed_dim
)
encoder = cls.build_encoder(args)
decoder = cls.build_decoder(args, task, decoder_embed_tokens)
base_model = cls(encoder, decoder)
# set up multitask decoders
base_model.multitask_decoders = {}
for i, (task_name, task_obj) in enumerate(task.multitask_tasks.items()):
# dummy auxiliary decoder
if task_obj.args.get_loss_weight(0) == 0:
continue
task_decoder = cls.build_multitask_decoder(
args, task_obj.args, task_obj.target_dictionary, args.decoder_embed_dim
)
setattr(base_model, f"{task_name}_decoder", task_decoder)
decoder_model_cls = (
FairseqEncoderModel
if task_obj.args.decoder_type == "ctc"
else FairseqLanguageModel
)
base_model.multitask_decoders[task_name] = decoder_model_cls(
getattr(base_model, f"{task_name}_decoder")
)
return base_model
@classmethod
def build_multitask_decoder(
cls,
args,
mtl_args,
tgt_dict,
in_dim,
is_first_pass_decoder=False,
):
decoder_args = mtl_args.decoder_args
decoder_args.encoder_embed_dim = in_dim
if mtl_args.decoder_type == "transformer":
if is_first_pass_decoder:
task_decoder = cls.build_text_decoder(args, tgt_dict)
else:
from fairseq.models.speech_to_speech import (
base_multitask_text_transformer_decoder_arch,
)
base_multitask_text_transformer_decoder_arch(decoder_args) # 2L
task_decoder = TransformerDecoder(
decoder_args,
tgt_dict,
embed_tokens=TransformerModelBase.build_embedding(
decoder_args,
tgt_dict,
decoder_args.decoder_embed_dim,
),
)
elif mtl_args.decoder_type == "ctc":
task_decoder = CTCDecoder(
dictionary=tgt_dict,
in_dim=in_dim,
)
else:
raise NotImplementedError(
"currently only support multitask decoder_type 'transformer', 'ctc'"
)
return task_decoder
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
return_all_hiddens=False,
**kwargs,
):
"""
The forward method inherited from the base class has a **kwargs
argument in its input, which is not supported in torchscript. This
method overwrites the forward method definition without **kwargs.
"""
encoder_out = self.encoder(
src_tokens=src_tokens, src_lengths=src_lengths, **kwargs
)
decoder_out = self.decoder(
prev_output_tokens=prev_output_tokens, encoder_out=encoder_out
)
if return_all_hiddens:
decoder_out[-1]["encoder_states"] = encoder_out["encoder_out"]
# NOTE: from the top layer
decoder_out[-1]["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
]
return decoder_out
def upgrade_state_dict(self, state_dict):
for k in list(state_dict.keys()):
if "adaptor.layers" in k:
new = k.replace("adaptor.layers", "adaptor_layers")
state_dict[new] = state_dict[k]
del state_dict[k]
def set_default_w2v_encoder_args(args):
args.no_pretrained_weights = getattr(args, "no_pretrained_weights", False)
args.dropout_input = getattr(args, "dropout_input", 0)
args.final_dropout = getattr(args, "final_dropout", 0)
args.apply_mask = getattr(args, "apply_mask", False)
args.dropout = getattr(args, "dropout", 0)
args.attention_dropout = getattr(args, "attention_dropout", 0)
args.activation_dropout = getattr(args, "activation_dropout", 0)
args.encoder_proj = getattr(args, "encoder_proj", False)
args.remove_weight_norm = getattr(args, "remove_weight_norm", False)
args.mask_length = getattr(args, "mask_length", 10)
args.mask_prob = getattr(args, "mask_prob", 0.5)
args.mask_selection = getattr(args, "mask_selection", "static")
args.mask_other = getattr(args, "mask_other", 0)
args.no_mask_overlap = getattr(args, "no_mask_overlap", False)
args.mask_channel_length = getattr(args, "mask_channel_length", 10)
args.mask_channel_prob = getattr(args, "mask_channel_prob", 0.5)
args.mask_channel_before = getattr(args, "mask_channel_before", False)
args.mask_channel_selection = getattr(args, "mask_channel_selection", "static")
args.mask_channel_other = getattr(args, "mask_channel_other", 0)
args.no_mask_channel_overlap = getattr(args, "no_mask_channel_overlap", False)
args.freeze_finetune_updates = getattr(args, "freeze_finetune_updates", 0)
args.feature_grad_mult = 0.1
args.layerdrop = getattr(args, "layerdrop", 0.0)
args.normalize = getattr(args, "normalize", False)
args.finetune_w2v_params = getattr(args, "finetune_w2v_params", "all")
args.w2v_freezing_updates = getattr(args, "w2v_freezing_updates", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
def set_default_adaptor_args(args):
args.adaptor_n_layers = getattr(args, "adaptor_n_layers", 3)
args.adaptor_kernel_size = getattr(args, "adaptor_kernel_size", 3)
args.adaptor_stride = getattr(args, "adaptor_stride", 2)
args.adaptor_layerdrop = getattr(args, "adaptor_layerdrop", 0.0)
args.adaptor_layernorm = getattr(args, "adaptor_layernorm", False)
args.adaptor_proj = getattr(args, "adaptor_proj", False)
def set_default_transformer_decoder_args(args):
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4 * 1024)
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_attention_dropout = getattr(args, "decoder_attention_dropout", 0.0)
args.decoder_activation_dropout = getattr(args, "decoder_activation_dropout", 0.0)
args.decoder_dropout = getattr(args, "decoder_dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
args.finetune_decoder_params = getattr(args, "finetune_decoder_params", "all")
def set_default_general_args(args):
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
args.min_params_to_wrap = getattr(args, "min_params_to_wrap", int(1e8))
args.max_positions = getattr(args, "max_positions", 3000)
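# Illustrative sketch (not part of fairseq): the set_default_* helpers above only
# fill in missing attributes via getattr(), so values already present on `args`
# are kept. The Namespace below is a made-up stand-in for parsed CLI args.
def _example_set_default_general_args():
    from argparse import Namespace
    args = Namespace(max_positions=1024)
    set_default_general_args(args)
    # the pre-set value is preserved, unset fields get their defaults
    return args.max_positions, args.min_params_to_wrap  # (1024, 100000000)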
@register_model_architecture(model_name="xm_transformer", arch_name="xm_transformer")
def base_architecture(args):
set_default_general_args(args)
set_default_w2v_encoder_args(args)
set_default_adaptor_args(args)
set_default_transformer_decoder_args(args)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_text/xm_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from pathlib import Path
import torch
from fairseq import checkpoint_utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import FairseqEncoder, register_model, register_model_architecture
from fairseq.models.speech_to_text.modules.convolution import (
Conv1dSubsampler,
Conv2dSubsampler,
)
from fairseq.models.speech_to_text.s2t_transformer import (
S2TTransformerEncoder,
S2TTransformerModel,
)
from fairseq.models.speech_to_text.s2t_transformer import (
base_architecture as transformer_base_architecture,
)
from fairseq.modules import PositionalEmbedding, RelPositionalEncoding
from fairseq.modules.conformer_layer import ConformerEncoderLayer
logger = logging.getLogger(__name__)
class S2TConformerEncoder(FairseqEncoder):
"""Conformer Encoder for speech translation based on https://arxiv.org/abs/2005.08100"""
def __init__(self, args):
super().__init__(None)
self.encoder_freezing_updates = args.encoder_freezing_updates
self.num_updates = 0
self.embed_scale = math.sqrt(args.encoder_embed_dim)
if args.no_scale_embedding:
self.embed_scale = 1.0
self.padding_idx = 1
self.conv_version = args.conv_version
if self.conv_version == "s2t_transformer":
self.subsample = Conv1dSubsampler(
args.input_feat_per_channel * args.input_channels,
args.conv_channels,
args.encoder_embed_dim,
[int(k) for k in args.conv_kernel_sizes.split(",")],
)
elif self.conv_version == "convtransformer":
self.subsample = Conv2dSubsampler(
args.input_channels,
args.input_feat_per_channel,
args.conv_out_channels,
args.encoder_embed_dim,
)
self.pos_enc_type = args.pos_enc_type
if self.pos_enc_type == "rel_pos":
self.embed_positions = RelPositionalEncoding(
args.max_source_positions, args.encoder_embed_dim
)
elif self.pos_enc_type == "rope":
self.embed_positions = None
else: # Use absolute positional embedding
self.pos_enc_type = "abs"
self.embed_positions = PositionalEmbedding(
args.max_source_positions, args.encoder_embed_dim, self.padding_idx
)
self.linear = torch.nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim)
self.dropout = torch.nn.Dropout(args.dropout)
self.conformer_layers = torch.nn.ModuleList(
[
ConformerEncoderLayer(
embed_dim=args.encoder_embed_dim,
ffn_embed_dim=args.encoder_ffn_embed_dim,
attention_heads=args.encoder_attention_heads,
dropout=args.dropout,
depthwise_conv_kernel_size=args.depthwise_conv_kernel_size,
attn_type=args.attn_type,
pos_enc_type=self.pos_enc_type,
use_fp16=args.fp16,
)
for _ in range(args.encoder_layers)
]
)
def _forward(self, src_tokens, src_lengths, return_all_hiddens=False):
"""
Args:
src_tokens: Input source tokens Tensor of shape B X T X C
src_lengths: Lengths Tensor corresponding to input source tokens
            return_all_hiddens: If true, appends the self-attention states to the encoder states
Returns:
encoder_out: Tensor of shape B X T X C
encoder_padding_mask: Optional Tensor with mask
encoder_embedding: Optional Tensor. Always empty here
            encoder_states: List of Optional Tensors with self-attention states
src_tokens: Optional Tensor. Always empty here
src_lengths: Optional Tensor. Always empty here
"""
x, input_lengths = self.subsample(src_tokens, src_lengths) # returns T X B X C
encoder_padding_mask = lengths_to_padding_mask(input_lengths)
x = self.embed_scale * x
if self.pos_enc_type == "rel_pos":
positions = self.embed_positions(x)
elif self.pos_enc_type == "rope":
positions = None
else:
positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
x += positions
positions = None
x = self.linear(x)
x = self.dropout(x)
encoder_states = []
# x is T X B X C
for layer in self.conformer_layers:
x, _ = layer(x, encoder_padding_mask, positions)
if return_all_hiddens:
encoder_states.append(x)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask]
if encoder_padding_mask.any()
else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
def forward(self, src_tokens, src_lengths, return_all_hiddens=False):
if self.num_updates < self.encoder_freezing_updates:
with torch.no_grad():
x = self._forward(
src_tokens,
src_lengths,
return_all_hiddens=return_all_hiddens,
)
else:
x = self._forward(
src_tokens,
src_lengths,
return_all_hiddens=return_all_hiddens,
)
return x
def reorder_encoder_out(self, encoder_out, new_order):
"""Required method for a FairseqEncoder. Calls the method from the parent class"""
return S2TTransformerEncoder.reorder_encoder_out(self, encoder_out, new_order)
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self.num_updates = num_updates
@register_model("s2t_conformer")
class S2TConformerModel(S2TTransformerModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
S2TTransformerModel.add_args(parser)
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="dimension of input features per channel",
)
parser.add_argument(
"--input-channels",
type=int,
metavar="N",
help="number of chennels of input features",
)
parser.add_argument(
"--depthwise-conv-kernel-size",
type=int,
metavar="N",
help="kernel size of depthwise convolution layers",
)
parser.add_argument(
"--attn-type",
type=str,
metavar="STR",
help="If not specified uses fairseq MHA. Other valid option is espnet",
)
parser.add_argument(
"--pos-enc-type",
type=str,
metavar="STR",
help="Must be specified in addition to attn-type=espnet for rel_pos and rope",
)
@classmethod
def build_encoder(cls, args):
encoder = S2TConformerEncoder(args)
pretraining_path = getattr(args, "load_pretrained_encoder_from", None)
if pretraining_path is not None:
if not Path(pretraining_path).exists():
logger.warning(
f"skipped pretraining because {pretraining_path} does not exist"
)
else:
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=pretraining_path
)
logger.info(f"loaded pretrained encoder from: {pretraining_path}")
return encoder
@register_model_architecture("s2t_conformer", "s2t_conformer")
def conformer_base_architecture(args):
args.attn_type = getattr(args, "attn_type", None)
args.pos_enc_type = getattr(args, "pos_enc_type", "abs")
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.input_channels = getattr(args, "input_channels", 1)
args.max_source_positions = getattr(args, "max_source_positions", 6000)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
args.encoder_layers = getattr(args, "encoder_layers", 16)
args.depthwise_conv_kernel_size = getattr(args, "depthwise_conv_kernel_size", 31)
transformer_base_architecture(args)
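# Illustrative sketch (not part of fairseq): per the --attn-type/--pos-enc-type
# help strings above, relative or rotary positional encodings are used together
# with the espnet attention variant; the architecture helper keeps values that
# are already set and fills in the remaining defaults.
def _example_rel_pos_conformer_args():
    from argparse import Namespace
    args = Namespace(attn_type="espnet", pos_enc_type="rel_pos")
    conformer_base_architecture(args)
    return args.attn_type, args.pos_enc_type, args.encoder_layers  # ("espnet", "rel_pos", 16)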
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_text/s2t_conformer.py |
#!/usr/bin/env python3
from ast import literal_eval
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils, utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
@register_model("s2t_berard")
class BerardModel(FairseqEncoderDecoderModel):
"""Implementation of a model similar to https://arxiv.org/abs/1802.04200
Paper title: End-to-End Automatic Speech Translation of Audiobooks
An implementation is available in tensorflow at
https://github.com/eske/seq2seq
Relevant files in this implementation are the config
(https://github.com/eske/seq2seq/blob/master/config/LibriSpeech/AST.yaml)
and the model code
(https://github.com/eske/seq2seq/blob/master/translate/models.py).
The encoder and decoder try to be close to the original implementation.
The attention is an MLP as in Bahdanau et al.
(https://arxiv.org/abs/1409.0473).
There is no state initialization by averaging the encoder outputs.
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
parser.add_argument(
"--input-layers",
type=str,
metavar="EXPR",
help="List of linear layer dimensions. These "
"layers are applied to the input features and "
"are followed by tanh and possibly dropout.",
)
parser.add_argument(
"--dropout",
type=float,
metavar="D",
help="Dropout probability to use in the encoder/decoder. "
"Note that this parameters control dropout in various places, "
"there is no fine-grained control for dropout for embeddings "
"vs LSTM layers for example.",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="Number of encoder input channels. " "Typically value is 1.",
)
parser.add_argument(
"--conv-layers",
type=str,
metavar="EXPR",
help="List of conv layers " "(format: (channels, kernel, stride)).",
)
parser.add_argument(
"--num-blstm-layers",
type=int,
metavar="N",
help="Number of encoder bi-LSTM layers.",
)
parser.add_argument(
"--lstm-size", type=int, metavar="N", help="LSTM hidden size."
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="Embedding dimension of the decoder target tokens.",
)
parser.add_argument(
"--decoder-hidden-dim",
type=int,
metavar="N",
help="Decoder LSTM hidden dimension.",
)
parser.add_argument(
"--decoder-num-layers",
type=int,
metavar="N",
help="Number of decoder LSTM layers.",
)
parser.add_argument(
"--attention-dim",
type=int,
metavar="N",
help="Hidden layer dimension in MLP attention.",
)
parser.add_argument(
"--output-layer-dim",
type=int,
metavar="N",
help="Hidden layer dim for linear layer prior to output projection.",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--load-pretrained-decoder-from",
type=str,
metavar="STR",
help="model to take decoder weights from (for initialization)",
)
@classmethod
def build_encoder(cls, args, task):
encoder = BerardEncoder(
input_layers=literal_eval(args.input_layers),
conv_layers=literal_eval(args.conv_layers),
in_channels=args.input_channels,
input_feat_per_channel=args.input_feat_per_channel,
num_blstm_layers=args.num_blstm_layers,
lstm_size=args.lstm_size,
dropout=args.dropout,
)
if getattr(args, "load_pretrained_encoder_from", None) is not None:
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=args.load_pretrained_encoder_from
)
return encoder
@classmethod
def build_decoder(cls, args, task):
decoder = LSTMDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
num_layers=args.decoder_num_layers,
hidden_size=args.decoder_hidden_dim,
dropout=args.dropout,
encoder_output_dim=2 * args.lstm_size, # bidirectional
attention_dim=args.attention_dim,
output_layer_dim=args.output_layer_dim,
)
if getattr(args, "load_pretrained_decoder_from", None) is not None:
decoder = checkpoint_utils.load_pretrained_component_from_model(
component=decoder, checkpoint=args.load_pretrained_decoder_from
)
return decoder
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
# lprobs is a (B, T, D) tensor
lprobs.batch_first = True
return lprobs
class BerardEncoder(FairseqEncoder):
def __init__(
self,
input_layers: List[int],
conv_layers: List[Tuple[int]],
in_channels: int,
input_feat_per_channel: int,
num_blstm_layers: int,
lstm_size: int,
dropout: float,
):
"""
Args:
input_layers: list of linear layer dimensions. These layers are
applied to the input features and are followed by tanh and
possibly dropout.
conv_layers: list of conv2d layer configurations. A configuration is
a tuple (out_channels, conv_kernel_size, stride).
in_channels: number of input channels.
input_feat_per_channel: number of input features per channel. These
are speech features, typically 40 or 80.
num_blstm_layers: number of bidirectional LSTM layers.
lstm_size: size of the LSTM hidden (and cell) size.
dropout: dropout probability. Dropout can be applied after the
linear layers and LSTM layers but not to the convolutional
layers.
"""
super().__init__(None)
self.input_layers = nn.ModuleList()
in_features = input_feat_per_channel
for out_features in input_layers:
if dropout > 0:
self.input_layers.append(
nn.Sequential(
nn.Linear(in_features, out_features), nn.Dropout(p=dropout)
)
)
else:
self.input_layers.append(nn.Linear(in_features, out_features))
in_features = out_features
self.in_channels = in_channels
self.input_dim = input_feat_per_channel
self.conv_kernel_sizes_and_strides = []
self.conv_layers = nn.ModuleList()
lstm_input_dim = input_layers[-1]
for conv_layer in conv_layers:
out_channels, conv_kernel_size, conv_stride = conv_layer
self.conv_layers.append(
nn.Conv2d(
in_channels,
out_channels,
conv_kernel_size,
stride=conv_stride,
padding=conv_kernel_size // 2,
)
)
self.conv_kernel_sizes_and_strides.append((conv_kernel_size, conv_stride))
in_channels = out_channels
lstm_input_dim //= conv_stride
lstm_input_dim *= conv_layers[-1][0]
self.lstm_size = lstm_size
self.num_blstm_layers = num_blstm_layers
self.lstm = nn.LSTM(
input_size=lstm_input_dim,
hidden_size=lstm_size,
num_layers=num_blstm_layers,
dropout=dropout,
bidirectional=True,
)
self.output_dim = 2 * lstm_size # bidirectional
if dropout > 0:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = None
def forward(self, src_tokens, src_lengths=None, **kwargs):
"""
Args
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
bsz, max_seq_len, _ = src_tokens.size()
# (B, C, T, feat)
x = (
src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
.transpose(1, 2)
.contiguous()
)
for input_layer in self.input_layers:
x = input_layer(x)
x = torch.tanh(x)
for conv_layer in self.conv_layers:
x = conv_layer(x)
bsz, _, output_seq_len, _ = x.size()
# (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) ->
# (T, B, C * feat)
x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1)
input_lengths = src_lengths.clone()
for k, s in self.conv_kernel_sizes_and_strides:
p = k // 2
input_lengths = (input_lengths.float() + 2 * p - k) / s + 1
input_lengths = input_lengths.floor().long()
packed_x = nn.utils.rnn.pack_padded_sequence(x, input_lengths)
h0 = x.new(2 * self.num_blstm_layers, bsz, self.lstm_size).zero_()
c0 = x.new(2 * self.num_blstm_layers, bsz, self.lstm_size).zero_()
packed_outs, _ = self.lstm(packed_x, (h0, c0))
# unpack outputs and apply dropout
x, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_outs)
if self.dropout is not None:
x = self.dropout(x)
encoder_padding_mask = (
lengths_to_padding_mask(output_lengths).to(src_tokens.device).t()
)
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": encoder_padding_mask, # (T, B)
}
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
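# Illustrative sketch (not part of fairseq): the per-layer length update used in
# BerardEncoder.forward, applied twice for the default "(16, 3, 2)" conv config.
# The input lengths below are made up.
def _example_conv_output_lengths():
    import torch
    lengths = torch.tensor([100, 57])
    for k, s in [(3, 2), (3, 2)]:  # (kernel, stride) with padding k // 2
        p = k // 2
        lengths = ((lengths.float() + 2 * p - k) / s + 1).floor().long()
    return lengths  # tensor([25, 15])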
class MLPAttention(nn.Module):
"""The original attention from Badhanau et al. (2014)
https://arxiv.org/abs/1409.0473, based on a Multi-Layer Perceptron.
The attention score between position i in the encoder and position j in the
decoder is: alpha_ij = V_a * tanh(W_ae * enc_i + W_ad * dec_j + b_a)
"""
def __init__(self, decoder_hidden_state_dim, context_dim, attention_dim):
super().__init__()
self.context_dim = context_dim
self.attention_dim = attention_dim
# W_ae and b_a
self.encoder_proj = nn.Linear(context_dim, self.attention_dim, bias=True)
# W_ad
self.decoder_proj = nn.Linear(
decoder_hidden_state_dim, self.attention_dim, bias=False
)
# V_a
self.to_scores = nn.Linear(self.attention_dim, 1, bias=False)
def forward(self, decoder_state, source_hids, encoder_padding_mask):
"""The expected input dimensions are:
decoder_state: bsz x decoder_hidden_state_dim
source_hids: src_len x bsz x context_dim
encoder_padding_mask: src_len x bsz
"""
src_len, bsz, _ = source_hids.size()
# (src_len*bsz) x context_dim (to feed through linear)
flat_source_hids = source_hids.view(-1, self.context_dim)
# (src_len*bsz) x attention_dim
encoder_component = self.encoder_proj(flat_source_hids)
# src_len x bsz x attention_dim
encoder_component = encoder_component.view(src_len, bsz, self.attention_dim)
# 1 x bsz x attention_dim
decoder_component = self.decoder_proj(decoder_state).unsqueeze(0)
# Sum with broadcasting and apply the non linearity
# src_len x bsz x attention_dim
hidden_att = torch.tanh(
(decoder_component + encoder_component).view(-1, self.attention_dim)
)
# Project onto the reals to get attentions scores (src_len x bsz)
attn_scores = self.to_scores(hidden_att).view(src_len, bsz)
# Mask + softmax (src_len x bsz)
if encoder_padding_mask is not None:
attn_scores = (
attn_scores.float()
.masked_fill_(encoder_padding_mask, float("-inf"))
.type_as(attn_scores)
) # FP16 support: cast to float and back
# srclen x bsz
normalized_masked_attn_scores = F.softmax(attn_scores, dim=0)
# Sum weighted sources (bsz x context_dim)
attn_weighted_context = (
source_hids * normalized_masked_attn_scores.unsqueeze(2)
).sum(dim=0)
return attn_weighted_context, normalized_masked_attn_scores
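# Illustrative sketch (not part of fairseq): run MLPAttention on random inputs
# and check the expected shapes; the softmax is over source positions, so the
# attention weights for each batch element sum to one. All sizes are made up.
def _example_mlp_attention():
    import torch
    src_len, bsz, ctx_dim, dec_dim = 7, 2, 8, 4
    attn = MLPAttention(decoder_hidden_state_dim=dec_dim, context_dim=ctx_dim, attention_dim=16)
    context, weights = attn(torch.randn(bsz, dec_dim), torch.randn(src_len, bsz, ctx_dim), None)
    assert context.shape == (bsz, ctx_dim)
    assert torch.allclose(weights.sum(dim=0), torch.ones(bsz))
    return context, weights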
class LSTMDecoder(FairseqIncrementalDecoder):
def __init__(
self,
dictionary,
embed_dim,
num_layers,
hidden_size,
dropout,
encoder_output_dim,
attention_dim,
output_layer_dim,
):
"""
Args:
dictionary: target text dictionary.
embed_dim: embedding dimension for target tokens.
num_layers: number of LSTM layers.
hidden_size: hidden size for LSTM layers.
dropout: dropout probability. Dropout can be applied to the
embeddings, the LSTM layers, and the context vector.
encoder_output_dim: encoder output dimension (hidden size of
encoder LSTM).
attention_dim: attention dimension for MLP attention.
output_layer_dim: size of the linear layer prior to output
projection.
"""
super().__init__(dictionary)
self.num_layers = num_layers
self.hidden_size = hidden_size
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
self.embed_tokens = nn.Embedding(num_embeddings, embed_dim, padding_idx)
if dropout > 0:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = None
self.layers = nn.ModuleList()
for layer_id in range(num_layers):
input_size = embed_dim if layer_id == 0 else encoder_output_dim
self.layers.append(
nn.LSTMCell(input_size=input_size, hidden_size=hidden_size)
)
self.context_dim = encoder_output_dim
self.attention = MLPAttention(
decoder_hidden_state_dim=hidden_size,
context_dim=encoder_output_dim,
attention_dim=attention_dim,
)
self.deep_output_layer = nn.Linear(
hidden_size + encoder_output_dim + embed_dim, output_layer_dim
)
self.output_projection = nn.Linear(output_layer_dim, num_embeddings)
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs
):
encoder_padding_mask = encoder_out["encoder_padding_mask"]
encoder_outs = encoder_out["encoder_out"]
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
bsz, seqlen = prev_output_tokens.size()
srclen = encoder_outs.size(0)
# embed tokens
embeddings = self.embed_tokens(prev_output_tokens)
x = embeddings
if self.dropout is not None:
x = self.dropout(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# initialize previous states (or get from cache during incremental
# generation)
cached_state = utils.get_incremental_state(
self, incremental_state, "cached_state"
)
if cached_state is not None:
prev_hiddens, prev_cells = cached_state
else:
prev_hiddens = [encoder_out["encoder_out"].mean(dim=0)] * self.num_layers
prev_cells = [x.new_zeros(bsz, self.hidden_size)] * self.num_layers
attn_scores = x.new_zeros(bsz, srclen)
attention_outs = []
outs = []
for j in range(seqlen):
input = x[j, :, :]
attention_out = None
for i, layer in enumerate(self.layers):
# the previous state is one layer below except for the bottom
# layer where the previous state is the state emitted by the
# top layer
hidden, cell = layer(
input,
(
prev_hiddens[(i - 1) % self.num_layers],
prev_cells[(i - 1) % self.num_layers],
),
)
if self.dropout is not None:
hidden = self.dropout(hidden)
prev_hiddens[i] = hidden
prev_cells[i] = cell
if attention_out is None:
attention_out, attn_scores = self.attention(
hidden, encoder_outs, encoder_padding_mask
)
if self.dropout is not None:
attention_out = self.dropout(attention_out)
attention_outs.append(attention_out)
input = attention_out
# collect the output of the top layer
outs.append(hidden)
# cache previous states (no-op except during incremental generation)
utils.set_incremental_state(
self, incremental_state, "cached_state", (prev_hiddens, prev_cells)
)
# collect outputs across time steps
x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size)
attention_outs_concat = torch.cat(attention_outs, dim=0).view(
seqlen, bsz, self.context_dim
)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
attention_outs_concat = attention_outs_concat.transpose(0, 1)
# concat LSTM output, attention output and embedding
# before output projection
x = torch.cat((x, attention_outs_concat, embeddings), dim=2)
x = self.deep_output_layer(x)
x = torch.tanh(x)
if self.dropout is not None:
x = self.dropout(x)
# project back to size of vocabulary
x = self.output_projection(x)
# to return the full attn_scores tensor, we need to fix the decoder
# to account for subsampling input frames
# return x, attn_scores
return x, None
def reorder_incremental_state(self, incremental_state, new_order):
super().reorder_incremental_state(incremental_state, new_order)
cached_state = utils.get_incremental_state(
self, incremental_state, "cached_state"
)
if cached_state is None:
return
def reorder_state(state):
if isinstance(state, list):
return [reorder_state(state_i) for state_i in state]
return state.index_select(0, new_order)
new_state = tuple(map(reorder_state, cached_state))
utils.set_incremental_state(self, incremental_state, "cached_state", new_state)
@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard")
def berard(args):
"""The original version: "End-to-End Automatic Speech Translation of
Audiobooks" (https://arxiv.org/abs/1802.04200)
"""
args.input_layers = getattr(args, "input_layers", "[256, 128]")
args.conv_layers = getattr(args, "conv_layers", "[(16, 3, 2), (16, 3, 2)]")
args.num_blstm_layers = getattr(args, "num_blstm_layers", 3)
args.lstm_size = getattr(args, "lstm_size", 256)
args.dropout = getattr(args, "dropout", 0.2)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128)
args.decoder_num_layers = getattr(args, "decoder_num_layers", 2)
args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 512)
args.attention_dim = getattr(args, "attention_dim", 512)
args.output_layer_dim = getattr(args, "output_layer_dim", 128)
args.load_pretrained_encoder_from = getattr(
args, "load_pretrained_encoder_from", None
)
args.load_pretrained_decoder_from = getattr(
args, "load_pretrained_decoder_from", None
)
@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_256_3_3")
def berard_256_3_3(args):
"""Used in
* "Harnessing Indirect Training Data for End-to-End Automatic Speech
Translation: Tricks of the Trade" (https://arxiv.org/abs/1909.06515)
* "CoVoST: A Diverse Multilingual Speech-To-Text Translation Corpus"
(https://arxiv.org/pdf/2002.01320.pdf)
* "Self-Supervised Representations Improve End-to-End Speech Translation"
(https://arxiv.org/abs/2006.12124)
"""
args.decoder_num_layers = getattr(args, "decoder_num_layers", 3)
berard(args)
@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_512_3_2")
def berard_512_3_2(args):
args.num_blstm_layers = getattr(args, "num_blstm_layers", 3)
args.lstm_size = getattr(args, "lstm_size", 512)
args.dropout = getattr(args, "dropout", 0.3)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_num_layers = getattr(args, "decoder_num_layers", 2)
args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 1024)
args.attention_dim = getattr(args, "attention_dim", 512)
args.output_layer_dim = getattr(args, "output_layer_dim", 256)
berard(args)
@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_512_5_3")
def berard_512_5_3(args):
args.num_blstm_layers = getattr(args, "num_blstm_layers", 5)
args.lstm_size = getattr(args, "lstm_size", 512)
args.dropout = getattr(args, "dropout", 0.3)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_num_layers = getattr(args, "decoder_num_layers", 3)
args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 1024)
args.attention_dim = getattr(args, "attention_dim", 512)
args.output_layer_dim = getattr(args, "output_layer_dim", 256)
berard(args)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_text/berard.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .berard import * # noqa
from .convtransformer import * # noqa
from .multi_modality_model import * # noqa
from .s2t_conformer import * # noqa
from .s2t_transformer import * # noqa
from .s2t_wav_transformer import * # noqa
from .xm_transformer import * # noqa
from .xm_transformer_unity import * # noqa
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_text/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
from fairseq import checkpoint_utils, utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.speech_to_text.hub_interface import S2THubInterface
from fairseq.models.speech_to_text.modules.convolution import (
Conv1dSubsampler,
Conv2dSubsampler,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import (
FairseqDropout,
LayerNorm,
PositionalEmbedding,
TransformerEncoderLayer,
)
logger = logging.getLogger(__name__)
@register_model("s2t_transformer")
class S2TTransformerModel(FairseqEncoderDecoderModel):
"""Adapted Transformer model (https://arxiv.org/abs/1706.03762) for
speech-to-text tasks. The Transformer encoder/decoder remains the same.
A trainable input subsampler is prepended to the Transformer encoder to
project inputs into the encoder dimension as well as downsample input
sequence for computational efficiency."""
@classmethod
def hub_models(cls):
base_url = "http://dl.fbaipublicfiles.com/fairseq/s2t"
model_ids = [
"s2t_transformer_s-en-asr-librispeech",
"s2t_transformer_m-en-asr-librispeech",
"s2t_transformer_l-en-asr-librispeech",
]
return {i: f"{base_url}/{i}.tar.gz" for i in model_ids}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
config_yaml="config.yaml",
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
config_yaml=config_yaml,
**kwargs,
)
return S2THubInterface(x["args"], x["task"], x["models"][0])
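    # Usage sketch (hedged, illustrative only): loading one of the checkpoints
    # listed in hub_models() downloads the archive and returns an
    # S2THubInterface that wraps the task, args and model for inference.
    #
    #     asr = S2TTransformerModel.from_pretrained(
    #         "s2t_transformer_s-en-asr-librispeech",
    #         config_yaml="config.yaml",
    #     )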
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# input
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="STR",
help="kernel sizes of Conv1d (s2t_transformer) subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d (s2t_transformer) subsampling layers",
)
parser.add_argument(
"--conv-out-channels",
type=int,
metavar="N",
help="# of channels in Conv2d (convtransformer) subsampling layers",
)
parser.add_argument(
"--conv-version",
type=str,
default="s2t_transformer",
choices=["s2t_transformer", "convtransformer"],
help="version of frontend convolutional layers",
)
# Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--encoder-freezing-updates",
type=int,
metavar="N",
help="freeze encoder for first N updates",
)
@classmethod
def build_encoder(cls, args):
encoder = S2TTransformerEncoder(args)
pretraining_path = getattr(args, "load_pretrained_encoder_from", None)
if pretraining_path is not None:
if not Path(pretraining_path).exists():
logger.warning(
f"skipped pretraining because {pretraining_path} does not exist"
)
else:
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=pretraining_path
)
logger.info(f"loaded pretrained encoder from: {pretraining_path}")
return encoder
@classmethod
def build_decoder(cls, args, task, embed_tokens):
return TransformerDecoderScriptable(args, task.target_dictionary, embed_tokens)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
return Embedding(num_embeddings, embed_dim, padding_idx)
decoder_embed_tokens = build_embedding(
task.target_dictionary, args.decoder_embed_dim
)
args.tgt_dict_size = len(task.target_dictionary)
encoder = cls.build_encoder(args)
decoder = cls.build_decoder(args, task, decoder_embed_tokens)
return cls(encoder, decoder)
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
def get_ctc_target(self, sample: Optional[Dict[str, Tensor]]):
return sample["target"], sample["target_lengths"]
def get_ctc_output(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
sample: Optional[Dict[str, Tensor]],
):
encoder_out = net_output[1]["encoder_out"]["encoder_out"][0]
logits = self.encoder.ctc_proj(encoder_out) # T x B x C
out = utils.log_softmax(logits.float(), dim=-1)
padding_mask = net_output[1]["encoder_out"]["encoder_padding_mask"]
lens = out.new_full((out.shape[1],), out.shape[0]).long()
if len(padding_mask) > 0:
lens -= padding_mask[0].sum(dim=-1)
return out, lens
def forward(self, src_tokens, src_lengths, prev_output_tokens):
"""
The forward method inherited from the base class has a **kwargs
argument in its input, which is not supported in torchscript. This
method overwrites the forward method definition without **kwargs.
"""
encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths)
decoder_out = self.decoder(
prev_output_tokens=prev_output_tokens, encoder_out=encoder_out
)
return decoder_out
class S2TTransformerEncoder(FairseqEncoder):
"""Speech-to-text Transformer encoder that consists of input subsampler and
Transformer encoder."""
def __init__(self, args):
super().__init__(None)
self.encoder_freezing_updates = args.encoder_freezing_updates
self.num_updates = 0
self.dropout_module = FairseqDropout(
p=args.dropout, module_name=self.__class__.__name__
)
self.embed_scale = math.sqrt(args.encoder_embed_dim)
if args.no_scale_embedding:
self.embed_scale = 1.0
self.padding_idx = 1
self.conv_version = args.conv_version
if self.conv_version == "s2t_transformer":
self.subsample = Conv1dSubsampler(
args.input_feat_per_channel * args.input_channels,
args.conv_channels,
args.encoder_embed_dim,
[int(k) for k in args.conv_kernel_sizes.split(",")],
)
elif self.conv_version == "convtransformer":
self.subsample = Conv2dSubsampler(
args.input_channels,
args.input_feat_per_channel,
args.conv_out_channels,
args.encoder_embed_dim,
)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, args.encoder_embed_dim, self.padding_idx
)
self.transformer_layers = nn.ModuleList(
[TransformerEncoderLayer(args) for _ in range(args.encoder_layers)]
)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(args.encoder_embed_dim)
else:
self.layer_norm = None
self.ctc_proj = None
if getattr(args, "ctc_weight", 0.0) > 0.0:
self.ctc_proj = nn.Linear(args.encoder_embed_dim, args.tgt_dict_size)
def _forward(self, src_tokens, src_lengths, return_all_hiddens=False):
x, input_lengths = self.subsample(src_tokens, src_lengths)
x = self.embed_scale * x
encoder_padding_mask = lengths_to_padding_mask(input_lengths)
positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
x += positions
x = self.dropout_module(x)
encoder_states = []
for layer in self.transformer_layers:
x = layer(x, encoder_padding_mask)
if return_all_hiddens:
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask]
if encoder_padding_mask.any()
else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
def forward(self, src_tokens, src_lengths, return_all_hiddens=False):
if self.num_updates < self.encoder_freezing_updates:
with torch.no_grad():
x = self._forward(
src_tokens, src_lengths, return_all_hiddens=return_all_hiddens
)
else:
x = self._forward(
src_tokens, src_lengths, return_all_hiddens=return_all_hiddens
)
return x
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = (
[]
if len(encoder_out["encoder_out"]) == 0
else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]]
)
new_encoder_padding_mask = (
[]
if len(encoder_out["encoder_padding_mask"]) == 0
else [
x.index_select(0, new_order)
for x in encoder_out["encoder_padding_mask"]
]
)
new_encoder_embedding = (
[]
if len(encoder_out["encoder_embedding"]) == 0
else [
x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]
]
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [], # B x T
"src_lengths": [], # B x 1
}
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self.num_updates = num_updates
class TransformerDecoderScriptable(TransformerDecoder):
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
# call scriptable method from parent class
x, _ = self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
extra = {"encoder_out": encoder_out} if incremental_state is None else None
return x, extra
@register_model_architecture(model_name="s2t_transformer", arch_name="s2t_transformer")
def base_architecture(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.input_channels = getattr(args, "input_channels", 1)
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5") # for Conv1d
args.conv_channels = getattr(args, "conv_channels", 1024) # for Conv1d
args.conv_out_channels = getattr(args, "conv_out_channels", 256) # for Conv2d
args.conv_version = getattr(args, "conv_version", "s2t_transformer")
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
@register_model_architecture("s2t_transformer", "s2t_transformer_s")
def s2t_transformer_s(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
base_architecture(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_xs")
def s2t_transformer_xs(args):
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_layers = getattr(args, "decoder_layers", 3)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4)
args.dropout = getattr(args, "dropout", 0.3)
s2t_transformer_s(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_sp")
def s2t_transformer_sp(args):
args.encoder_layers = getattr(args, "encoder_layers", 16)
s2t_transformer_s(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_m")
def s2t_transformer_m(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.dropout = getattr(args, "dropout", 0.15)
base_architecture(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_mp")
def s2t_transformer_mp(args):
args.encoder_layers = getattr(args, "encoder_layers", 16)
s2t_transformer_m(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_l")
def s2t_transformer_l(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.2)
base_architecture(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_lp")
def s2t_transformer_lp(args):
args.encoder_layers = getattr(args, "encoder_layers", 16)
s2t_transformer_l(args)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_text/s2t_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.models import FairseqDecoder, FairseqEncoder
# a container for different encoders with training samples from different modalities
# each time, only one encoder is selected
class MultiModalityEncoder(FairseqEncoder):
def __init__(self, dictionary):
super().__init__(dictionary)
def select_encoder(self, mode, **kwargs):
raise NotImplementedError("Model must implement the select_encoder method")
return None, kwargs
# def post_encoder(self, encoder_out, src_tokens, src_lengths, mode, **kwargs):
# # Default do nothing
# return encoder_out
# get sample data from JointSpeechTextDataset
def forward(self, src_tokens, src_lengths=None, mode="", **kwargs):
encoder, kwargs = self.select_encoder(mode, **kwargs)
# return self.post_encoder(encoder(src_tokens, src_lengths, **kwargs), src_tokens, src_lengths, mode, **kwargs)
return encoder(src_tokens, src_lengths, **kwargs)
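# Illustrative sketch (not part of fairseq): a minimal concrete subclass that
# keeps one encoder per modality and dispatches on the `mode` string coming
# from the dataset. The attribute and mode names below are assumptions.
class _ExampleSpeechTextEncoder(MultiModalityEncoder):
    def __init__(self, dictionary, speech_encoder, text_encoder):
        super().__init__(dictionary)
        self.speech_encoder = speech_encoder
        self.text_encoder = text_encoder
    def select_encoder(self, mode, **kwargs):
        # route speech modes to the speech encoder, everything else to text
        if mode.startswith("sup_speech") or mode == "speech":
            return self.speech_encoder, kwargs
        return self.text_encoder, kwargs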
# a container for different decoders with training samples from different modalities
# each time, only one decoder is selected
class MultiInputDecoder(FairseqDecoder):
def __init__(self, dictionary):
super().__init__(dictionary)
def select_decoder(self, mode, **kwargs):
raise NotImplementedError("Model must implement the select_decoder method")
return None, kwargs
def forward(
self, prev_output_tokens, encoder_out, incremental_state=None, mode="", **kwargs
):
decoder, kwargs = self.select_decoder(mode, **kwargs)
return decoder(
prev_output_tokens,
encoder_out,
incremental_state=incremental_state,
**kwargs
)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_text/multi_modality_model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
from fairseq.models import (
FairseqEncoderModel,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.speech_to_speech.modules.ctc_decoder import CTCDecoder
from fairseq.models.speech_to_speech.modules.transformer_encoder import (
TransformerEncoderNoEmb,
)
from fairseq.models.speech_to_text.xm_transformer import XMTransformerModel
from fairseq.models.speech_to_text.xm_transformer import (
base_architecture as xm_t_base_architecture,
)
from fairseq.models.speech_to_text.xm_transformer import (
build_embedding,
need_finetuning,
set_default_adaptor_args,
set_default_general_args,
set_default_transformer_decoder_args,
set_default_w2v_encoder_args,
)
from fairseq.models.transformer import Linear, TransformerDecoder, TransformerModelBase
from fairseq.models.transformer.transformer_decoder_aug import AugTransformerDecoder
logger = logging.getLogger(__name__)
def unit_transformer_decoder_arch_base(
args, decoder_layers=6, decoder_embed_dim=768, decoder_attention_heads=12
):
args.encoder_layers = decoder_layers
args.decoder_layers = decoder_layers
args.decoder_embed_dim = decoder_embed_dim
args.decoder_ffn_embed_dim = decoder_embed_dim * 4
args.decoder_attention_heads = decoder_attention_heads
args.encoder_embed_dim = args.decoder_embed_dim
args.decoder_output_dim = decoder_embed_dim
args.decoder_input_dim = decoder_embed_dim
def unit_transformer_decoder_arch_large(
args, decoder_layers=12, decoder_embed_dim=1024, decoder_attention_heads=16
):
args.encoder_layers = decoder_layers
args.decoder_layers = decoder_layers
args.decoder_embed_dim = decoder_embed_dim
args.decoder_ffn_embed_dim = decoder_embed_dim * 4
args.decoder_attention_heads = decoder_attention_heads
args.encoder_embed_dim = args.decoder_embed_dim
args.decoder_output_dim = decoder_embed_dim
args.decoder_input_dim = decoder_embed_dim
@register_model("unity_xm_transformer")
class XMTransformerModelUnitY(XMTransformerModel):
@classmethod
def hub_models(cls):
base_url = "http://dl.fbaipublicfiles.com/fairseq/s2t"
model_ids = []
return {i: f"{base_url}/{i}.tar.gz" for i in model_ids}
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def add_args(cls, parser):
"""Add model-specific arguments to the parser."""
XMTransformerModel.add_args(parser)
parser.add_argument(
"--translation-decoder-layers",
type=int,
default=4,
metavar="N",
help="num decoder layers in the first-pass translation module",
)
parser.add_argument(
"--synthesizer-encoder-layers",
type=int,
default=0,
metavar="N",
help="num encoder layers in the second-pass synthesizer module",
)
parser.add_argument(
"--synthesizer-augmented-cross-attention",
action="store_true",
default=False,
help="augmented cross-attention over speech encoder output",
)
parser.add_argument(
"--load-pretrained-aux-decoder-from",
type=str,
metavar="STR",
help="model to take decoder weights from (for initialization)",
)
@classmethod
def build_text_decoder(cls, args, tgt_dict):
_args = copy.deepcopy(args)
if args.adaptor_proj or args.encoder_proj: # not V0 arch
_args.encoder_embed_dim = _args.decoder_embed_dim
_args.dropout = args.decoder_dropout
_args.attention_dropout = args.decoder_attention_dropout
_args.activation_dropout = args.decoder_activation_dropout
_args.layerdrop = _args.decoder_layerdrop
_args.decoder_layers = _args.translation_decoder_layers
embed_tokens = build_embedding(tgt_dict, _args.decoder_embed_dim)
decoder = TransformerDecoder(_args, tgt_dict, embed_tokens)
if getattr(args, "load_pretrained_aux_decoder_from", None) is not None:
decoder = cls.maybe_load_pretrained(
decoder, getattr(args, "load_pretrained_aux_decoder_from", None)
)
for k, p in decoder.named_parameters():
p.requires_grad = need_finetuning(args.finetune_decoder_params, k)
return decoder
@classmethod
def build_decoder(cls, args, task, aug_attn=False):
_args = copy.deepcopy(args)
_args.layerdrop = 0.0 # turn off layerdrop for shallow layers
_args.encoder_embed_dim = args.decoder_embed_dim
proj = None
if args.decoder_embed_dim != _args.decoder_embed_dim:
proj = Linear(args.decoder_embed_dim, _args.decoder_embed_dim)
embed_tokens = build_embedding(task.target_dictionary, _args.decoder_embed_dim)
decoder_cls = AugTransformerDecoder if aug_attn else TransformerDecoder
decoder = decoder_cls(_args, task.target_dictionary, embed_tokens)
if getattr(args, "load_pretrained_decoder_from", None) is not None:
# load all layers first and then discard the bottom layers
embed_tokens = build_embedding(
task.target_dictionary, _args.decoder_embed_dim
)
decoder_tmp = decoder_cls(_args, task.target_dictionary, embed_tokens)
decoder_tmp = cls.maybe_load_pretrained(
decoder_tmp, getattr(_args, "load_pretrained_decoder_from", None)
)
state_dict = decoder_tmp.state_dict()
for k, p in decoder.named_parameters():
p.data = state_dict[k].data
p.requires_grad = need_finetuning(_args.finetune_decoder_params, k)
decoder.layers = decoder.layers[-_args.decoder_layers :]
return decoder, proj, _args
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
xm_t_base_architecture(args)
encoder = cls.build_encoder(args)
decoder, proj, unit_args = cls.build_decoder(
args,
task,
aug_attn=getattr(args, "synthesizer_augmented_cross_attention", False),
)
base_model = cls(encoder, decoder)
setattr(base_model, "proj", proj)
base_model.t2u_augmented_cross_attn = getattr(
args, "synthesizer_augmented_cross_attention", False
)
# set up multitask decoders
base_model.mt_task_name = None
base_model.multitask_decoders = {}
has_first_pass_decoder = False
for task_name, task_obj in task.multitask_tasks.items():
if task_obj.is_first_pass_decoder:
has_first_pass_decoder = True
base_model.mt_task_name = task_name
task_decoder = cls.build_multitask_decoder(
args,
task_obj.args,
task_obj.target_dictionary,
args.decoder_embed_dim,
task_obj.is_first_pass_decoder,
)
setattr(base_model, f"{task_name}_decoder", task_decoder)
decoder_model_cls = (
FairseqEncoderModel
if task_obj.args.decoder_type == "ctc"
else FairseqLanguageModel
)
base_model.multitask_decoders[task_name] = decoder_model_cls(
getattr(base_model, f"{task_name}_decoder")
)
assert has_first_pass_decoder, "set at least one intermediate non-CTC decoder"
# set up encoder on top of the auxiliary MT decoder
if getattr(args, "synthesizer_encoder_layers", 0) > 0:
base_model.synthesizer_encoder = cls.build_t2u_encoder(unit_args)
else:
base_model.synthesizer_encoder = None
return base_model
@classmethod
def build_t2u_encoder(cls, args):
_args = copy.deepcopy(args)
_args.encoder_layers = _args.synthesizer_encoder_layers
_args.encoder_embed_dim = args.decoder_embed_dim
_args.encoder_ffn_embed_dim = args.decoder_ffn_embed_dim
_args.encoder_attention_heads = args.decoder_attention_heads
_args.encoder_normalize_before = True
return TransformerEncoderNoEmb(_args)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
prev_output_tokens_mt,
return_all_hiddens=False,
tgt_speaker=None,
**kwargs,
):
"""
The forward method inherited from the base class has a **kwargs
argument in its input, which is not supported in torchscript. This
method overwrites the forward method definition without **kwargs.
"""
encoder_out = self.encoder(
src_tokens=src_tokens, src_lengths=src_lengths, **kwargs
)
# 1. MT decoder
mt_decoder = getattr(self, f"{self.mt_task_name}_decoder")
mt_decoder_out = mt_decoder(
prev_output_tokens_mt,
encoder_out=encoder_out,
)
x = mt_decoder_out[1]["inner_states"][-1]
if mt_decoder.layer_norm is not None:
x = mt_decoder.layer_norm(x)
if self.proj is not None:
x = self.proj(x)
mt_decoder_padding_mask = None
if prev_output_tokens_mt.eq(mt_decoder.padding_idx).any():
mt_decoder_padding_mask = prev_output_tokens_mt.eq(mt_decoder.padding_idx)
# 2. T2U encoder
if self.synthesizer_encoder is not None:
t2u_encoder_out = self.synthesizer_encoder(
x,
mt_decoder_padding_mask,
)
else:
t2u_encoder_out = {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [mt_decoder_padding_mask], # B x T
}
# 3. T2U decoder
if self.t2u_augmented_cross_attn:
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
encoder_out_aug=t2u_encoder_out,
)
else:
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=t2u_encoder_out,
)
if return_all_hiddens:
decoder_out[-1]["encoder_states"] = encoder_out["encoder_out"]
# NOTE: from the top layer
decoder_out[-1]["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
]
decoder_out[-1]["mt_decoder_out"] = mt_decoder_out
return decoder_out
@register_model_architecture(
model_name="unity_xm_transformer", arch_name="unity_xm_transformer"
)
def base_architecture_unity(args):
set_default_general_args(args)
set_default_w2v_encoder_args(args)
set_default_adaptor_args(args)
set_default_transformer_decoder_args(args)
args.layernorm_embedding = False
args.decoder_learned_pos = False
# for old models
@register_model_architecture(
model_name="unity_xm_transformer", arch_name="xm_transformer_t2"
)
def base_architecture_unity_legacy(args):
base_architecture_unity(args)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_text/xm_transformer_unity.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
from collections.abc import Iterable
from itertools import repeat
from typing import List, Optional, Tuple
import torch
from torch import Tensor
# ------------------------------------------------------------------------------
# assert_equal()
# ------------------------------------------------------------------------------
def assert_equal(value1, value2, name1=None, name2=None):
"""Asserts two values are equal otherwise raise an error."""
str_name1 = "" if name1 is None else "{} ".format(name1)
str_name2 = "" if name2 is None else "{} ".format(name2)
if value1 != value2:
str_value1 = "{}" if name1 is None else "({})"
str_value1 = str_value1.format(value1)
str_value2 = "{}" if name2 is None else "({})"
str_value2 = str_value2.format(value2)
raise ValueError(
"Expected {}{} == {}{}".format(str_name1, str_value1, str_name2, str_value2)
)
def fill_config(config, key, value):
if value is not None:
if key not in config or config[key] is None:
config[key] = value
assert_equal(value, config[key], "value", f'config["{key}"]')
# ------------------------------------------------------------------------------
# check_and_return_expected()
# ------------------------------------------------------------------------------
def check_and_return_expected(value, undefined_value, expected_value, name=None):
"""
Return the expected value while checking if the given value is undefined or
equal to the expected value.
"""
if (undefined_value is None and value is None) or (undefined_value == value):
return expected_value
if value != expected_value:
str_name = "" if name is None else "{} ".format(name)
str_value = "{}" if name is None else "({})"
str_value = str_value.format(value)
raise ValueError(
"Expected {}{} == {}".format(str_name, str_value, expected_value)
)
return expected_value
# ------------------------------------------------------------------------------
# get_time_axis()
# ------------------------------------------------------------------------------
def get_time_axis(layout):
"""
Extract the time axis from the layout, for example for breaking sequence into
segments.
"""
if layout in ["TB", "TBD"]:
return 0
if layout in ["BT", "BTD"]:
return 1
if layout in ["BCTD"]:
return 2
raise ValueError("Unsupported layout = {}".format(layout))
# ------------------------------------------------------------------------------
# get_batch_axis()
# ------------------------------------------------------------------------------
def get_batch_axis(layout):
"""
Extract the batch axis from the layout
"""
if layout in ["TB", "TBD"]:
return 1
if layout in ["BT", "BTD", "BCTD"]:
return 0
raise ValueError("Unsupported layout = {}".format(layout))
# ------------------------------------------------------------------------------
# monotonically_increasing_and_bounded()
# ------------------------------------------------------------------------------
def monotonically_increasing_and_bounded(iterable, min=None, max=None):
"""
Check if the elements in the given iterable are monotonically increasing and
bounded by upper/lower bounds.
"""
if not isinstance(iterable, Iterable):
raise TypeError(
"Expected iterable to be of type Iterable, got ({})".format(
iterable.__class__.__name__
)
)
for i in range(len(iterable)):
if min is not None and iterable[i] < min:
return False
if max is not None and iterable[i] > max:
return False
if i > 0 and iterable[i] <= iterable[i - 1]:
return False
return True
# ------------------------------------------------------------------------------
# to_pair()
# ------------------------------------------------------------------------------
def to_pair(value, name):
"""Make a pair (of type tuple) of given value."""
if isinstance(value, Iterable):
if len(value) != 2:
raise ValueError(
"Expected `{}` to have exactly 2 elements, got: ({})".format(
name, value
)
)
return value
return tuple(repeat(value, 2))
# ------------------------------------------------------------------------------
# infer_conv_output_attrs()
# ------------------------------------------------------------------------------
# TODO(cfyeh): figure out if we can get `output_dim` without calling the module.
def infer_conv_output_attrs(
module, input_channels, input_dim, batch_size=1, max_length=8
):
"""Get output attributes of a module with input."""
input = torch.randn(batch_size, input_channels, max_length, input_dim)
output = module(input)
output_channels = output.shape[1]
output_dim = output.shape[-1]
return output_channels, output_dim
# ------------------------------------------------------------------------------
# NoOp
# ------------------------------------------------------------------------------
class NoOp(torch.nn.Module):
"""
NoOp simply passes the input as the output.
"""
def __init__(self):
super().__init__()
def forward(self, input: Tensor) -> Tensor:
return input
# ------------------------------------------------------------------------------
# Permute: a torch.nn.Module applies permutation on the input tensor.
# ------------------------------------------------------------------------------
class Permute(torch.nn.Module):
def __init__(self, dims):
super().__init__()
self.dims = dims
def forward(self, input: Tensor) -> Tensor:
return input.permute(self.dims).contiguous()
# ------------------------------------------------------------------------------
# lengths_to_padding_mask()
# ------------------------------------------------------------------------------
def lengths_to_padding_mask(lengths: Tensor) -> Tensor:
"""Convert lengths of shape (B, ) to padding mask."""
batch_size = lengths.shape[0]
max_length = int(torch.max(lengths).item())
padding_mask = torch.arange( # [0, ..., T-1]
max_length, device=lengths.device, dtype=lengths.dtype
).expand(batch_size, max_length) >= lengths.unsqueeze(1)
return padding_mask
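# Minimal usage sketch (illustrative helper): lengths_to_padding_mask() marks
# padded positions, i.e. mask[b, t] is True exactly when t >= lengths[b].
def _example_lengths_to_padding_mask() -> Tensor:
    lengths = torch.tensor([3, 1, 2])
    mask = lengths_to_padding_mask(lengths)  # shape (B, T) = (3, 3)
    # tensor([[False, False, False],
    #         [False,  True,  True],
    #         [False, False,  True]])
    return mask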
# ------------------------------------------------------------------------------
# lengths_to_attention_mask()
# ------------------------------------------------------------------------------
def lengths_to_attention_mask(
lengths: Tensor,
left_context: Optional[int] = None,
right_context: Optional[int] = None,
) -> Optional[Tensor]:
"""
Generate attention mask based on (lengths, left_context, right_context).
left_context is None means unlimited left context.
right_context is None means unlimited right context.
"""
if left_context is None and right_context is None:
return None
max_length = int(torch.max(lengths).item())
    # For example, with `max_length` == 5,
    # indices = tensor([
    #     [ 0,  1,  2,  3,  4],
    #     [-1,  0,  1,  2,  3],
    #     [-2, -1,  0,  1,  2],
    #     [-3, -2, -1,  0,  1],
    #     [-4, -3, -2, -1,  0],
    # ])
# In some cases the second torch.arange is created on cpu which causes a
# failure. Adding the device option to guard against it.
indices = torch.arange(
max_length, device=lengths.device, dtype=lengths.dtype
).expand(max_length, max_length) - torch.arange(
max_length, device=lengths.device
).view(
max_length, -1
)
# For example, with `max_length` == 5,
# bool_mask = tensor([
# [True, True, True, True, True],
# [True, True, True, True, True],
# [True, True, True, True, True],
# [True, True, True, True, True],
# [True, True, True, True, True],
# ])
bool_mask = (
torch.tensor([True]).to(device=lengths.device).expand(max_length, max_length)
)
# For example, with `max_length` == 5, left_context == 2
# left_mask = tensor([
# [ True, True, True, True, True],
# [ True, True, True, True, True],
# [ True, True, True, True, True],
# [False, True, True, True, True],
# [False, False, True, True, True],
# ])
if left_context is not None:
left_mask = indices >= -left_context
bool_mask = bool_mask & left_mask
# For example, with `max_length` == 5, right_context == 1
# right_mask = tensor([
# [True, True, False, False, False],
# [True, True, True, False, False],
# [True, True, True, True, False],
# [True, True, True, True, True],
# [True, True, True, True, True],
# ])
if right_context is not None:
right_mask = indices <= right_context
bool_mask = bool_mask & right_mask
bool_mask = (~bool_mask).to(device=lengths.device)
return bool_mask
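# Minimal usage sketch (illustrative helper): lengths_to_attention_mask() with
# left_context=2 and right_context=1 lets query position t attend only to keys
# in [t - 2, t + 1]; True entries in the returned mask are the blocked pairs.
def _example_lengths_to_attention_mask() -> Optional[Tensor]:
    lengths = torch.tensor([5, 5])
    mask = lengths_to_attention_mask(lengths, left_context=2, right_context=1)
    # mask has shape (T, T) = (5, 5); e.g. row 0 blocks keys 2..4 and row 4
    # blocks keys 0 and 1.
    return mask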
# ------------------------------------------------------------------------------
# infer_output_norm()
# ------------------------------------------------------------------------------
def infer_output_norm(module, output_norm=None):
"""
    Infer the output norm (string and module) needed on the module given the
    desired output normalization.
"""
if output_norm == module.output_norm():
# output_norm already matches module.output_norm().
return (None, NoOp())
if output_norm is None and module.output_norm() is not None:
logger = logging.getLogger("infer_output_norm()")
logger.warning(
"trying to set output_norm ({}) ".format(output_norm)
+ "but got module.output_norm() ({}), ".format(module.output_norm())
+ "the combined output_norm() will be ({})".format(module.output_norm())
)
return (None, NoOp())
if output_norm == "log_softmax":
if module.output_norm() is not None:
raise ValueError(
"incompatible output_norm ({}) ".format(output_norm)
+ "and module.output_norm() ({})".format(module.output_norm())
)
else:
return ("log_softmax", torch.nn.LogSoftmax(dim=-1))
if output_norm == "softmax":
if module.output_norm() is not None:
raise ValueError(
"incompatible output_norm ({}) ".format(output_norm)
+ "and module.output_norm() ({})".format(module.output_norm())
)
else:
return ("softmax", torch.nn.Softmax(dim=-1))
raise ValueError(
"output_norm ({}) not in ".format(output_norm)
+ "supported list = [None, softmax, log_softmax]"
)
# ------------------------------------------------------------------------------
# infer_channels_from_layout()
# ------------------------------------------------------------------------------
def infer_channels_from_layout(layout, channels):
"""Extract the number of channels from the layout."""
if layout in ("TBD", "BTD"):
if channels is not None and channels != 1:
raise ValueError(
"Expected channels ({}) to be 1 for layout = {}".format(
channels, layout
)
)
if channels is None:
return 1
return channels
# ------------------------------------------------------------------------------
# pad_sequence()
# ------------------------------------------------------------------------------
@torch.jit.export
def pad_sequence(
sequence: Tensor,
time_axis: int,
extra_left_context: int = 0,
extra_right_context: int = 0,
) -> Tensor:
"""Pad extra left/right contexts to the sequence."""
if extra_left_context == 0 and extra_right_context == 0:
return sequence
tensors_to_concat = []
if extra_left_context:
size = (extra_left_context,)
fill_value = 0
indices = torch.full(
size=size,
fill_value=fill_value,
dtype=torch.long,
device=sequence.device,
)
left_padding = torch.index_select(sequence, time_axis, indices)
tensors_to_concat.append(left_padding)
tensors_to_concat.append(sequence)
    # NOTE(cfyeh): for efficiency reasons we pad 0 instead of the last frame
    # for extra right contexts.
if extra_right_context:
size = list(sequence.shape)
size[time_axis] = extra_right_context
right_padding = torch.zeros(size, dtype=sequence.dtype, device=sequence.device)
tensors_to_concat.append(right_padding)
padded_sequence = torch.cat(tensors_to_concat, dim=time_axis)
return padded_sequence
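# Minimal usage sketch (illustrative helper): pad_sequence() repeats frame 0
# for the extra left context and zero-pads the extra right context, so the
# time dimension grows by extra_left_context + extra_right_context.
def _example_pad_sequence() -> Tensor:
    sequence = torch.randn(10, 2, 8)  # T x B x D
    padded = pad_sequence(
        sequence, time_axis=0, extra_left_context=3, extra_right_context=2
    )
    assert padded.shape == (15, 2, 8)
    return padded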
# ------------------------------------------------------------------------------
# sequence_to_segments()
# ------------------------------------------------------------------------------
@torch.jit.export
def sequence_to_segments(
sequence: Tensor,
time_axis: int,
lengths: Tensor,
segment_size: Optional[int] = None,
extra_left_context: int = 0,
extra_right_context: int = 0,
) -> List[Tuple[Tensor, Tensor]]:
"""Breaks sequence into segments."""
sequence = pad_sequence(
sequence=sequence,
time_axis=time_axis,
extra_left_context=extra_left_context,
extra_right_context=extra_right_context,
)
lengths = lengths + extra_left_context + extra_right_context
segments: List[Tuple[Tensor, Tensor]] = []
if segment_size is None:
segments.append((sequence, lengths))
return segments
offset = 0
end = sequence.shape[time_axis]
step = segment_size
size = extra_left_context + segment_size + extra_right_context
while offset + extra_left_context + extra_right_context < end:
clamped_size = min(size, end - offset)
segment_lengths = torch.clamp(lengths - offset, min=0, max=clamped_size)
indices = torch.arange(
start=offset,
end=(offset + clamped_size),
step=1,
dtype=torch.long,
device=sequence.device,
)
segment_tensor = torch.index_select(sequence, time_axis, indices)
segments.append((segment_tensor, segment_lengths))
offset = offset + step
return segments
# ------------------------------------------------------------------------------
# segments_to_sequence()
# ------------------------------------------------------------------------------
@torch.jit.export
def segments_to_sequence(
segments: List[Tuple[Tensor, Tensor]], time_axis: int
) -> Tuple[Tensor, Tensor]:
"""Concatenate segments into a full sequence."""
if len(segments) == 1:
return segments[0]
tensors_to_concat: List[Tensor] = []
lengths_to_stack: List[Tensor] = []
for tensor, lengths in segments:
tensors_to_concat.append(tensor)
lengths_to_stack.append(lengths)
sequence = torch.cat(tensors_to_concat, dim=time_axis)
lengths = torch.stack(lengths_to_stack, dim=0)
lengths = torch.sum(lengths, dim=0)
return sequence, lengths
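# Minimal round-trip sketch (illustrative helper): sequence_to_segments()
# splits a padded (T, B, D) batch into fixed-size segments, and
# segments_to_sequence() concatenates them back along the time axis while
# summing the per-segment lengths.
def _example_segmentation_round_trip() -> Tuple[Tensor, Tensor]:
    sequence = torch.randn(12, 2, 4)  # T x B x D
    lengths = torch.tensor([12, 9])
    segments = sequence_to_segments(
        sequence, time_axis=0, lengths=lengths, segment_size=5
    )
    assert len(segments) == 3  # segments of 5, 5 and 2 frames
    merged, merged_lengths = segments_to_sequence(segments, time_axis=0)
    assert merged.shape == sequence.shape  # lengths are restored as well
    return merged, merged_lengths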
def lengths_to_encoder_padding_mask(lengths, batch_first: bool = False):
"""
    Convert lengths (a 1-D Long/Int tensor) to a 2-D binary padding mask.
    Args:
        lengths: a (B, )-shaped tensor
        batch_first: whether to return a (B, T) tensor instead of (T, B)
    Return:
        encoder_padding_mask: a (max_length, B) binary mask (or (B, max_length)
            if batch_first), where [t, b] = False for t < lengths[b] and True
            otherwise
        max_length: maximum length of the B sequences
TODO:
kernelize this function if benchmarking shows this function is slow
"""
max_lengths = torch.max(lengths).item()
bsz = lengths.size(0)
    encoder_padding_mask = (
        torch.arange(max_lengths)  # a (T, ) tensor with [0, ..., T-1]
        .to(lengths.device)  # move to the right device
        .view(1, max_lengths)  # reshape to a (1, T)-shaped tensor
        .expand(bsz, -1)  # expand to a (B, T)-shaped tensor
        >= lengths.view(bsz, 1).expand(-1, max_lengths)
    )
if not batch_first:
return encoder_padding_mask.t(), max_lengths
else:
return encoder_padding_mask, max_lengths
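# Minimal usage sketch (illustrative helper): lengths_to_encoder_padding_mask()
# returns a time-major (T, B) mask by default; pass batch_first=True for (B, T).
def _example_lengths_to_encoder_padding_mask():
    lengths = torch.tensor([4, 2])
    mask, max_len = lengths_to_encoder_padding_mask(lengths, batch_first=True)
    assert max_len == 4 and mask.shape == (2, 4)
    # mask[1] == tensor([False, False,  True,  True])
    return mask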
# ------------------------------------------------------------------------------
# attention suppression
# ------------------------------------------------------------------------------
def attention_suppression(attention_weights: Tensor, scale: float):
    # attention_weights: B x H x qlen x klen; the per-row statistics computed
    # below (with keepdim=True) are B x H x qlen x 1
attention_prob = torch.nn.functional.softmax(attention_weights.float(), dim=-1)
attention_nozeros = attention_prob.to(torch.bool)
nozeros_sum = torch.sum(attention_nozeros.to(torch.float), dim=-1, keepdim=True)
    # For very sparse rows we need to guard against the zero entries
    key_sum = torch.sum(attention_prob, dim=-1, keepdim=True)
    # nozeros_sum should be > 1
    key_mean = key_sum / (nozeros_sum + 1e-8)
# std calculation
dis = (attention_prob - key_mean) * (attention_prob - key_mean)
    # dis_masked[i] = 0 wherever attention_prob[i] == 0, so zero entries do
    # not contribute to the variance
dis_masked = torch.where(
attention_nozeros, dis, attention_prob.new_zeros(attention_prob.size())
)
key_var = torch.sum(dis_masked, dim=-1, keepdim=True)
key_var = key_var / (nozeros_sum - 1.0 + 1e-8)
key_std = torch.sqrt(key_var)
key_thread = key_mean - scale * key_std
    # keep attention_weights[i] where attention_prob[i] >= key_thread (the
    # suppression threshold), otherwise set it to "-inf"
inf_tensor = attention_prob.new_zeros(attention_prob.size()).detach()
inf_tensor[:] = float("-inf")
attention_weights_float = torch.where(
attention_prob < key_thread,
inf_tensor,
attention_weights.float(),
)
return attention_weights_float.type_as(attention_weights)
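# Minimal usage sketch (illustrative helper): attention_suppression() replaces
# scores whose softmax probability falls below (row mean - scale * row std)
# with -inf, so a subsequent softmax ignores them (weak attention suppression).
def _example_attention_suppression() -> Tensor:
    weights = torch.randn(2, 4, 3, 6)  # B x H x qlen x klen
    suppressed = attention_suppression(weights, scale=0.5)
    assert suppressed.shape == weights.shape
    return suppressed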
def layer_norm_backward_hook(module, grad_input, grad_output, clamp_value):
return tuple(torch.clamp(v, min=-clamp_value, max=clamp_value) for v in grad_input)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_text/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from argparse import Namespace
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
import fairseq.data.audio.feature_transforms.utterance_cmvn as utt_cmvn
from fairseq.data import encoders
from fairseq.data.audio.audio_utils import convert_waveform as convert_wav
from fairseq.data.audio.audio_utils import get_fbank
from fairseq.data.audio.audio_utils import get_waveform as get_wav
from fairseq.data.audio.speech_to_text_dataset import SpeechToTextDataset
logger = logging.getLogger(__name__)
class S2THubInterface(nn.Module):
def __init__(self, cfg, task, model):
super().__init__()
self.cfg = cfg
self.task = task
self.model = model
self.model.eval()
self.generator = self.task.build_generator([self.model], self.cfg.generation)
@classmethod
def get_model_input(cls, task, audio: Union[str, torch.Tensor]):
input_type = task.data_cfg.hub.get("input_type", "fbank80")
if input_type == "fbank80_w_utt_cmvn":
if isinstance(audio, str):
feat = utt_cmvn.UtteranceCMVN()(get_fbank(audio))
feat = feat.unsqueeze(0) # T x D -> 1 x T x D
else:
import torchaudio.compliance.kaldi as kaldi
feat = kaldi.fbank(audio, num_mel_bins=80).numpy() # 1 x T x D
elif input_type in {"waveform", "standardized_waveform"}:
if isinstance(audio, str):
feat, sr = get_wav(audio) # C x T
feat, _ = convert_wav(
feat, sr, to_sample_rate=16_000, to_mono=True
) # C x T -> 1 x T
else:
feat = audio.numpy()
else:
raise ValueError(f"Unknown value: input_type = {input_type}")
src_lengths = torch.Tensor([feat.shape[1]]).long()
src_tokens = torch.from_numpy(feat) # 1 x T (x D)
if input_type == "standardized_waveform":
with torch.no_grad():
src_tokens = F.layer_norm(src_tokens, src_tokens.shape)
return {
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
"prev_output_tokens": None,
},
"target_lengths": None,
"speaker": None,
}
@classmethod
def detokenize(cls, task, tokens):
text = task.tgt_dict.string(tokens)
tkn_cfg = task.data_cfg.bpe_tokenizer
tokenizer = encoders.build_bpe(Namespace(**tkn_cfg))
return text if tokenizer is None else tokenizer.decode(text)
@classmethod
def get_prefix_token(cls, task, lang):
prefix_size = int(task.data_cfg.prepend_tgt_lang_tag)
prefix_tokens = None
if prefix_size > 0:
assert lang is not None
lang_tag = SpeechToTextDataset.get_lang_tag_idx(lang, task.tgt_dict)
prefix_tokens = torch.Tensor([lang_tag]).long().unsqueeze(0)
return prefix_tokens
@classmethod
def get_prediction(
cls, task, model, generator, sample, tgt_lang=None, synthesize_speech=False
) -> Union[str, Tuple[str, Tuple[torch.Tensor, int]]]:
_tgt_lang = tgt_lang or task.data_cfg.hub.get("tgt_lang", None)
prefix = cls.get_prefix_token(task, _tgt_lang)
pred_tokens = generator.generate([model], sample, prefix_tokens=prefix)
pred = cls.detokenize(task, pred_tokens[0][0]["tokens"])
eos_token = task.data_cfg.config.get("eos_token", None)
if eos_token:
pred = " ".join(pred.split(" ")[:-1])
if synthesize_speech:
pfx = f"{_tgt_lang}_" if task.data_cfg.prepend_tgt_lang_tag else ""
tts_model_id = task.data_cfg.hub.get(f"{pfx}tts_model_id", None)
speaker = task.data_cfg.hub.get(f"{pfx}speaker", None)
if tts_model_id is None:
logger.warning("TTS model configuration not found")
else:
_repo, _id = tts_model_id.split(":")
tts_model = torch.hub.load(_repo, _id, verbose=False)
pred = (pred, tts_model.predict(pred, speaker=speaker))
return pred
def predict(
self,
audio: Union[str, torch.Tensor],
tgt_lang: Optional[str] = None,
synthesize_speech: bool = False,
) -> Union[str, Tuple[str, Tuple[torch.Tensor, int]]]:
# `audio` is either a file path or a 1xT Tensor
# return either text or (text, synthetic speech)
sample = self.get_model_input(self.task, audio)
return self.get_prediction(
self.task,
self.model,
self.generator,
sample,
tgt_lang=tgt_lang,
synthesize_speech=synthesize_speech,
)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_text/hub_interface.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch
import torch.nn as nn
class Conv1dSubsampler(nn.Module):
"""Convolutional subsampler: a stack of 1D convolution (along temporal
dimension) followed by non-linear activation via gated linear units
(https://arxiv.org/abs/1911.08460)
Args:
in_channels (int): the number of input channels
mid_channels (int): the number of intermediate channels
out_channels (int): the number of output channels
kernel_sizes (List[int]): the kernel size for each convolutional layer
"""
def __init__(
self,
in_channels: int,
mid_channels: int,
out_channels: int,
kernel_sizes: List[int] = (3, 3),
):
super(Conv1dSubsampler, self).__init__()
self.n_layers = len(kernel_sizes)
self.conv_layers = nn.ModuleList(
nn.Conv1d(
in_channels if i == 0 else mid_channels // 2,
mid_channels if i < self.n_layers - 1 else out_channels * 2,
k,
stride=2,
padding=k // 2,
)
for i, k in enumerate(kernel_sizes)
)
def get_out_seq_lens_tensor(self, in_seq_lens_tensor):
out = in_seq_lens_tensor.clone()
for _ in range(self.n_layers):
out = ((out.float() - 1) / 2 + 1).floor().long()
return out
def forward(self, src_tokens, src_lengths):
bsz, in_seq_len, _ = src_tokens.size() # B x T x (C x D)
x = src_tokens.transpose(1, 2).contiguous() # -> B x (C x D) x T
for conv in self.conv_layers:
x = conv(x)
x = nn.functional.glu(x, dim=1)
_, _, out_seq_len = x.size()
x = x.transpose(1, 2).transpose(0, 1).contiguous() # -> T x B x (C x D)
return x, self.get_out_seq_lens_tensor(src_lengths)
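# Minimal usage sketch (illustrative helper, hyperparameters are arbitrary):
# the two stride-2 convolutions reduce the time dimension by roughly 4x, and
# the returned lengths follow the same rule as get_out_seq_lens_tensor().
def _example_conv1d_subsampler():
    subsampler = Conv1dSubsampler(
        in_channels=80, mid_channels=256, out_channels=128, kernel_sizes=[3, 3]
    )
    src_tokens = torch.randn(4, 100, 80)  # B x T x feature
    src_lengths = torch.tensor([100, 80, 60, 40])
    x, out_lengths = subsampler(src_tokens, src_lengths)
    # x is T' x B x C with T' == 25 for the 100-frame input
    assert x.shape[1:] == (4, 128) and out_lengths[0].item() == 25
    return x, out_lengths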
def infer_conv_output_dim(in_channels, input_dim, out_channels):
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim)
x = torch.nn.Conv2d(in_channels, out_channels, 3, stride=2, padding=3 // 2)(x)
x = torch.nn.Conv2d(out_channels, out_channels, 3, stride=2, padding=3 // 2)(x)
x = x.transpose(1, 2)
mb, seq = x.size()[:2]
return x.contiguous().view(mb, seq, -1).size(-1)
class Conv2dSubsampler(nn.Module):
"""Convolutional subsampler: a stack of 2D convolution based on ESPnet implementation
(https://github.com/espnet/espnet)
Args:
input_channels (int): the number of input channels
input_feat_per_channel (int): encoder input dimension per input channel
        conv_out_channels (int): the number of output channels of the conv layers
        encoder_embed_dim (int): encoder embedding dimension
"""
def __init__(
self,
input_channels: int,
input_feat_per_channel: int,
conv_out_channels: int,
encoder_embed_dim: int,
):
super().__init__()
assert input_channels == 1, input_channels
self.conv = torch.nn.Sequential(
torch.nn.Conv2d(
input_channels, conv_out_channels, 3, stride=2, padding=3 // 2
),
torch.nn.ReLU(),
torch.nn.Conv2d(
conv_out_channels,
conv_out_channels,
3,
stride=2,
padding=3 // 2,
),
torch.nn.ReLU(),
)
transformer_input_dim = infer_conv_output_dim(
input_channels, input_feat_per_channel, conv_out_channels
)
self.out = torch.nn.Linear(transformer_input_dim, encoder_embed_dim)
def forward(self, src_tokens, src_lengths):
B, T_i, C = src_tokens.size()
x = src_tokens.view(B, T_i, 1, C).transpose(1, 2).contiguous()
x = self.conv(x)
B, _, T_o, _ = x.size()
x = x.transpose(1, 2).transpose(0, 1).contiguous().view(T_o, B, -1)
x = self.out(x)
subsampling_factor = int(T_i * 1.0 / T_o + 0.5)
input_len_0 = (src_lengths.float() / subsampling_factor).ceil().long()
input_len_1 = x.size(0) * torch.ones([src_lengths.size(0)]).long().to(
input_len_0.device
)
input_lengths = torch.min(input_len_0, input_len_1)
return x, input_lengths
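# Minimal usage sketch (illustrative helper, hyperparameters are arbitrary):
# the two stride-2 2D convolutions also reduce the time dimension by roughly
# 4x before the linear projection to encoder_embed_dim.
def _example_conv2d_subsampler():
    subsampler = Conv2dSubsampler(
        input_channels=1,
        input_feat_per_channel=80,
        conv_out_channels=64,
        encoder_embed_dim=256,
    )
    src_tokens = torch.randn(4, 100, 80)  # B x T x feature
    src_lengths = torch.tensor([100, 80, 60, 40])
    x, out_lengths = subsampler(src_tokens, src_lengths)
    assert x.shape[1:] == (4, 256)  # x is T' x B x encoder_embed_dim
    return x, out_lengths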
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_text/modules/convolution.py |
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import re
from functools import partial
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
from torch import device as Device
from fairseq.models import FairseqEncoder
from fairseq.models.speech_to_text.utils import (
NoOp,
attention_suppression,
layer_norm_backward_hook,
lengths_to_padding_mask,
segments_to_sequence,
)
try:
import torch.ao.quantization as quantization
from torch.ao.quantization.qconfig import (
default_dynamic_qconfig,
per_channel_dynamic_qconfig,
)
except ImportError:
import torch.quantization as quantization
from torch.quantization.qconfig import (
default_dynamic_qconfig,
per_channel_dynamic_qconfig,
)
class RelativePositionEmbedding(nn.Module):
"""
Implementation according to https://arxiv.org/abs/1803.02155
"""
def __init__(self, head_dim, max_position, norm_init=True):
super().__init__()
self.head_dim = head_dim
self.max_position = max_position
self.embeddings = nn.Parameter(torch.Tensor(max_position * 2 + 1, head_dim))
if norm_init:
nn.init.xavier_normal_(self.embeddings)
else:
nn.init.xavier_uniform_(self.embeddings)
def forward(self, input: Tensor):
output = nn.functional.embedding(input.long(), self.embeddings)
return output
class Fp32LayerNorm(nn.Module):
def __init__(
self,
input_dim,
clamp_grad=True,
max_grad_value=256,
eps=1e-5,
elementwise_affine=True,
):
super().__init__()
self.torch_module = torch.nn.LayerNorm(
input_dim, eps=eps, elementwise_affine=elementwise_affine
)
if clamp_grad:
hook = partial(layer_norm_backward_hook, clamp_value=max_grad_value)
self.torch_module.register_backward_hook(hook)
def forward(self, input):
output = torch.nn.functional.layer_norm(
input.float(),
self.torch_module.normalized_shape,
self.torch_module.weight.float()
if self.torch_module.weight is not None
else None,
self.torch_module.bias.float()
if self.torch_module.bias is not None
else None,
self.torch_module.eps,
).type_as(input)
return output
# ------------------------------------------------------------------------------
# PositionwiseFF
# ------------------------------------------------------------------------------
class PositionwiseFF(nn.Module):
"""
FFN layer in transformer.
Args:
input_dim: input embedding dimension
ffn_dim: FFN layer inner dimension
        dropout_on_fc1: dropout for the first linear layer
        dropout_on_fc2: dropout for the second linear layer
activation_fn: activation function used after first linear layer. \
Only relu or gelu is supported.
"""
def __init__(
self, input_dim, ffn_dim, dropout_on_fc1, dropout_on_fc2, activation_fn
):
super(PositionwiseFF, self).__init__()
self.input_dim = input_dim
self.ffn_dim = ffn_dim
if activation_fn == "relu":
ac = nn.ReLU()
elif activation_fn == "gelu":
ac = nn.GELU()
else:
raise ValueError("Unsupported activation_fn = ({})".format(activation_fn))
# fc1 -> ac -> dropout -> fc2 -> dropout
self.module = nn.Sequential(
nn.Linear(input_dim, ffn_dim),
ac,
nn.Dropout(dropout_on_fc1),
nn.Linear(ffn_dim, input_dim),
nn.Dropout(dropout_on_fc2),
)
self.layer_norm = Fp32LayerNorm(input_dim)
def forward(self, input):
module_out = self.module(self.layer_norm(input))
output = module_out + input
return output
def quantize_(self, params=None):
if params and "per_channel" in params and params["per_channel"]:
qconfig = per_channel_dynamic_qconfig
else:
qconfig = default_dynamic_qconfig
quantization.quantize_dynamic(
self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True
)
return self
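# Minimal usage sketch (illustrative helper, dimensions are arbitrary):
# PositionwiseFF is a pre-LayerNorm FFN block with a residual connection, so
# input and output shapes match; quantize_() would additionally swap the two
# Linear layers for dynamically quantized versions in place.
def _example_positionwise_ff():
    ffn = PositionwiseFF(
        input_dim=256,
        ffn_dim=1024,
        dropout_on_fc1=0.1,
        dropout_on_fc2=0.1,
        activation_fn="relu",
    )
    x = torch.randn(20, 4, 256)  # T x B x D
    y = ffn(x)
    assert y.shape == x.shape
    return y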
# ------------------------------------------------------------------------------
# SummarizationLayer
# ------------------------------------------------------------------------------
class SummarizationLayer(nn.Module):
def __init__(self, method, segment_size, embedding_dim):
super(SummarizationLayer, self).__init__()
self.segment_size = segment_size
self.embedding_dim = embedding_dim
nonlin_match = re.match(r"nonlinear\((?P<act>[a-z]+),(?P<dim>[0-9]+)\)", method)
self.method = method
if method == "mean":
self.module = nn.AvgPool1d(
kernel_size=segment_size,
stride=segment_size,
ceil_mode=True,
)
elif method == "max":
self.module = nn.MaxPool1d(
kernel_size=segment_size,
stride=segment_size,
ceil_mode=True,
)
elif method == "linear":
self.module = nn.Linear(segment_size, 1)
elif nonlin_match:
nonlin_args = nonlin_match.groupdict()
act_type = nonlin_args["act"]
hid_dim = int(nonlin_args["dim"])
if act_type == "relu":
act = nn.ReLU()
elif act_type == "gelu":
act = nn.GELU()
else:
raise ValueError("Unsupported activation_fn = ({})".format(act_type))
self.module = nn.Sequential(
nn.Linear(segment_size, hid_dim),
act,
nn.Linear(hid_dim, 1),
)
else:
raise ValueError("Unsupported summarization method = ({})".format(method))
def forward(self, input):
# T, B, D -> B, D, T
input = input.permute(1, 2, 0)
if self.method == "mean" or self.method == "max":
output = self.module(input)
output = output.permute(2, 0, 1)
return output
full_seg_length = input.size(2) // self.segment_size * self.segment_size
if full_seg_length > 0:
# at least one seg is full
B = input.size(0)
D = input.size(1)
input_todo = (
input[:, :, :full_seg_length]
.contiguous()
.view(B, -1, self.segment_size)
)
output = self.module(input_todo)
output = output.view(B, D, -1)
else:
output = input.new_zeros(input.size(0), input.size(1), 0)
left = input.size(2) - full_seg_length
if left > 0:
# when last seg is not full, use zeros as last memory placeholder
zeros = input.new_zeros(input.size(0), input.size(1), 1)
output = torch.cat([output, zeros], dim=2)
output = output.permute(2, 0, 1)
return output
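# Minimal usage sketch (illustrative helper, dimensions are arbitrary): with
# method="mean", SummarizationLayer pools every `segment_size` frames into a
# single summary vector, so a (T, B, D) input becomes
# (ceil(T / segment_size), B, D).
def _example_summarization_layer():
    summarizer = SummarizationLayer(method="mean", segment_size=8, embedding_dim=32)
    x = torch.randn(20, 2, 32)  # T x B x D
    summary = summarizer(x)
    assert summary.shape == (3, 2, 32)  # ceil(20 / 8) == 3 segments
    return summary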
# ------------------------------------------------------------------------------
# NoSegAugmentedMemoryMultiheadAttentionBmm
# ------------------------------------------------------------------------------
class NoSegAugmentedMemoryMultiheadAttentionBmm(nn.Module):
"""
Whole utterance augmented memory multihead attention using BMM.
    Unlike the previous augmented-memory multihead attention, where the
    utterance is chunked into segments, here the same effect is achieved with
    an attention mask. The input embedding [right_context, utterance, summary]
    is a concatenation of the right context, the utterance and the summary.
    The right context block is the concatenation of the right context of every
    segment: [right_context_0, right_context_1, ..., right_context_n].
    For example, with utterance = [v0, v1, v2, ...., v20], segment size 8 and
    right_context size 4, the right context blocks are
    [v8, v9, v10, v11, v16, v17, v18, v19, 0, 0, 0, 0], where v8, v9, v10
    and v11 are the right context of the first segment, v16, v17, v18 and v19
    are the right context of the second segment, and 0, 0, 0, 0 are the right
    context of the last segment.
    utterance corresponds to the input embedding sequence.
    summary is the concatenation of the average of each segment: [summary_0,
    summary_1, ...].
    In augmented memory multihead attention, the query is [right_context,
    utterance, summary] and the key is [memory, right_context, utterance].
    Unlike AugmentedMemoryMultiheadAttentionBmm, memory here is passed from the
    previous attention layer. For the first attention layer, memory is the
    average of each segment.
    Memory is a concatenation of the memory of each segment in the previous
    attention layer. For example, if the current layer is i, then memory is
    [m_0, m_1, ..., m_n], where each m_k is the output of seg_k in layer i-1.
args:
input_dim: input embedding dimension
num_heads: number of heads in multihead self-attention
dropout: attention dropout
        std_scale: if std_scale is not None, weak attention suppression is
            turned on: for std_scale = 0.5, all attention weights smaller than
            mean + 0.5 * std will be suppressed.
scaled_init: whether to use scaled init for linear weight
tanh_on_mem: whether to use tanh on memory output
use_mem: whether to use memory or not. When max_memory_size is 0, then
we don't have memory anymore.
layer_index: current self-attention layer index that is used in depth
initialization
max_relative_position: max relative position used in relative position
embedding
rpe_old_option: To be compatible with previous model. The previous model
was trained with attention += attention + rpe. The correct equation
should be attention = attention + rpe
"""
def __init__(
self,
input_dim,
num_heads,
dropout=0.0,
std_scale=None,
scaled_init=False,
tanh_on_mem=False,
use_mem=True,
mini_batches=False,
negative_inf="-inf",
layer_index=-1,
max_relative_position=0,
rpe_old_option=True,
):
if input_dim % num_heads:
raise ValueError(
"input_dim ({}) must be divisible by num_heads ({})".format(
input_dim, num_heads
)
)
super().__init__()
embed_dim = input_dim
self.e2h_kv = torch.nn.Linear(input_dim, 2 * input_dim, bias=True)
self.e2h_q = torch.nn.Linear(input_dim, input_dim, bias=True)
self.rpe_old_option = rpe_old_option
if max_relative_position > 0:
self.use_rpe = True
self.rpe_k = RelativePositionEmbedding(
head_dim=input_dim // num_heads,
max_position=max_relative_position,
)
self.rpe_v = RelativePositionEmbedding(
head_dim=input_dim // num_heads,
max_position=max_relative_position,
)
else:
self.use_rpe = False
self.rpe_k = None
self.rpe_v = None
if scaled_init:
if layer_index == -1:
gain = 1.0 / math.sqrt(2)
else:
                # Depthwise initialization (https://arxiv.org/abs/2005.09684)
                # stabilizes the training greatly. Use depthwise initialization
                # to replace incremental loss.
gain = 1.0 / math.sqrt(layer_index + 1)
torch.nn.init.xavier_uniform_(self.e2h_kv.weight, gain=gain)
torch.nn.init.xavier_uniform_(self.e2h_q.weight, gain=gain)
self.out_proj = torch.nn.Linear(embed_dim, embed_dim, bias=True)
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.scaling = self.head_dim**-0.5
self.std_scale = std_scale
self.use_mem = use_mem
self.mini_batches = mini_batches
self.negative_inf = negative_inf
if tanh_on_mem:
self.squash_mem = torch.tanh
self.nonlinear_squash_mem = True
else:
self.squash_mem = NoOp()
self.nonlinear_squash_mem = False
def prepare_qkv(
self,
input: Tensor,
mems: Tensor,
lengths: Tensor,
summary_length: int,
lc_length: int,
):
# T: right_context length + utterance_length + summary_length
T, B, D = input.shape
mem_length = mems.size(0)
utterance_length = torch.max(lengths)
right_context_blocks_length = T - utterance_length - summary_length
rc_block = input[:right_context_blocks_length, :, :]
utterance_block = input[right_context_blocks_length : T - summary_length, :, :]
if B == 1:
padding_mask = None
else:
klengths = lengths + mem_length + right_context_blocks_length + lc_length
padding_mask = lengths_to_padding_mask(lengths=klengths)
mem_rc_input = torch.cat([mems, rc_block, utterance_block], dim=0)
# In training lc_length = 0
key_length = mem_rc_input.size(0) + lc_length
rc_input_sum = input
q = self.e2h_q(rc_input_sum)
kv = self.e2h_kv(mem_rc_input)
k, v = kv.chunk(chunks=2, dim=2)
result_qkv = (q, k, v)
input_shape = (T, B, D)
result_lengths_info = (
mem_length,
utterance_length,
right_context_blocks_length,
key_length,
)
if padding_mask is not None:
assert padding_mask.size(0) == B
assert padding_mask.size(1) == key_length
return result_qkv, input_shape, result_lengths_info, padding_mask
def prepare_attention_weights(
self,
q: Tensor,
new_k: Tensor,
new_v: Tensor,
input_shape: Tuple[int, int, int],
rpe: Optional[Tensor],
) -> Tuple[Tensor, Tensor, Tensor]:
T, B, D = input_shape
q = (
q.contiguous().view(-1, B * self.num_heads, self.head_dim).transpose(0, 1)
* self.scaling
)
k = (
new_k.contiguous()
.view(-1, B * self.num_heads, self.head_dim)
.transpose(0, 1)
)
v = (
new_v.contiguous()
.view(-1, B * self.num_heads, self.head_dim)
.transpose(0, 1)
)
attention_weights = torch.bmm(q, k.transpose(1, 2))
if self.use_rpe and rpe is not None and self.rpe_v is not None:
r_k = self.rpe_k(rpe)
# [q, B*h, d] * [q, k, d] -> [B*h, q, k]
attention_weights_rpe = torch.matmul(
q.transpose(0, 1), r_k.transpose(1, 2)
).transpose(0, 1)
attention_weights = attention_weights + attention_weights_rpe
attention_weights_float = attention_weights.float()
return attention_weights, attention_weights_float, v
def prepare_attention_output(
self,
attention_weights: Tensor,
attention_weights_float: Tensor,
v: Tensor,
input_shape: Tuple[int, int, int],
key_length: int,
padding_mask: Optional[Tensor],
rpe: Optional[Tensor],
) -> Tensor:
T, B, D = input_shape
if padding_mask is not None:
attention_weights_float = attention_weights_float.view(
B, self.num_heads, T, key_length
)
attention_weights_float = attention_weights_float.masked_fill(
padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
)
attention_weights_float = attention_weights_float.view(
B * self.num_heads, T, key_length
)
if self.std_scale is not None:
attention_weights_float = attention_suppression(
attention_weights_float, self.std_scale
)
attention_weights_float = torch.nn.functional.softmax(
attention_weights_float, dim=-1
)
attention_weights = attention_weights_float.type_as(attention_weights)
attention_probs = torch.nn.functional.dropout(
attention_weights, p=self.dropout, training=self.training
)
        # [B * n_head, T, key_length] x [B * n_head, key_length, d_head]
        # -> [B * n_head, T, d_head]
attention = torch.bmm(attention_probs, v)
if self.use_rpe and rpe is not None and self.rpe_v is not None:
r_v = self.rpe_v(rpe)
attention_rpe = torch.matmul(
attention_probs.transpose(0, 1), r_v
).transpose(0, 1)
if self.rpe_old_option:
attention += attention + attention_rpe
else:
attention = attention + attention_rpe
assert list(attention.shape) == [B * self.num_heads, T, self.head_dim]
attention = attention.transpose(0, 1).contiguous().view(T, B, self.embed_dim)
rc_output_memory = self.out_proj(attention)
return rc_output_memory
@torch.jit.unused
def forward(
self,
input: Tensor,
lengths: Tensor,
mems: Tensor,
attention_mask: Tensor,
pre_mems: Optional[Tensor] = None,
left_context_key: Optional[Tensor] = None,
left_context_val: Optional[Tensor] = None,
rpe: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""
forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in training.
args:
input: formed in the following way
                [right_context_0, right_context_1, ..., seg_0, seg_1,
..., summary_0, summary_1,..]
lengths: the length of query which is [seg_0, seg_1, ....]
mems: [mem_0, mem_1, ...].
attention_mask: attention mask for query = [right_context, query, summary]
                key = [mem, right_context, query]. This is only used for training.
"""
if self.use_mem:
mem_length = mems.size(0)
summary_length = mem_length + 1
if pre_mems is not None:
mems = torch.cat([pre_mems, mems], dim=0)
else:
mem_length = 0
summary_length = 0
# In training, lc_length = 0
if left_context_key is not None:
lc_length = left_context_key.size(0)
else:
lc_length = 0
results = self.prepare_qkv(
input=input,
mems=mems,
lengths=lengths,
summary_length=summary_length,
lc_length=lc_length,
)
result_qkv, input_shape, result_lengths_info, padding_mask = results
q, k, v = result_qkv
(
mem_length,
utterance_length,
right_context_blocks_length,
key_length,
) = result_lengths_info
if left_context_key is not None:
# add the cache key and value
new_k = torch.cat(
[
k[: mem_length + right_context_blocks_length, :, :],
left_context_key,
k[-utterance_length:, :, :],
],
dim=0,
)
new_v = torch.cat(
[
v[: mem_length + right_context_blocks_length, :, :],
left_context_val,
v[-utterance_length:, :, :],
],
dim=0,
)
next_k = new_k[mem_length + right_context_blocks_length :, :, :]
next_v = new_v[mem_length + right_context_blocks_length :, :, :]
else:
new_k = k
new_v = v
next_k = None
next_v = None
attention_weights, attention_weights_float, v = self.prepare_attention_weights(
q=q,
new_k=new_k,
new_v=new_v,
input_shape=input_shape,
rpe=rpe,
)
# mask attention
attention_mask = attention_mask.unsqueeze(0)
attention_weights_float = attention_weights_float.masked_fill(
attention_mask, float(self.negative_inf)
)
rc_output_memory = self.prepare_attention_output(
attention_weights=attention_weights,
attention_weights_float=attention_weights_float,
v=v,
input_shape=input_shape,
key_length=key_length,
padding_mask=padding_mask,
rpe=rpe,
)
if self.use_mem:
# next_m length equals to summary length - 1
# last memory is ignored
if self.mini_batches:
next_m = rc_output_memory[-summary_length:]
else:
next_m = rc_output_memory[-summary_length:-1]
next_m = self.squash_mem(next_m)
# rc and output
rc_output = rc_output_memory[:-summary_length]
if not self.nonlinear_squash_mem:
next_m = torch.clamp(next_m, min=-10, max=10)
else:
next_m = mems
rc_output = rc_output_memory
return rc_output, next_m, next_k, next_v
@torch.jit.export
def forward_jit(
self,
input: Tensor,
lengths: Tensor,
mems: Tensor,
left_context_key: Tensor,
left_context_val: Tensor,
rpe: Optional[Tensor],
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""
forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in decoding.
args:
input: formed in the following way
                [right_context_0, right_context_1, ..., seg_0, seg_1,
..., summary_0, summary_1,..]
lengths: the length of query which is [seg_0, seg_1, ....]
mems: [mem_0, mem_1, ...].
left_context_key: left_context for key part. This is only used for online
decoding. In training, this is empty tensor
left_context_val: left_context for value part. This is only used for online
decoding. In training, this is empty tensor
"""
lc_length = left_context_key.size(0)
# In decoding, summary_length = 1 or 0
if self.use_mem:
summary_length = 1
else:
summary_length = 0
results = self.prepare_qkv(
input=input,
mems=mems,
lengths=lengths,
summary_length=summary_length,
lc_length=lc_length,
)
result_qkv, input_shape, result_lengths_info, padding_mask = results
q, k, v = result_qkv
(
mem_length,
utterance_length,
right_context_blocks_length,
key_length,
) = result_lengths_info
# add the cache key and value
new_k = torch.cat(
[
k[: mem_length + right_context_blocks_length, :, :],
left_context_key,
k[-utterance_length:, :, :],
],
dim=0,
)
new_v = torch.cat(
[
v[: mem_length + right_context_blocks_length, :, :],
left_context_val,
v[-utterance_length:, :, :],
],
dim=0,
)
next_k = new_k[mem_length + right_context_blocks_length :, :, :]
next_v = new_v[mem_length + right_context_blocks_length :, :, :]
attention_weights, attention_weights_float, v = self.prepare_attention_weights(
q=q,
new_k=new_k,
new_v=new_v,
input_shape=input_shape,
rpe=rpe,
)
# In online decoding, we don't have attention mask. But we still need
# to disable the attention from summary query to memory
attention_weights_float[:, -1, :mem_length] = float(self.negative_inf)
rc_output_memory = self.prepare_attention_output(
attention_weights=attention_weights,
attention_weights_float=attention_weights_float,
v=v,
input_shape=input_shape,
key_length=key_length,
padding_mask=padding_mask,
rpe=rpe,
)
# In decoding, summary length is 1
if self.use_mem:
next_m = rc_output_memory[-1:]
next_m = self.squash_mem(next_m)
# rc and output
rc_output = rc_output_memory[:-1]
if not self.nonlinear_squash_mem:
next_m = torch.clamp(next_m, min=-10, max=10)
else:
rc_output = rc_output_memory
# empty tensor as input mems
next_m = mems
return rc_output, next_m, next_k, next_v
def quantize_(self, params=None):
if params and "per_channel" in params and params["per_channel"]:
qconfig = per_channel_dynamic_qconfig
else:
qconfig = default_dynamic_qconfig
quantization.quantize_dynamic(
self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True
)
return self
class NoSegAugmentedMemoryTransformer(nn.Module):
"""
Whole utterance augmented memory transformer.
    This is not a pyspeech nn layer. It is used as a module in a master layer
    where multiple transformers are used.
"""
def __init__(
self,
input_dim,
num_heads,
ffn_dim,
dropout_in_attn=0.0,
dropout_on_attn=None,
dropout_on_fc1=None,
dropout_on_fc2=None,
activation_fn="relu",
tanh_on_mem=False,
std_scale=None,
scaled_init=False,
segment_size=128,
use_mem=True,
mini_batches=False,
negative_inf="-inf",
layer_index=-1,
summarization_method="mean",
max_relative_position=0,
rpe_old_option=True,
):
super(NoSegAugmentedMemoryTransformer, self).__init__()
self.attention = NoSegAugmentedMemoryMultiheadAttentionBmm(
input_dim=input_dim,
num_heads=num_heads,
dropout=dropout_in_attn,
scaled_init=scaled_init,
tanh_on_mem=tanh_on_mem,
std_scale=std_scale,
use_mem=use_mem,
mini_batches=mini_batches,
negative_inf=negative_inf,
layer_index=layer_index,
max_relative_position=max_relative_position,
)
self.dropout = nn.Dropout(dropout_on_attn)
self.pos_ff = PositionwiseFF(
input_dim=input_dim,
ffn_dim=ffn_dim,
dropout_on_fc1=dropout_on_fc1,
dropout_on_fc2=dropout_on_fc2,
activation_fn=activation_fn,
)
self.layer_norm_pre = Fp32LayerNorm(input_dim)
self.layer_norm = Fp32LayerNorm(input_dim)
self.segment_size = segment_size
self.use_mem = use_mem
self.memory_op = SummarizationLayer(
summarization_method, segment_size, input_dim
)
def set_mini_batches(self, mini_batches):
self.attention.mini_batches = mini_batches
def gen_summary_queries(self, input):
sum_input = self.memory_op(input)
return sum_input
def pre_attention_ops(self, input, right_context_blocks):
rc_length = right_context_blocks.size(0)
input_length = input.size(0)
rc_and_input = torch.cat([right_context_blocks, input], dim=0)
residual_input = rc_and_input
rc_and_input = self.layer_norm_pre(rc_and_input)
query_input = rc_and_input[-input_length:, :, :]
return rc_length, input_length, residual_input, query_input, rc_and_input
def after_attention_ops(self, attention_output, residual_input):
output = self.dropout(attention_output)
output = output + residual_input
output = self.pos_ff(output)
output = self.layer_norm(output)
return output
@torch.jit.export
def forward_jit(
self,
input: Tensor,
lengths: Tensor,
mems: Tensor,
left_context_key: Tensor,
left_context_val: Tensor,
right_context_blocks: Tensor,
rpe: Optional[Tensor],
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
results = self.pre_attention_ops(input, right_context_blocks)
rc_length, input_length, residual_input, query_input, rc_and_input = results
# In online decoding, the summary query size is always 1 or 0
if self.use_mem:
summary_query = self.gen_summary_queries(query_input)
summary_query = summary_query[0:1, :, :]
rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0)
else:
rc_qu_su = rc_and_input
rc_output, next_m, next_k, next_v = self.attention.forward_jit(
input=rc_qu_su,
lengths=lengths,
mems=mems,
left_context_key=left_context_key,
left_context_val=left_context_val,
rpe=rpe,
)
rc_output = self.after_attention_ops(rc_output, residual_input)
results = (
rc_output[-input_length:, :, :],
next_m,
rc_output[0:rc_length, :, :],
next_k,
next_v,
)
return results
@torch.jit.unused
def forward(
self,
input,
lengths,
mems,
right_context_blocks,
attention_mask,
pre_mems,
left_context_key,
left_context_val,
rpe,
):
results = self.pre_attention_ops(input, right_context_blocks)
rc_length, input_length, residual_input, query_input, rc_and_input = results
if self.use_mem:
summary_query = self.gen_summary_queries(query_input)
rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0)
else:
rc_qu_su = rc_and_input
rc_output, next_m, next_k, next_v = self.attention(
input=rc_qu_su,
lengths=lengths,
mems=mems,
attention_mask=attention_mask,
pre_mems=pre_mems,
left_context_key=left_context_key,
left_context_val=left_context_val,
rpe=rpe,
)
        # [TODO] Note that memory does not go through pos_ff. What happens if
        # we pass memory through the pos_ff as well?
rc_output = self.after_attention_ops(rc_output, residual_input)
results = (
rc_output[-input_length:, :, :],
next_m,
rc_output[0:rc_length, :, :],
next_k,
next_v,
)
return results
class NoSegAugmentedMemoryTransformerEncoderLayer(FairseqEncoder):
"""
Whole utterance augmented memory transformer encoder layer. This is a master layer
    where multiple augmented memory transformers can be defined. There are two
    reasons to set up such a master layer:
    1. The attention mask only needs to be defined once; all the layers in the
    master layer share the same mask.
    2. The pyspeech nn layer has a special input and output format, and a single
    master layer makes it easier to pass memory between the different layers
    inside it.
args:
input_dim: input embedding dimension
num_heads: number of heads in multihead self-attention
ffn_dim: ffn dimension in FFN layer
num_layers: number of augmented memory transformer layers
dropout_in_attn: dropout used in multi-head self-attention
        dropout_on_attn: dropout used for the output of the multihead self-attention
dropout_on_fc1: dropout used in FFN layer for the first linear layer
dropout_on_fc2: dropout used in FFN layer for the second linear layer
segment_size: segment size for each segment
        context_config: (left_context_size, right_context_size) defines the surrounding context size
for each segment
max_memory_size: maximum memory size used for each segment
scaled_init: whether use scaled init for weight initialization in attention layer
        std_scale: if std_scale is not None, weak attention suppression is
            turned on: for std_scale = 0.5, all attention weights smaller than
            mean + 0.5 * std will be suppressed.
activation_fn: activation function used in FFN layer. [ReLU, GELU] supported
        tanh_on_mem: whether to use tanh on memory
        mini_batches: use mini-batch training
negative_inf: the negative infinity value used in attention masking. default is "-inf".
For some situation, e.g. LM. it is better to use "-1e8" to avoid nan issue.
        summarization_method: method to generate the segment summarization embedding
        max_relative_position: max relative position for relative position embedding
rpe_old_option: To be compatible with previous model. The previous model
was trained with attention += attention + rpe. The correct equation
should be attention = attention + rpe
[TODO]: remove the rpe_old_option by the end of 2021 Q1.
"""
def __init__(
self,
input_dim,
num_heads,
ffn_dim,
num_layers=1,
dropout_in_attn=0.0,
dropout_on_attn=0.0,
dropout_on_fc1=0.0,
dropout_on_fc2=0.0,
segment_size=128,
context_config=(0, 0),
max_memory_size=0,
scaled_init=True,
std_scale=None,
activation_fn="relu",
tanh_on_mem=False,
mini_batches=False,
negative_inf="-inf",
deep_init=True,
summarization_method="mean",
max_relative_position=0,
rpe_old_option=True,
):
super().__init__(None)
if input_dim % num_heads:
raise ValueError(
"input_dim ({}) must be divisible by num_heads ({})".format(
input_dim, num_heads
)
)
        # We used to support a growing memory size. However, that causes
        # cross-stream batching failures, so we now require an exact max memory size.
if max_memory_size < 0:
raise ValueError("max_memory_size must be >= 0")
        # Assign the left/right context sizes. In decoding, the left context is
        # cached, so the online decoder does not need to re-assign it.
self.left_context, self.right_context = context_config
self.segment_size = segment_size
self.memory_dim = input_dim
self.max_memory_size = max_memory_size
self.mini_batches = mini_batches
if self.max_memory_size != 0:
self.use_mem = True
else:
self.use_mem = False
self.memory_op = SummarizationLayer(
summarization_method, segment_size, input_dim
)
self.layers = torch.nn.ModuleList()
self.num_layers = num_layers
self.max_relative_position = max_relative_position
if self.max_relative_position > 0:
self.use_rpe = True
else:
self.use_rpe = False
for i in range(self.num_layers):
if deep_init:
layer_index = i
else:
layer_index = -1
self.layers.append(
NoSegAugmentedMemoryTransformer(
num_heads=num_heads,
input_dim=input_dim,
ffn_dim=ffn_dim,
dropout_in_attn=dropout_in_attn,
dropout_on_attn=dropout_on_attn,
dropout_on_fc1=dropout_on_fc1,
dropout_on_fc2=dropout_on_fc2,
segment_size=segment_size,
std_scale=std_scale,
activation_fn=activation_fn,
tanh_on_mem=tanh_on_mem,
scaled_init=scaled_init,
use_mem=self.use_mem,
mini_batches=mini_batches,
negative_inf=negative_inf,
layer_index=layer_index,
summarization_method=summarization_method,
max_relative_position=max_relative_position,
rpe_old_option=rpe_old_option,
)
)
def set_mini_batches(self, mini_batches):
# handy function only used for unit test
self.mini_batches = mini_batches
for layer in self.layers:
layer.set_mini_batches(mini_batches)
def _get_relative_position(
self,
input: Tensor,
max_relative_position: int,
left_context_length: int,
past_length: int,
is_decoding: bool,
):
# For training, we copy the right context to the start of the utterance
# First dimension in distance is corresponding to query.
# [right context, utterance, summary vector]
# Second dimension in distance is corresponding to key.
# [Memory bank, right context, utterance]
# For summary vector in query part, the distance with
# all other position is 2*max_position. For memory bank in key,
# the distance with all other positions is 0.
T, B, D = input.shape
num_segs = math.ceil((T - self.right_context) / self.segment_size)
# utterance
u_st = past_length * self.segment_size
u_ed = u_st + T
utterance_ranges = torch.arange(u_st, u_ed - self.right_context)
# left context. Only in minibatch or decoding
left_context_ranges = torch.arange(u_st - left_context_length, u_st)
# Right context block
# right context + utterance
right_context_blocks = []
for i in range(0, num_segs - 1):
st = (i + 1) * self.segment_size + u_st
ed = st + self.right_context
assert ed < u_ed
temp = torch.arange(st, ed)
right_context_blocks.append(temp)
right_context_blocks.append(torch.arange(u_ed - self.right_context, u_ed))
right_context_ranges = torch.cat(right_context_blocks)
if self.use_mem:
# Memory bank
# The position for memory -n, .., -1
if is_decoding:
memory_size = min(past_length, self.max_memory_size)
else:
memory_size = num_segs + past_length - 1
memory_bank_ranges = torch.arange(
-max_relative_position - 1, -max_relative_position - 1 - memory_size, -1
)
# summary vector
            # The position for the summary vector is T + max_relative_position + 1;
            # after the clamping, the relative position is max_relative_position.
summary_pos_st = u_ed + max_relative_position + 1
summary_vector_ranges = torch.arange(
summary_pos_st, summary_pos_st + num_segs
)
key_ranges = torch.cat(
[
memory_bank_ranges,
right_context_ranges,
left_context_ranges,
utterance_ranges,
]
)
query_ranges = torch.cat(
[right_context_ranges, utterance_ranges, summary_vector_ranges]
)
else:
key_ranges = torch.cat(
[right_context_ranges, left_context_ranges, utterance_ranges]
)
query_ranges = torch.cat([right_context_ranges, utterance_ranges])
distance = key_ranges[None, :] - query_ranges[:, None]
distance_clamp = (
torch.clamp(distance, -max_relative_position, max_relative_position)
+ max_relative_position
)
distance_clamp = distance_clamp.to(input.device).long().detach()
return distance_clamp
def _get_attention_mask(self, input, past_length=0, left_context_cache=0):
# attention mask for each query contains three parts:
# 1. memory part
# 2. left_context + segment
# 3. right_context_block
        # so for each segment and its corresponding right context block,
# the attention matrix is formed by 9 parts:
# [0, m, 0, 0, right_context, 0, 0, seg, 0]
# [before memory, memory, after memory, before right context, right_context,
# after right context, before seg, seg, after seg]
#
# Query is formed in the way as [right_context_blocks, utterance, summary]
#
        # Note: putting m and right_context before the segment is convenient
        # for the padding_mask operation.
# Key lengths = m_length + right_context_block_length + lengths
utterance_length, batch_size, _ = input.shape
summary_length = math.ceil(utterance_length / self.segment_size)
num_segs = summary_length
rc_length = self.right_context * num_segs
rc = self.right_context
lc = self.left_context
        # When using mini-batches, a left-context cache is available for the
        # current sequence.
lcc = left_context_cache
        # If max_memory_size is 0, we have neither memory nor summary.
        # past_length is the memory carried over from the previous sequence.
if self.use_mem:
mem_length = num_segs - 1 + past_length
else:
mem_length = 0
rc_mask = []
query_mask = []
summary_mask = []
for j in range(0, num_segs):
ssize = min(self.segment_size, utterance_length - j * self.segment_size)
rc_size = rc
rc_mat = []
q_mat = []
s_mat = []
m_start = max(j + past_length - self.max_memory_size, 0)
# max_memory_size is 0, then we don't use memory
if self.use_mem:
# part 0: before memory
rc_mat.append(input.new_zeros(rc_size, m_start))
q_mat.append(input.new_zeros(ssize, m_start))
s_mat.append(input.new_zeros(1, m_start))
# part 1: memory
col_1 = j + past_length - m_start
rc_mat.append(torch.ones(rc_size, col_1, device=input.device))
q_mat.append(torch.ones(ssize, col_1, device=input.device))
                # Based on D22875746, disabling summary-query attention
                # on memory is better for long-form utterances.
s_mat.append(input.new_zeros(1, col_1))
# part 2: after memory
col_2 = mem_length - (j + past_length)
rc_mat.append(input.new_zeros(rc_size, col_2))
q_mat.append(input.new_zeros(ssize, col_2))
s_mat.append(input.new_zeros(1, col_2))
# part 3: before right context
rc_start = j * rc
rc_mat.append(input.new_zeros(rc_size, rc_start))
q_mat.append(input.new_zeros(ssize, rc_start))
s_mat.append(input.new_zeros(1, rc_start))
# part 4: right context
rc_end = rc_start + rc
col_4 = rc
rc_mat.append(torch.ones(rc_size, col_4, device=input.device))
q_mat.append(torch.ones(ssize, col_4, device=input.device))
s_mat.append(torch.ones(1, col_4, device=input.device))
# part 5: after right context
col_5 = rc_length - rc_end
rc_mat.append(input.new_zeros(rc_size, col_5))
q_mat.append(input.new_zeros(ssize, col_5))
s_mat.append(input.new_zeros(1, col_5))
# part 6: before query segment
seg_start = max(j * self.segment_size + lcc - lc, 0)
rc_mat.append(input.new_zeros(rc_size, seg_start))
q_mat.append(input.new_zeros(ssize, seg_start))
s_mat.append(input.new_zeros(1, seg_start))
# part 7: query segment
# note: the right context is placed in the right context block;
# here we only need to consider the left context
seg_end = min((j + 1) * self.segment_size + lcc, utterance_length + lcc)
col_7 = seg_end - seg_start
rc_mat.append(torch.ones(rc_size, col_7, device=input.device))
q_mat.append(torch.ones(ssize, col_7, device=input.device))
s_mat.append(torch.ones(1, col_7, device=input.device))
# part 8: after query segment
col_8 = utterance_length + lcc - seg_end
rc_mat.append(input.new_zeros(rc_size, col_8))
q_mat.append(input.new_zeros(ssize, col_8))
s_mat.append(input.new_zeros(1, col_8))
rc_mask.append(torch.cat(rc_mat, dim=1))
query_mask.append(torch.cat(q_mat, dim=1))
summary_mask.append(torch.cat(s_mat, dim=1))
# if there is no memory, we don't need the summary either
if self.use_mem:
attention_mask = (
1
- torch.cat(
[
torch.cat(rc_mask, dim=0),
torch.cat(query_mask, dim=0),
torch.cat(summary_mask, dim=0),
],
dim=0,
)
).to(torch.bool)
else:
attention_mask = (
1
- torch.cat(
[torch.cat(rc_mask, dim=0), torch.cat(query_mask, dim=0)], dim=0
)
).to(torch.bool)
return attention_mask
@torch.jit.export
def init_state(
self, batch_size: int, device: Optional[Device] = None
) -> List[Tensor]:
empty_memory = torch.zeros(
self.num_layers,
self.max_memory_size,
batch_size,
self.memory_dim,
device=device,
)
left_context_key = torch.zeros(
self.num_layers,
self.left_context,
batch_size,
self.memory_dim,
device=device,
)
left_context_val = torch.zeros(
self.num_layers,
self.left_context,
batch_size,
self.memory_dim,
device=device,
)
past_length = torch.zeros(1, batch_size, dtype=torch.int32, device=device)
return [empty_memory, left_context_key, left_context_val, past_length]
@torch.jit.export
def batch_state(self, states: List[List[Tensor]]) -> List[Tensor]:
if len(states) == 0:
return []
batched_m = []
batched_lc_key = []
batched_lc_val = []
batched_past_length = []
for state in states:
if len(state) == 0:
continue
m, lc_key, lc_val, past_length = state
batched_m.append(m)
batched_lc_key.append(lc_key)
batched_lc_val.append(lc_val)
batched_past_length.append(past_length)
if (
(len(batched_m) == 0)
or (len(batched_lc_key) == 0)
or (len(batched_lc_val) == 0)
or (len(batched_past_length) == 0)
):
return [
torch.tensor([]),
torch.tensor([]),
torch.tensor([]),
torch.tensor([]),
]
batched_m = torch.cat(batched_m, dim=2)
batched_lc_key = torch.cat(batched_lc_key, dim=2)
batched_lc_val = torch.cat(batched_lc_val, dim=2)
batched_past_length = torch.cat(batched_past_length, dim=1)
return [batched_m, batched_lc_key, batched_lc_val, batched_past_length]
@torch.jit.export
def reorder_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]:
if len(state) == 0:
return []
m, lc_key, lc_val, past_length = state
indices = indices.to(device=m.device)
reord_m = torch.index_select(m, 2, indices)
reord_lc_key = torch.index_select(lc_key, 2, indices)
reord_lc_val = torch.index_select(lc_val, 2, indices)
reord_past_length = torch.index_select(past_length, 1, indices)
return [reord_m, reord_lc_key, reord_lc_val, reord_past_length]
@torch.jit.export
def reset_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]:
m, lc_key, lc_val, past_length = state
m = m.index_fill(dim=2, index=indices, value=0.0)
lc_key = lc_key.index_fill(dim=2, index=indices, value=0.0)
lc_val = lc_val.index_fill(dim=2, index=indices, value=0.0)
past_length = past_length.index_fill(dim=1, index=indices, value=0)
return [m, lc_key, lc_val, past_length]
@torch.jit.export
def state_size(self) -> int:
return 4
@torch.jit.export
def batch_size_in_state(
self, state: Optional[List[Tensor]], sloppy: bool = True
) -> Optional[int]:
if state is None:
return None
return state[0].size(2)
def gen_summary_queries(self, input):
sum_input = self.memory_op(input)
return sum_input
def _gen_right_context_padded_input(self, input):
# This function deals with input that is already
# padded with right context (e.g. minibatch training)
right_context_blocks = []
T, B, D = input.shape
num_segs = math.ceil((T - self.right_context) / self.segment_size)
for i in range(0, num_segs - 1):
st = (i + 1) * self.segment_size
ed = st + self.right_context
assert ed < T
temp = input[st:ed, :, :]
right_context_blocks.append(temp)
# last segment right context is already available
right_context_blocks.append(input[T - self.right_context :, :, :])
return torch.cat(right_context_blocks, dim=0)
def _gen_segs_right_context(self, input, lengths):
segments = []
T, B, D = input.size()
nT = T - self.right_context
# assume input is right context padded
num_segs = math.ceil(nT / self.segment_size)
# pad zeros to the utterance to make sure each
# segment has the same right context.
for i in range(0, num_segs - 1):
st = i * self.segment_size
ed = min(T, st + self.segment_size + self.right_context)
temp = input[st:ed, :, :]
rest_lengths = torch.clamp(
lengths - self.segment_size, min=0, max=nT - (i + 1) * self.segment_size
)
segments.append((temp, lengths - rest_lengths + self.right_context))
lengths = rest_lengths
last_seg = input[st + self.segment_size :, :, :]
segments.append((last_seg, rest_lengths + self.right_context))
return segments
@torch.jit.unused
def forward(
self, input: Tensor, padding_masks: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]:
# Xutai: originally the second argument is lengths.
lengths = (~padding_masks).sum(dim=1).long()
# mini batch training.
if self.mini_batches:
return self.forward_mini_batches(input, lengths, state)
# regular full-sequence training. Note: we assume the right context is
# provided in the input.
T, B, D = input.size()
right_context_blocks = self._gen_right_context_padded_input(input)
# generate the relative positional embedding
if self.use_rpe:
rpe = self._get_relative_position(
input=input,
max_relative_position=self.max_relative_position,
left_context_length=0,
past_length=0,
is_decoding=False,
)
else:
rpe = None
input = input[: T - self.right_context, :, :]
attention_mask = self._get_attention_mask(input)
# the first layer uses each segment mean as memory;
# ignore the last segment's average
if self.use_mem:
mems = self.gen_summary_queries(input)[:-1, :, :]
else:
mems = torch.zeros(0, input.size(1), input.size(2), device=input.device)
mems = mems.type_as(input)
output = input
all_outputs = []
for layer in self.layers:
output, mems, right_context_blocks, _, _ = layer(
input=output,
lengths=lengths,
attention_mask=attention_mask,
mems=mems,
right_context_blocks=right_context_blocks,
pre_mems=None,
left_context_key=None,
left_context_val=None,
rpe=rpe,
)
all_outputs.append(output)
return output, padding_masks, [], all_outputs
def forward_jit_mini_batch_init(
self,
seg: Tensor,
state: Optional[List[Tensor]] = None,
is_decoding: bool = False,
):
# Prepare state. In whole sequence training, state is ignored.
# For minibatch training, we need to prepare state
if state is None:
state = self.init_state(batch_size=seg.size(1), device=seg.device)
if seg.dtype == torch.half:
state = [state[0].half(), state[1].half(), state[2].half(), state[3]]
if self.use_mem:
# note: the input average is computed only on seg, not on the right context.
# the first layer uses each segment mean as memory; the last
# segment's average is stored in the state
full_mems = self.gen_summary_queries(seg)
if is_decoding:
mems = full_mems[0:1, :, :]
state_mems = torch.cat([state[0][0], mems], dim=0)
else:
mems = full_mems[:-1, :, :]
state_mems = torch.cat([state[0][0], full_mems], dim=0)
else:
mems = state[0][0]
state_mems = mems
# track the number of processed segments (i.e. memory entries);
# sequences in the same batch share the same past length
past_length = state[3][0][0].item()
past_left_context = min(past_length * self.segment_size, self.left_context)
past_length = min(self.max_memory_size, past_length)
return state, mems, state_mems, past_length, past_left_context
def state_update_before(
self, layer: int, state: List[Tensor], past_length: int, past_left_context: int
):
pre_mems = state[0][layer][self.max_memory_size - past_length :, :, :]
lc_key = state[1][layer][self.left_context - past_left_context :, :, :]
lc_val = state[2][layer][self.left_context - past_left_context :, :, :]
return pre_mems, lc_key, lc_val
def state_update_after(
self,
layer: int,
state: List[Tensor],
mems: Tensor,
next_key: Tensor,
next_val: Tensor,
mems_list: List[Tensor],
lc_key_list: List[Tensor],
lc_val_list: List[Tensor],
):
# mems is used for next layer
if layer < self.num_layers - 1:
state_mems = torch.cat([state[0][layer + 1], mems], dim=0)
mems_list.append(state_mems[-self.max_memory_size :, :, :])
# when mems are passed to the next sequence, we need the last memory; when
# mems are used for the next layer, we can ignore the last memory
mems = mems[:-1, :, :]
# note: state[1][i] and state[2][i] originally have length self.left_context
new_k = torch.cat([state[1][layer], next_key], dim=0)
new_v = torch.cat([state[2][layer], next_val], dim=0)
lc_key_list.append(new_k[-self.left_context :, :, :])
lc_val_list.append(new_v[-self.left_context :, :, :])
return mems_list, lc_key_list, lc_val_list, mems
def state_update_after_loop(
self,
state: List[Tensor],
mems_list: List[Tensor],
lc_key_list: List[Tensor],
lc_val_list: List[Tensor],
update_length: int,
):
state[0] = torch.stack(mems_list, dim=0)
state[1] = torch.stack(lc_key_list, dim=0)
state[2] = torch.stack(lc_val_list, dim=0)
state[3] = state[3] + update_length
return state
@torch.jit.unused
def forward_mini_batches(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]:
T, B, D = input.size()
# input without right context
seg = input[: T - self.right_context, :, :]
# get right context blocks
right_context_blocks = self._gen_right_context_padded_input(input)
mems_list = []
lc_key_list = []
lc_val_list = []
results = self.forward_jit_mini_batch_init(seg, state, False)
state, mems, state_mems, past_length, past_left_context = results
# relative position embedding
if self.use_rpe:
rpe = self._get_relative_position(
input=input,
max_relative_position=self.max_relative_position,
left_context_length=past_left_context,
past_length=past_length,
is_decoding=False,
)
else:
rpe = None
# get attention mask based on seg (not include right context) and available
# left context
attention_mask = self._get_attention_mask(seg, past_length, past_left_context)
mems_list.append(state_mems[-self.max_memory_size :, :, :])
output = seg
i = 0
all_outputs = []
for layer in self.layers:
# In order to make cross-stream batching work, the memory, left context key
# and left context value in the state should always have the same shape.
# We use the past length to track the number of processed segments. In this
# way, we take only the essential memory, left context key and left
# context value out of the state. After finishing the forward pass for the
# current segment, we add the new memory, left context key and left context
# value into the state and trim out the oldest part to keep the shape consistent.
pre_mems, lc_key, lc_val = self.state_update_before(
i, state, past_length, past_left_context
)
output, mems, right_context_blocks, next_key, next_val = layer.forward(
input=output,
lengths=lengths,
attention_mask=attention_mask,
mems=mems,
right_context_blocks=right_context_blocks,
pre_mems=pre_mems,
left_context_key=lc_key,
left_context_val=lc_val,
rpe=rpe,
)
all_outputs.append(output)
mems_list, lc_key_list, lc_val_list, mems = self.state_update_after(
layer=i,
state=state,
mems=mems,
next_key=next_key,
next_val=next_val,
mems_list=mems_list,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
)
i += 1
# update state
update_length = math.ceil((T - self.right_context) / self.segment_size)
state = self.state_update_after_loop(
state=state,
mems_list=mems_list,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
update_length=update_length,
)
return output, lengths, state, all_outputs
def forward_jit_test(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor]]:
"""
This simulates the sequence encoder's jit forward. It is for unit-test
purposes only and is not used in training or decoding. Note that
extra_right_context is set in the model. In the unit test, input =
[utterance, right_context] and lengths = [utterance_length].
args:
input: input utterance
lengths: utterance input length
state: None here. input is whole utterance
"""
# [TODO] sequence_to_segments has a bug in lengths.
seg_src_tokens_lengths = self._gen_segs_right_context(input, lengths)
seg_enc_tokens_lengths: List[Tuple[Tensor, Tensor]] = []
state: Optional[List[Tensor]] = None
for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths:
seg_enc_tokens, seg_enc_lengths, state = self.forward_jit(
input=seg_src_tokens, lengths=seg_src_lengths, state=state
)
seg_enc_tokens_lengths.append((seg_enc_tokens, seg_enc_lengths))
enc_tokens, enc_lengths = segments_to_sequence(
segments=seg_enc_tokens_lengths, time_axis=0
)
state = [] # returns trivial state
return enc_tokens, enc_lengths, state
@torch.jit.export
def forward_jit(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor]]:
"""
Forward helper for online decoding.
args:
input: [seg, right_context]. We assume that in online mode we
always pad the right context to the preset right context size.
The last segment may be shorter, but its right context size is
the same as for the other segments.
lengths: input length, i.e. the utterance segment length plus the
right context size
state: [memory, left_context_key, left_context_val]. To improve throughput,
in addition to the memory, we also cache the key and value for the left
context in multihead self-attention
"""
# In online decoding, input = [segment, right_context]
# Lengths = [segment_length, right_context_length]
# so we need strip right context in output
T, B, D = input.size()
rc_str = T - self.right_context
rc_end = T
right_context_blocks = input[rc_str:rc_end, :, :]
seg = input[:rc_str, :, :]
lengths = torch.clamp(lengths - self.right_context, min=0)
mems_list = []
lc_key_list = []
lc_val_list = []
results = self.forward_jit_mini_batch_init(seg, state, True)
state, mems, state_mems, past_length, past_left_context = results
# relative position embedding
if self.use_rpe:
rpe = self._get_relative_position(
input=input,
max_relative_position=self.max_relative_position,
left_context_length=past_left_context,
past_length=past_length,
is_decoding=True,
)
else:
rpe = None
# memory for first layer.
mems_list.append(state_mems[-self.max_memory_size :, :, :])
output = seg
i = 0
for layer in self.layers:
# In order to make cross-stream batching work, the memory, left context key
# and left context value in the state should always have the same shape.
# We use the past length to track the number of processed segments. In this
# way, we take only the essential memory, left context key and left
# context value out of the state. After finishing the forward pass for the
# current segment, we add the new memory, left context key and left context
# value into the state and trim out the oldest part to keep the shape consistent.
true_mems, lc_key, lc_val = self.state_update_before(
layer=i,
state=state,
past_length=past_length,
past_left_context=past_left_context,
)
output, mems, right_context_blocks, next_key, next_val = layer.forward_jit(
input=output,
lengths=lengths,
mems=true_mems,
right_context_blocks=right_context_blocks,
left_context_key=lc_key,
left_context_val=lc_val,
rpe=rpe,
)
# mems is used for next layer
mems_list, lc_key_list, lc_val_list, _ = self.state_update_after(
layer=i,
state=state,
mems_list=mems_list,
mems=mems,
next_key=next_key,
next_val=next_val,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
)
i += 1
# update state
state = self.state_update_after_loop(
state=state,
mems_list=mems_list,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
update_length=1,
)
return output, lengths, state
def quantize_(self, params=None):
if params and "per_channel" in params and params["per_channel"]:
qconfig = per_channel_dynamic_qconfig
else:
qconfig = default_dynamic_qconfig
quantization.quantize_dynamic(
self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True
)
return self
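# Illustrative sketch added by the editor (not from the original fairseq source):
# a standalone view of the dynamic quantization that quantize_() applies above,
# using torch.quantization.quantize_dynamic to swap nn.Linear modules for int8
# dynamically-quantized versions. The toy model is an assumption for demonstration.
def _demo_dynamic_quantization():
    import torch
    from torch import nn

    model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
    # quantize only the Linear layers, keeping activations in float
    return torch.quantization.quantize_dynamic(model, {nn.Linear}, dtype=torch.qint8)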
# ------------------------------------------------------------------------------
# Emformer encoder for seq2seq model
# This is a wrapper over the original emformer
# ------------------------------------------------------------------------------
def emformer_encoder(klass):
class SpeechEncoder(klass):
def __init__(self, args):
super().__init__(args)
stride = SpeechEncoder.conv_layer_stride(args)
trf_left_context = args.segment_left_context // stride
trf_right_context = args.segment_right_context // stride
context_config = [trf_left_context, trf_right_context]
self.transformer_layers = nn.ModuleList(
[
NoSegAugmentedMemoryTransformerEncoderLayer(
input_dim=args.encoder_embed_dim,
num_heads=args.encoder_attention_heads,
ffn_dim=args.encoder_ffn_embed_dim,
num_layers=args.encoder_layers,
dropout_in_attn=args.dropout,
dropout_on_attn=args.dropout,
dropout_on_fc1=args.dropout,
dropout_on_fc2=args.dropout,
activation_fn=args.activation_fn,
context_config=context_config,
segment_size=args.segment_length,
max_memory_size=args.max_memory_size,
scaled_init=True, # TODO: use constant for now.
tanh_on_mem=args.amtrf_tanh_on_mem,
)
]
)
def forward(self, src_tokens, src_lengths):
encoder_out = super().forward(src_tokens, src_lengths)
output = encoder_out["encoder_out"][0]
encoder_padding_masks = encoder_out["encoder_padding_mask"][0]
# This is because in the original implementation
# the output did not treat the last segment as right context.
encoder_padding_masks = encoder_padding_masks[:, : output.size(0)]
return {
"encoder_out": [output],
"encoder_padding_mask": [encoder_padding_masks],
"encoder_embedding": [],
"encoder_states": [],
"src_tokens": [],
"src_lengths": [],
}
@staticmethod
def conv_layer_stride(args):
# TODO: make it configurable from the args
return 4
SpeechEncoder.__name__ = klass.__name__
return SpeechEncoder
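# Illustrative sketch added by the editor (not from the original fairseq source):
# the generic class-decorator pattern that emformer_encoder uses above. The
# decorator subclasses the wrapped class, overrides behaviour, and restores the
# original __name__ so registries and checkpoints still see the base name.
# BaseEncoderDemo, wrap_encoder_demo and their methods are assumptions only.
class BaseEncoderDemo:
    def forward(self, x):
        return x


def wrap_encoder_demo(klass):
    class WrappedEncoder(klass):
        def forward(self, x):
            out = super().forward(x)
            return out  # e.g. trim right context here

    WrappedEncoder.__name__ = klass.__name__
    return WrappedEncoder


StreamingEncoderDemo = wrap_encoder_demo(BaseEncoderDemo)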
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_text/modules/emformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from fairseq.models import FairseqEncoder
from fairseq.models.speech_to_text import ConvTransformerEncoder
from fairseq.models.speech_to_text.utils import (
attention_suppression,
lengths_to_encoder_padding_mask,
segments_to_sequence,
sequence_to_segments,
)
from fairseq.modules import MultiheadAttention, TransformerEncoderLayer
# ------------------------------------------------------------------------------
# AugmentedMemoryConvTransformerEncoder
# ------------------------------------------------------------------------------
class AugmentedMemoryConvTransformerEncoder(ConvTransformerEncoder):
def __init__(self, args):
super().__init__(args)
args.encoder_stride = self.stride()
self.left_context = args.left_context // args.encoder_stride
self.right_context = args.right_context // args.encoder_stride
self.left_context_after_stride = args.left_context // args.encoder_stride
self.right_context_after_stride = args.right_context // args.encoder_stride
self.transformer_layers = nn.ModuleList([])
self.transformer_layers.extend(
[
AugmentedMemoryTransformerEncoderLayer(args)
for i in range(args.encoder_layers)
]
)
def stride(self):
# Hard-coded here. Should be inferred from the conv layers in the future.
stride = 4
return stride
def forward(self, src_tokens, src_lengths, states=None):
"""Encode input sequence.
:param torch.Tensor xs: input tensor
:param torch.Tensor masks: input mask
:return: position embedded tensor and mask
:rtype Tuple[torch.Tensor, torch.Tensor]:
"""
bsz, max_seq_len, _ = src_tokens.size()
x = (
src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
.transpose(1, 2)
.contiguous()
)
x = self.conv(x)
bsz, _, output_seq_len, _ = x.size()
x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1)
x = self.out(x)
x = self.embed_scale * x
subsampling_factor = 1.0 * max_seq_len / output_seq_len
input_lengths = torch.max(
(src_lengths.float() / subsampling_factor).ceil().long(),
x.size(0) * src_lengths.new_ones([src_lengths.size(0)]).long(),
)
encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
input_lengths, batch_first=True
)
# TODO: fix positional embedding
positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# State to store memory banks etc.
if states is None:
states = [
{"memory_banks": None, "encoder_states": None}
for i in range(len(self.transformer_layers))
]
for i, layer in enumerate(self.transformer_layers):
# x size:
# (self.left_size + self.segment_size + self.right_size)
# / self.stride, num_heads, dim
# TODO: Consider mask here
x = layer(x, states[i])
states[i]["encoder_states"] = x[
self.left_context_after_stride : -self.right_context_after_stride
]
lengths = (
(
~encoder_padding_mask[
:, self.left_context_after_stride : -self.right_context_after_stride
]
)
.sum(dim=1, keepdim=True)
.long()
)
return states[-1]["encoder_states"], lengths, states
# ------------------------------------------------------------------------------
# AugmentedMemoryTransformerEncoderLayer
# ------------------------------------------------------------------------------
class AugmentedMemoryTransformerEncoderLayer(TransformerEncoderLayer):
def __init__(self, args):
super().__init__(args)
self.left_context = args.left_context // args.encoder_stride
self.right_context = args.right_context // args.encoder_stride
def forward(self, x, state):
length, batch_size, x_dim = x.size()
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
# init_state
if state.get("memory_banks", None) is None:
state["memory_banks"] = []
# TODO: research new sum_query method
seg_start = self.left_context
seg_end = length - self.right_context
if seg_start < seg_end:
summarization_query = torch.mean(x[seg_start:seg_end], keepdim=True, dim=0)
else:
summarization_query = x.new_zeros(1, batch_size, x_dim)
x = torch.cat([x, summarization_query], dim=0)
x = self.self_attn(input_and_summary=x, state=state)
x = self.dropout_module(x)
x = residual + x
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = residual + x
if not self.normalize_before:
x = self.final_layer_norm(x)
return x
def build_self_attention(self, embed_dim, args):
return AugmentedMemoryMultiheadAttention(
embed_dim=embed_dim,
num_heads=args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
tanh_on_mem=True,
max_memory_size=args.max_memory_size,
)
# ------------------------------------------------------------------------------
# AugmentedMemoryMultiheadAttention
# ------------------------------------------------------------------------------
class AugmentedMemoryMultiheadAttention(MultiheadAttention):
"""
Augmented Memory Attention from
Streaming Transformer-based Acoustic Models
Using Self-attention with Augmented Memory
https://arxiv.org/abs/2005.08042
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
tanh_on_mem=False,
memory_dim=None,
std_scale=0.5, # 0.5 based on https://arxiv.org/abs/2005.09137
max_memory_size=-1,
disable_mem_on_mem_attn=True,
):
super().__init__(
embed_dim,
num_heads,
kdim,
vdim,
dropout,
bias,
add_bias_kv,
add_zero_attn,
self_attention,
encoder_decoder_attention,
q_noise,
qn_block_size,
)
self.memory_dim = memory_dim if memory_dim is not None else embed_dim
self.std_scale = std_scale
self.disable_mem_on_mem_attn = disable_mem_on_mem_attn
# This Operator was used for factorization in PySpeech
self.v2e = lambda x: x
if tanh_on_mem:
self.squash_mem = torch.tanh
self.nonlinear_squash_mem = True
else:
self.squash_mem = lambda x: x
self.nonlinear_squash_mem = False
self.max_memory_size = max_memory_size
def forward(self, input_and_summary, state):
"""
input: Encoder states of current segment with left or right context,
plus one summarization query
"""
length, batch_size, _ = input_and_summary.shape
length = length - 1 # exclude sum_query, which is the last index
memory = state["memory_banks"]
# TODO: positional embedding on memory
if self.max_memory_size > -1 and len(memory) > self.max_memory_size:
# TODO: need to fix here
if self.max_memory_size == 0:
memory = memory.new_zeros(1, memory.size(1), self.memory_dim)
else:
memory = memory[-self.max_memory_size :]
memory_and_input = torch.cat(memory + [input_and_summary[:-1]], dim=0)
input_and_sum_query = input_and_summary
q = self.q_proj(self.v2e(input_and_sum_query))
k = self.k_proj(self.v2e(memory_and_input))
v = self.v_proj(self.v2e(memory_and_input))
q = (
q.contiguous()
.view(-1, batch_size * self.num_heads, self.head_dim)
.transpose(0, 1)
* self.scaling
)
k = (
k.contiguous()
.view(-1, batch_size * self.num_heads, self.head_dim)
.transpose(0, 1)
)
v = (
v.contiguous()
.view(-1, batch_size * self.num_heads, self.head_dim)
.transpose(0, 1)
)
attention_weights = torch.bmm(q, k.transpose(1, 2))
if self.disable_mem_on_mem_attn:
attention_weights = self.suppress_mem_on_mem_attention(
batch_size, self.num_heads, len(memory), attention_weights
)
if self.std_scale is not None:
attention_weights = attention_suppression(attention_weights, self.std_scale)
assert list(attention_weights.shape) == [
batch_size * self.num_heads,
length + 1,
length + len(memory),
]
attention_weights = torch.nn.functional.softmax(
attention_weights.float(), dim=-1
).type_as(attention_weights)
attention_probs = self.dropout_module(attention_weights)
# [B*n_head, T+1, T+mem] x [B*n_head, T+mem, d_head] -> [B*n_head, T+1, d_head]
attention = torch.bmm(attention_probs, v)
assert list(attention.shape) == [
batch_size * self.num_heads,
length + 1,
self.head_dim,
]
attention = (
attention.transpose(0, 1)
.contiguous()
.view(length + 1, batch_size, self.embed_dim)
)
output_and_memory = self.out_proj(attention)
next_m = output_and_memory[-1:]
next_m = self.squash_mem(next_m)
output = output_and_memory[:-1]
state["memory_banks"].append(next_m)
return output
def suppress_mem_on_mem_attention(
self, B: int, num_heads: int, mem_size: int, attention_weight: Tensor
):
"""
Arguments:
- B: batch size
- num_heads: number of attention heads
- mem_size: size of memory bank
- attention_weight: a [B*num_heads, T + 1, T + mem_size] tensor
Return:
modified attention_weight with [B*num_heads, -1, :mem_size] = -inf
"""
attention_weight[:, -1, :mem_size] = float("-inf")
return attention_weight
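# Illustrative sketch added by the editor (not from the original fairseq source):
# it shows the effect of suppress_mem_on_mem_attention above. The last query row
# (the summarization query that produces the next memory slot) is given -inf
# scores against the existing memory keys, so after softmax it attends to memory
# with probability zero. All shapes below are assumptions for demonstration.
def _demo_suppress_mem_on_mem(batch_heads: int = 2, q_len: int = 5, k_len: int = 8, mem_size: int = 3):
    scores = torch.zeros(batch_heads, q_len, k_len)  # [B*num_heads, T+1, T+mem_size]
    scores[:, -1, :mem_size] = float("-inf")
    probs = F.softmax(scores, dim=-1)
    return probs  # probs[:, -1, :mem_size] is exactly zero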
# ------------------------------------------------------------------------------
# SequenceEncoder
# ------------------------------------------------------------------------------
class SequenceEncoder(FairseqEncoder):
"""
SequenceEncoder encodes sequences.
More specifically, `src_tokens` and `src_lengths` in `forward()` should
describe a batch of "complete" sequences rather than segments.
Segment-by-segment inference can be triggered by `segment_size`:
1) `segment_size` is None:
SequenceEncoder treats the input sequence as one single segment.
2) `segment_size` is not None (some int instead):
SequenceEncoder does the following:
1. breaks the input sequence into several segments
2. runs inference on each segment and collects the outputs
3. concatenates the segment outputs into the output sequence.
Note that `segment_size` here shouldn't include additional left/right
contexts needed, for example if we wish to infer with LC-BLSTM where the
middle chunk size is 100 and right context is 20, `segment_size` should be
100.
"""
def __init__(self, args, module):
super().__init__(None)
self.module = module
self.input_time_axis = 1
self.output_time_axis = 0
self.segment_size = args.segment_size
self.left_context = args.left_context
self.right_context = args.right_context
def forward(
self,
src_tokens: Tensor,
src_lengths: Tensor,
states=None,
):
seg_src_tokens_lengths = sequence_to_segments(
sequence=src_tokens,
time_axis=self.input_time_axis,
lengths=src_lengths,
segment_size=self.segment_size,
extra_left_context=self.left_context,
extra_right_context=self.right_context,
)
seg_encoder_states_lengths: List[Tuple[Tensor, Tensor]] = []
for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths:
(seg_encoder_states, seg_enc_lengths, states) = self.module(
seg_src_tokens,
seg_src_lengths,
states=states,
)
seg_encoder_states_lengths.append((seg_encoder_states, seg_enc_lengths))
encoder_out, enc_lengths = segments_to_sequence(
segments=seg_encoder_states_lengths, time_axis=self.output_time_axis
)
encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
enc_lengths, batch_first=True
)
if not encoder_padding_mask.any():
encoder_padding_mask = None
return {
"encoder_out": [encoder_out],
"encoder_padding_mask": [encoder_padding_mask],
"encoder_embedding": [],
"encoder_states": [states],
"src_tokens": [],
"src_lengths": [],
}
def incremental_encode(
self,
seg_src_tokens: Tensor,
seg_src_lengths: Tensor,
states=None,
):
"""
Unlike the forward function, this function takes segmented speech
as input and appends encoder states to the previous states
"""
(seg_encoder_states, seg_enc_lengths, states) = self.module(
seg_src_tokens,
seg_src_lengths,
states=states,
)
return seg_encoder_states, seg_enc_lengths, states
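# Illustrative sketch added by the editor (not from the original fairseq source):
# a minimal version of the segment-by-segment flow described in the
# SequenceEncoder docstring: split the time axis into fixed-size segments, run a
# module on each segment, and concatenate the outputs. It deliberately ignores
# lengths and extra left/right context, which the real sequence_to_segments /
# segments_to_sequence helpers handle; the function name is an assumption.
def _demo_segment_encode(module, sequence, segment_size):
    # sequence: [B, T, D]; module maps [B, t, D] -> [B, t, D]
    outputs = []
    for start in range(0, sequence.size(1), segment_size):
        outputs.append(module(sequence[:, start : start + segment_size]))
    return torch.cat(outputs, dim=1)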
# ------------------------------------------------------------------------------
# Augmented memory model decorator
# ------------------------------------------------------------------------------
def augmented_memory(klass):
class StreamSeq2SeqModel(klass):
@staticmethod
def add_args(parser):
super(StreamSeq2SeqModel, StreamSeq2SeqModel).add_args(parser)
parser.add_argument(
"--segment-size", type=int, required=True, help="Length of the segment."
)
parser.add_argument(
"--left-context",
type=int,
default=0,
help="Left context for the segment.",
)
parser.add_argument(
"--right-context",
type=int,
default=0,
help="Right context for the segment.",
)
parser.add_argument(
"--max-memory-size",
type=int,
default=-1,
help="Right context for the segment.",
)
StreamSeq2SeqModel.__name__ = klass.__name__
return StreamSeq2SeqModel
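# Illustrative sketch added by the editor (not from the original fairseq source):
# a plain-argparse equivalent of the streaming flags that augmented_memory adds
# above, showing how they would parse. The example values are assumptions.
def _demo_streaming_args():
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--segment-size", type=int, required=True)
    parser.add_argument("--left-context", type=int, default=0)
    parser.add_argument("--right-context", type=int, default=0)
    parser.add_argument("--max-memory-size", type=int, default=-1)
    # e.g. segment of 8 frames with 32 frames of left context, unlimited memory
    return parser.parse_args(["--segment-size", "8", "--left-context", "32"])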
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_text/modules/augmented_memory_attention.py |
EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_to_text/modules/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .hubert import * # noqa
from .hubert_asr import * # noqa
| EXA-1-master | exa/libraries/fairseq/fairseq/models/hubert/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import logging
import math
from argparse import Namespace
from dataclasses import dataclass, field
from typing import Any, Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from omegaconf import II, MISSING, open_dict
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models import (
BaseFairseqModel,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
)
from fairseq.models.hubert.hubert import MASKING_DISTRIBUTION_CHOICES
from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerDecoderLayer
from fairseq.tasks import FairseqTask
logger = logging.getLogger(__name__)
@dataclass
class HubertAsrConfig(FairseqDataclass):
w2v_path: str = field(default=MISSING, metadata={"help": "path to hubert model"})
no_pretrained_weights: bool = field(
default=False,
metadata={"help": "if true, does not load pretrained weights"},
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
final_dropout: float = field(
default=0.0,
metadata={"help": "dropout after transformer and before final projection"},
)
dropout: float = field(
default=0.0,
metadata={"help": "dropout probability inside hubert model"},
)
attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights " "inside hubert model"
},
)
activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN " "inside hubert model"
},
)
encoder_embed_dim: Optional[int] = field(
default=768, metadata={"help": "encoder embedding dimension"}
)
# masking
apply_mask: bool = field(
default=False, metadata={"help": "apply masking during fine-tuning"}
)
mask_length: int = field(
default=10, metadata={"help": "repeat the mask indices multiple times"}
)
mask_prob: float = field(
default=0.5,
metadata={
"help": "probability of replacing a token with mask "
"(normalized by length)"
},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose masks"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
# channel masking
mask_channel_length: int = field(
default=10,
metadata={"help": "length of the mask for features (channels)"},
)
mask_channel_prob: float = field(
default=0.0,
metadata={"help": "probability of replacing a feature with 0"},
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_channel_overlap: bool = field(
default=False,
metadata={"help": "whether to allow channel masks to overlap"},
)
freeze_finetune_updates: int = field(
default=0,
metadata={"help": "dont finetune hubert for this many updates"},
)
feature_grad_mult: float = field(
default=0.0,
metadata={"help": "reset feature grad mult in hubert to this"},
)
layerdrop: float = field(
default=0.0,
metadata={"help": "probability of dropping a layer in hubert"},
)
normalize: bool = II("task.normalize")
data: str = II("task.data")
# this holds the loaded hubert args
w2v_args: Any = None
@dataclass
class HubertCtcConfig(HubertAsrConfig):
pass
@register_model("hubert_ctc", dataclass=HubertCtcConfig)
class HubertCtc(BaseFairseqModel):
def __init__(self, cfg: HubertCtcConfig, w2v_encoder: BaseFairseqModel):
super().__init__()
self.cfg = cfg
self.w2v_encoder = w2v_encoder
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: HubertCtcConfig, task: FairseqTask):
"""Build a new model instance."""
w2v_encoder = HubertEncoder(cfg, task)
return cls(cfg, w2v_encoder)
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output["encoder_out"]
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def get_logits(self, net_output):
logits = net_output["encoder_out"]
padding = net_output["encoder_padding_mask"]
if padding is not None and padding.any():
padding = padding.T
logits[padding][..., 0] = 0
logits[padding][..., 1:] = float("-inf")
return logits
def forward(self, **kwargs):
x = self.w2v_encoder(**kwargs)
return x
@dataclass
class HubertSeq2SeqConfig(HubertAsrConfig):
decoder_embed_dim: int = field(
default=768, metadata={"help": "decoder embedding dimension"}
)
decoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"})
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "decoder layerdrop chance"}
)
decoder_attention_heads: int = field(
default=4, metadata={"help": "num decoder attention heads"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each decoder block"}
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
decoder_dropout: float = field(
default=0.0, metadata={"help": "dropout probability in the decoder"}
)
decoder_attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside the decoder"
},
)
decoder_activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside the decoder"
},
)
max_target_positions: int = field(
default=2048, metadata={"help": "max target positions"}
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
autoregressive: bool = II("task.autoregressive")
seq2seq_path: str = field(
default="",
metadata={"help": "reset_dict"},
)
reset_dict: bool = field(
default=False,
metadata={"help": "reset_dict"},
)
@register_model("hubert_seq2seq", dataclass=HubertSeq2SeqConfig)
class HubertSeq2SeqModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, cfg: HubertSeq2SeqConfig, task: FairseqTask):
"""Build a new model instance."""
assert (
cfg.autoregressive
), "Please set task.autoregressive=true for seq2seq asr models"
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
return emb
decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim)
encoder = cls.build_encoder(cfg, task)
decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens)
model = HubertSeq2SeqModel(encoder, decoder)
if cfg["seq2seq_path"]:
state = checkpoint_utils.load_checkpoint_to_cpu(cfg.seq2seq_path)
state = state["model"]
if cfg["reset_dict"]:
del state["decoder.embed_out"]
del state["decoder.embed_tokens.weight"]
model.load_state_dict(state, strict=False)
return model
@classmethod
def build_encoder(cls, cfg: HubertAsrConfig, task):
return HubertEncoder(cfg, task)
@classmethod
def build_decoder(cls, cfg: HubertSeq2SeqConfig, tgt_dict, embed_tokens):
return TransformerDecoder(cfg, tgt_dict, embed_tokens)
def forward(self, **kwargs):
encoder_out = self.encoder(**kwargs)
decoder_out = self.decoder(encoder_out=encoder_out, **kwargs)
return decoder_out
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
def load_state_dict(
self,
state_dict,
strict=True,
model_cfg=None,
args: Optional[Namespace] = None,
):
if model_cfg.reset_dict:
logger.warn("Overriding loading strict state dict!")
del state_dict["decoder.embed_out"]
del state_dict["decoder.embed_tokens.weight"]
return super().load_state_dict(state_dict, False, model_cfg, args)
return super().load_state_dict(state_dict, strict, model_cfg, args)
class HubertEncoder(FairseqEncoder):
def __init__(self, cfg: HubertAsrConfig, task):
self.apply_mask = cfg.apply_mask
arg_overrides = {
"dropout": cfg.dropout,
"activation_dropout": cfg.activation_dropout,
"dropout_input": cfg.dropout_input,
"attention_dropout": cfg.attention_dropout,
"mask_length": cfg.mask_length,
"mask_prob": cfg.mask_prob,
"mask_selection": cfg.mask_selection,
"mask_other": cfg.mask_other,
"no_mask_overlap": cfg.no_mask_overlap,
"mask_channel_length": cfg.mask_channel_length,
"mask_channel_prob": cfg.mask_channel_prob,
"mask_channel_selection": cfg.mask_channel_selection,
"mask_channel_other": cfg.mask_channel_other,
"no_mask_channel_overlap": cfg.no_mask_channel_overlap,
"encoder_layerdrop": cfg.layerdrop,
"feature_grad_mult": cfg.feature_grad_mult,
}
if cfg.w2v_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides)
w2v_args = state.get("cfg", None)
if w2v_args is None:
w2v_args = convert_namespace_to_omegaconf(state["args"])
cfg.w2v_args = w2v_args
else:
state = None
w2v_args = cfg.w2v_args
if isinstance(w2v_args, Namespace):
cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args)
assert cfg.normalize == w2v_args.task.normalize, (
"Fine-tuning works best when data normalization is the same. "
"Please check that --normalize is set or unset for "
"both pre-training and here"
)
w2v_args.task.data = cfg.data
pretrain_task = tasks.setup_task(w2v_args.task)
if state is not None and "task_state" in state:
# This will load the stored "dictionaries" object
pretrain_task.load_state_dict(state["task_state"])
else:
pretrain_task.load_state_dict(task.state_dict())
model = pretrain_task.build_model(w2v_args.model, from_checkpoint=True)
if state is not None and not cfg.no_pretrained_weights:
# set strict=False because we omit some modules
model.load_state_dict(state["model"], strict=False)
model.remove_pretraining_modules()
super().__init__(pretrain_task.source_dictionary)
d = w2v_args.model.encoder_embed_dim
self.w2v_model = model
self.final_dropout = nn.Dropout(cfg.final_dropout)
self.freeze_finetune_updates = cfg.freeze_finetune_updates
self.num_updates = 0
if task.target_dictionary is not None and not cfg.autoregressive:
self.proj = Linear(d, len(task.target_dictionary))
elif getattr(cfg, "decoder_embed_dim", d) != d:
self.proj = Linear(d, cfg.decoder_embed_dim)
else:
self.proj = None
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(self, source, padding_mask, tbc=True, **kwargs):
w2v_args = {
"source": source,
"padding_mask": padding_mask,
"mask": self.apply_mask and self.training,
}
ft = self.freeze_finetune_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
x, padding_mask = self.w2v_model.extract_features(**w2v_args)
if tbc:
# B x T x C -> T x B x C
x = x.transpose(0, 1)
x = self.final_dropout(x)
if self.proj:
x = self.proj(x)
return {
"encoder_out": x, # T x B x C
"encoder_padding_mask": padding_mask, # B x T
"padding_mask": padding_mask,
}
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
if encoder_out["padding_mask"] is not None:
encoder_out["padding_mask"] = encoder_out["padding_mask"].index_select(
0, new_order
)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return None
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
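# Illustrative sketch added by the editor (not from the original fairseq source):
# the conditional-freezing idiom used in HubertEncoder.forward above. While the
# update count is below freeze_finetune_updates, the pretrained encoder runs
# under torch.no_grad(); afterwards a no-op ExitStack is used so gradients flow.
# The module argument and `frozen` flag are assumptions for demonstration.
def _demo_maybe_frozen_forward(module, x, frozen: bool):
    with torch.no_grad() if frozen else contextlib.ExitStack():
        return module(x)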
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
cfg: HubertSeq2SeqConfig,
dictionary,
embed_tokens,
no_encoder_attn=False,
):
super().__init__(dictionary)
self.dropout = cfg.decoder_dropout
self.share_input_output_embed = cfg.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = cfg.decoder_embed_dim
self.output_embed_dim = cfg.decoder_embed_dim
self.layerdrop = cfg.decoder_layerdrop
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = cfg.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
cfg.max_target_positions,
embed_dim,
self.padding_idx,
learned=cfg.decoder_learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
# TODO: update this when transformer gets converted to dataclass configs
transformer_cfg = copy.deepcopy(cfg)
with open_dict(transformer_cfg):
transformer_cfg.dropout = transformer_cfg.decoder_dropout
transformer_cfg.attention_dropout = (
transformer_cfg.decoder_attention_dropout
)
transformer_cfg.activation_dropout = (
transformer_cfg.decoder_activation_dropout
)
self.layers = nn.ModuleList([])
self.layers.extend(
[
TransformerDecoderLayer(transformer_cfg, no_encoder_attn)
for _ in range(transformer_cfg.decoder_layers)
]
)
if not self.share_input_output_embed:
self.embed_out = nn.Parameter(
torch.Tensor(len(dictionary), self.output_embed_dim)
)
nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim**-0.5)
if transformer_cfg.decoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
if type(prev_output_tokens) == list:
max_len = max((len(x) for x in prev_output_tokens))
tmp = torch.zeros(
[len(prev_output_tokens), max_len], device=prev_output_tokens[0].device
)
for (i, p) in enumerate(prev_output_tokens):
tmp[i, : len(p)] = p
prev_output_tokens = tmp
prev_output_tokens = prev_output_tokens.long()
x, extra = self.extract_features(
prev_output_tokens, encoder_out, incremental_state
)
x = self.output_layer(x)
return x, extra
def extract_features(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
# embed positions
positions = (
self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
self_attn_padding_mask = None
if prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
for layer in self.layers:
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, attn, _ = layer(
x,
encoder_out["encoder_out"] if encoder_out is not None else None,
encoder_out["padding_mask"] if encoder_out is not None else None,
incremental_state,
self_attn_mask=self.buffered_future_mask(x)
if incremental_state is None
else None,
self_attn_padding_mask=self_attn_padding_mask,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, {"attn": attn, "inner_states": inner_states}
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
# project back to size of vocabulary
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
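# Illustrative sketch added by the editor (not from the original fairseq source):
# a functional equivalent of TransformerDecoder.buffered_future_mask above,
# without the caching: an upper-triangular matrix of -inf (strictly above the
# diagonal) that is added to attention scores so position i cannot attend to
# positions greater than i. The helper name is an assumption.
def _demo_future_mask(dim: int) -> torch.Tensor:
    return torch.triu(torch.full((dim, dim), float("-inf")), diagonal=1)
# _demo_future_mask(4) has zeros on and below the diagonal and -inf above it.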
| EXA-1-master | exa/libraries/fairseq/fairseq/models/hubert/hubert_asr.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
from omegaconf import II
from fairseq import utils
from fairseq.data.data_utils import compute_mask_indices
from fairseq.data.dictionary import Dictionary
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.models.wav2vec.wav2vec2 import (
EXTRACTOR_MODE_CHOICES,
MASKING_DISTRIBUTION_CHOICES,
LAYER_TYPE_CHOICES,
ConvFeatureExtractionModel,
TransformerEncoder,
)
from fairseq.modules import GradMultiply, LayerNorm
from fairseq.tasks.hubert_pretraining import (
HubertPretrainingConfig,
HubertPretrainingTask,
)
logger = logging.getLogger(__name__)
@dataclass
class HubertConfig(FairseqDataclass):
label_rate: float = II("task.label_rate")
extractor_mode: EXTRACTOR_MODE_CHOICES = field(
default="default",
metadata={
"help": "mode for feature extractor. default has a single group "
"norm with d groups in the first conv block, whereas layer_norm "
"has layer norms in every block (meant to use with normalize=True)"
},
)
encoder_layers: int = field(
default=12, metadata={"help": "num encoder layers in the transformer"}
)
encoder_embed_dim: int = field(
default=768, metadata={"help": "encoder embedding dimension"}
)
encoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "encoder embedding dimension for FFN"}
)
encoder_attention_heads: int = field(
default=12, metadata={"help": "num encoder attention heads"}
)
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="gelu", metadata={"help": "activation function to use"}
)
layer_type: LAYER_TYPE_CHOICES = field(
default="transformer", metadata={"help": "layer type in encoder"}
)
# dropouts
dropout: float = field(
default=0.1,
metadata={"help": "dropout probability for the transformer"},
)
attention_dropout: float = field(
default=0.1,
metadata={"help": "dropout probability for attention weights"},
)
activation_dropout: float = field(
default=0.0,
metadata={"help": "dropout probability after activation in FFN"},
)
encoder_layerdrop: float = field(
default=0.0,
metadata={"help": "probability of dropping a tarnsformer layer"},
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
dropout_features: float = field(
default=0.0,
metadata={"help": "dropout to apply to the features (after feat extr)"},
)
final_dim: int = field(
default=0,
metadata={
"help": "project final representations and targets to this many "
"dimensions. set to encoder_embed_dim is <= 0"
},
)
untie_final_proj: bool = field(
default=False,
metadata={"help": "use separate projection for each target"},
)
layer_norm_first: bool = field(
default=False,
metadata={"help": "apply layernorm first in the transformer"},
)
conv_feature_layers: str = field(
default="[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2",
metadata={
"help": "string describing convolutional feature extraction "
"layers in form of a python list that contains "
"[(dim, kernel_size, stride), ...]"
},
)
conv_bias: bool = field(
default=False, metadata={"help": "include bias in conv encoder"}
)
logit_temp: float = field(
default=0.1, metadata={"help": "temperature to divide logits by"}
)
target_glu: bool = field(
default=False, metadata={"help": "adds projection + glu to targets"}
)
feature_grad_mult: float = field(
default=1.0,
metadata={"help": "multiply feature extractor var grads by this"},
)
# masking
mask_length: int = field(default=10, metadata={"help": "mask length"})
mask_prob: float = field(
default=0.65,
metadata={"help": "probability of replacing a token with mask"},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose mask length"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
mask_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# channel masking
mask_channel_length: int = field(
default=10,
metadata={"help": "length of the mask for features (channels)"},
)
mask_channel_prob: float = field(
default=0.0,
metadata={"help": "probability of replacing a feature with 0"},
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False,
metadata={"help": "whether to allow channel masks to overlap"},
)
mask_channel_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# positional embeddings
conv_pos: int = field(
default=128,
metadata={"help": "number of filters for convolutional positional embeddings"},
)
conv_pos_groups: int = field(
default=16,
metadata={"help": "number of groups for convolutional positional embedding"},
)
latent_temp: Tuple[float, float, float] = field(
default=(2, 0.5, 0.999995),
metadata={"help": "legacy (to be removed)"},
)
# loss computation
skip_masked: bool = field(
default=False,
metadata={"help": "skip computing losses over masked frames"},
)
skip_nomask: bool = field(
default=False,
metadata={"help": "skip computing losses over unmasked frames"},
)
checkpoint_activations: bool = field(
default=False,
metadata={"help": "recompute activations and save memory for extra compute"},
)
# FP16 optimization
required_seq_len_multiple: int = field(
default=2,
metadata={
"help": "pad the input to encoder such that the sequence length is divisible by multiple"
},
)
# Conformer
depthwise_conv_kernel_size: int = field(
default=31,
metadata={
"help": "depthwise-conv-kernel-size for convolution in conformer layer"
},
)
attn_type: str = field(
default="",
metadata={"help": "if espnet use ESPNET MHA"},
)
pos_enc_type: str = field(
default="abs",
metadata={"help": "Positional encoding type to use in conformer"},
)
fp16: bool = field(default=False, metadata={"help": "If fp16 is being used"})
@register_model("hubert", dataclass=HubertConfig)
class HubertModel(BaseFairseqModel):
def __init__(
self,
cfg: HubertConfig,
task_cfg: HubertPretrainingConfig,
dictionaries: List[Dictionary],
) -> None:
super().__init__()
logger.info(f"HubertModel Config: {cfg}")
feature_enc_layers = eval(cfg.conv_feature_layers) # noqa
self.embed = feature_enc_layers[-1][0]
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
)
feature_ds_rate = np.prod([s for _, _, s in feature_enc_layers])
self.feat2tar_ratio = cfg.label_rate * feature_ds_rate / task_cfg.sample_rate
self.post_extract_proj = (
nn.Linear(self.embed, cfg.encoder_embed_dim)
if self.embed != cfg.encoder_embed_dim
else None
)
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.logit_temp = cfg.logit_temp
self.skip_masked = cfg.skip_masked
self.skip_nomask = cfg.skip_nomask
final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
self.encoder = TransformerEncoder(cfg)
self.layer_norm = LayerNorm(self.embed)
self.target_glu = None
if cfg.target_glu:
self.target_glu = nn.Sequential(
nn.Linear(final_dim, final_dim * 2), nn.GLU()
)
self.untie_final_proj = cfg.untie_final_proj
if self.untie_final_proj:
self.final_proj = nn.Linear(
cfg.encoder_embed_dim, final_dim * len(dictionaries)
)
else:
self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)
# modules below are not needed during fine-tuning
if any([d is None for d in dictionaries]):
logger.info("cannot find dictionary. assume will be used for fine-tuning")
else:
self.num_classes = [len(d) for d in dictionaries]
self.label_embs_concat = nn.Parameter(
torch.FloatTensor(sum(self.num_classes), final_dim)
)
nn.init.uniform_(self.label_embs_concat)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: HubertConfig, task: HubertPretrainingTask):
"""Build a new model instance."""
model = HubertModel(cfg, task.cfg, task.dictionaries)
return model
def apply_mask(self, x, padding_mask, target_list):
B, T, C = x.shape
if self.mask_prob > 0:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x[mask_indices] = self.mask_emb
else:
mask_indices = None
if self.mask_channel_prob > 0:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
return x, mask_indices
def compute_nce(self, x, pos, negs):
neg_is_pos = (pos == negs).all(-1)
pos = pos.unsqueeze(0)
targets = torch.cat([pos, negs], dim=0)
logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x)
logits /= self.logit_temp
if neg_is_pos.any():
logits[1:][neg_is_pos] = float("-inf")
logits = logits.transpose(0, 1) # (num_x, num_cls+1)
return logits
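    # Editor's note (illustrative, not part of the original fairseq source):
    # compute_nce scores each selected frame against its positive label
    # embedding plus all candidate embeddings as negatives. A hedged shape sketch:
    #     x:    (S, D)        projected features for S selected frames
    #     pos:  (S, D)        embeddings of the true labels
    #     negs: (Neg, S, D)   negative label embeddings
    # The returned logits have shape (S, Neg + 1) with the positive always in
    # column 0, which is why get_targets() below returns all-zero targets.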
def forward_features(self, source: torch.Tensor) -> torch.Tensor:
if self.feature_grad_mult > 0:
features = self.feature_extractor(source)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = self.feature_extractor(source)
return features
def forward_targets(
self,
features: torch.Tensor,
target_list: List[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
# Trim features to ensure labels exist and then get aligned labels
feat_tsz = features.size(2)
targ_tsz = min([t.size(1) for t in target_list])
if self.feat2tar_ratio * feat_tsz > targ_tsz:
feat_tsz = int(targ_tsz / self.feat2tar_ratio)
features = features[..., :feat_tsz]
target_inds = torch.arange(feat_tsz).float() * self.feat2tar_ratio
target_list = [t[:, target_inds.long()] for t in target_list]
return features, target_list
def forward_padding_mask(
self,
features: torch.Tensor,
padding_mask: torch.Tensor,
) -> torch.Tensor:
extra = padding_mask.size(1) % features.size(1)
if extra > 0:
padding_mask = padding_mask[:, :-extra]
padding_mask = padding_mask.view(padding_mask.size(0), features.size(1), -1)
padding_mask = padding_mask.all(-1)
return padding_mask
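    # Editor's note (illustrative, not part of the original fairseq source):
    # forward_padding_mask downsamples the sample-level padding mask to the
    # feature frame rate. For example, with a waveform mask of length 320_000
    # and 1_000 extracted frames, each frame covers 320 samples and is marked
    # as padding only if all 320 underlying samples are padding.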
def forward(
self,
source: torch.Tensor,
target_list: Optional[List[torch.Tensor]] = None,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = True,
features_only: bool = False,
output_layer: Optional[int] = None,
) -> Dict[str, torch.Tensor]:
"""output layer is 1-based"""
features = self.forward_features(source)
if target_list is not None:
features, target_list = self.forward_targets(features, target_list)
features_pen = features.float().pow(2).mean()
features = features.transpose(1, 2)
features = self.layer_norm(features)
unmasked_features = features.clone()
if padding_mask is not None:
padding_mask = self.forward_padding_mask(features, padding_mask)
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
unmasked_features = self.dropout_features(unmasked_features)
if mask:
x, mask_indices = self.apply_mask(features, padding_mask, target_list)
else:
x = features
mask_indices = None
# feature: (B, T, D), float
# target: (B, T), long
# x: (B, T, D), float
# padding_mask: (B, T), bool
# mask_indices: (B, T), bool
x, _ = self.encoder(
x,
padding_mask=padding_mask,
layer=None if output_layer is None else output_layer - 1,
)
if features_only:
return {"x": x, "padding_mask": padding_mask, "features": features}
def compute_pred(proj_x, target, label_embs):
# compute logits for the i-th label set
y = torch.index_select(label_embs, 0, target.long())
negs = label_embs.unsqueeze(1).expand(-1, proj_x.size(0), -1)
if self.target_glu:
y = self.target_glu(y)
negs = self.target_glu(negs)
# proj_x: (S, D)
# y: (S, D)
# negs: (Neg, S, D)
return self.compute_nce(proj_x, y, negs)
label_embs_list = self.label_embs_concat.split(self.num_classes, 0)
if not self.skip_masked:
masked_indices = torch.logical_and(~padding_mask, mask_indices)
proj_x_m = self.final_proj(x[masked_indices])
if self.untie_final_proj:
proj_x_m_list = proj_x_m.chunk(len(target_list), dim=-1)
else:
proj_x_m_list = [proj_x_m for _ in range(len(target_list))]
logit_m_list = [
compute_pred(proj_x_m, t[masked_indices], label_embs_list[i])
for i, (proj_x_m, t) in enumerate(zip(proj_x_m_list, target_list))
]
else:
logit_m_list = [None for _ in target_list]
if not self.skip_nomask:
nomask_indices = torch.logical_and(~padding_mask, ~mask_indices)
proj_x_u = self.final_proj(x[nomask_indices])
if self.untie_final_proj:
proj_x_u_list = proj_x_u.chunk(len(target_list), dim=-1)
else:
proj_x_u_list = [proj_x_u for _ in range(len(target_list))]
logit_u_list = [
compute_pred(proj_x_u, t[nomask_indices], label_embs_list[i])
for i, (proj_x_u, t) in enumerate(zip(proj_x_u_list, target_list))
]
else:
logit_u_list = [None for _ in target_list]
result = {
"logit_m_list": logit_m_list,
"logit_u_list": logit_u_list,
"padding_mask": padding_mask,
"features_pen": features_pen,
}
return result
def extract_features(
self,
source: torch.Tensor,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = False,
ret_conv: bool = False,
output_layer: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
res = self.forward(
source,
padding_mask=padding_mask,
mask=mask,
features_only=True,
output_layer=output_layer,
)
feature = res["features"] if ret_conv else res["x"]
return feature, res["padding_mask"]
def get_logits(self, net_output, is_masked=True):
if is_masked:
logits_list = net_output["logit_m_list"]
else:
logits_list = net_output["logit_u_list"]
logits_list = [x.float() for x in logits_list if x is not None]
return logits_list
def get_targets(self, net_output, is_masked=True):
logits_list = self.get_logits(net_output, is_masked)
targets_list = [x.new_zeros(x.size(0), dtype=torch.long) for x in logits_list]
return targets_list
def get_extra_losses(self, net_output):
extra_losses = []
names = []
if "features_pen" in net_output:
extra_losses.append(net_output["features_pen"])
names.append("features_pen")
return extra_losses, names
def remove_pretraining_modules(self):
self.target_glu = None
self.final_proj = None
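# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# Assuming `model` is an already-built HubertModel and `wav` is a (B, T) float
# waveform tensor at the expected sample rate (names here are hypothetical),
# frame-level features could be extracted without masking roughly as follows:
#
#     import torch
#     wav = torch.randn(1, 16000)  # e.g. one second of 16 kHz audio
#     feats, padding_mask = model.extract_features(
#         wav, padding_mask=None, mask=False, output_layer=None
#     )
#     # feats: (B, T', encoder_embed_dim) transformer outputs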
| EXA-1-master | exa/libraries/fairseq/fairseq/models/hubert/hubert.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
from dataclasses import dataclass, field, fields
from typing import List, Optional
from omegaconf import II
from fairseq import utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.utils import safe_getattr, safe_hasattr
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8)
_NAME_PARSER = r"(decoder|encoder|quant_noise)_(.*)"
@dataclass
class EncDecBaseConfig(FairseqDataclass):
embed_path: Optional[str] = field(
default=None, metadata={"help": "path to pre-trained embedding"}
)
embed_dim: Optional[int] = field(
default=512, metadata={"help": "embedding dimension"}
)
ffn_embed_dim: int = field(
default=2048, metadata={"help": "embedding dimension for FFN"}
)
layers: int = field(default=6, metadata={"help": "number of layers"})
attention_heads: int = field(
default=8, metadata={"help": "number of attention heads"}
)
normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each block"}
)
learned_pos: bool = field(
default=False, metadata={"help": "use learned positional embeddings"}
)
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
layerdrop: float = field(default=0, metadata={"help": "LayerDrop probability"})
layers_to_keep: Optional[List[int]] = field(
default=None, metadata={"help": "which layers to *keep* when pruning"}
)
xformers_att_config: Optional[str] = field(
default=None,
metadata={
"help": "config for xFormers attention, defined in xformers.components.attention.AttentionConfig"
},
)
@dataclass
class DecoderConfig(EncDecBaseConfig):
input_dim: int = II("model.decoder.embed_dim")
output_dim: int = field(
default=II("model.decoder.embed_dim"),
metadata={
"help": "decoder output dimension (extra linear layer if different from decoder embed dim)"
},
)
def __post_init__(self):
# II doesn't work if we are just creating the object outside of hydra so fix that
if self.input_dim == II("model.decoder.embed_dim"):
self.input_dim = self.embed_dim
if self.output_dim == II("model.decoder.embed_dim"):
self.output_dim = self.embed_dim
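    # Editor's note (illustrative, not part of the original file): when the
    # dataclass is built outside of Hydra, input_dim/output_dim still hold the
    # literal interpolation II("model.decoder.embed_dim"); __post_init__ above
    # replaces them with embed_dim, so e.g.
    #     DecoderConfig(embed_dim=256).output_dim == 256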
@dataclass
class QuantNoiseConfig(FairseqDataclass):
pq: float = field(
default=0.0,
metadata={"help": "iterative PQ quantization noise at training time"},
)
pq_block_size: int = field(
default=8,
metadata={"help": "block size of quantization noise at training time"},
)
scalar: float = field(
default=0.0,
metadata={
"help": "scalar quantization noise and scalar quantization at training time"
},
)
@dataclass
class TransformerConfig(FairseqDataclass):
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="relu",
metadata={"help": "activation function to use"},
)
dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
attention_dropout: float = field(
default=0.0, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN.",
"alias": "--relu-dropout",
},
)
adaptive_input: bool = False
encoder: EncDecBaseConfig = EncDecBaseConfig()
# TODO should really be in the encoder config
max_source_positions: int = field(
default=DEFAULT_MAX_SOURCE_POSITIONS,
metadata={"help": "Maximum input length supported by the encoder"},
)
decoder: DecoderConfig = DecoderConfig()
# TODO should really be in the decoder config
max_target_positions: int = field(
default=DEFAULT_MAX_TARGET_POSITIONS,
metadata={"help": "Maximum output length supported by the decoder"},
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
share_all_embeddings: bool = field(
default=False,
metadata={
"help": "share encoder, decoder and output embeddings (requires shared dictionary and embed dim)"
},
)
merge_src_tgt_embed: bool = field(
default=False,
metadata={
"help": "if true then the source and target embedding table is "
"merged into one table. This is going to make the model smaller but "
"it might hurt performance."
},
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if True, disables positional embeddings (outside self attention)"
},
)
adaptive_softmax_cutoff: Optional[List[int]] = field(
default=None,
metadata={
"help": "list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion"
},
)
adaptive_softmax_dropout: float = field(
default=0.0,
metadata={"help": "sets adaptive softmax dropout for the tail projections"},
)
adaptive_softmax_factor: float = field(
default=4, metadata={"help": "adaptive input factor"}
)
layernorm_embedding: bool = field(
default=False, metadata={"help": "add layernorm to embedding"}
)
tie_adaptive_weights: bool = field(
default=False,
metadata={
"help": "if set, ties the weights of adaptive softmax and adaptive input"
},
)
tie_adaptive_proj: bool = field(
default=False,
metadata={
"help": "if set, ties the projection weights of adaptive softmax and adaptive input"
},
)
no_scale_embedding: bool = field(
default=False, metadata={"help": "if True, dont scale embeddings"}
)
checkpoint_activations: bool = field(
default=False,
metadata={
"help": "checkpoint activations at each layer, which saves GPU memory usage at the cost of some additional compute"
},
)
offload_activations: bool = field(
default=False,
metadata={
"help": "checkpoint activations at each layer, then save to gpu. Sets --checkpoint-activations."
},
)
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
no_cross_attention: bool = field(
default=False, metadata={"help": "do not perform cross-attention"}
)
cross_self_attention: bool = field(
default=False, metadata={"help": "perform cross+self-attention"}
)
# args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
quant_noise: QuantNoiseConfig = field(default=QuantNoiseConfig())
min_params_to_wrap: int = field(
default=DEFAULT_MIN_PARAMS_TO_WRAP,
metadata={
"help": "minimum number of params for a layer to be wrapped with FSDP() when "
"training with --ddp-backend=fully_sharded. Smaller values will "
"improve memory efficiency, but may make torch.distributed "
"communication less efficient due to smaller input sizes. This option "
"is set to 0 (i.e., always wrap) when --checkpoint-activations or "
"--offload-activations are passed."
},
)
# DEPRECATED field, but some old checkpoints might have it
char_inputs: bool = field(
default=False, metadata={"help": "if set, model takes character ids as input"}
)
relu_dropout: float = 0.0
# config for "BASE Layers: Simplifying Training of Large, Sparse Models"
base_layers: Optional[int] = field(
default=0, metadata={"help": "number of BASE layers in total"}
)
base_sublayers: Optional[int] = field(
default=1, metadata={"help": "number of sublayers in each BASE layer"}
)
base_shuffle: Optional[int] = field(
default=1,
metadata={"help": "shuffle tokens between workers before computing assignment"},
)
export: bool = field(
default=False,
metadata={"help": "make the layernorm exportable with torchscript."},
)
# copied from transformer_lm but expected in transformer_decoder:
no_decoder_final_norm: bool = field(
default=False,
metadata={"help": "don't add an extra layernorm after the last decoder block"},
)
# We need to make this hierarchical dataclass like the flat namespace
# __getattr__ and __setattr__ here allow backward compatibility
# for subclasses of Transformer(Legacy) that depend on read/write on
# the flat namespace.
def __getattr__(self, name):
match = re.match(_NAME_PARSER, name)
if match:
sub = safe_getattr(self, match[1])
return safe_getattr(sub, match[2])
raise AttributeError(f"invalid argument {name}.")
def __setattr__(self, name, value):
match = re.match(_NAME_PARSER, name)
if match:
sub = safe_getattr(self, match[1])
setattr(sub, match[2], value)
else:
super().__setattr__(name, value)
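    # Editor's note (illustrative, not part of the original file): the two
    # methods above keep the old flat namespace working, e.g. reading
    # cfg.encoder_embed_dim is routed via _NAME_PARSER to cfg.encoder.embed_dim,
    # and writing cfg.decoder_layers sets cfg.decoder.layers.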
@staticmethod
def _copy_keys(args, cls, prefix, seen):
"""
copy the prefixed keys (decoder_embed_dim) to the DC fields: decoder.embed_dim
"""
cfg = cls()
for fld in fields(cls):
# for all the fields in the DC, find the fields (e.g. embed_dim)
# in the namespace with the prefix (e.g. decoder)
# and set it on the dc.
args_key = f"{prefix}_{fld.name}"
if safe_hasattr(args, args_key):
seen.add(args_key)
setattr(cfg, fld.name, safe_getattr(args, args_key))
if safe_hasattr(args, fld.name):
seen.add(fld.name)
setattr(cfg, fld.name, safe_getattr(args, fld.name))
return cfg
@classmethod
def from_namespace(cls, args):
if args is None:
return None
if not isinstance(args, cls):
seen = set()
config = cls()
# currently, we can go generically from DC fields to args hierarchically
# but we can't easily deconstruct a flat namespace to a hierarchical
# DC. Mostly because we could have a sub-dc called `decoder-foo` that should not
# go to the sub struct called `decoder`. There are ways to go around this, but let's keep it simple
# for now.
for fld in fields(cls):
                # concretely, the transformer config knows which sub-dataclasses it has, so we go through all the dataclass fields
# and if it's one that has a sub-dc, we build that sub-dc with `copy_keys()`
if fld.name == "decoder":
if safe_hasattr(args, "decoder"):
# in some cases, the args we receive is already structured (as DictConfigs), so let's just build the correct DC
seen.add("decoder")
config.decoder = DecoderConfig(**args.decoder)
else:
config.decoder = cls._copy_keys(
args, DecoderConfig, "decoder", seen
)
elif fld.name == "encoder":
# same but for encoder
if safe_hasattr(args, "encoder"):
seen.add("encoder")
config.encoder = EncDecBaseConfig(**args.encoder)
else:
config.encoder = cls._copy_keys(
args, EncDecBaseConfig, "encoder", seen
)
elif fld.name == "quant_noise":
# same but for quant_noise
if safe_hasattr(args, "quant_noise"):
seen.add("quant_noise")
config.quant_noise = QuantNoiseConfig(**args.quant_noise)
else:
config.quant_noise = cls._copy_keys(
args, QuantNoiseConfig, "quant_noise", seen
)
elif safe_hasattr(args, fld.name):
# if it's not a structure field, it's just a normal field, copy it over
seen.add(fld.name)
setattr(config, fld.name, safe_getattr(args, fld.name))
# we got all the fields defined in the dataclass, but
# the argparse namespace might have extra args for two reasons:
            # - we are in a legacy class, so not all the args are declared in the dataclass. Ideally, once everyone has defined a dataclass for their model, we won't need this
# - some places expect args to be there but never define them
args_dict = (
args._asdict()
if safe_hasattr(args, "_asdict")
else vars(args)
if safe_hasattr(args, "__dict__")
else {}
        )  # namedtuple doesn't have __dict__ :-/
for key, value in args_dict.items():
if key not in seen:
setattr(config, key, value)
return config
else:
return args
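# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# from_namespace converts a flat argparse-style namespace into the hierarchical
# config; the namespace below is hypothetical and only meant to show the mapping.
#
#     from argparse import Namespace
#     args = Namespace(encoder_embed_dim=256, decoder_layers=4, dropout=0.2)
#     cfg = TransformerConfig.from_namespace(args)
#     assert cfg.encoder.embed_dim == 256 and cfg.decoder.layers == 4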
| EXA-1-master | exa/libraries/fairseq/fairseq/models/transformer/transformer_config.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.transformer.transformer_config import (
TransformerConfig,
DEFAULT_MAX_SOURCE_POSITIONS,
DEFAULT_MAX_TARGET_POSITIONS,
DEFAULT_MIN_PARAMS_TO_WRAP,
)
from fairseq.models.transformer.transformer_base import (
TransformerModelBase,
)
@register_model("transformer")
class TransformerModel(TransformerModelBase):
"""
This is the legacy implementation of the transformer model that
uses argparse for configuration.
"""
@classmethod
def hub_models(cls):
# fmt: off
def moses_subword(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'subword_nmt',
}
def moses_fastbpe(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'fastbpe',
}
def spm(path):
return {
'path': path,
'bpe': 'sentencepiece',
'tokenizer': 'space',
}
return {
'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'),
'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2',
'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'),
'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'),
'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'),
'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'),
'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'),
'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'),
'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'),
'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'),
'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'),
'transformer.wmt20.en-ta': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-ta.single.tar.gz'),
'transformer.wmt20.en-iu.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.news.single.tar.gz'),
'transformer.wmt20.en-iu.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.nh.single.tar.gz'),
'transformer.wmt20.ta-en': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta-en.single.tar.gz'),
'transformer.wmt20.iu-en.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.news.single.tar.gz'),
'transformer.wmt20.iu-en.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.nh.single.tar.gz'),
'transformer.flores101.mm100.615M': spm('https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_615M.tar.gz'),
'transformer.flores101.mm100.175M': spm('https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_175M.tar.gz'),
}
# fmt: on
def __init__(self, args, encoder, decoder):
cfg = TransformerConfig.from_namespace(args)
super().__init__(cfg, encoder, decoder)
self.args = args
@classmethod
def add_args(cls, parser):
"""Add model-specific arguments to the parser."""
# we want to build the args recursively in this case.
# do not set defaults so that settings defaults from various architectures still works
gen_parser_from_dataclass(
parser, TransformerConfig(), delete_default=True, with_prefix=""
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
args.share_decoder_input_output_embed = True
if getattr(args, "offload_activations", False):
args.checkpoint_activations = True # offloading implies checkpointing
if not args.share_all_embeddings:
args.min_params_to_wrap = getattr(
args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP
)
cfg = TransformerConfig.from_namespace(args)
return super().build_model(cfg, task)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
return super().build_embedding(
TransformerConfig.from_namespace(args), dictionary, embed_dim, path
)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return super().build_encoder(
TransformerConfig.from_namespace(args), src_dict, embed_tokens
)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return super().build_decoder(
TransformerConfig.from_namespace(args), tgt_dict, embed_tokens
)
# architectures
@register_model_architecture("transformer", "transformer_tiny")
def tiny_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 64)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 64)
args.encoder_layers = getattr(args, "encoder_layers", 2)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2)
return base_architecture(args)
@register_model_architecture("transformer", "transformer")
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.merge_src_tgt_embed = getattr(args, "merge_src_tgt_embed", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
@register_model_architecture("transformer", "transformer_iwslt_de_en")
def transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
base_architecture(args)
@register_model_architecture("transformer", "transformer_wmt_en_de")
def transformer_wmt_en_de(args):
base_architecture(args)
# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture("transformer", "transformer_vaswani_wmt_en_de_big")
def transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
base_architecture(args)
@register_model_architecture("transformer", "transformer_vaswani_wmt_en_fr_big")
def transformer_vaswani_wmt_en_fr_big(args):
args.dropout = getattr(args, "dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture("transformer", "transformer_wmt_en_de_big")
def transformer_wmt_en_de_big(args):
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture("transformer", "transformer_wmt_en_de_big_t2t")
def transformer_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
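# --- Editor's note: illustrative sketch, not part of the original file. ---
# A new named architecture can be registered in the same way as the presets
# above; the name and sizes below are hypothetical.
#
#     @register_model_architecture("transformer", "transformer_my_small")
#     def transformer_my_small(args):
#         args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
#         args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
#         base_architecture(args)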
| EXA-1-master | exa/libraries/fairseq/fairseq/models/transformer/transformer_legacy.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import FairseqEncoder
from fairseq.models.transformer import TransformerConfig
from fairseq.modules import (
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
transformer_layer,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
# rewrite name for backward compatibility in `make_generation_fast_`
def module_name_fordropout(module_name: str) -> str:
if module_name == "TransformerEncoderBase":
return "TransformerEncoder"
else:
return module_name
class TransformerEncoderBase(FairseqEncoder):
"""
Transformer encoder consisting of *cfg.encoder.layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
        cfg (TransformerConfig): model configuration
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, cfg, dictionary, embed_tokens, return_fc=False):
self.cfg = cfg
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self.dropout_module = FairseqDropout(
cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__)
)
self.encoder_layerdrop = cfg.encoder.layerdrop
self.return_fc = return_fc
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = cfg.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
cfg.max_source_positions,
embed_dim,
self.padding_idx,
learned=cfg.encoder.learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
if cfg.layernorm_embedding:
self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export)
else:
self.layernorm_embedding = None
if not cfg.adaptive_input and cfg.quant_noise.pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
cfg.quant_noise.pq,
cfg.quant_noise.pq_block_size,
)
else:
self.quant_noise = None
if self.encoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[self.build_encoder_layer(cfg) for i in range(cfg.encoder.layers)]
)
self.num_layers = len(self.layers)
if cfg.encoder.normalize_before:
self.layer_norm = LayerNorm(embed_dim, export=cfg.export)
else:
self.layer_norm = None
def build_encoder_layer(self, cfg):
layer = transformer_layer.TransformerEncoderLayerBase(
cfg, return_fc=self.return_fc
)
checkpoint = cfg.checkpoint_activations
if checkpoint:
offload_to_cpu = cfg.offload_activations
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward_embedding(
self, src_tokens, token_embedding: Optional[torch.Tensor] = None
):
# embed tokens and positions
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
if self.quant_noise is not None:
x = self.quant_noise(x)
return x, embed
def forward(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
return self.forward_scriptable(
src_tokens, src_lengths, return_all_hiddens, token_embeddings
)
# TorchScript doesn't support super() method so that the scriptable Subclass
# can't access the base class model in Torchscript.
# Current workaround is to add a helper function with different name and
# call the helper function from scriptable Subclass.
def forward_scriptable(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
has_pads = (
torch.tensor(src_tokens.device.type == "xla") or encoder_padding_mask.any()
)
        # Torchscript doesn't handle bool Tensor correctly, so we need to work around it.
if torch.jit.is_scripting():
has_pads = torch.tensor(1) if has_pads else torch.tensor(0)
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
# account for padding while computing the representation
x = x * (
1 - encoder_padding_mask.unsqueeze(-1).type_as(x) * has_pads.type_as(x)
)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states = []
fc_results = []
if return_all_hiddens:
encoder_states.append(x)
# encoder layers
for layer in self.layers:
lr = layer(
x, encoder_padding_mask=encoder_padding_mask if has_pads else None
)
if isinstance(lr, tuple) and len(lr) == 2:
x, fc_result = lr
else:
x = lr
fc_result = None
if return_all_hiddens and not torch.jit.is_scripting():
assert encoder_states is not None
encoder_states.append(x)
fc_results.append(fc_result)
if self.layer_norm is not None:
x = self.layer_norm(x)
        # The PyTorch Mobile lite interpreter does not support returning NamedTuple in
# `forward` so we use a dictionary instead.
# TorchScript does not support mixed values so the values are all lists.
# The empty list is equivalent to None.
src_lengths = (
src_tokens.ne(self.padding_idx)
.sum(dim=1, dtype=torch.int32)
.reshape(-1, 1)
.contiguous()
)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [encoder_embedding], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"fc_results": fc_results, # List[T x B x C]
"src_tokens": [],
"src_lengths": [src_lengths],
}
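    # Editor's note (illustrative, not part of the original file): callers
    # typically unpack the first element of each list in the returned dict, e.g.
    #     out = encoder(src_tokens, src_lengths)
    #     enc = out["encoder_out"][0]                # (src_len, batch, embed_dim)
    #     pad_mask = out["encoder_padding_mask"][0]  # (batch, src_len)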
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if len(encoder_out["encoder_out"]) == 0:
new_encoder_out = []
else:
new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
if len(encoder_out["encoder_padding_mask"]) == 0:
new_encoder_padding_mask = []
else:
new_encoder_padding_mask = [
encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
]
if len(encoder_out["encoder_embedding"]) == 0:
new_encoder_embedding = []
else:
new_encoder_embedding = [
encoder_out["encoder_embedding"][0].index_select(0, new_order)
]
if len(encoder_out["src_tokens"]) == 0:
src_tokens = []
else:
src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)]
if len(encoder_out["src_lengths"]) == 0:
src_lengths = []
else:
src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)]
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": src_tokens, # B x T
"src_lengths": src_lengths, # B x 1
}
@torch.jit.export
def _reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""Dummy re-order function for beamable enc-dec attention"""
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
print("deleting {0}".format(weights_key))
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
for i in range(self.num_layers):
# update layer norms
self.layers[i].upgrade_state_dict_named(
state_dict, "{}.layers.{}".format(name, i)
)
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
class TransformerEncoder(TransformerEncoderBase):
def __init__(self, args, dictionary, embed_tokens, return_fc=False):
self.args = args
super().__init__(
TransformerConfig.from_namespace(args),
dictionary,
embed_tokens,
return_fc=return_fc,
)
def build_encoder_layer(self, args):
return super().build_encoder_layer(
TransformerConfig.from_namespace(args),
)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/transformer/transformer_encoder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models.transformer import TransformerConfig
from fairseq.models.transformer.transformer_decoder import TransformerDecoderBase
from fairseq.modules import (
LayerDropModuleList,
SinusoidalPositionalEmbedding,
transformer_layer_aug,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
class AugTransformerDecoderBase(TransformerDecoderBase):
"""
Transformer decoder augmented with an additional cross-attention. Each layer
is a :class:`AugTransformerDecoderLayerBase`.
Args:
        cfg (TransformerConfig): model configuration
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
encoder_attn_merge_type (str, optional): the way to combine outputs from
two cross-attention modules. If "sequential" is set, two cross-attention
modules are stacked sequentially. If "parallel" is set, they are processed
in parallel and combined before feeding it to FFN (default: sequential).
dropnet_ratio (float, optional): a probability to drop each cross-attention
module during training (default: 0.0).
"""
def __init__(
self,
cfg,
dictionary,
embed_tokens,
output_projection=None,
encoder_attn_merge_type="sequential",
dropnet_ratio=0.0,
):
super().__init__(
cfg,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=output_projection,
)
# assert cfg.cross_self_attention
self.cross_self_attention = cfg.cross_self_attention
if self.decoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_decoder_layer(cfg, encoder_attn_merge_type, dropnet_ratio)
for _ in range(cfg.decoder.layers)
]
)
def build_decoder_layer(
self,
cfg,
encoder_attn_merge_type="sequential",
dropnet_ratio=0,
):
layer = transformer_layer_aug.AugTransformerDecoderLayerBase(
cfg,
no_encoder_attn=False,
encoder_attn_merge_type=encoder_attn_merge_type,
dropnet_ratio=dropnet_ratio,
)
checkpoint = cfg.checkpoint_activations
if checkpoint:
offload_to_cpu = cfg.offload_activations
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
encoder_out_aug: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention, should be of size T x B x C
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
encoder_out_aug=encoder_out_aug,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
x = self.output_layer(x)
return x, extra
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
encoder_out_aug: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
return self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
encoder_out_aug,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
"""
A scriptable subclass of this class has an extract_features method and calls
super().extract_features, but super() is not supported in torchscript. A copy of
this function is made to be used in the subclass instead.
"""
def extract_features_scriptable(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
encoder_out_aug: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
bs, slen = prev_output_tokens.size()
if alignment_layer is None:
alignment_layer = self.num_layers - 1
enc: Optional[Tensor] = None
padding_mask: Optional[Tensor] = None
if encoder_out is not None and len(encoder_out["encoder_out"]) > 0:
enc = encoder_out["encoder_out"][0]
if encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0:
padding_mask = encoder_out["encoder_padding_mask"][0]
enc_aug: Optional[Tensor] = None
padding_mask_aug: Optional[Tensor] = None
if encoder_out_aug is not None and len(encoder_out_aug["encoder_out"]) > 0:
enc_aug = encoder_out_aug["encoder_out"][0]
if (
encoder_out_aug is not None
and len(encoder_out_aug["encoder_padding_mask"]) > 0
):
padding_mask_aug = encoder_out_aug["encoder_padding_mask"][0]
# embed positions
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# Prevent torchscript exporting issue for dynamic quant embedding
prev_output_tokens = prev_output_tokens.contiguous()
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
attn_aug: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, layer_attn_aug, _ = layer(
x,
enc,
padding_mask,
enc_aug,
padding_mask_aug,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if layer_attn_aug is not None and idx == alignment_layer:
attn_aug = layer_attn_aug.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if attn_aug is not None:
if alignment_heads is not None:
attn_aug = attn_aug[:alignment_heads]
# average probabilities over heads
attn_aug = attn_aug.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "attn_aug": [attn_aug], "inner_states": inner_states}
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
if f"{name}.output_projection.weight" not in state_dict:
if self.share_input_output_embed:
embed_out_key = f"{name}.embed_tokens.weight"
else:
embed_out_key = f"{name}.embed_out"
if embed_out_key in state_dict:
state_dict[f"{name}.output_projection.weight"] = state_dict[
embed_out_key
]
if not self.share_input_output_embed:
del state_dict[embed_out_key]
for i in range(self.num_layers):
# update layer norms
layer_norm_map = {
"0": "self_attn_layer_norm",
"1": "encoder_attn_layer_norm",
"2": "encoder_attn_layer_norm2",
"3": "final_layer_norm",
}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
if k in state_dict:
state_dict[
"{}.layers.{}.{}.{}".format(name, i, new, m)
] = state_dict[k]
del state_dict[k]
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
class AugTransformerDecoder(AugTransformerDecoderBase):
def __init__(
self,
args,
dictionary,
embed_tokens,
output_projection=None,
):
self.args = args
super().__init__(
TransformerConfig.from_namespace(args),
dictionary,
embed_tokens,
output_projection=output_projection,
encoder_attn_merge_type=getattr(
args, "synthesizer_augmented_cross_attention_merge_type", "sequential"
),
dropnet_ratio=getattr(args, "dropnet_ratio", 0),
)
def build_output_projection(self, args, dictionary, embed_tokens):
super().build_output_projection(
TransformerConfig.from_namespace(args), dictionary, embed_tokens
)
def build_decoder_layer(
self,
args,
encoder_attn_merge_type="sequential",
dropnet_ratio=0,
):
return super().build_decoder_layer(
TransformerConfig.from_namespace(args),
encoder_attn_merge_type=encoder_attn_merge_type,
dropnet_ratio=dropnet_ratio,
)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/transformer/transformer_decoder_aug.py |
# Copyright (c) Facebook Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .transformer_config import (
TransformerConfig,
DEFAULT_MAX_SOURCE_POSITIONS,
DEFAULT_MAX_TARGET_POSITIONS,
DEFAULT_MIN_PARAMS_TO_WRAP,
)
from .transformer_decoder import TransformerDecoder, TransformerDecoderBase, Linear
from .transformer_encoder import TransformerEncoder, TransformerEncoderBase
from .transformer_legacy import (
TransformerModel,
base_architecture,
tiny_architecture,
transformer_iwslt_de_en,
transformer_wmt_en_de,
transformer_vaswani_wmt_en_de_big,
transformer_vaswani_wmt_en_fr_big,
transformer_wmt_en_de_big,
transformer_wmt_en_de_big_t2t,
)
from .transformer_base import TransformerModelBase, Embedding
__all__ = [
"TransformerModelBase",
"TransformerConfig",
"TransformerDecoder",
"TransformerDecoderBase",
"TransformerEncoder",
"TransformerEncoderBase",
"TransformerModel",
"Embedding",
"Linear",
"base_architecture",
"tiny_architecture",
"transformer_iwslt_de_en",
"transformer_wmt_en_de",
"transformer_vaswani_wmt_en_de_big",
"transformer_vaswani_wmt_en_fr_big",
"transformer_wmt_en_de_big",
"transformer_wmt_en_de_big_t2t",
"DEFAULT_MAX_SOURCE_POSITIONS",
"DEFAULT_MAX_TARGET_POSITIONS",
"DEFAULT_MIN_PARAMS_TO_WRAP",
]
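# Editor's note (illustrative, not part of the original file): downstream code
# usually imports the re-exported public API from this package, e.g.
#     from fairseq.models.transformer import TransformerModel, TransformerConfig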
| EXA-1-master | exa/libraries/fairseq/fairseq/models/transformer/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import FairseqIncrementalDecoder
from fairseq.models.transformer import TransformerConfig
from fairseq.modules import (
AdaptiveSoftmax,
BaseLayer,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
transformer_layer,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
# rewrite name for backward compatibility in `make_generation_fast_`
def module_name_fordropout(module_name: str) -> str:
if module_name == "TransformerDecoderBase":
return "TransformerDecoder"
else:
return module_name
class TransformerDecoderBase(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *cfg.decoder.layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
        cfg (TransformerConfig): transformer configuration
        dictionary (~fairseq.data.Dictionary): decoding dictionary
        embed_tokens (torch.nn.Embedding): output embedding
        no_encoder_attn (bool, optional): disable attention over encoder outputs
            (default: False).
"""
def __init__(
self,
cfg,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
):
self.cfg = cfg
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.dropout_module = FairseqDropout(
cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__)
)
self.decoder_layerdrop = cfg.decoder.layerdrop
self.share_input_output_embed = cfg.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = cfg.decoder.embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = cfg.decoder.output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = cfg.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)
if not cfg.adaptive_input and cfg.quant_noise.pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
cfg.quant_noise.pq,
cfg.quant_noise.pq_block_size,
)
else:
self.quant_noise = None
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
self.max_target_positions,
embed_dim,
self.padding_idx,
learned=cfg.decoder.learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
if cfg.layernorm_embedding:
self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export)
else:
self.layernorm_embedding = None
self.cross_self_attention = cfg.cross_self_attention
if self.decoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_decoder_layer(cfg, no_encoder_attn)
for _ in range(cfg.decoder.layers)
]
)
self.num_layers = len(self.layers)
if cfg.decoder.normalize_before and not cfg.no_decoder_final_norm:
self.layer_norm = LayerNorm(embed_dim, export=cfg.export)
else:
self.layer_norm = None
self.project_out_dim = (
Linear(embed_dim, self.output_embed_dim, bias=False)
if embed_dim != self.output_embed_dim and not cfg.tie_adaptive_weights
else None
)
self.adaptive_softmax = None
self.output_projection = output_projection
if self.output_projection is None:
self.build_output_projection(cfg, dictionary, embed_tokens)
def build_output_projection(self, cfg, dictionary, embed_tokens):
if cfg.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
utils.eval_str_list(cfg.adaptive_softmax_cutoff, type=int),
dropout=cfg.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if cfg.tie_adaptive_weights else None,
factor=cfg.adaptive_softmax_factor,
tie_proj=cfg.tie_adaptive_proj,
)
elif self.share_input_output_embed:
self.output_projection = nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
self.output_projection.weight = self.embed_tokens.weight
else:
self.output_projection = nn.Linear(
self.output_embed_dim, len(dictionary), bias=False
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.output_embed_dim**-0.5
)
num_base_layers = cfg.base_layers
for i in range(num_base_layers):
self.layers.insert(
((i + 1) * cfg.decoder.layers) // (num_base_layers + 1),
BaseLayer(cfg),
)
def build_decoder_layer(self, cfg, no_encoder_attn=False):
layer = transformer_layer.TransformerDecoderLayerBase(cfg, no_encoder_attn)
checkpoint = cfg.checkpoint_activations
if checkpoint:
offload_to_cpu = cfg.offload_activations
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention, should be of size T x B x C
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
x = self.output_layer(x)
return x, extra
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
return self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
"""
    A scriptable subclass of this class has an extract_features method and calls
    super().extract_features, but super() is not supported in TorchScript, so
    extract_features_scriptable below is a copy of this function for the subclass
    to call instead.
"""
def extract_features_scriptable(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
bs, slen = prev_output_tokens.size()
if alignment_layer is None:
alignment_layer = self.num_layers - 1
enc: Optional[Tensor] = None
padding_mask: Optional[Tensor] = None
if encoder_out is not None and len(encoder_out["encoder_out"]) > 0:
enc = encoder_out["encoder_out"][0]
if encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0:
padding_mask = encoder_out["encoder_padding_mask"][0]
# embed positions
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# Prevent torchscript exporting issue for dynamic quant embedding
prev_output_tokens = prev_output_tokens.contiguous()
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = layer(
x,
enc,
padding_mask,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "inner_states": inner_states}
def output_layer(self, features):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
return self.output_projection(features)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
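        # Causal self-attention mask for the current prefix length. For example, with
        # dim = 3 the cached mask is
        #     [[0., -inf, -inf],
        #      [0.,   0., -inf],
        #      [0.,   0.,   0.]]
        # so position i can only attend to positions <= i; the buffer below is rebuilt
        # only when the requested size grows or the device changes.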
dim = tensor.size(0)
# self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
)
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
if f"{name}.output_projection.weight" not in state_dict:
if self.share_input_output_embed:
embed_out_key = f"{name}.embed_tokens.weight"
else:
embed_out_key = f"{name}.embed_out"
if embed_out_key in state_dict:
state_dict[f"{name}.output_projection.weight"] = state_dict[
embed_out_key
]
if not self.share_input_output_embed:
del state_dict[embed_out_key]
for i in range(self.num_layers):
# update layer norms
layer_norm_map = {
"0": "self_attn_layer_norm",
"1": "encoder_attn_layer_norm",
"2": "final_layer_norm",
}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
if k in state_dict:
state_dict[
"{}.layers.{}.{}.{}".format(name, i, new, m)
] = state_dict[k]
del state_dict[k]
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
class TransformerDecoder(TransformerDecoderBase):
def __init__(
self,
args,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
):
self.args = args
super().__init__(
TransformerConfig.from_namespace(args),
dictionary,
embed_tokens,
no_encoder_attn=no_encoder_attn,
output_projection=output_projection,
)
def build_output_projection(self, args, dictionary, embed_tokens):
super().build_output_projection(
TransformerConfig.from_namespace(args), dictionary, embed_tokens
)
def build_decoder_layer(self, args, no_encoder_attn=False):
return super().build_decoder_layer(
TransformerConfig.from_namespace(args), no_encoder_attn=no_encoder_attn
)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/transformer/transformer_decoder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
import logging
from fairseq import utils
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.distributed import fsdp_wrap
from fairseq.models import FairseqEncoderDecoderModel
from fairseq.models.transformer import (
TransformerConfig,
TransformerDecoderBase,
TransformerEncoderBase,
)
logger = logging.getLogger(__name__)
class TransformerModelBase(FairseqEncoderDecoderModel):
"""
Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
"""
def __init__(self, cfg, encoder, decoder):
super().__init__(encoder, decoder)
self.cfg = cfg
self.supports_align_args = True
@classmethod
def add_args(cls, parser):
"""Add model-specific arguments to the parser."""
# we want to build the args recursively in this case.
gen_parser_from_dataclass(
parser, TransformerConfig(), delete_default=False, with_prefix=""
)
@classmethod
def build_model(cls, cfg, task):
"""Build a new model instance."""
# -- TODO T96535332
# bug caused by interaction between OmegaConf II and argparsing
cfg.decoder.input_dim = int(cfg.decoder.input_dim)
cfg.decoder.output_dim = int(cfg.decoder.output_dim)
# --
if cfg.encoder.layers_to_keep:
cfg.encoder.layers = len(cfg.encoder.layers_to_keep.split(","))
if cfg.decoder.layers_to_keep:
cfg.decoder.layers = len(cfg.decoder.layers_to_keep.split(","))
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if cfg.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if cfg.encoder.embed_dim != cfg.decoder.embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if cfg.decoder.embed_path and (
cfg.decoder.embed_path != cfg.encoder.embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = cls.build_embedding(
cfg, src_dict, cfg.encoder.embed_dim, cfg.encoder.embed_path
)
decoder_embed_tokens = encoder_embed_tokens
cfg.share_decoder_input_output_embed = True
elif cfg.merge_src_tgt_embed:
logger.info(f"source dict size: {len(src_dict)}")
logger.info(f"target dict size: {len(tgt_dict)}")
src_dict.update(tgt_dict)
task.src_dict = src_dict
task.tgt_dict = src_dict
logger.info(f"merged dict size: {len(src_dict)}")
encoder_embed_tokens = cls.build_embedding(
cfg, src_dict, cfg.encoder.embed_dim
)
decoder_embed_tokens = encoder_embed_tokens
cfg.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = cls.build_embedding(
cfg, src_dict, cfg.encoder.embed_dim, cfg.encoder.embed_path
)
decoder_embed_tokens = cls.build_embedding(
cfg, tgt_dict, cfg.decoder.embed_dim, cfg.decoder.embed_path
)
if cfg.offload_activations:
cfg.checkpoint_activations = True # offloading implies checkpointing
encoder = cls.build_encoder(cfg, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens)
return cls(cfg, encoder, decoder)
@classmethod
def build_embedding(cls, cfg, dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
@classmethod
def build_encoder(cls, cfg, src_dict, embed_tokens):
return TransformerEncoderBase(cfg, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, cfg, tgt_dict, embed_tokens):
return TransformerDecoderBase(
cfg,
tgt_dict,
embed_tokens,
no_encoder_attn=cfg.no_cross_attention,
)
    # TorchScript doesn't support optional arguments with variable length (**kwargs).
    # The current workaround is to declare the union of all arguments explicitly in child classes.
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
return_all_hiddens: bool = True,
features_only: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Run the forward pass for an encoder-decoder model.
Copied from the base class, but without ``**kwargs``,
which are not supported by TorchScript.
"""
encoder_out = self.encoder(
src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
)
return decoder_out
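    # For reference, the shapes flowing through forward() above are: src_tokens and
    # prev_output_tokens are (batch, src_len) / (batch, tgt_len) LongTensors,
    # encoder_out["encoder_out"][0] is (src_len, batch, embed_dim), and the first
    # element of decoder_out is (batch, tgt_len, vocab), or (batch, tgt_len, embed_dim)
    # when features_only is True.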
    # get_normalized_probs is defined in the base Fairseq model, which is not scriptable,
    # so we override it here and delegate to the scriptable helper
    # (get_normalized_probs_scriptable) from the base class.
@torch.jit.export
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
| EXA-1-master | exa/libraries/fairseq/fairseq/models/transformer/transformer_base.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.utils import new_arange
# -------------- Helper Functions --------------------------------------------------- #
def load_libnat():
try:
from fairseq import libnat_cuda
return libnat_cuda, True
except ImportError as e:
print(str(e) + "... fall back to CPU version")
try:
from fairseq import libnat
return libnat, False
except ImportError as e:
import sys
sys.stderr.write(
"ERROR: missing libnat_cuda. run `python setup.py build_ext --inplace`\n"
)
raise e
def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx):
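    # Builds the insertion-training targets; roughly, for a current hypothesis
    # in_tokens = [<s>, a, c, </s>] and reference out_tokens = [<s>, a, b, c, </s>]:
    #   - mask_ins_targets counts how many placeholders to insert into each of the
    #     len(in) - 1 slots between consecutive input tokens (here [0, 1, 0]),
    #   - masked_tgt_masks marks the inserted positions of out_tokens,
    #   - masked_tgt_tokens is the reference with those positions replaced by <unk>
    #     (here [<s>, a, <unk>, c, </s>]).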
libnat, use_cuda = load_libnat()
def _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx):
in_masks = in_tokens.ne(padding_idx)
out_masks = out_tokens.ne(padding_idx)
mask_ins_targets, masked_tgt_masks = libnat.generate_insertion_labels(
out_tokens.int(),
libnat.levenshtein_distance(
in_tokens.int(),
out_tokens.int(),
in_masks.sum(1).int(),
out_masks.sum(1).int(),
),
)
masked_tgt_masks = masked_tgt_masks.bool() & out_masks
mask_ins_targets = mask_ins_targets.type_as(in_tokens)[
:, 1 : in_masks.size(1)
].masked_fill_(~in_masks[:, 1:], 0)
masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx)
return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets
def _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx):
in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1)
in_tokens_list = [
[t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist())
]
out_tokens_list = [
[t for t in s if t != padding_idx]
for i, s in enumerate(out_tokens.tolist())
]
full_labels = libnat.suggested_ed2_path(
in_tokens_list, out_tokens_list, padding_idx
)
mask_inputs = [
[len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels
]
# generate labels
masked_tgt_masks = []
for mask_input in mask_inputs:
mask_label = []
for beam_size in mask_input[1:-1]: # HACK 1:-1
mask_label += [0] + [1 for _ in range(beam_size)]
masked_tgt_masks.append(
mask_label + [0 for _ in range(out_seq_len - len(mask_label))]
)
mask_ins_targets = [
mask_input[1:-1]
+ [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))]
for mask_input in mask_inputs
]
# transform to tensor
masked_tgt_masks = torch.tensor(
masked_tgt_masks, device=out_tokens.device
).bool()
mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device)
masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx)
return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets
if use_cuda:
return _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx)
return _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx)
def _get_del_targets(in_tokens, out_tokens, padding_idx):
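    # Builds the deletion-training targets: roughly one label per token of in_tokens,
    # 1 if the token has to be deleted to reach out_tokens and 0 otherwise, e.g.
    # in_tokens = [<s>, a, x, c, </s>] vs. out_tokens = [<s>, a, c, </s>] gives
    # [0, 0, 1, 0, 0]; padding positions are always labelled 0.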
libnat, use_cuda = load_libnat()
def _get_del_targets_cuda(in_tokens, out_tokens, padding_idx):
in_masks = in_tokens.ne(padding_idx)
out_masks = out_tokens.ne(padding_idx)
word_del_targets = libnat.generate_deletion_labels(
in_tokens.int(),
libnat.levenshtein_distance(
in_tokens.int(),
out_tokens.int(),
in_masks.sum(1).int(),
out_masks.sum(1).int(),
),
)
word_del_targets = word_del_targets.type_as(in_tokens).masked_fill_(
~in_masks, 0
)
return word_del_targets
def _get_del_targets_cpu(in_tokens, out_tokens, padding_idx):
out_seq_len = out_tokens.size(1)
with torch.cuda.device_of(in_tokens):
in_tokens_list = [
[t for t in s if t != padding_idx]
for i, s in enumerate(in_tokens.tolist())
]
out_tokens_list = [
[t for t in s if t != padding_idx]
for i, s in enumerate(out_tokens.tolist())
]
full_labels = libnat.suggested_ed2_path(
in_tokens_list, out_tokens_list, padding_idx
)
word_del_targets = [b[-1] for b in full_labels]
word_del_targets = [
labels + [0 for _ in range(out_seq_len - len(labels))]
for labels in word_del_targets
]
# transform to tensor
word_del_targets = torch.tensor(word_del_targets, device=out_tokens.device)
return word_del_targets
if use_cuda:
return _get_del_targets_cuda(in_tokens, out_tokens, padding_idx)
return _get_del_targets_cpu(in_tokens, out_tokens, padding_idx)
def _apply_ins_masks(
in_tokens, in_scores, mask_ins_pred, padding_idx, unk_idx, eos_idx
):
in_masks = in_tokens.ne(padding_idx)
in_lengths = in_masks.sum(1)
# HACK: hacky way to shift all the paddings to eos first.
in_tokens.masked_fill_(~in_masks, eos_idx)
mask_ins_pred.masked_fill_(~in_masks[:, 1:], 0)
out_lengths = in_lengths + mask_ins_pred.sum(1)
out_max_len = out_lengths.max()
out_masks = new_arange(out_lengths, out_max_len)[None, :] < out_lengths[:, None]
reordering = (mask_ins_pred + in_masks[:, 1:].long()).cumsum(1)
out_tokens = (
in_tokens.new_zeros(in_tokens.size(0), out_max_len)
.fill_(padding_idx)
.masked_fill_(out_masks, unk_idx)
)
out_tokens[:, 0] = in_tokens[:, 0]
out_tokens.scatter_(1, reordering, in_tokens[:, 1:])
out_scores = None
if in_scores is not None:
in_scores.masked_fill_(~in_masks, 0)
out_scores = in_scores.new_zeros(*out_tokens.size())
out_scores[:, 0] = in_scores[:, 0]
out_scores.scatter_(1, reordering, in_scores[:, 1:])
return out_tokens, out_scores
def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, unk_idx):
word_ins_masks = in_tokens.eq(unk_idx)
out_tokens = in_tokens.masked_scatter(word_ins_masks, word_ins_pred[word_ins_masks])
if in_scores is not None:
out_scores = in_scores.masked_scatter(
word_ins_masks, word_ins_scores[word_ins_masks]
)
else:
out_scores = None
return out_tokens, out_scores
def _apply_del_words(
in_tokens, in_scores, in_attn, word_del_pred, padding_idx, bos_idx, eos_idx
):
# apply deletion to a tensor
in_masks = in_tokens.ne(padding_idx)
bos_eos_masks = in_tokens.eq(bos_idx) | in_tokens.eq(eos_idx)
max_len = in_tokens.size(1)
word_del_pred.masked_fill_(~in_masks, 1)
word_del_pred.masked_fill_(bos_eos_masks, 0)
reordering = new_arange(in_tokens).masked_fill_(word_del_pred, max_len).sort(1)[1]
out_tokens = in_tokens.masked_fill(word_del_pred, padding_idx).gather(1, reordering)
out_scores = None
if in_scores is not None:
out_scores = in_scores.masked_fill(word_del_pred, 0).gather(1, reordering)
out_attn = None
if in_attn is not None:
_mask = word_del_pred[:, :, None].expand_as(in_attn)
_reordering = reordering[:, :, None].expand_as(in_attn)
out_attn = in_attn.masked_fill(_mask, 0.0).gather(1, _reordering)
return out_tokens, out_scores, out_attn
def _skip(x, mask):
"""
    Slice a tensor along dim 0 (or dim 1, whichever matches the mask size) by a boolean
    mask. Supports tensors as well as lists/dicts of tensors.
"""
if isinstance(x, int):
return x
if x is None:
return None
if isinstance(x, torch.Tensor):
if x.size(0) == mask.size(0):
return x[mask]
elif x.size(1) == mask.size(0):
return x[:, mask]
if isinstance(x, list):
return [_skip(x_i, mask) for x_i in x]
if isinstance(x, dict):
return {k: _skip(v, mask) for k, v in x.items()}
raise NotImplementedError
def _skip_encoder_out(encoder, encoder_out, mask):
if not mask.any():
return encoder_out
else:
return encoder.reorder_encoder_out(
encoder_out, mask.nonzero(as_tuple=False).squeeze()
)
def _fill(x, mask, y, padding_idx):
"""
    Fill the rows of tensor x selected by mask (along dim 0) with the rows of y.
"""
if x is None:
return y
assert x.dim() == y.dim() and mask.size(0) == x.size(0)
assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2))
n_selected = mask.sum()
assert n_selected == y.size(0)
if n_selected == x.size(0):
return y
if x.size(1) < y.size(1):
dims = [x.size(0), y.size(1) - x.size(1)]
if x.dim() == 3:
dims.append(x.size(2))
x = torch.cat([x, x.new_zeros(*dims).fill_(padding_idx)], 1)
x[mask] = y
elif x.size(1) > y.size(1):
x[mask] = padding_idx
if x.dim() == 2:
x[mask, : y.size(1)] = y
else:
x[mask, : y.size(1), :] = y
else:
x[mask] = y
return x
| EXA-1-master | exa/libraries/fairseq/fairseq/models/nat/levenshtein_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel
def _sequential_poisoning(s, V, beta=0.33, bos=2, eos=3, pad=1):
# s: input batch
# V: vocabulary size
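    # Each position is corrupted independently: with probability beta/3 its token is
    # replaced by a random token, with probability beta/3 it is duplicated into the next
    # position, with probability beta/3 it is swapped with the next token, and otherwise
    # it is left unchanged. pad/bos/eos positions are never corrupted, and swap/repeat
    # are skipped when the next token is </s>.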
rand_words = torch.randint(low=4, high=V, size=s.size(), device=s.device)
choices = torch.rand(size=s.size(), device=s.device)
choices.masked_fill_((s == pad) | (s == bos) | (s == eos), 1)
replace = choices < beta / 3
repeat = (choices >= beta / 3) & (choices < beta * 2 / 3)
swap = (choices >= beta * 2 / 3) & (choices < beta)
safe = choices >= beta
for i in range(s.size(1) - 1):
rand_word = rand_words[:, i]
next_word = s[:, i + 1]
self_word = s[:, i]
replace_i = replace[:, i]
swap_i = swap[:, i] & (next_word != 3)
repeat_i = repeat[:, i] & (next_word != 3)
safe_i = safe[:, i] | ((next_word == 3) & (~replace_i))
s[:, i] = (
self_word * (safe_i | repeat_i).long()
+ next_word * swap_i.long()
+ rand_word * replace_i.long()
)
s[:, i + 1] = (
next_word * (safe_i | replace_i).long()
+ self_word * (swap_i | repeat_i).long()
)
return s
def gumbel_noise(input, TINY=1e-8):
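    # Standard Gumbel noise via the inverse-CDF trick -log(-log(u)), u ~ Uniform(0, 1),
    # with TINY guarding against log(0). Adding this noise to logits and taking the
    # argmax samples from the corresponding softmax distribution (the Gumbel-max trick),
    # which is how it is used for --stochastic-approx in forward() below.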
return (
input.new_zeros(*input.size())
.uniform_()
.add_(TINY)
.log_()
.neg_()
.add_(TINY)
.log_()
.neg_()
)
@register_model("iterative_nonautoregressive_transformer")
class IterNATransformerModel(NATransformerModel):
@staticmethod
def add_args(parser):
NATransformerModel.add_args(parser)
parser.add_argument(
"--train-step",
type=int,
help="number of refinement iterations during training",
)
parser.add_argument(
"--dae-ratio",
type=float,
help="the probability of switching to the denoising auto-encoder loss",
)
parser.add_argument(
"--stochastic-approx",
action="store_true",
help="sampling from the decoder as the inputs for next iteration",
)
@classmethod
def build_model(cls, args, task):
model = super().build_model(args, task)
model.train_step = getattr(args, "train_step", 4)
model.dae_ratio = getattr(args, "dae_ratio", 0.5)
model.stochastic_approx = getattr(args, "stochastic_approx", False)
return model
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
B, T = prev_output_tokens.size()
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# length prediction
length_out = self.decoder.forward_length(
normalize=False, encoder_out=encoder_out
)
length_tgt = self.decoder.forward_length_prediction(
length_out, encoder_out, tgt_tokens
)
# decoding
word_ins_outs, word_ins_tgts, word_ins_masks = [], [], []
for t in range(self.train_step):
word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
step=t,
)
word_ins_tgt = tgt_tokens
word_ins_mask = word_ins_tgt.ne(self.pad)
word_ins_outs.append(word_ins_out)
word_ins_tgts.append(word_ins_tgt)
word_ins_masks.append(word_ins_mask)
if t < (self.train_step - 1):
# prediction for next iteration
if self.stochastic_approx:
word_ins_prediction = (
word_ins_out + gumbel_noise(word_ins_out)
).max(-1)[1]
else:
word_ins_prediction = word_ins_out.max(-1)[1]
prev_output_tokens = prev_output_tokens.masked_scatter(
word_ins_mask, word_ins_prediction[word_ins_mask]
)
if self.dae_ratio > 0:
# we do not perform denoising for the first iteration
                    corrupted = (
                        torch.rand(size=(B,), device=prev_output_tokens.device)
                        < self.dae_ratio
                    )
                    corrupted_tokens = _sequential_poisoning(
                        tgt_tokens[corrupted],
                        len(self.tgt_dict),
                        0.33,
                        self.bos,
                        self.eos,
                        self.pad,
                    )
                    prev_output_tokens[corrupted] = corrupted_tokens
# concat everything
word_ins_out = torch.cat(word_ins_outs, 0)
word_ins_tgt = torch.cat(word_ins_tgts, 0)
word_ins_mask = torch.cat(word_ins_masks, 0)
return {
"word_ins": {
"out": word_ins_out,
"tgt": word_ins_tgt,
"mask": word_ins_mask,
"ls": self.args.label_smoothing,
"nll_loss": True,
},
"length": {
"out": length_out,
"tgt": length_tgt,
"factor": self.decoder.length_loss_factor,
},
}
@register_model_architecture(
"iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer"
)
def inat_base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# --- special arguments ---
args.sg_length_pred = getattr(args, "sg_length_pred", False)
args.pred_length_offset = getattr(args, "pred_length_offset", False)
args.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
args.ngram_predictor = getattr(args, "ngram_predictor", 1)
args.src_embedding_copy = getattr(args, "src_embedding_copy", False)
args.train_step = getattr(args, "train_step", 4)
args.dae_ratio = getattr(args, "dae_ratio", 0.5)
args.stochastic_approx = getattr(args, "stochastic_approx", False)
@register_model_architecture(
"iterative_nonautoregressive_transformer",
"iterative_nonautoregressive_transformer_wmt_en_de",
)
def iter_nat_wmt_en_de(args):
inat_base_architecture(args)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/nat/iterative_nonautoregressive_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
TransformerModel,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params
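# The two decorators below add model ensembling on top of the single-model code paths:
# ensemble_encoder runs every model's encoder and stacks the outputs along a new
# trailing dimension, while ensemble_decoder runs each decoder on its own slice of that
# stacked encoder output, combines normalized scores with logsumexp(...) - log(n_models)
# (an average in probability space), and simply stacks any remaining outputs.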
def ensemble_encoder(func):
def wrapper(self, *args, **kwargs):
if self.ensemble_models is None or len(self.ensemble_models) == 1:
return func(self, *args, **kwargs)
encoder_outs = [
func(model, *args, **kwargs, return_all_hiddens=True)
for model in self.ensemble_models
]
_encoder_out = encoder_outs[0].copy()
def stack(key):
outs = [e[key][0] for e in encoder_outs]
return [torch.stack(outs, -1) if outs[0] is not None else None]
_encoder_out["encoder_out"] = stack("encoder_out")
_encoder_out["encoder_embedding"] = stack("encoder_embedding")
num_layers = len(_encoder_out["encoder_states"])
if num_layers > 0:
_encoder_out["encoder_states"] = [
torch.stack([e["encoder_states"][i] for e in encoder_outs], -1)
for i in range(num_layers)
]
return _encoder_out
return wrapper
def ensemble_decoder(func):
def wrapper(self, normalize=False, encoder_out=None, *args, **kwargs):
if self.ensemble_models is None or len(self.ensemble_models) == 1:
return func(
self, normalize=normalize, encoder_out=encoder_out, *args, **kwargs
)
def _replace(encoder_out, new_val):
new_encoder_out = encoder_out.copy()
new_encoder_out["encoder_out"] = [new_val]
return new_encoder_out
action_outs = [
func(
model,
normalize=normalize,
encoder_out=_replace(
encoder_out, encoder_out["encoder_out"][0][:, :, :, i]
),
*args,
**kwargs
)
for i, model in enumerate(self.ensemble_models)
]
        if not isinstance(action_outs[0], tuple):  # a single return value: wrap it in a list
action_outs = [[a] for a in action_outs]
else:
action_outs = [list(a) for a in action_outs]
ensembled_outs = []
for i in range(len(action_outs[0])):
if i == 0 and normalize:
ensembled_outs += [
torch.logsumexp(
torch.stack([a[i] for a in action_outs], -1), dim=-1
)
- math.log(len(self.ensemble_models))
]
elif action_outs[0][i] is not None:
ensembled_outs += [torch.stack([a[i] for a in action_outs], -1)]
else:
ensembled_outs += [None]
if len(ensembled_outs) == 1:
return ensembled_outs[0]
return tuple(ensembled_outs)
return wrapper
class FairseqNATModel(TransformerModel):
"""
Abstract class for all nonautoregressive-based models
"""
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
self.tgt_dict = decoder.dictionary
self.bos = decoder.dictionary.bos()
self.eos = decoder.dictionary.eos()
self.pad = decoder.dictionary.pad()
self.unk = decoder.dictionary.unk()
self.ensemble_models = None
@property
def allow_length_beam(self):
return False
@property
def allow_ensemble(self):
return True
def enable_ensemble(self, models):
self.encoder.ensemble_models = [m.encoder for m in models]
self.decoder.ensemble_models = [m.decoder for m in models]
@staticmethod
def add_args(parser):
TransformerModel.add_args(parser)
parser.add_argument(
"--apply-bert-init",
action="store_true",
help="use custom param initialization for BERT",
)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
decoder = FairseqNATDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
decoder.apply(init_bert_params)
return decoder
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
encoder = FairseqNATEncoder(args, src_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
encoder.apply(init_bert_params)
return encoder
def forward_encoder(self, encoder_inputs):
return self.encoder(*encoder_inputs)
    def forward_decoder(self, *args, **kwargs):
        raise NotImplementedError
    def initialize_output_tokens(self, *args, **kwargs):
        raise NotImplementedError
    def forward(self, *args, **kwargs):
        raise NotImplementedError
class FairseqNATEncoder(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
self.ensemble_models = None
@ensemble_encoder
def forward(self, *args, **kwargs):
return super().forward(*args, **kwargs)
class FairseqNATDecoder(TransformerDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(args, dictionary, embed_tokens, no_encoder_attn)
self.ensemble_models = None
| EXA-1-master | exa/libraries/fairseq/fairseq/models/nat/fairseq_nat_model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .fairseq_nat_model import *
from .nonautoregressive_transformer import *
from .nat_crf_transformer import *
from .iterative_nonautoregressive_transformer import *
from .cmlm_transformer import *
from .levenshtein_transformer import *
from .insertion_transformer import *
| EXA-1-master | exa/libraries/fairseq/fairseq/models/nat/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder
from fairseq.models.transformer import Embedding
from fairseq.modules import TransformerDecoderLayer
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .levenshtein_utils import (
_apply_del_words,
_apply_ins_masks,
_apply_ins_words,
_fill,
_get_del_targets,
_get_ins_targets,
_skip,
_skip_encoder_out,
)
@register_model("levenshtein_transformer")
class LevenshteinTransformerModel(FairseqNATModel):
@property
def allow_length_beam(self):
return False
@staticmethod
def add_args(parser):
FairseqNATModel.add_args(parser)
parser.add_argument(
"--early-exit",
default="6,6,6",
type=str,
help="number of decoder layers before word_del, mask_ins, word_ins",
)
parser.add_argument(
"--no-share-discriminator",
action="store_true",
help="separate parameters for discriminator",
)
parser.add_argument(
"--no-share-maskpredictor",
action="store_true",
help="separate parameters for mask-predictor",
)
parser.add_argument(
"--share-discriminator-maskpredictor",
action="store_true",
help="share the parameters for both mask-predictor and discriminator",
)
parser.add_argument(
"--sampling-for-deletion",
action="store_true",
help="instead of argmax, use sampling to predict the tokens",
)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
decoder = LevenshteinTransformerDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
decoder.apply(init_bert_params)
return decoder
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
assert tgt_tokens is not None, "forward function only supports training."
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# generate training labels for insertion
masked_tgt_masks, masked_tgt_tokens, mask_ins_targets = _get_ins_targets(
prev_output_tokens, tgt_tokens, self.pad, self.unk
)
mask_ins_targets = mask_ins_targets.clamp(min=0, max=255) # for safe prediction
mask_ins_masks = prev_output_tokens[:, 1:].ne(self.pad)
mask_ins_out, _ = self.decoder.forward_mask_ins(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
word_ins_out, _ = self.decoder.forward_word_ins(
normalize=False,
prev_output_tokens=masked_tgt_tokens,
encoder_out=encoder_out,
)
# make online prediction
if self.decoder.sampling_for_deletion:
word_predictions = torch.multinomial(
F.softmax(word_ins_out, -1).view(-1, word_ins_out.size(-1)), 1
).view(word_ins_out.size(0), -1)
else:
word_predictions = F.log_softmax(word_ins_out, dim=-1).max(2)[1]
word_predictions.masked_scatter_(
~masked_tgt_masks, tgt_tokens[~masked_tgt_masks]
)
# generate training labels for deletion
word_del_targets = _get_del_targets(word_predictions, tgt_tokens, self.pad)
word_del_out, _ = self.decoder.forward_word_del(
normalize=False,
prev_output_tokens=word_predictions,
encoder_out=encoder_out,
)
word_del_masks = word_predictions.ne(self.pad)
return {
"mask_ins": {
"out": mask_ins_out,
"tgt": mask_ins_targets,
"mask": mask_ins_masks,
"ls": 0.01,
},
"word_ins": {
"out": word_ins_out,
"tgt": tgt_tokens,
"mask": masked_tgt_masks,
"ls": self.args.label_smoothing,
"nll_loss": True,
},
"word_del": {
"out": word_del_out,
"tgt": word_del_targets,
"mask": word_del_masks,
},
}
def forward_decoder(
self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs
):
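        # One refinement iteration, run as a pipeline: (1) delete words predicted as
        # unnecessary, (2) insert <unk> placeholders, (3) fill the placeholders with
        # predicted words. Each sub-step only runs on the sentences it applies to
        # (the can_* masks below) and its results are scattered back with _fill.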
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
attn = decoder_out.attn
history = decoder_out.history
bsz = output_tokens.size(0)
if max_ratio is None:
max_lens = torch.zeros_like(output_tokens).fill_(255)
else:
if not encoder_out["encoder_padding_mask"]:
                max_src_len = encoder_out["encoder_out"][0].size(0)
                src_lens = encoder_out["encoder_out"][0].new(bsz).fill_(max_src_len)
else:
src_lens = (~encoder_out["encoder_padding_mask"][0]).sum(1)
max_lens = (src_lens * max_ratio).clamp(min=10).long()
# delete words
        # never delete the <s> and </s> tokens
        can_del_word = output_tokens.ne(self.pad).sum(1) > 2
        if can_del_word.sum() != 0:  # skip this step if no sentence has anything to delete
word_del_score, word_del_attn = self.decoder.forward_word_del(
normalize=True,
prev_output_tokens=_skip(output_tokens, can_del_word),
encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_del_word),
)
word_del_pred = word_del_score.max(-1)[1].bool()
_tokens, _scores, _attn = _apply_del_words(
output_tokens[can_del_word],
output_scores[can_del_word],
word_del_attn,
word_del_pred,
self.pad,
self.bos,
self.eos,
)
output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad)
output_scores = _fill(output_scores, can_del_word, _scores, 0)
attn = _fill(attn, can_del_word, _attn, 0.0)
if history is not None:
history.append(output_tokens.clone())
# insert placeholders
can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens
if can_ins_mask.sum() != 0:
mask_ins_score, _ = self.decoder.forward_mask_ins(
normalize=True,
prev_output_tokens=_skip(output_tokens, can_ins_mask),
encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_mask),
)
if eos_penalty > 0.0:
mask_ins_score[:, :, 0] = mask_ins_score[:, :, 0] - eos_penalty
mask_ins_pred = mask_ins_score.max(-1)[1]
mask_ins_pred = torch.min(
mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred)
)
_tokens, _scores = _apply_ins_masks(
output_tokens[can_ins_mask],
output_scores[can_ins_mask],
mask_ins_pred,
self.pad,
self.unk,
self.eos,
)
output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad)
output_scores = _fill(output_scores, can_ins_mask, _scores, 0)
if history is not None:
history.append(output_tokens.clone())
# insert words
can_ins_word = output_tokens.eq(self.unk).sum(1) > 0
if can_ins_word.sum() != 0:
word_ins_score, word_ins_attn = self.decoder.forward_word_ins(
normalize=True,
prev_output_tokens=_skip(output_tokens, can_ins_word),
encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_word),
)
word_ins_score, word_ins_pred = word_ins_score.max(-1)
_tokens, _scores = _apply_ins_words(
output_tokens[can_ins_word],
output_scores[can_ins_word],
word_ins_pred,
word_ins_score,
self.unk,
)
output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad)
output_scores = _fill(output_scores, can_ins_word, _scores, 0)
attn = _fill(attn, can_ins_word, word_ins_attn, 0.0)
if history is not None:
history.append(output_tokens.clone())
# delete some unnecessary paddings
cut_off = output_tokens.ne(self.pad).sum(1).max()
output_tokens = output_tokens[:, :cut_off]
output_scores = output_scores[:, :cut_off]
attn = None if attn is None else attn[:, :cut_off, :]
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=attn,
history=history,
)
def initialize_output_tokens(self, encoder_out, src_tokens):
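        # Decoding starts from the minimal two-token canvas [<s>, </s>]; the refinement
        # loop in forward_decoder then inserts, fills in and deletes tokens from there.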
initial_output_tokens = src_tokens.new_zeros(src_tokens.size(0), 2)
initial_output_tokens[:, 0] = self.bos
initial_output_tokens[:, 1] = self.eos
initial_output_scores = initial_output_tokens.new_zeros(
*initial_output_tokens.size()
).type_as(encoder_out["encoder_out"][0])
return DecoderOut(
output_tokens=initial_output_tokens,
output_scores=initial_output_scores,
attn=None,
step=0,
max_step=0,
history=None,
)
class LevenshteinTransformerDecoder(FairseqNATDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(
args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
)
self.dictionary = dictionary
self.bos = dictionary.bos()
self.unk = dictionary.unk()
self.eos = dictionary.eos()
self.sampling_for_deletion = getattr(args, "sampling_for_deletion", False)
self.embed_mask_ins = Embedding(256, self.output_embed_dim * 2, None)
self.embed_word_del = Embedding(2, self.output_embed_dim, None)
# del_word, ins_mask, ins_word
self.early_exit = [int(i) for i in args.early_exit.split(",")]
assert len(self.early_exit) == 3
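        # e.g. --early-exit "6,6,6" runs 6 decoder layers before the deletion head,
        # 6 before the placeholder-insertion head and 6 before the word-insertion head.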
# copy layers for mask-predict/deletion
self.layers_msk = None
if getattr(args, "no_share_maskpredictor", False):
self.layers_msk = nn.ModuleList(
[
TransformerDecoderLayer(args, no_encoder_attn)
for _ in range(self.early_exit[1])
]
)
self.layers_del = None
if getattr(args, "no_share_discriminator", False):
self.layers_del = nn.ModuleList(
[
TransformerDecoderLayer(args, no_encoder_attn)
for _ in range(self.early_exit[0])
]
)
if getattr(args, "share_discriminator_maskpredictor", False):
assert getattr(
args, "no_share_discriminator", False
            ), "must set separate discriminator"
self.layers_msk = self.layers_del
def extract_features(
self,
prev_output_tokens,
encoder_out=None,
early_exit=None,
layers=None,
**unused
):
"""
Similar to *forward* but only return features.
Inputs:
prev_output_tokens: Tensor(B, T)
encoder_out: a dictionary of hidden states and masks
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
            Note that the Levenshtein Transformer decoder uses full (non-causal)
            self-attention over all generated tokens.
"""
# embed positions
positions = (
self.embed_positions(prev_output_tokens)
if self.embed_positions is not None
else None
)
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
decoder_padding_mask = prev_output_tokens.eq(self.padding_idx)
layers = self.layers if layers is None else layers
early_exit = len(layers) if early_exit is None else early_exit
for _, layer in enumerate(layers[:early_exit]):
x, attn, _ = layer(
x,
encoder_out["encoder_out"][0]
if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0)
else None,
encoder_out["encoder_padding_mask"][0]
if (
encoder_out is not None
and len(encoder_out["encoder_padding_mask"]) > 0
)
else None,
self_attn_mask=None,
self_attn_padding_mask=decoder_padding_mask,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": attn, "inner_states": inner_states}
@ensemble_decoder
def forward_mask_ins(self, normalize, encoder_out, prev_output_tokens, **unused):
features, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
early_exit=self.early_exit[1],
layers=self.layers_msk,
**unused
)
features_cat = torch.cat([features[:, :-1, :], features[:, 1:, :]], 2)
decoder_out = F.linear(features_cat, self.embed_mask_ins.weight)
if normalize:
return F.log_softmax(decoder_out, -1), extra["attn"]
return decoder_out, extra["attn"]
@ensemble_decoder
def forward_word_ins(self, normalize, encoder_out, prev_output_tokens, **unused):
features, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
early_exit=self.early_exit[2],
layers=self.layers,
**unused
)
decoder_out = self.output_layer(features)
if normalize:
return F.log_softmax(decoder_out, -1), extra["attn"]
return decoder_out, extra["attn"]
@ensemble_decoder
def forward_word_del(self, normalize, encoder_out, prev_output_tokens, **unused):
features, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
early_exit=self.early_exit[0],
layers=self.layers_del,
**unused
)
decoder_out = F.linear(features, self.embed_word_del.weight)
if normalize:
return F.log_softmax(decoder_out, -1), extra["attn"]
return decoder_out, extra["attn"]
@register_model_architecture("levenshtein_transformer", "levenshtein_transformer")
def levenshtein_base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.sampling_for_deletion = getattr(args, "sampling_for_deletion", False)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.early_exit = getattr(args, "early_exit", "6,6,6")
args.no_share_discriminator = getattr(args, "no_share_discriminator", False)
args.no_share_maskpredictor = getattr(args, "no_share_maskpredictor", False)
args.share_discriminator_maskpredictor = getattr(
args, "share_discriminator_maskpredictor", False
)
args.no_share_last_layer = getattr(args, "no_share_last_layer", False)
@register_model_architecture(
"levenshtein_transformer", "levenshtein_transformer_wmt_en_de"
)
def levenshtein_transformer_wmt_en_de(args):
levenshtein_base_architecture(args)
# similar parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture(
"levenshtein_transformer", "levenshtein_transformer_vaswani_wmt_en_de_big"
)
def levenshtein_transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
levenshtein_base_architecture(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture(
"levenshtein_transformer", "levenshtein_transformer_wmt_en_de_big"
)
def levenshtein_transformer_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
levenshtein_transformer_vaswani_wmt_en_de_big(args)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/nat/levenshtein_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq.models.nat import (
_apply_del_words,
_apply_ins_masks,
_apply_ins_words,
_fill,
_skip,
_skip_encoder_out,
)
class _EnsembleModelEncoder(object):
def __init__(self, models):
self.models = models
def reorder_encoder_out(self, encoder_outs, new_order):
encoder_outs = [
model.encoder.reorder_encoder_out(encoder_out, new_order)
for model, encoder_out in zip(self.models, encoder_outs)
]
return encoder_outs
class BasicEnsembleModel(torch.nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models = torch.nn.ModuleList(models)
self.bos = self.models[0].decoder.dictionary.bos()
self.eos = self.models[0].decoder.dictionary.eos()
self.pad = self.models[0].decoder.dictionary.pad()
self.unk = self.models[0].decoder.dictionary.unk()
self.encoder = _EnsembleModelEncoder(self.models)
def has_encoder(self):
return hasattr(self.models[0], "encoder")
def max_decoder_positions(self):
return min(m.max_decoder_positions() for m in self.models)
@torch.no_grad()
def forward_encoder(self, encoder_input):
if not self.has_encoder():
return None
return [model.forward_encoder(encoder_input) for model in self.models]
@torch.no_grad()
def forward_decoder(self, *inputs):
raise NotImplementedError
def initialize_output_tokens(self, *inputs):
raise NotImplementedError
class EnsembleLevT(BasicEnsembleModel):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__(models)
@torch.no_grad()
def forward_decoder(
self, decoder_out, encoder_outs, eos_penalty=0.0, max_ratio=None, **kwargs
):
        # LevT ensembling
        # A pipeline of three steps: word deletion, placeholder insertion, and word insertion.
        # Scores are averaged within each step, and the steps must run as a pipeline because
        # each step depends on the output of the previous one.
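        # Editor's note: "averaging" here means averaging the models' probabilities, i.e.
        #     log p_avg = logsumexp_i(log p_i) - log(N),
        # which is what the torch.logsumexp(...) - math.log(len(self.models)) calls below compute.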
# deletion
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
attn = decoder_out.attn
bsz = output_tokens.size(0)
if max_ratio is None:
max_lens = output_tokens.new().fill_(255)
else:
if not encoder_outs[0]["encoder_padding_mask"]:
src_lens = (
encoder_outs[0]["encoder_out"][0]
.new(bsz)
.fill_(encoder_outs[0]["encoder_out"][0].size(1))
)
else:
src_lens = (~encoder_outs[0]["encoder_padding_mask"][0]).sum(1)
max_lens = (src_lens * max_ratio).clamp(min=10).long()
# delete words
# do not delete tokens if it is <s> </s>
can_del_word = output_tokens.ne(self.pad).sum(1) > 2
        if can_del_word.sum() != 0:  # skip the deletion step entirely if no sentence has deletable words
output_tokens, output_scores, attn = self.forward_word_del(
encoder_outs,
output_tokens,
output_scores,
attn,
can_del_word,
)
# insert placeholders
can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens
if can_ins_mask.sum() != 0:
output_tokens, output_scores = self.forward_mask_ins(
encoder_outs,
output_tokens,
output_scores,
can_ins_mask,
eos_penalty,
max_lens,
)
# insert words
can_ins_word = output_tokens.eq(self.unk).sum(1) > 0
if can_ins_word.sum() != 0:
output_tokens, output_scores, attn = self.forward_word_ins(
encoder_outs,
output_tokens,
output_scores,
attn,
can_ins_word,
)
# delete some unnecessary paddings
cut_off = output_tokens.ne(self.pad).sum(1).max()
output_tokens = output_tokens[:, :cut_off]
output_scores = output_scores[:, :cut_off]
attn = None if attn is None else attn[:, :cut_off, :]
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=attn,
history=None,
)
def forward_word_del(
self, encoder_outs, output_tokens, output_scores, attn, can_del_word
):
word_del_score_avg = []
word_del_attn_avg = []
for model, encoder_out in zip(self.models, encoder_outs):
word_del_out, word_del_attn = model.decoder.forward_word_del(
_skip(output_tokens, can_del_word),
_skip_encoder_out(model.encoder, encoder_out, can_del_word),
)
word_del_score = F.log_softmax(word_del_out, 2)
word_del_score_avg.append(word_del_score)
word_del_attn_avg.append(word_del_attn)
word_del_score_avg = torch.logsumexp(
torch.stack(word_del_score_avg, dim=0), dim=0
) - math.log(len(self.models))
word_del_pred = word_del_score_avg.max(-1)[1].bool()
if word_del_attn_avg[0] is not None:
word_del_attn_avg = torch.stack(word_del_attn_avg, dim=0) / len(self.models)
else:
word_del_attn_avg = None
_tokens, _scores, _attn = _apply_del_words(
output_tokens[can_del_word],
output_scores[can_del_word],
word_del_attn_avg,
word_del_pred,
self.pad,
self.bos,
self.eos,
)
output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad)
output_scores = _fill(output_scores, can_del_word, _scores, 0)
attn = _fill(attn, can_del_word, _attn, 0.0)
return output_tokens, output_scores, attn
def forward_mask_ins(
self,
encoder_outs,
output_tokens,
output_scores,
can_ins_mask,
eos_penalty,
max_lens,
):
mask_ins_score_avg = []
for model, encoder_out in zip(self.models, encoder_outs):
mask_ins_out, _ = model.decoder.forward_mask_ins(
_skip(output_tokens, can_ins_mask),
_skip_encoder_out(model.encoder, encoder_out, can_ins_mask),
)
mask_ins_score = F.log_softmax(mask_ins_out, 2)
if eos_penalty > 0.0:
mask_ins_score[:, :, 0] -= eos_penalty
mask_ins_score_avg.append(mask_ins_score)
mask_ins_score_avg = torch.logsumexp(
torch.stack(mask_ins_score_avg, dim=0), dim=0
) - math.log(len(self.models))
mask_ins_pred = mask_ins_score_avg.max(-1)[1]
mask_ins_pred = torch.min(
mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred)
)
_tokens, _scores = _apply_ins_masks(
output_tokens[can_ins_mask],
output_scores[can_ins_mask],
mask_ins_pred,
self.pad,
self.unk,
self.eos,
)
output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad)
output_scores = _fill(output_scores, can_ins_mask, _scores, 0)
return output_tokens, output_scores
def forward_word_ins(
self, encoder_outs, output_tokens, output_scores, attn, can_ins_word
):
word_ins_score_avg = []
word_ins_attn_avg = []
for model, encoder_out in zip(self.models, encoder_outs):
word_ins_out, word_ins_attn = model.decoder.forward_word_ins(
_skip(output_tokens, can_ins_word),
_skip_encoder_out(model.encoder, encoder_out, can_ins_word),
)
word_ins_score = F.log_softmax(word_ins_out, 2)
word_ins_score_avg.append(word_ins_score)
word_ins_attn_avg.append(word_ins_attn)
word_ins_score_avg = torch.logsumexp(
torch.stack(word_ins_score_avg, dim=0), dim=0
) - math.log(len(self.models))
if word_ins_attn_avg[0] is not None:
word_ins_attn_avg = torch.stack(word_ins_attn_avg, dim=0) / len(self.models)
else:
word_ins_attn_avg = None
word_ins_score_max, word_ins_pred = word_ins_score_avg.max(-1)
_tokens, _scores = _apply_ins_words(
output_tokens[can_ins_word],
output_scores[can_ins_word],
word_ins_pred,
word_ins_score_max,
self.unk,
)
output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad)
output_scores = _fill(output_scores, can_ins_word, _scores, 0)
attn = _fill(attn, can_ins_word, word_ins_attn, 0.0)
return output_tokens, output_scores, attn
def initialize_output_tokens(self, encoder_outs, src_tokens):
# LevT doesn't do length prediction.
return self.models[0].initialize_output_tokens(encoder_outs[0], src_tokens)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/nat/nonautoregressive_ensembles.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder
from fairseq.models.transformer import Embedding
from fairseq.modules.transformer_sentence_encoder import init_bert_params
def _mean_pooling(enc_feats, src_masks):
# enc_feats: T x B x C
# src_masks: B x T or None
if src_masks is None:
enc_feats = enc_feats.mean(0)
else:
src_masks = (~src_masks).transpose(0, 1).type_as(enc_feats)
enc_feats = (
(enc_feats / src_masks.sum(0)[None, :, None]) * src_masks[:, :, None]
).sum(0)
return enc_feats
def _argmax(x, dim):
return (x == x.max(dim, keepdim=True)[0]).type_as(x)
def _uniform_assignment(src_lens, trg_lens):
max_trg_len = trg_lens.max()
steps = (src_lens.float() - 1) / (trg_lens.float() - 1) # step-size
    # target positions 0 .. max_trg_len - 1
index_t = utils.new_arange(trg_lens, max_trg_len).float()
index_t = steps[:, None] * index_t[None, :] # batch_size X max_trg_len
index_t = torch.round(index_t).long().detach()
return index_t
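# Worked example for _uniform_assignment above (editor's note, values are illustrative):
# with src_len=5 and trg_len=3, steps = (5 - 1) / (3 - 1) = 2.0, so
# index_t = round([0, 1, 2] * 2.0) = [0, 2, 4] -- each target position is mapped to a
# uniformly spaced source position, which is later used to copy source embeddings.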
@register_model("nonautoregressive_transformer")
class NATransformerModel(FairseqNATModel):
@property
def allow_length_beam(self):
return True
@staticmethod
def add_args(parser):
FairseqNATModel.add_args(parser)
# length prediction
parser.add_argument(
"--src-embedding-copy",
action="store_true",
help="copy encoder word embeddings as the initial input of the decoder",
)
parser.add_argument(
"--pred-length-offset",
action="store_true",
help="predicting the length difference between the target and source sentences",
)
parser.add_argument(
"--sg-length-pred",
action="store_true",
help="stop the gradients back-propagated from the length predictor",
)
parser.add_argument(
"--length-loss-factor",
type=float,
help="weights on the length prediction loss",
)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
decoder = NATransformerDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
decoder.apply(init_bert_params)
return decoder
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# length prediction
length_out = self.decoder.forward_length(
normalize=False, encoder_out=encoder_out
)
length_tgt = self.decoder.forward_length_prediction(
length_out, encoder_out, tgt_tokens
)
# decoding
word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
return {
"word_ins": {
"out": word_ins_out,
"tgt": tgt_tokens,
"mask": tgt_tokens.ne(self.pad),
"ls": self.args.label_smoothing,
"nll_loss": True,
},
"length": {
"out": length_out,
"tgt": length_tgt,
"factor": self.decoder.length_loss_factor,
},
}
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
step = decoder_out.step
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
# execute the decoder
output_masks = output_tokens.ne(self.pad)
_scores, _tokens = self.decoder(
normalize=True,
prev_output_tokens=output_tokens,
encoder_out=encoder_out,
step=step,
).max(-1)
output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
output_scores.masked_scatter_(output_masks, _scores[output_masks])
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=history,
)
def initialize_output_tokens(self, encoder_out, src_tokens):
# length prediction
length_tgt = self.decoder.forward_length_prediction(
self.decoder.forward_length(normalize=True, encoder_out=encoder_out),
encoder_out=encoder_out,
)
max_length = length_tgt.clamp_(min=2).max()
idx_length = utils.new_arange(src_tokens, max_length)
initial_output_tokens = src_tokens.new_zeros(
src_tokens.size(0), max_length
).fill_(self.pad)
initial_output_tokens.masked_fill_(
idx_length[None, :] < length_tgt[:, None], self.unk
)
initial_output_tokens[:, 0] = self.bos
initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos)
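        # (editor's note: the resulting canvas is "<s> <unk> ... <unk> </s>", right-padded to
        # the batch max length; every <unk> is then filled in by the NAT decoder in parallel.)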
initial_output_scores = initial_output_tokens.new_zeros(
*initial_output_tokens.size()
).type_as(encoder_out["encoder_out"][0])
return DecoderOut(
output_tokens=initial_output_tokens,
output_scores=initial_output_scores,
attn=None,
step=0,
max_step=0,
history=None,
)
def regenerate_length_beam(self, decoder_out, beam_size):
output_tokens = decoder_out.output_tokens
length_tgt = output_tokens.ne(self.pad).sum(1)
length_tgt = (
length_tgt[:, None]
+ utils.new_arange(length_tgt, 1, beam_size)
- beam_size // 2
)
length_tgt = length_tgt.view(-1).clamp_(min=2)
max_length = length_tgt.max()
idx_length = utils.new_arange(length_tgt, max_length)
initial_output_tokens = output_tokens.new_zeros(
length_tgt.size(0), max_length
).fill_(self.pad)
initial_output_tokens.masked_fill_(
idx_length[None, :] < length_tgt[:, None], self.unk
)
initial_output_tokens[:, 0] = self.bos
initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos)
initial_output_scores = initial_output_tokens.new_zeros(
*initial_output_tokens.size()
).type_as(decoder_out.output_scores)
return decoder_out._replace(
output_tokens=initial_output_tokens, output_scores=initial_output_scores
)
class NATransformerDecoder(FairseqNATDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(
args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
)
self.dictionary = dictionary
self.bos = dictionary.bos()
self.unk = dictionary.unk()
self.eos = dictionary.eos()
self.encoder_embed_dim = args.encoder_embed_dim
self.sg_length_pred = getattr(args, "sg_length_pred", False)
self.pred_length_offset = getattr(args, "pred_length_offset", False)
self.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
self.src_embedding_copy = getattr(args, "src_embedding_copy", False)
self.embed_length = Embedding(256, self.encoder_embed_dim, None)
@ensemble_decoder
def forward(self, normalize, encoder_out, prev_output_tokens, step=0, **unused):
features, _ = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
embedding_copy=(step == 0) & self.src_embedding_copy,
)
decoder_out = self.output_layer(features)
return F.log_softmax(decoder_out, -1) if normalize else decoder_out
@ensemble_decoder
def forward_length(self, normalize, encoder_out):
enc_feats = encoder_out["encoder_out"][0] # T x B x C
if len(encoder_out["encoder_padding_mask"]) > 0:
src_masks = encoder_out["encoder_padding_mask"][0] # B x T
else:
src_masks = None
enc_feats = _mean_pooling(enc_feats, src_masks)
if self.sg_length_pred:
enc_feats = enc_feats.detach()
length_out = F.linear(enc_feats, self.embed_length.weight)
return F.log_softmax(length_out, -1) if normalize else length_out
def extract_features(
self,
prev_output_tokens,
encoder_out=None,
early_exit=None,
embedding_copy=False,
**unused
):
"""
Similar to *forward* but only return features.
Inputs:
prev_output_tokens: Tensor(B, T)
encoder_out: a dictionary of hidden states and masks
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
            unlike an autoregressive decoder, this decoder uses full (non-causal) self-attention over all generated tokens
"""
# embedding
if embedding_copy:
src_embd = encoder_out["encoder_embedding"][0]
if len(encoder_out["encoder_padding_mask"]) > 0:
src_mask = encoder_out["encoder_padding_mask"][0]
else:
src_mask = None
src_mask = (
~src_mask
if src_mask is not None
else prev_output_tokens.new_ones(*src_embd.size()[:2]).bool()
)
x, decoder_padding_mask = self.forward_embedding(
prev_output_tokens,
self.forward_copying_source(
src_embd, src_mask, prev_output_tokens.ne(self.padding_idx)
),
)
else:
x, decoder_padding_mask = self.forward_embedding(prev_output_tokens)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
for i, layer in enumerate(self.layers):
# early exit from the decoder.
if (early_exit is not None) and (i >= early_exit):
break
x, attn, _ = layer(
x,
encoder_out["encoder_out"][0]
if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0)
else None,
encoder_out["encoder_padding_mask"][0]
if (
encoder_out is not None
and len(encoder_out["encoder_padding_mask"]) > 0
)
else None,
self_attn_mask=None,
self_attn_padding_mask=decoder_padding_mask,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": attn, "inner_states": inner_states}
def forward_embedding(self, prev_output_tokens, states=None):
# embed positions
positions = (
self.embed_positions(prev_output_tokens)
if self.embed_positions is not None
else None
)
# embed tokens and positions
if states is None:
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
else:
x = states
if positions is not None:
x += positions
x = self.dropout_module(x)
decoder_padding_mask = prev_output_tokens.eq(self.padding_idx)
return x, decoder_padding_mask
def forward_copying_source(self, src_embeds, src_masks, tgt_masks):
length_sources = src_masks.sum(1)
length_targets = tgt_masks.sum(1)
mapped_inputs = _uniform_assignment(length_sources, length_targets).masked_fill(
~tgt_masks, 0
)
copied_embedding = torch.gather(
src_embeds,
1,
mapped_inputs.unsqueeze(-1).expand(
*mapped_inputs.size(), src_embeds.size(-1)
),
)
return copied_embedding
def forward_length_prediction(self, length_out, encoder_out, tgt_tokens=None):
enc_feats = encoder_out["encoder_out"][0] # T x B x C
if len(encoder_out["encoder_padding_mask"]) > 0:
src_masks = encoder_out["encoder_padding_mask"][0] # B x T
else:
src_masks = None
if self.pred_length_offset:
if src_masks is None:
src_lengs = enc_feats.new_ones(enc_feats.size(1)).fill_(
enc_feats.size(0)
)
else:
src_lengs = (~src_masks).transpose(0, 1).type_as(enc_feats).sum(0)
src_lengs = src_lengs.long()
if tgt_tokens is not None:
# obtain the length target
tgt_lengs = tgt_tokens.ne(self.padding_idx).sum(1).long()
if self.pred_length_offset:
length_tgt = tgt_lengs - src_lengs + 128
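                # e.g. (editor's illustrative values) tgt_len=22, src_len=20 -> 22 - 20 + 128 = 130;
                # the +128 offset centres the length *difference* inside the [0, 255] class range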
else:
length_tgt = tgt_lengs
length_tgt = length_tgt.clamp(min=0, max=255)
else:
# predict the length target (greedy for now)
            # TODO: implement length-beam
pred_lengs = length_out.max(-1)[1]
if self.pred_length_offset:
length_tgt = pred_lengs - 128 + src_lengs
else:
length_tgt = pred_lengs
return length_tgt
@register_model_architecture(
"nonautoregressive_transformer", "nonautoregressive_transformer"
)
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# --- special arguments ---
args.sg_length_pred = getattr(args, "sg_length_pred", False)
args.pred_length_offset = getattr(args, "pred_length_offset", False)
args.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
args.src_embedding_copy = getattr(args, "src_embedding_copy", False)
@register_model_architecture(
"nonautoregressive_transformer", "nonautoregressive_transformer_wmt_en_de"
)
def nonautoregressive_transformer_wmt_en_de(args):
base_architecture(args)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/nat/nonautoregressive_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This file implements:
Ghazvininejad, Marjan, et al.
"Constant-time machine translation with conditional masked language models."
arXiv preprint arXiv:1904.09324 (2019).
"""
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel
from fairseq.utils import new_arange
def _skeptical_unmasking(output_scores, output_masks, p):
sorted_index = output_scores.sort(-1)[1]
boundary_len = (
(output_masks.sum(1, keepdim=True).type_as(output_scores) - 2) * p
).long()
skeptical_mask = new_arange(output_masks) < boundary_len
return skeptical_mask.scatter(1, sorted_index, skeptical_mask)
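# Illustrative example for _skeptical_unmasking above (editor's note, values not from the
# source): for a sentence with 10 non-pad tokens (including <s> and </s>) and p = 0.5,
# boundary_len = (10 - 2) * 0.5 = 4, so the 4 lowest-scoring positions are re-masked and
# re-predicted in the next refinement iteration.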
@register_model("cmlm_transformer")
class CMLMNATransformerModel(NATransformerModel):
@staticmethod
def add_args(parser):
NATransformerModel.add_args(parser)
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
assert not self.decoder.src_embedding_copy, "do not support embedding copy."
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# length prediction
length_out = self.decoder.forward_length(
normalize=False, encoder_out=encoder_out
)
length_tgt = self.decoder.forward_length_prediction(
length_out, encoder_out, tgt_tokens
)
# decoding
word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
word_ins_mask = prev_output_tokens.eq(self.unk)
return {
"word_ins": {
"out": word_ins_out,
"tgt": tgt_tokens,
"mask": word_ins_mask,
"ls": self.args.label_smoothing,
"nll_loss": True,
},
"length": {
"out": length_out,
"tgt": length_tgt,
"factor": self.decoder.length_loss_factor,
},
}
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
step = decoder_out.step
max_step = decoder_out.max_step
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
# execute the decoder
output_masks = output_tokens.eq(self.unk)
_scores, _tokens = self.decoder(
normalize=True,
prev_output_tokens=output_tokens,
encoder_out=encoder_out,
).max(-1)
output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
output_scores.masked_scatter_(output_masks, _scores[output_masks])
if history is not None:
history.append(output_tokens.clone())
        # skeptical decoding (depends on the maximum number of decoding steps)
if (step + 1) < max_step:
skeptical_mask = _skeptical_unmasking(
output_scores, output_tokens.ne(self.pad), 1 - (step + 1) / max_step
)
output_tokens.masked_fill_(skeptical_mask, self.unk)
output_scores.masked_fill_(skeptical_mask, 0.0)
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=history,
)
@register_model_architecture("cmlm_transformer", "cmlm_transformer")
def cmlm_base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", True)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# --- special arguments ---
args.sg_length_pred = getattr(args, "sg_length_pred", False)
args.pred_length_offset = getattr(args, "pred_length_offset", False)
args.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
args.ngram_predictor = getattr(args, "ngram_predictor", 1)
args.src_embedding_copy = getattr(args, "src_embedding_copy", False)
@register_model_architecture("cmlm_transformer", "cmlm_transformer_wmt_en_de")
def cmlm_wmt_en_de(args):
cmlm_base_architecture(args)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/nat/cmlm_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel, base_architecture
from fairseq.modules import DynamicCRF
@register_model("nacrf_transformer")
class NACRFTransformerModel(NATransformerModel):
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
self.crf_layer = DynamicCRF(
num_embedding=len(self.tgt_dict),
low_rank=args.crf_lowrank_approx,
beam_size=args.crf_beam_approx,
)
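        # Editor's note: DynamicCRF avoids the full |V| x |V| transition matrix by using a
        # low-rank factorization (crf_lowrank_approx) and a per-position beam
        # (crf_beam_approx) when approximating the normalizing factor and decoding.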
@property
def allow_ensemble(self):
return False
@staticmethod
def add_args(parser):
NATransformerModel.add_args(parser)
parser.add_argument(
"--crf-lowrank-approx",
type=int,
help="the dimension of low-rank approximation of transition",
)
parser.add_argument(
"--crf-beam-approx",
type=int,
help="the beam size for apporixmating the normalizing factor",
)
parser.add_argument(
"--word-ins-loss-factor",
type=float,
help="weights on NAT loss used to co-training with CRF loss.",
)
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# length prediction
length_out = self.decoder.forward_length(
normalize=False, encoder_out=encoder_out
)
length_tgt = self.decoder.forward_length_prediction(
length_out, encoder_out, tgt_tokens
)
# decoding
word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
word_ins_tgt, word_ins_mask = tgt_tokens, tgt_tokens.ne(self.pad)
# compute the log-likelihood of CRF
crf_nll = -self.crf_layer(word_ins_out, word_ins_tgt, word_ins_mask)
crf_nll = (crf_nll / word_ins_mask.type_as(crf_nll).sum(-1)).mean()
return {
"word_ins": {
"out": word_ins_out,
"tgt": word_ins_tgt,
"mask": word_ins_mask,
"ls": self.args.label_smoothing,
"nll_loss": True,
"factor": self.args.word_ins_loss_factor,
},
"word_crf": {"loss": crf_nll},
"length": {
"out": length_out,
"tgt": length_tgt,
"factor": self.decoder.length_loss_factor,
},
}
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
# execute the decoder and get emission scores
output_masks = output_tokens.ne(self.pad)
word_ins_out = self.decoder(
normalize=False, prev_output_tokens=output_tokens, encoder_out=encoder_out
)
# run viterbi decoding through CRF
_scores, _tokens = self.crf_layer.forward_decoder(word_ins_out, output_masks)
output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
output_scores.masked_scatter_(output_masks, _scores[output_masks])
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=history,
)
@register_model_architecture("nacrf_transformer", "nacrf_transformer")
def nacrf_base_architecture(args):
args.crf_lowrank_approx = getattr(args, "crf_lowrank_approx", 32)
args.crf_beam_approx = getattr(args, "crf_beam_approx", 64)
args.word_ins_loss_factor = getattr(args, "word_ins_loss_factor", 0.5)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
base_architecture(args)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/nat/nat_crf_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import (
FairseqNATModel,
LevenshteinTransformerDecoder,
LevenshteinTransformerModel,
ensemble_decoder,
)
from fairseq.models.transformer import Linear
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import new_arange
class NegativeDistanceScore(object):
def __init__(self):
# pre-compute some values
self.scores = {}
self.scores[0.5] = self.compute_score_full(50, 0.5)
self.scores[1.0] = self.compute_score_full(50, 1.0)
self.scores[2.0] = self.compute_score_full(50, 2.0)
def __call__(self, i, L, tau):
if (tau is None) or (tau > 1000):
return 1 / L
if tau in self.scores:
if L < self.scores[tau].shape[0]:
return self.scores[tau][L - 1, i]
return self.compute_score(L, tau)[i]
def compute_score(self, L, tau):
s = np.array([-abs(L / 2 - i) / tau for i in range(L)])
s = np.exp(s - s.max())
return s / s.sum()
def compute_score_full(self, L, tau):
s = -abs(np.arange(0, L - 1)[:, None] / 2 - np.arange(L)[None, :]) / tau
s = np.tril(s, 0) + np.triu(s - float("inf"), 1)
s = np.exp(s - s.max(1, keepdims=True))
return s / s.sum(1, keepdims=True)
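# Editor's note (interpretation, not from the original comments): these scores give each slot
# position i in a span of length L a weight proportional to exp(-|L/2 - i| / tau), i.e. a
# softmax over negative distance to the span centre, so centre insertions are preferred;
# tau=None (or a very large tau) falls back to a uniform 1/L weighting, as in __call__ above.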
neg_scorer = NegativeDistanceScore()
def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None):
try:
from fairseq import libnat
except ImportError as e:
import sys
sys.stderr.write("ERROR: missing libnat. run `pip install --editable .`\n")
raise e
B = in_tokens.size(0)
T = in_tokens.size(1)
V = vocab_size
with torch.cuda.device_of(in_tokens):
in_tokens_list = [
[t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist())
]
out_tokens_list = [
[t for t in s if t != padding_idx]
for i, s in enumerate(out_tokens.tolist())
]
full_labels = libnat.suggested_ed2_path(
in_tokens_list, out_tokens_list, padding_idx
)
insert_labels = [a[:-1] for a in full_labels]
    # numericalize: build a dense (B, T-1, V) tensor of soft insertion labels
insert_label_tensors = in_tokens.new_zeros(B * (T - 1) * V).float()
insert_index, insert_labels = zip(
*[
(w + (j + i * (T - 1)) * V, neg_scorer(k, len(label), tau))
for i, labels in enumerate(insert_labels)
for j, label in enumerate(labels[1:-1])
for k, w in enumerate(label)
]
) # HACK 1:-1
insert_index, insert_labels = [
torch.tensor(list(a), device=in_tokens.device)
for a in [insert_index, insert_labels]
]
insert_label_tensors.scatter_(0, insert_index.long(), insert_labels)
insert_label_tensors = insert_label_tensors.view(B, T - 1, V)
return insert_label_tensors
def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, padding_idx):
padding_masks = in_tokens[:, 1:].eq(padding_idx)
word_ins_scores.masked_fill_(padding_masks, 0.0)
word_ins_pred.masked_fill_(padding_masks, padding_idx)
in_coords = new_arange(in_tokens).type_as(in_scores)
    # shift all padding predictions to infinity so they sort to the (truncatable) right edge
out_coords = (in_coords[:, 1:] - 0.5).masked_fill(
word_ins_pred.eq(padding_idx), float("inf")
)
out_coords = torch.cat([in_coords, out_coords], 1).sort(-1)[1]
out_tokens = torch.cat([in_tokens, word_ins_pred], 1).gather(1, out_coords)
out_scores = torch.cat([in_scores, word_ins_scores], 1).gather(1, out_coords)
return out_tokens, out_scores
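# Illustrative example for _apply_ins_words above (editor's note): existing tokens sit at
# integer coordinates [0, 1, 2, ...] while newly predicted words get half-integer coordinates
# [0.5, 1.5, ...]; sorting the concatenated coordinates therefore interleaves the insertions
# between the original tokens, with padding predictions pushed to +inf (the right edge).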
@register_model("insertion_transformer")
class InsertionTransformerModel(LevenshteinTransformerModel):
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
@staticmethod
def add_args(parser):
FairseqNATModel.add_args(parser)
parser.add_argument("--label-tau", default=None, type=float)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
decoder = InsertionTransformerDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
decoder.apply(init_bert_params)
return decoder
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
assert tgt_tokens is not None, "forward function only supports training."
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# generate training labels for insertion
word_ins_out = self.decoder.forward_word_ins(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
word_ins_tgt = _get_ins_targets(
prev_output_tokens,
tgt_tokens,
self.pad,
self.unk,
len(self.tgt_dict),
tau=self.decoder.label_tau,
).type_as(word_ins_out)
word_ins_masks = prev_output_tokens[:, 1:].ne(self.pad)
return {
"word_ins": {
"out": word_ins_out,
"tgt": word_ins_tgt,
"mask": word_ins_masks,
"ls": self.args.label_smoothing,
"nll_loss": True,
}
}
def forward_decoder(
self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs
):
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
# TODO: decoding for InsertionTransformer
word_ins_score = self.decoder.forward_word_ins(
normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out
)
if eos_penalty > 0.0:
word_ins_score[:, :, self.pad] -= eos_penalty
word_ins_score, word_ins_pred = word_ins_score.max(-1)
output_tokens, output_scores = _apply_ins_words(
output_tokens, output_scores, word_ins_pred, word_ins_score, self.pad
)
# delete some unnecessary paddings
cut_off = output_tokens.ne(self.pad).sum(1).max()
output_tokens = output_tokens[:, :cut_off]
output_scores = output_scores[:, :cut_off]
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=history,
)
class InsertionTransformerDecoder(LevenshteinTransformerDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
# use the TransformerDecoder's __init__
super(LevenshteinTransformerDecoder, self).__init__(
args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
)
self.dictionary = dictionary
self.bos = dictionary.bos()
self.unk = dictionary.unk()
self.eos = dictionary.eos()
self.pool_out = Linear(self.output_embed_dim * 2, self.output_embed_dim)
self.label_tau = getattr(args, "label_tau", None)
@ensemble_decoder
def forward_word_ins(self, normalize, encoder_out, prev_output_tokens):
features = self.extract_features(prev_output_tokens, encoder_out=encoder_out)[0]
features = self.pool_out(
torch.cat([features[:, :-1, :], features[:, 1:, :]], 2)
)
decoder_out = self.output_layer(features)
return F.log_softmax(decoder_out, -1) if normalize else decoder_out
def forward_mask_ins(self, *args, **kwargs):
raise NotImplementedError
def forward_word_del(self, *args, **kwargs):
raise NotImplementedError
@register_model_architecture("insertion_transformer", "insertion_transformer")
def insertion_base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# special for insertion transformer
args.label_tau = getattr(args, "label_tau", None)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/nat/insertion_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .hub_interface import * # noqa
from .model import * # noqa
| EXA-1-master | exa/libraries/fairseq/fairseq/models/bart/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
BART: Denoising Sequence-to-Sequence Pre-training for
Natural Language Generation, Translation, and Comprehension
"""
import logging
from typing import Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import TransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .hub_interface import BARTHubInterface
logger = logging.getLogger(__name__)
@register_model("bart")
class BARTModel(TransformerModel):
__jit_unused_properties__ = ["supported_targets"]
@classmethod
def hub_models(cls):
return {
"bart.base": "http://dl.fbaipublicfiles.com/fairseq/models/bart.base.tar.gz",
"bart.large": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz",
"bart.large.mnli": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz",
"bart.large.cnn": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.cnn.tar.gz",
"bart.large.xsum": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.xsum.tar.gz",
}
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
# We follow BERT's random weight initialization
self.apply(init_bert_params)
self.classification_heads = nn.ModuleDict()
if hasattr(self.encoder, "dictionary"):
self.eos: int = self.encoder.dictionary.eos()
@staticmethod
def add_args(parser):
super(BARTModel, BARTModel).add_args(parser)
parser.add_argument(
"--pooler-dropout",
type=float,
metavar="D",
help="dropout probability in the masked_lm pooler layers",
)
parser.add_argument(
"--pooler-activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use for pooler layer",
)
parser.add_argument(
"--spectral-norm-classification-head",
action="store_true",
help="Apply spectral normalization on the classification head",
)
@property
def supported_targets(self):
return {"self"}
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
features_only: bool = False,
classification_head_name: Optional[str] = None,
token_embeddings: Optional[torch.Tensor] = None,
return_all_hiddens: bool = True,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
if classification_head_name is not None:
features_only = True
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
token_embeddings=token_embeddings,
return_all_hiddens=return_all_hiddens,
)
x, extra = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
)
eos: int = self.eos
if classification_head_name is not None:
sentence_representation = x[src_tokens.eq(eos), :].view(
x.size(0), -1, x.size(-1)
)[:, -1, :]
for k, head in self.classification_heads.items():
                # TorchScript only supports iterating over a ModuleDict, so look up the head by key in a loop
if k == classification_head_name:
x = head(sentence_representation)
break
return x, extra
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="gpt2",
sample_break_mode="eos",
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
sample_break_mode=sample_break_mode,
**kwargs,
)
return BARTHubInterface(x["args"], x["task"], x["models"][0])
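    # Example usage (editor's sketch; the checkpoint path below is a placeholder):
    #   bart = BARTModel.from_pretrained('checkpoints/', checkpoint_file='model.pt')
    #   bart.eval()
    # The returned BARTHubInterface wraps the loaded model, task and args and exposes
    # encode/decode/generate helpers (see hub_interface.py).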
def register_classification_head(
self, name, num_classes=None, inner_dim=None, **kwargs
):
"""Register a classification head."""
logger.info("Registering classification head: {0}".format(name))
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
"and inner_dim {} (prev: {})".format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = BARTClassificationHead(
input_dim=self.args.encoder_embed_dim,
inner_dim=inner_dim or self.args.encoder_embed_dim,
num_classes=num_classes,
activation_fn=self.args.pooler_activation_fn,
pooler_dropout=self.args.pooler_dropout,
do_spectral_norm=getattr(
self.args, "spectral_norm_classification_head", False
),
)
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
prefix = name + "." if name != "" else ""
current_head_names = (
[]
if not hasattr(self, "classification_heads")
else self.classification_heads.keys()
)
# Handle new classification heads present in the state dict.
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + "classification_heads."):
continue
head_name = k[len(prefix + "classification_heads.") :].split(".")[0]
num_classes = state_dict[
prefix + "classification_heads." + head_name + ".out_proj.weight"
].size(0)
inner_dim = state_dict[
prefix + "classification_heads." + head_name + ".dense.weight"
].size(0)
if getattr(self.args, "load_checkpoint_heads", False):
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
logger.warning(
"deleting classification head ({}) from checkpoint "
"not present in current model: {}".format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes
!= self.classification_heads[head_name].out_proj.out_features
or inner_dim
!= self.classification_heads[head_name].dense.out_features
):
logger.warning(
"deleting classification head ({}) from checkpoint "
"with different dimensions than current model: {}".format(
head_name, k
)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
def truncate_emb(key):
if key in state_dict:
state_dict[key] = state_dict[key][:-1, :]
# When finetuning on translation task, remove last row of
# embedding matrix that corresponds to mask_idx token.
loaded_dict_size = state_dict["encoder.embed_tokens.weight"].size(0)
if (
loaded_dict_size == len(self.encoder.dictionary) + 1
and "<mask>" not in self.encoder.dictionary
):
truncate_emb("encoder.embed_tokens.weight")
truncate_emb("decoder.embed_tokens.weight")
truncate_emb("encoder.output_projection.weight")
truncate_emb("decoder.output_projection.weight")
# When continued pretraining on new set of languages for mbart,
# add extra lang embeddings at the end of embed_tokens.
# Note: newly added languages are assumed to have been added at the end.
if self.args.task == "multilingual_denoising" and loaded_dict_size < len(
self.encoder.dictionary
):
logger.info(
"Adding extra language embeddings not found in pretrained model for "
"continued pretraining of MBART on new set of languages."
)
loaded_mask_token_embedding = state_dict["encoder.embed_tokens.weight"][
-1, :
]
num_langids_to_add = len(self.encoder.dictionary) - loaded_dict_size
embed_dim = state_dict["encoder.embed_tokens.weight"].size(1)
new_lang_embed_to_add = torch.zeros(num_langids_to_add, embed_dim)
nn.init.normal_(new_lang_embed_to_add, mean=0, std=embed_dim**-0.5)
new_lang_embed_to_add = new_lang_embed_to_add.to(
dtype=state_dict["encoder.embed_tokens.weight"].dtype,
)
state_dict["encoder.embed_tokens.weight"] = torch.cat(
[
state_dict["encoder.embed_tokens.weight"][
: loaded_dict_size - 1, :
],
new_lang_embed_to_add,
loaded_mask_token_embedding.unsqueeze(0),
]
)
state_dict["decoder.embed_tokens.weight"] = torch.cat(
[
state_dict["decoder.embed_tokens.weight"][
: loaded_dict_size - 1, :
],
new_lang_embed_to_add,
loaded_mask_token_embedding.unsqueeze(0),
]
)
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, "classification_heads"):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + "classification_heads." + k not in state_dict:
logger.info("Overwriting " + prefix + "classification_heads." + k)
state_dict[prefix + "classification_heads." + k] = v
def set_beam_size(self, beam):
"""Set beam size for efficient beamable enc-dec attention."""
beamable = False
for layer in self.decoder.layers:
if layer.encoder_attn is not None:
if hasattr(layer.encoder_attn, "set_beam_size"):
layer.encoder_attn.set_beam_size(beam)
beamable = True
if beamable:
self.encoder.reorder_encoder_out = self.encoder._reorder_encoder_out
class BARTClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim,
inner_dim,
num_classes,
activation_fn,
pooler_dropout,
do_spectral_norm=False,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
if do_spectral_norm:
self.out_proj = torch.nn.utils.spectral_norm(self.out_proj)
def forward(self, features, **kwargs):
x = features
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@register_model_architecture("bart", "bart_large")
def bart_large_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 1024)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.relu_dropout = getattr(args, "relu_dropout", 0.0)
args.dropout = getattr(args, "dropout", 0.1)
args.max_target_positions = getattr(args, "max_target_positions", 1024)
args.max_source_positions = getattr(args, "max_source_positions", 1024)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", True
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", True)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", True)
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
@register_model_architecture("bart", "bart_base")
def bart_base_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 768)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12)
bart_large_architecture(args)
@register_model_architecture("bart", "mbart_large")
def mbart_large_architecture(args):
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
bart_large_architecture(args)
@register_model_architecture("bart", "mbart_base")
def mbart_base_architecture(args):
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
bart_base_architecture(args)
@register_model_architecture("bart", "mbart_base_wmt20")
def mbart_base_wmt20_architecture(args):
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
mbart_base_architecture(args)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/bart/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
from typing import Dict, List
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import encoders
from fairseq.hub_utils import GeneratorHubInterface
from omegaconf import open_dict
logger = logging.getLogger(__name__)
class BARTHubInterface(GeneratorHubInterface):
"""A simple PyTorch Hub interface to BART.
Usage: https://github.com/pytorch/fairseq/tree/main/examples/bart
"""
def __init__(self, cfg, task, model):
super().__init__(cfg, task, [model])
self.model = self.models[0]
def encode(
self, sentence: str, *addl_sentences, no_separator=True
) -> torch.LongTensor:
"""
BPE-encode a sentence (or multiple sentences).
Every sequence begins with a beginning-of-sentence (`<s>`) symbol.
Every sentence ends with an end-of-sentence (`</s>`).
Example (single sentence): `<s> a b c </s>`
Example (sentence pair): `<s> d e f </s> 1 2 3 </s>`
The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE
requires leading spaces. For example::
>>> bart.encode('Hello world').tolist()
[0, 31414, 232, 2]
>>> bart.encode(' world').tolist()
[0, 232, 2]
>>> bart.encode('world').tolist()
[0, 8331, 2]
"""
tokens = self.bpe.encode(sentence)
if len(tokens.split(" ")) > min(self.max_positions) - 2:
tokens = " ".join(tokens.split(" ")[: min(self.max_positions) - 2])
bpe_sentence = "<s> " + tokens + " </s>"
for s in addl_sentences:
bpe_sentence += " </s>" if not no_separator else ""
bpe_sentence += " " + self.bpe.encode(s) + " </s>"
tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=False)
return tokens.long()
def decode(self, tokens: torch.LongTensor):
assert tokens.dim() == 1
tokens = tokens.cpu().numpy()
if tokens[0] == self.task.source_dictionary.bos():
tokens = tokens[1:] # remove <s>
eos_mask = tokens == self.task.source_dictionary.eos()
doc_mask = eos_mask[1:] & eos_mask[:-1]
sentences = np.split(tokens, doc_mask.nonzero()[0] + 1)
sentences = [
self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences
]
if len(sentences) == 1:
return sentences[0]
return sentences
def _build_sample(self, src_tokens: List[torch.LongTensor]):
# assert torch.is_tensor(src_tokens)
dataset = self.task.build_dataset_for_inference(
src_tokens,
[x.numel() for x in src_tokens],
)
sample = dataset.collater(dataset)
sample = utils.apply_to_sample(lambda tensor: tensor.to(self.device), sample)
return sample
def generate(
self,
tokenized_sentences: List[torch.LongTensor],
*args,
inference_step_args=None,
skip_invalid_size_inputs=False,
**kwargs
) -> List[List[Dict[str, torch.Tensor]]]:
inference_step_args = inference_step_args or {}
if "prefix_tokens" in inference_step_args:
raise NotImplementedError("prefix generation not implemented for BART")
res = []
for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
src_tokens = batch["net_input"]["src_tokens"]
inference_step_args["prefix_tokens"] = src_tokens.new_full(
(src_tokens.size(0), 1), fill_value=self.task.source_dictionary.bos()
).to(device=self.device)
results = super().generate(
src_tokens,
*args,
inference_step_args=inference_step_args,
skip_invalid_size_inputs=skip_invalid_size_inputs,
**kwargs
)
for id, hypos in zip(batch["id"].tolist(), results):
res.append((id, hypos))
res = [hypos for _, hypos in sorted(res, key=lambda x: x[0])]
return res
def extract_features(
self, tokens: torch.LongTensor, return_all_hiddens: bool = False
) -> torch.Tensor:
if tokens.dim() == 1:
tokens = tokens.unsqueeze(0)
if tokens.size(-1) > min(self.model.max_positions()):
raise ValueError(
"tokens exceeds maximum length: {} > {}".format(
tokens.size(-1), self.model.max_positions()
)
)
        tokens = tokens.to(device=self.device)
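        # Editor's note: BART's decoder input is the target shifted right, with the final
        # </s> token rotated to position 0; the gather below selects each sequence's last
        # non-pad token (its </s>) to use as that leading symbol.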
prev_output_tokens = tokens.clone()
prev_output_tokens[:, 0] = tokens.gather(
1,
(tokens.ne(self.task.source_dictionary.pad()).sum(dim=1) - 1).unsqueeze(-1),
).squeeze()
prev_output_tokens[:, 1:] = tokens[:, :-1]
features, extra = self.model(
src_tokens=tokens,
src_lengths=None,
prev_output_tokens=prev_output_tokens,
features_only=True,
return_all_hiddens=return_all_hiddens,
)
if return_all_hiddens:
# convert from T x B x C -> B x T x C
inner_states = extra["inner_states"]
return [inner_state.transpose(0, 1) for inner_state in inner_states]
else:
return features # just the last layer's features
def register_classification_head(
self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs
):
self.model.register_classification_head(
name, num_classes=num_classes, embedding_size=embedding_size, **kwargs
)
def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False):
if tokens.dim() == 1:
tokens = tokens.unsqueeze(0)
features = self.extract_features(tokens.to(device=self.device))
sentence_representation = features[
tokens.eq(self.task.source_dictionary.eos()), :
].view(features.size(0), -1, features.size(-1))[:, -1, :]
logits = self.model.classification_heads[head](sentence_representation)
if return_logits:
return logits
return F.log_softmax(logits, dim=-1)
def fill_mask(
self,
masked_inputs: List[str],
topk: int = 5,
match_source_len: bool = True,
**generate_kwargs
):
masked_token = "<mask>"
batch_tokens = []
for masked_input in masked_inputs:
assert (
masked_token in masked_input
), "please add one {} token for the input".format(masked_token)
text_spans = masked_input.split(masked_token)
text_spans_bpe = (
(" {0} ".format(masked_token))
.join([self.bpe.encode(text_span.rstrip()) for text_span in text_spans])
.strip()
)
tokens = self.task.source_dictionary.encode_line(
"<s> " + text_spans_bpe + " </s>",
append_eos=False,
add_if_not_exist=False,
).long()
batch_tokens.append(tokens)
# ensure beam size is at least as big as topk
generate_kwargs["beam"] = max(
topk,
generate_kwargs.get("beam", -1),
)
generate_kwargs["match_source_len"] = match_source_len
batch_hypos = self.generate(batch_tokens, **generate_kwargs)
return [
[(self.decode(hypo["tokens"]), hypo["score"]) for hypo in hypos[:topk]]
for hypos in batch_hypos
]
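    # Example usage (editor's sketch, not part of the original file):
    #   bart.fill_mask(['The cat <mask> on the mat.'], topk=3)
    # returns, for each input, up to topk (filled_sentence, score) pairs from beam search.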
| EXA-1-master | exa/libraries/fairseq/fairseq/models/bart/hub_interface.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.models import BaseFairseqModel, register_model
from fairseq.models.wav2vec.wav2vec2_asr import (
Wav2Vec2CtcConfig,
Wav2VecCtc,
Wav2VecEncoder,
)
from fairseq.tasks import FairseqTask
@register_model("wav2vec2_laser", dataclass=Wav2Vec2CtcConfig)
class Wav2VecLaser(Wav2VecCtc):
def __init__(self, cfg: Wav2Vec2CtcConfig, w2v_encoder: BaseFairseqModel):
super().__init__(cfg, w2v_encoder)
self.num_updates = 0
self.freeze_finetune_updates = cfg.freeze_finetune_updates
@classmethod
def build_model(cls, cfg: Wav2Vec2CtcConfig, task: FairseqTask):
"""Build a new model instance."""
w2v_encoder = Wav2VecEncoder(cfg, 1024)
return cls(cfg, w2v_encoder)
def forward(self, **kwargs):
output = super().forward(**kwargs)
x_out = output["encoder_out"] * 0.01
out_pad_mask = output["padding_mask"]
# Set padded outputs to -inf so they are not selected by max-pooling
if out_pad_mask is not None and out_pad_mask.any():
x_out = (
x_out.float()
.masked_fill_(out_pad_mask.T.unsqueeze(-1), float("-inf"))
.type_as(x_out)
)
return x_out.max(dim=0)[0]
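        # Descriptive note (added): the returned tensor is a fixed-size utterance
        # embedding of shape (batch, encoder_dim), obtained by max-pooling the scaled
        # encoder states over the time dimension, LASER-style.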
| EXA-1-master | exa/libraries/fairseq/fairseq/models/wav2vec/wav2vec2_laser.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .wav2vec import * # noqa
from .wav2vec2 import * # noqa
from .wav2vec2_asr import * # noqa
from .wav2vec2_laser import * # noqa
| EXA-1-master | exa/libraries/fairseq/fairseq/models/wav2vec/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import logging
import math
import re
from argparse import Namespace
from dataclasses import dataclass, field
from typing import Any, Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from omegaconf import II, MISSING, open_dict
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models import (
BaseFairseqModel,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
)
from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES
from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerDecoderLayer
from fairseq.tasks import FairseqTask
logger = logging.getLogger(__name__)
@dataclass
class Wav2Vec2AsrConfig(FairseqDataclass):
w2v_path: str = field(
default=MISSING, metadata={"help": "path to wav2vec 2.0 model"}
)
no_pretrained_weights: bool = field(
default=False, metadata={"help": "if true, does not load pretrained weights"}
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
final_dropout: float = field(
default=0.0,
metadata={"help": "dropout after transformer and before final projection"},
)
dropout: float = field(
default=0.0, metadata={"help": "dropout probability inside wav2vec 2.0 model"}
)
attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside wav2vec 2.0 model"
},
)
activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside wav2vec 2.0 model"
},
)
# masking
apply_mask: bool = field(
default=False, metadata={"help": "apply masking during fine-tuning"}
)
mask_length: int = field(
        default=10, metadata={"help": "length of each masked span of time steps"}
)
mask_prob: float = field(
default=0.5,
metadata={
"help": "probability of replacing a token with mask (normalized by length)"
},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose masks"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
mask_min_space: Optional[int] = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
require_same_masks: bool = field(
default=True,
metadata={
"help": "whether to number of masked timesteps must be the same across all "
"examples in a batch"
},
)
mask_dropout: float = field(
default=0.0,
metadata={"help": "percent of masks to unmask for each sample"},
)
# channel masking
mask_channel_length: int = field(
default=10, metadata={"help": "length of the mask for features (channels)"}
)
mask_channel_prob: float = field(
default=0.0, metadata={"help": "probability of replacing a feature with 0"}
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False, metadata={"help": "whether to allow channel masks to overlap"}
)
freeze_finetune_updates: int = field(
default=0, metadata={"help": "dont finetune wav2vec for this many updates"}
)
feature_grad_mult: float = field(
default=0.0, metadata={"help": "reset feature grad mult in wav2vec 2.0 to this"}
)
layerdrop: float = field(
default=0.0, metadata={"help": "probability of dropping a layer in wav2vec 2.0"}
)
drop_path: float = 0
mask_channel_min_space: Optional[int] = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
mask_channel_before: bool = False
normalize: bool = II("task.normalize")
update_alibi: bool = True
data: str = II("task.data")
# this holds the loaded wav2vec args
w2v_args: Any = None
offload_activations: bool = field(
default=False, metadata={"help": "offload_activations"}
)
min_params_to_wrap: int = field(
default=int(1e8),
metadata={
"help": "minimum number of params for a layer to be wrapped with FSDP() when "
"training with --ddp-backend=fully_sharded. Smaller values will "
"improve memory efficiency, but may make torch.distributed "
"communication less efficient due to smaller input sizes. This option "
"is set to 0 (i.e., always wrap) when --checkpoint-activations or "
"--offload-activations are passed."
},
)
checkpoint_activations: bool = field(
default=False,
metadata={"help": "recompute activations and save memory for extra compute"},
)
ddp_backend: str = II("distributed_training.ddp_backend")
zero_mask: bool = False
load_ema: bool = False
layer_decay: float = 1
@dataclass
class Wav2Vec2CtcConfig(Wav2Vec2AsrConfig):
blank_weight: float = 0
blank_mode: str = "add"
@register_model("wav2vec_ctc", dataclass=Wav2Vec2CtcConfig)
class Wav2VecCtc(BaseFairseqModel):
def __init__(self, cfg: Wav2Vec2CtcConfig, w2v_encoder: BaseFairseqModel):
super().__init__()
self.cfg = cfg
self.w2v_encoder = w2v_encoder
self.blank_weight = cfg.blank_weight
self.blank_mode = cfg.blank_mode
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: Wav2Vec2CtcConfig, task: FairseqTask):
"""Build a new model instance."""
w2v_encoder = Wav2VecEncoder(cfg, len(task.target_dictionary))
return cls(cfg, w2v_encoder)
def get_logits(self, net_output, normalize=False):
logits = net_output["encoder_out"]
if self.blank_weight != 0:
if self.blank_mode == "add":
logits[..., 0] += self.blank_weight
elif self.blank_mode == "set":
logits[..., 0] = self.blank_weight
else:
raise Exception(f"invalid blank mode {self.blank_mode}")
if net_output["padding_mask"] is not None and net_output["padding_mask"].any():
number_of_classes = logits.size(-1)
masking_tensor = torch.ones(
number_of_classes, device=logits.device
) * float("-inf")
masking_tensor[0] = 0
if logits.size(0) > net_output["padding_mask"].size(1):
net_output["padding_mask"] = F.pad(
net_output["padding_mask"], (1, 0), value=False
)
logits[net_output["padding_mask"].T] = masking_tensor.type_as(logits)
if normalize:
logits = utils.log_softmax(logits.float(), dim=-1)
return logits
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = self.get_logits(net_output)
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def forward(self, **kwargs):
x = self.w2v_encoder(**kwargs)
return x
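    # Illustrative inference sketch (added comment; the checkpoint path and `waveform`
    # tensor are placeholders, not from this file):
    #   from fairseq import checkpoint_utils
    #   models, cfg, task = checkpoint_utils.load_model_ensemble_and_task(["/path/to/ctc.pt"])
    #   model = models[0].eval()
    #   net_output = model(source=waveform, padding_mask=None)  # waveform: B x samples floats
    #   emissions = model.get_logits(net_output, normalize=True)  # T x B x V log-probs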
@dataclass
class Wav2Vec2Seq2SeqConfig(Wav2Vec2AsrConfig):
decoder_embed_dim: int = field(
default=768, metadata={"help": "decoder embedding dimension"}
)
decoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"})
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "decoder layerdrop chance"}
)
decoder_attention_heads: int = field(
default=4, metadata={"help": "num decoder attention heads"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each decoder block"}
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
decoder_dropout: float = field(
default=0.0, metadata={"help": "dropout probability in the decoder"}
)
decoder_attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside the decoder"
},
)
decoder_activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside the decoder"
},
)
max_target_positions: int = field(
default=2048, metadata={"help": "max target positions"}
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
autoregressive: bool = II("task.autoregressive")
@register_model("wav2vec_seq2seq", dataclass=Wav2Vec2Seq2SeqConfig)
class Wav2Vec2Seq2SeqModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, cfg: Wav2Vec2Seq2SeqConfig, task: FairseqTask):
"""Build a new model instance."""
assert (
cfg.autoregressive
), "Please set task.autoregressive=true for seq2seq asr models"
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
return emb
decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim)
encoder = cls.build_encoder(cfg)
decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens)
return Wav2Vec2Seq2SeqModel(encoder, decoder)
@classmethod
def build_encoder(cls, cfg: Wav2Vec2AsrConfig):
return Wav2VecEncoder(cfg)
@classmethod
def build_decoder(cls, cfg: Wav2Vec2Seq2SeqConfig, tgt_dict, embed_tokens):
return TransformerDecoder(cfg, tgt_dict, embed_tokens)
def forward(self, **kwargs):
encoder_out = self.encoder(**kwargs)
decoder_out = self.decoder(encoder_out=encoder_out, **kwargs)
return decoder_out
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
class Wav2VecEncoder(FairseqEncoder):
def __init__(self, cfg: Wav2Vec2AsrConfig, output_size=None):
self.apply_mask = cfg.apply_mask
arg_overrides = {
"dropout": cfg.dropout,
"activation_dropout": cfg.activation_dropout,
"dropout_input": cfg.dropout_input,
"attention_dropout": cfg.attention_dropout,
"mask_length": cfg.mask_length,
"mask_prob": cfg.mask_prob,
"require_same_masks": getattr(cfg, "require_same_masks", True),
"pct_holes": getattr(cfg, "mask_dropout", 0),
"mask_selection": cfg.mask_selection,
"mask_other": cfg.mask_other,
"no_mask_overlap": cfg.no_mask_overlap,
"mask_channel_length": cfg.mask_channel_length,
"mask_channel_prob": cfg.mask_channel_prob,
"mask_channel_before": cfg.mask_channel_before,
"mask_channel_selection": cfg.mask_channel_selection,
"mask_channel_other": cfg.mask_channel_other,
"no_mask_channel_overlap": cfg.no_mask_channel_overlap,
"encoder_layerdrop": cfg.layerdrop,
"feature_grad_mult": cfg.feature_grad_mult,
"checkpoint_activations": cfg.checkpoint_activations,
"offload_activations": cfg.offload_activations,
"min_params_to_wrap": cfg.min_params_to_wrap,
# d2v multi args
"encoder_dropout": cfg.dropout,
"drop_path": getattr(cfg, "drop_path", 0),
"mask_dropout": getattr(cfg, "mask_dropout", 0),
"zero_mask": getattr(cfg, "zero_mask", False),
"local_grad_mult": cfg.feature_grad_mult,
"layerdrop": cfg.layerdrop,
"prenet_layerdrop": cfg.layerdrop,
"prenet_dropout": cfg.dropout,
"post_mlp_drop": cfg.dropout,
"encoder_zero_mask": getattr(cfg, "zero_mask", False),
"inverse_mask": False,
"learned_alibi_scale": getattr(cfg, "update_alibi", True),
}
if cfg.w2v_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides)
w2v_args = state.get("cfg", None)
if w2v_args is None:
w2v_args = convert_namespace_to_omegaconf(state["args"])
w2v_args.criterion = None
w2v_args.lr_scheduler = None
cfg.w2v_args = w2v_args
logger.info(w2v_args)
else:
state = None
w2v_args = cfg.w2v_args
if isinstance(w2v_args, Namespace):
cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args)
self.is_d2v_multi = "data2vec_multi" in w2v_args.model.get("_name", None)
if not self.is_d2v_multi:
model_normalized = w2v_args.task.get(
"normalize", w2v_args.model.get("normalize", False)
)
assert cfg.normalize == model_normalized, (
"Fine-tuning works best when data normalization is the same. "
"Please check that --normalize is set or unset for both pre-training and here"
)
if hasattr(cfg, "checkpoint_activations") and cfg.checkpoint_activations:
with open_dict(w2v_args):
w2v_args.model.checkpoint_activations = cfg.checkpoint_activations
w2v_args.task.data = cfg.data
task = tasks.setup_task(w2v_args.task, from_checkpoint=True)
model = task.build_model(w2v_args.model, from_checkpoint=True)
model.remove_pretraining_modules()
d = w2v_args.model.encoder_embed_dim
else:
assert cfg.normalize
if hasattr(w2v_args.task, "audio"):
w2v_args.task.audio.data = cfg.data
else:
w2v_args.task.data = cfg.data
task = tasks.setup_task(w2v_args.task, from_checkpoint=True)
model = task.build_model(w2v_args.model, from_checkpoint=True)
model.remove_pretraining_modules(modality="audio")
d = w2v_args.model.embed_dim
if state is not None and not cfg.no_pretrained_weights:
if cfg.load_ema:
assert "_ema" in state["model"]
for k in state["model"]["_ema"]:
mk = "encoder." + k
assert mk in state["model"], mk
state["model"][mk] = state["model"]["_ema"][k]
self.load_model_weights(state, model, cfg)
super().__init__(task.source_dictionary)
self.w2v_model = model
self.final_dropout = nn.Dropout(cfg.final_dropout)
self.freeze_finetune_updates = cfg.freeze_finetune_updates
self.num_updates = 0
targ_d = None
self.proj = None
if output_size is not None:
targ_d = output_size
elif getattr(cfg, "decoder_embed_dim", d) != d:
targ_d = cfg.decoder_embed_dim
if targ_d is not None:
self.proj = Linear(d, targ_d)
layer_decay = getattr(cfg, "layer_decay", 1)
if layer_decay < 1:
mod_encs = list(model.modality_encoders.values())
assert len(mod_encs) == 1, len(mod_encs)
blocks = list(mod_encs[0].context_encoder.blocks) + list(model.blocks)
num_layers = len(blocks) + 1
layer_scales = list(
layer_decay ** (num_layers - i) for i in range(num_layers + 1)
)
for i, b in enumerate(blocks):
lid = i + 1
if layer_scales[lid] == 1.0:
continue
for n, p in b.named_parameters():
optim_override = getattr(p, "optim_overrides", {})
if "optimizer" not in optim_override:
optim_override["optimizer"] = {}
optim_override["optimizer"]["lr_scale"] = layer_scales[lid]
p.optim_overrides = optim_override
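        # Descriptive note (added): with layer_decay < 1, block `lid` receives
        # lr_scale = layer_decay ** (num_layers - lid), so shallower blocks are updated
        # with smaller effective learning rates than deeper ones during fine-tuning.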
def load_model_weights(self, state, model, cfg):
if cfg.ddp_backend == "fully_sharded":
from fairseq.distributed import FullyShardedDataParallel
for name, module in model.named_modules():
if "encoder.layers" in name and len(name.split(".")) == 3:
                    # Only for layers do we use special handling: load the weights one by one.
                    # We don't load all weights together, as that wouldn't be memory
                    # efficient and may cause OOM.
new_dict = {
k.replace(name + ".", ""): v
for (k, v) in state["model"].items()
if name + "." in k
}
assert isinstance(module, FullyShardedDataParallel)
with module.summon_full_params():
module.load_state_dict(new_dict, strict=True)
module._reset_lazy_init()
# Once layers are loaded, filter them out and load everything else.
r = re.compile("encoder.layers.\d.")
filtered_list = list(filter(r.match, state["model"].keys()))
new_big_dict = {
k: v for (k, v) in state["model"].items() if k not in filtered_list
}
model.load_state_dict(new_big_dict, strict=False)
else:
to_delete = {"_ema", "target_proj", "decoder"}
for k in to_delete:
if k in state["model"]:
del state["model"][k]
if hasattr(model, "modality_encoders"):
if "modality_encoders.AUDIO.encoder_mask" not in state["model"]:
model.modality_encoders["AUDIO"].encoder_mask = None
elif not cfg.zero_mask:
model.modality_encoders["AUDIO"].encoder_mask = None
del state["model"]["modality_encoders.AUDIO.encoder_mask"]
for k in list(state["model"].keys()):
if k.startswith("modality_encoders.") and not k.startswith(
"modality_encoders.AUDIO"
):
del state["model"][k]
print(model)
model.load_state_dict(state["model"], strict=True)
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(self, source, padding_mask, **kwargs):
w2v_args = {
"source": source,
"padding_mask": padding_mask,
"mask": self.apply_mask and self.training,
}
if self.is_d2v_multi:
w2v_args["mode"] = "AUDIO"
ft = self.freeze_finetune_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
res = self.w2v_model.extract_features(**w2v_args)
x = res["x"]
padding_mask = res["padding_mask"]
# B x T x C -> T x B x C
x = x.transpose(0, 1)
x = self.final_dropout(x)
if self.proj:
x = self.proj(x)
return {
"encoder_out": x, # T x B x C
"padding_mask": padding_mask, # B x T,
"layer_results": res["layer_results"],
}
def forward_torchscript(self, net_input):
if torch.jit.is_scripting():
return self.forward(net_input["source"], net_input["padding_mask"])
else:
return self.forward_non_torchscript(net_input)
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["padding_mask"] is not None:
encoder_out["padding_mask"] = encoder_out["padding_mask"].index_select(
0, new_order
)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return None
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
cfg: Wav2Vec2Seq2SeqConfig,
dictionary,
embed_tokens,
no_encoder_attn=False,
):
super().__init__(dictionary)
self.dropout = cfg.decoder_dropout
self.share_input_output_embed = cfg.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = cfg.decoder_embed_dim
self.output_embed_dim = cfg.decoder_embed_dim
self.layerdrop = cfg.decoder_layerdrop
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = cfg.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
cfg.max_target_positions,
embed_dim,
self.padding_idx,
learned=cfg.decoder_learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
# TODO: update this when transformer gets converted to dataclass configs
transformer_cfg = copy.deepcopy(cfg)
with open_dict(transformer_cfg):
transformer_cfg.dropout = transformer_cfg.decoder_dropout
transformer_cfg.attention_dropout = (
transformer_cfg.decoder_attention_dropout
)
transformer_cfg.activation_dropout = (
transformer_cfg.decoder_activation_dropout
)
self.layers = nn.ModuleList([])
self.layers.extend(
[
TransformerDecoderLayer(transformer_cfg, no_encoder_attn)
for _ in range(transformer_cfg.decoder_layers)
]
)
if not self.share_input_output_embed:
self.embed_out = nn.Parameter(
torch.Tensor(len(dictionary), self.output_embed_dim)
)
nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim**-0.5)
if transformer_cfg.decoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
        if isinstance(prev_output_tokens, list):
max_len = max((len(x) for x in prev_output_tokens))
tmp = torch.zeros(
[len(prev_output_tokens), max_len], device=prev_output_tokens[0].device
)
for (i, p) in enumerate(prev_output_tokens):
tmp[i, : len(p)] = p
prev_output_tokens = tmp
prev_output_tokens = prev_output_tokens.long()
x, extra = self.extract_features(
prev_output_tokens, encoder_out, incremental_state
)
x = self.output_layer(x)
return x, extra
def extract_features(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
# embed positions
positions = (
self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
self_attn_padding_mask = None
if prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
for layer in self.layers:
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, attn, _ = layer(
x,
encoder_out["encoder_out"] if encoder_out is not None else None,
encoder_out["padding_mask"] if encoder_out is not None else None,
incremental_state,
self_attn_mask=self.buffered_future_mask(x)
if incremental_state is None
else None,
self_attn_padding_mask=self_attn_padding_mask,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, {"attn": attn, "inner_states": inner_states}
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
# project back to size of vocabulary
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
| EXA-1-master | exa/libraries/fairseq/fairseq/models/wav2vec/wav2vec2_asr.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import List, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.data.data_utils import compute_mask_indices
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.distributed import fsdp_wrap
from fairseq.models import BaseFairseqModel, register_model
from fairseq.modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GradMultiply,
GumbelVectorQuantizer,
LayerNorm,
MultiheadAttention,
RelPositionalEncoding,
SamePad,
TransposeLast,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.conformer_layer import ConformerWav2Vec2EncoderLayer
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import buffered_arange, index_put, is_xla_tensor
from .utils import pad_to_multiple
EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"])
LAYER_TYPE_CHOICES = ChoiceEnum(["transformer", "conformer"])
@dataclass
class Wav2Vec2Config(FairseqDataclass):
extractor_mode: EXTRACTOR_MODE_CHOICES = field(
default="default",
metadata={
"help": "mode for feature extractor. default has a single group norm with d "
"groups in the first conv block, whereas layer_norm has layer norms in "
"every block (meant to use with normalize=True)"
},
)
encoder_layers: int = field(
default=12, metadata={"help": "num encoder layers in the transformer"}
)
encoder_embed_dim: int = field(
default=768, metadata={"help": "encoder embedding dimension"}
)
encoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "encoder embedding dimension for FFN"}
)
encoder_attention_heads: int = field(
default=12, metadata={"help": "num encoder attention heads"}
)
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="gelu", metadata={"help": "activation function to use"}
)
layer_type: LAYER_TYPE_CHOICES = field(
default="transformer", metadata={"help": "layer type in encoder"}
)
# dropouts
dropout: float = field(
default=0.1, metadata={"help": "dropout probability for the transformer"}
)
attention_dropout: float = field(
default=0.1, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN"}
)
encoder_layerdrop: float = field(
default=0.0, metadata={"help": "probability of dropping a tarnsformer layer"}
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
dropout_features: float = field(
default=0.0,
metadata={"help": "dropout to apply to the features (after feat extr)"},
)
final_dim: int = field(
default=0,
metadata={
"help": "project final representations and targets to this many dimensions."
"set to encoder_embed_dim is <= 0"
},
)
layer_norm_first: bool = field(
default=False, metadata={"help": "apply layernorm first in the transformer"}
)
conv_feature_layers: str = field(
default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]",
metadata={
"help": "string describing convolutional feature extraction layers in form of a python list that contains "
"[(dim, kernel_size, stride), ...]"
},
)
conv_bias: bool = field(
default=False, metadata={"help": "include bias in conv encoder"}
)
logit_temp: float = field(
default=0.1, metadata={"help": "temperature to divide logits by"}
)
quantize_targets: bool = field(
default=False, metadata={"help": "use quantized targets"}
)
quantize_input: bool = field(
default=False, metadata={"help": "use quantized inputs"}
)
same_quantizer: bool = field(
default=False, metadata={"help": "use same quantizer for inputs and targets"}
)
target_glu: bool = field(
default=False, metadata={"help": "adds projection + glu to targets"}
)
feature_grad_mult: float = field(
default=1.0, metadata={"help": "multiply feature extractor var grads by this"}
)
quantizer_depth: int = field(
default=1,
metadata={"help": "number of quantizer layers"},
)
quantizer_factor: int = field(
default=3,
metadata={
"help": "dimensionality increase for inner quantizer layers (if depth > 1)"
},
)
latent_vars: int = field(
default=320,
metadata={"help": "number of latent variables V in each group of the codebook"},
)
latent_groups: int = field(
default=2,
metadata={"help": "number of groups G of latent variables in the codebook"},
)
latent_dim: int = field(
default=0,
metadata={
"help": "if > 0, uses this dimensionality for latent variables. "
"otherwise uses final_dim / latent_groups"
},
)
# masking
mask_length: int = field(default=10, metadata={"help": "mask length"})
mask_prob: float = field(
default=0.65, metadata={"help": "probability of replacing a token with mask"}
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose mask length"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
mask_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
require_same_masks: bool = field(
default=True,
metadata={
"help": "whether to number of masked timesteps must be the same across all "
"examples in a batch"
},
)
mask_dropout: float = field(
default=0.0,
metadata={"help": "percent of masks to unmask for each sample"},
)
# channel masking
mask_channel_length: int = field(
default=10, metadata={"help": "length of the mask for features (channels)"}
)
mask_channel_prob: float = field(
default=0.0, metadata={"help": "probability of replacing a feature with 0"}
)
mask_channel_before: bool = False
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False, metadata={"help": "whether to allow channel masks to overlap"}
)
mask_channel_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# negative selection
num_negatives: int = field(
default=100,
metadata={"help": "number of negative examples from the same sample"},
)
negatives_from_everywhere: bool = field(
default=False,
metadata={"help": "sample negatives from everywhere, not just masked states"},
)
cross_sample_negatives: int = field(
default=0, metadata={"help": "number of negative examples from the any sample"}
)
codebook_negatives: int = field(
default=0, metadata={"help": "number of negative examples codebook"}
)
# positional embeddings
conv_pos: int = field(
default=128,
metadata={"help": "number of filters for convolutional positional embeddings"},
)
conv_pos_groups: int = field(
default=16,
metadata={"help": "number of groups for convolutional positional embedding"},
)
pos_conv_depth: int = field(
default=1,
metadata={"help": "depth of positional encoder network"},
)
latent_temp: Tuple[float, float, float] = field(
default=(2, 0.5, 0.999995),
metadata={
"help": "temperature for latent variable sampling. "
"can be tuple of 3 values (start, end, decay)"
},
)
max_positions: int = field(default=100000, metadata={"help": "Max positions"})
checkpoint_activations: bool = field(
default=False,
metadata={"help": "recompute activations and save memory for extra compute"},
)
# FP16 optimization
required_seq_len_multiple: int = field(
default=2,
metadata={
"help": "pad the input to encoder such that the sequence length is divisible by multiple"
},
)
crop_seq_to_multiple: int = field(
default=1,
metadata={
"help": "crop convolutional feature extractor output such that the sequence length is divisible by multiple"
},
)
# Conformer
depthwise_conv_kernel_size: int = field(
default=31,
metadata={
"help": "depthwise-conv-kernel-size for convolution in conformer layer"
},
)
attn_type: str = field(
default="",
metadata={"help": "if espnet use ESPNET MHA"},
)
pos_enc_type: str = field(
default="abs",
metadata={"help": "Positional encoding type to use in conformer"},
)
fp16: bool = field(default=False, metadata={"help": "If fp16 is being used"})
@register_model("wav2vec2", dataclass=Wav2Vec2Config)
class Wav2Vec2Model(BaseFairseqModel):
def __init__(self, cfg: Wav2Vec2Config):
super().__init__()
self.cfg = cfg
feature_enc_layers = eval(cfg.conv_feature_layers)
self.embed = feature_enc_layers[-1][0]
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
)
self.post_extract_proj = (
nn.Linear(self.embed, cfg.encoder_embed_dim)
if self.embed != cfg.encoder_embed_dim and not cfg.quantize_input
else None
)
self.crop_seq_to_multiple = cfg.crop_seq_to_multiple
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_before = cfg.mask_channel_before
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.quantizer = None
self.input_quantizer = None
self.n_negatives = cfg.num_negatives
self.cross_sample_negatives = cfg.cross_sample_negatives
self.codebook_negatives = cfg.codebook_negatives
self.negatives_from_everywhere = cfg.negatives_from_everywhere
self.logit_temp = cfg.logit_temp
final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
if cfg.quantize_targets:
vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else final_dim
self.quantizer = GumbelVectorQuantizer(
dim=self.embed,
num_vars=cfg.latent_vars,
temp=cfg.latent_temp,
groups=cfg.latent_groups,
combine_groups=False,
vq_dim=vq_dim,
time_first=True,
weight_proj_depth=cfg.quantizer_depth,
weight_proj_factor=cfg.quantizer_factor,
)
self.project_q = nn.Linear(vq_dim, final_dim)
else:
self.project_q = nn.Linear(self.embed, final_dim)
if cfg.quantize_input:
if cfg.same_quantizer and self.quantizer is not None:
vq_dim = final_dim
self.input_quantizer = self.quantizer
else:
vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else cfg.encoder_embed_dim
self.input_quantizer = GumbelVectorQuantizer(
dim=self.embed,
num_vars=cfg.latent_vars,
temp=cfg.latent_temp,
groups=cfg.latent_groups,
combine_groups=False,
vq_dim=vq_dim,
time_first=True,
weight_proj_depth=cfg.quantizer_depth,
weight_proj_factor=cfg.quantizer_factor,
)
self.project_inp = nn.Linear(vq_dim, cfg.encoder_embed_dim)
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
encoder_cls = TransformerEncoder
if cfg.layer_type == "conformer" and cfg.pos_enc_type in ["rel_pos", "rope"]:
encoder_cls = ConformerEncoder
self.encoder = encoder_cls(cfg)
self.layer_norm = LayerNorm(self.embed)
self.target_glu = None
if cfg.target_glu:
self.target_glu = nn.Sequential(
nn.Linear(final_dim, final_dim * 2), nn.GLU()
)
self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)
def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: Wav2Vec2Config, task=None):
"""Build a new model instance."""
return cls(cfg)
def apply_mask(
self,
x,
padding_mask,
mask_indices=None,
mask_channel_indices=None,
):
B, T, C = x.shape
if self.mask_channel_prob > 0 and self.mask_channel_before:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
if self.mask_prob > 0:
if mask_indices is None:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
require_same_masks=self.cfg.require_same_masks,
mask_dropout=self.cfg.mask_dropout,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x = index_put(x, mask_indices, self.mask_emb)
else:
mask_indices = None
if self.mask_channel_prob > 0 and not self.mask_channel_before:
if mask_channel_indices is None:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x = index_put(x, mask_channel_indices, 0)
return x, mask_indices
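        # Descriptive note (added): mask_indices is a boolean B x T tensor marking the
        # time steps whose features were replaced by self.mask_emb (or None when
        # mask_prob == 0); channel masking zeroes feature dimensions instead.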
def sample_negatives(self, y, num, padding_count=None):
if self.n_negatives == 0 and self.cross_sample_negatives == 0:
return y.new(0)
bsz, tsz, fsz = y.shape
y = y.view(-1, fsz) # BTC => (BxT)C
# FIXME: what happens if padding_count is specified?
cross_high = tsz * bsz
high = tsz - (padding_count or 0)
with torch.no_grad():
assert high > 1, f"{bsz,tsz,fsz}"
if self.n_negatives > 0:
tszs = (
buffered_arange(num)
.unsqueeze(-1)
.expand(-1, self.n_negatives)
.flatten()
)
neg_idxs = torch.randint(
low=0, high=high - 1, size=(bsz, self.n_negatives * num)
)
neg_idxs[neg_idxs >= tszs] += 1
if self.cross_sample_negatives > 0:
tszs = (
buffered_arange(num)
.unsqueeze(-1)
.expand(-1, self.cross_sample_negatives)
.flatten()
)
cross_neg_idxs = torch.randint(
low=0,
high=cross_high - 1,
size=(bsz, self.cross_sample_negatives * num),
)
cross_neg_idxs[cross_neg_idxs >= tszs] += 1
if self.n_negatives > 0:
neg_idxs = neg_idxs + (torch.arange(bsz).unsqueeze(1) * high)
else:
neg_idxs = cross_neg_idxs
if self.cross_sample_negatives > 0 and self.n_negatives > 0:
neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)
negs = y[neg_idxs.view(-1)]
negs = negs.view(
bsz, num, self.n_negatives + self.cross_sample_negatives, fsz
).permute(
2, 0, 1, 3
) # to NxBxTxC
return negs, neg_idxs
def compute_preds(self, x, y, negatives):
neg_is_pos = (y == negatives).all(-1)
y = y.unsqueeze(0)
targets = torch.cat([y, negatives], dim=0)
logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1)
logits = logits / self.logit_temp
logits = logits.type_as(x)
if is_xla_tensor(logits) or neg_is_pos.any():
if not hasattr(self, "_inftensor"):
fillval = -float(2**30)
self._inftensor = (
torch.tensor(fillval).to(x.device)
if is_xla_tensor(logits)
else float("-inf")
)
logits[1:] = index_put(logits[1:], neg_is_pos, self._inftensor)
return logits
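    # Descriptive note (added): compute_preds returns cosine-similarity logits of shape
    # (1 + num_negatives) x B x T; index 0 along the first dimension corresponds to the
    # true quantized target and the remaining rows to the sampled negatives.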
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
return torch.floor((input_length - kernel_size) / stride + 1)
conv_cfg_list = eval(self.cfg.conv_feature_layers)
for i in range(len(conv_cfg_list)):
input_lengths = _conv_out_length(
input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2]
)
return input_lengths.to(torch.long)
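    # Worked example (added, illustrative): with the default conv stack
    # [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2, a 1-second 16 kHz input
    # (16000 samples) is reduced to 3199 -> 1599 -> 799 -> 399 -> 199 -> 99 -> 49
    # frames, i.e. roughly one feature vector every 20 ms.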
def forward(
self,
source,
padding_mask=None,
mask=True,
features_only=False,
layer=None,
mask_indices=None,
mask_channel_indices=None,
padding_count=None,
):
if self.feature_grad_mult > 0:
features = self.feature_extractor(source)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = self.feature_extractor(source)
features_pen = features.float().pow(2).mean()
features = features.transpose(1, 2)
features = self.layer_norm(features)
unmasked_features = features.clone()
if padding_mask is not None and padding_mask.any():
input_lengths = (1 - padding_mask.long()).sum(-1)
# apply conv formula to get real output_lengths
output_lengths = self._get_feat_extract_output_lengths(input_lengths)
padding_mask = torch.zeros(
features.shape[:2], dtype=features.dtype, device=features.device
)
            # these two operations make sure that all values
# before the output lengths indices are attended to
padding_mask[
(
torch.arange(padding_mask.shape[0], device=padding_mask.device),
output_lengths - 1,
)
] = 1
padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
else:
padding_mask = None
time_steps_to_drop = features.size(1) % self.crop_seq_to_multiple
if time_steps_to_drop != 0:
features = features[:, :-time_steps_to_drop]
unmasked_features = unmasked_features[:, :-time_steps_to_drop]
if padding_mask is not None:
padding_mask = padding_mask[:, :-time_steps_to_drop]
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
unmasked_features = self.dropout_features(unmasked_features)
num_vars = None
code_ppl = None
prob_ppl = None
curr_temp = None
if self.input_quantizer:
q = self.input_quantizer(features, produce_targets=False)
features = q["x"]
num_vars = q["num_vars"]
code_ppl = q["code_perplexity"]
prob_ppl = q["prob_perplexity"]
curr_temp = q["temp"]
features = self.project_inp(features)
if mask:
x, mask_indices = self.apply_mask(
features,
padding_mask,
mask_indices=mask_indices,
mask_channel_indices=mask_channel_indices,
)
if not is_xla_tensor(x) and mask_indices is not None:
# tpu-comment: reducing the size in a dynamic way causes
# too many recompilations on xla.
y = unmasked_features[mask_indices].view(
unmasked_features.size(0), -1, unmasked_features.size(-1)
)
else:
y = unmasked_features
else:
x = features
y = unmasked_features
mask_indices = None
x, layer_results = self.encoder(x, padding_mask=padding_mask, layer=layer)
if features_only:
return {
"x": x,
"padding_mask": padding_mask,
"features": unmasked_features,
"layer_results": layer_results,
}
if self.quantizer:
if self.negatives_from_everywhere:
q = self.quantizer(unmasked_features, produce_targets=False)
y = q["x"]
num_vars = q["num_vars"]
code_ppl = q["code_perplexity"]
prob_ppl = q["prob_perplexity"]
curr_temp = q["temp"]
y = self.project_q(y)
negs, _ = self.sample_negatives(
y,
mask_indices[0].sum(),
padding_count=padding_count,
)
y = y[mask_indices].view(y.size(0), -1, y.size(-1))
else:
q = self.quantizer(y, produce_targets=False)
y = q["x"]
num_vars = q["num_vars"]
code_ppl = q["code_perplexity"]
prob_ppl = q["prob_perplexity"]
curr_temp = q["temp"]
y = self.project_q(y)
negs, _ = self.sample_negatives(
y,
y.size(1),
padding_count=padding_count,
)
if self.codebook_negatives > 0:
cb_negs = self.quantizer.sample_from_codebook(
y.size(0) * y.size(1), self.codebook_negatives
)
cb_negs = cb_negs.view(
self.codebook_negatives, y.size(0), y.size(1), -1
                    ) # order doesn't matter
cb_negs = self.project_q(cb_negs)
negs = torch.cat([negs, cb_negs], dim=0)
else:
y = self.project_q(y)
if self.negatives_from_everywhere:
negs, _ = self.sample_negatives(
unmasked_features,
y.size(1),
padding_count=padding_count,
)
negs = self.project_q(negs)
else:
negs, _ = self.sample_negatives(
y,
y.size(1),
padding_count=padding_count,
)
if not is_xla_tensor(x):
# tpu-comment: reducing the size in a dynamic way causes
# too many recompilations on xla.
x = x[mask_indices].view(x.size(0), -1, x.size(-1))
if self.target_glu:
y = self.target_glu(y)
negs = self.target_glu(negs)
x = self.final_proj(x)
x = self.compute_preds(x, y, negs)
result = {
"x": x,
"padding_mask": padding_mask,
"features_pen": features_pen,
}
if prob_ppl is not None:
result["prob_perplexity"] = prob_ppl
result["code_perplexity"] = code_ppl
result["num_vars"] = num_vars
result["temp"] = curr_temp
return result
def quantize(self, x):
assert self.quantizer is not None
x = self.feature_extractor(x)
x = x.transpose(1, 2)
x = self.layer_norm(x)
return self.quantizer.forward_idx(x)
def extract_features(self, source, padding_mask, mask=False, layer=None):
res = self.forward(
source, padding_mask, mask=mask, features_only=True, layer=layer
)
return res
def get_logits(self, net_output):
logits = net_output["x"]
logits = logits.transpose(0, 2)
logits = logits.reshape(-1, logits.size(-1))
return logits
def get_targets(self, sample, net_output, expand_steps=True):
x = net_output["x"]
return x.new_zeros(x.size(1) * x.size(2), dtype=torch.long)
def get_extra_losses(self, net_output):
pen = []
if "prob_perplexity" in net_output:
pen.append(
(net_output["num_vars"] - net_output["prob_perplexity"])
/ net_output["num_vars"]
)
if "features_pen" in net_output:
pen.append(net_output["features_pen"])
return pen
def remove_pretraining_modules(self, last_layer=None):
self.quantizer = None
self.project_q = None
self.target_glu = None
self.final_proj = None
if last_layer is not None:
self.encoder.layers = nn.ModuleList(
l for i, l in enumerate(self.encoder.layers) if i <= last_layer
)
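# Illustrative usage sketch (added comment, not part of the original file): extracting
# contextual features from raw audio with a randomly initialized model.
#   model = Wav2Vec2Model(Wav2Vec2Config())
#   wav = torch.randn(2, 16000)                    # B x samples, 16 kHz mono
#   out = model.extract_features(wav, padding_mask=None)
#   features = out["x"]                            # B x T' x encoder_embed_dim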
class ConvFeatureExtractionModel(nn.Module):
def __init__(
self,
conv_layers: List[Tuple[int, int, int]],
dropout: float = 0.0,
mode: str = "default",
conv_bias: bool = False,
):
super().__init__()
assert mode in {"default", "layer_norm"}
def block(
n_in,
n_out,
k,
stride,
is_layer_norm=False,
is_group_norm=False,
conv_bias=False,
):
def make_conv():
conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
nn.init.kaiming_normal_(conv.weight)
return conv
            assert not (
                is_layer_norm and is_group_norm
            ), "layer norm and group norm are exclusive"
if is_layer_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
nn.Sequential(
TransposeLast(),
Fp32LayerNorm(dim, elementwise_affine=True),
TransposeLast(),
),
nn.GELU(),
)
elif is_group_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
Fp32GroupNorm(dim, dim, affine=True),
nn.GELU(),
)
else:
return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
in_d = 1
self.conv_layers = nn.ModuleList()
for i, cl in enumerate(conv_layers):
assert len(cl) == 3, "invalid conv definition: " + str(cl)
(dim, k, stride) = cl
self.conv_layers.append(
block(
in_d,
dim,
k,
stride,
is_layer_norm=mode == "layer_norm",
is_group_norm=mode == "default" and i == 0,
conv_bias=conv_bias,
)
)
in_d = dim
def forward(self, x):
# BxT -> BxCxT
x = x.unsqueeze(1)
for conv in self.conv_layers:
x = conv(x)
return x
def make_conv_pos(e, k, g):
pos_conv = nn.Conv1d(
e,
e,
kernel_size=k,
padding=k // 2,
groups=g,
)
dropout = 0
std = math.sqrt((4 * (1.0 - dropout)) / (k * e))
nn.init.normal_(pos_conv.weight, mean=0, std=std)
nn.init.constant_(pos_conv.bias, 0)
pos_conv = nn.utils.weight_norm(pos_conv, name="weight", dim=2)
pos_conv = nn.Sequential(pos_conv, SamePad(k), nn.GELU())
return pos_conv
class TransformerEncoder(nn.Module):
def build_encoder_layer(self, args: Wav2Vec2Config):
if args.layer_type == "transformer":
layer = TransformerSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=self.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
activation_fn=args.activation_fn,
layer_norm_first=args.layer_norm_first,
)
elif args.layer_type == "conformer":
layer = ConformerWav2Vec2EncoderLayer(
embed_dim=self.embedding_dim,
ffn_embed_dim=args.encoder_ffn_embed_dim,
attention_heads=args.encoder_attention_heads,
dropout=args.dropout,
depthwise_conv_kernel_size=args.depthwise_conv_kernel_size,
activation_fn="swish",
attn_type=args.attn_type,
use_fp16=args.fp16,
pos_enc_type="abs",
)
layer = fsdp_wrap(layer)
if args.checkpoint_activations:
layer = checkpoint_wrapper(layer)
return layer
def __init__(self, args: Wav2Vec2Config):
super().__init__()
self.dropout = args.dropout
self.embedding_dim = args.encoder_embed_dim
self.required_seq_len_multiple = args.required_seq_len_multiple
pos_conv_depth = getattr(args, "pos_conv_depth", 1)
if pos_conv_depth > 1:
num_layers = args.pos_conv_depth
k = max(3, args.conv_pos // num_layers)
def make_conv_block(e, k, g, l):
return nn.Sequential(
*[
nn.Sequential(
nn.Conv1d(
e,
e,
kernel_size=k,
padding=k // 2,
groups=g,
),
SamePad(k),
TransposeLast(),
LayerNorm(e, elementwise_affine=False),
TransposeLast(),
nn.GELU(),
)
for _ in range(l)
]
)
self.pos_conv = make_conv_block(
self.embedding_dim, k, args.conv_pos_groups, num_layers
)
else:
self.pos_conv = make_conv_pos(
self.embedding_dim,
args.conv_pos,
args.conv_pos_groups,
)
self.layers = nn.ModuleList(
[self.build_encoder_layer(args) for _ in range(args.encoder_layers)]
)
self.layer_norm_first = args.layer_norm_first
self.layer_norm = LayerNorm(self.embedding_dim)
self.layerdrop = args.encoder_layerdrop
self.apply(init_bert_params)
def forward(self, x, padding_mask=None, layer=None):
x, layer_results = self.extract_features(x, padding_mask, layer)
if self.layer_norm_first and layer is None:
x = self.layer_norm(x)
return x, layer_results
def extract_features(
self,
x,
padding_mask=None,
tgt_layer=None,
min_layer=0,
):
if padding_mask is not None:
x = index_put(x, padding_mask, 0)
x_conv = self.pos_conv(x.transpose(1, 2))
x_conv = x_conv.transpose(1, 2)
x = x + x_conv
if not self.layer_norm_first:
x = self.layer_norm(x)
# pad to the sequence length dimension
x, pad_length = pad_to_multiple(
x, self.required_seq_len_multiple, dim=-2, value=0
)
if pad_length > 0 and padding_mask is None:
padding_mask = x.new_zeros((x.size(0), x.size(1)), dtype=torch.bool)
padding_mask[:, -pad_length:] = True
else:
padding_mask, _ = pad_to_multiple(
padding_mask, self.required_seq_len_multiple, dim=-1, value=True
)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
layer_results = []
r = None
for i, layer in enumerate(self.layers):
dropout_probability = np.random.random() if self.layerdrop > 0 else 1
if not self.training or (dropout_probability > self.layerdrop):
x, (z, lr) = layer(
x, self_attn_padding_mask=padding_mask, need_weights=False
)
if i >= min_layer:
layer_results.append((x, z, lr))
if i == tgt_layer:
r = x
break
if r is not None:
x = r
# T x B x C -> B x T x C
x = x.transpose(0, 1)
        # undo padding
if pad_length > 0:
x = x[:, :-pad_length]
def undo_pad(a, b, c):
return (
a[:-pad_length],
b[:-pad_length] if b is not None else b,
c[:-pad_length],
)
layer_results = [undo_pad(*u) for u in layer_results]
return x, layer_results
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.args.max_positions
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
class ConformerEncoder(TransformerEncoder):
def build_encoder_layer(self, args):
layer = ConformerWav2Vec2EncoderLayer(
embed_dim=self.embedding_dim,
ffn_embed_dim=args.encoder_ffn_embed_dim,
attention_heads=args.encoder_attention_heads,
dropout=args.dropout,
depthwise_conv_kernel_size=args.depthwise_conv_kernel_size,
activation_fn="swish",
attn_type=args.attn_type,
pos_enc_type=args.pos_enc_type,
use_fp16=args.fp16, # only used for rope
)
layer = fsdp_wrap(layer)
if args.checkpoint_activations:
layer = checkpoint_wrapper(layer)
return layer
def __init__(self, args):
super().__init__(args)
self.args = args
self.dropout = args.dropout
self.embedding_dim = args.encoder_embed_dim
self.pos_enc_type = args.pos_enc_type
max_source_positions = self.max_positions()
if self.pos_enc_type == "rel_pos":
self.embed_positions = RelPositionalEncoding(
max_source_positions, self.embedding_dim
)
elif self.pos_enc_type == "rope":
self.embed_positions = None
else:
raise Exception("Unsupported positional encoding type")
self.layers = nn.ModuleList(
[self.build_encoder_layer(args) for _ in range(args.encoder_layers)]
)
self.layer_norm_first = args.layer_norm_first
self.layer_norm = LayerNorm(self.embedding_dim)
self.layerdrop = args.encoder_layerdrop
self.apply(init_bert_params)
def extract_features(self, x, padding_mask=None, tgt_layer=None):
if padding_mask is not None:
x = index_put(x, padding_mask, 0)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# B X T X C here
position_emb = None
if self.pos_enc_type == "rel_pos":
position_emb = self.embed_positions(x)
if not self.layer_norm_first:
x = self.layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
layer_results = []
r = None
for i, layer in enumerate(self.layers):
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, z = layer(
x,
self_attn_padding_mask=padding_mask,
need_weights=False,
position_emb=position_emb,
)
if tgt_layer is not None:
layer_results.append((x, z))
if i == tgt_layer:
r = x
break
if r is not None:
x = r
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, layer_results
class TransformerSentenceEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
embedding_dim: float = 768,
ffn_embedding_dim: float = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
layer_norm_first: bool = False,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
# Initialize blocks
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = MultiheadAttention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
self_attention=True,
)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(self.activation_dropout)
self.dropout3 = nn.Dropout(dropout)
self.layer_norm_first = layer_norm_first
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim)
def forward(
self,
x: torch.Tensor,
self_attn_mask: torch.Tensor = None,
self_attn_padding_mask: torch.Tensor = None,
need_weights: bool = False,
att_args=None,
):
"""
LayerNorm is applied either before or after the self-attention/ffn
        modules similar to the original Transformer implementation.
"""
residual = x
if self.layer_norm_first:
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
attn_mask=self_attn_mask,
need_weights=False,
)
x = self.dropout1(x)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
layer_result = x
x = self.dropout3(x)
x = residual + x
else:
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=False,
)
x = self.dropout1(x)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
layer_result = x
x = self.dropout3(x)
x = residual + x
x = self.final_layer_norm(x)
return x, (attn, layer_result)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/wav2vec/wav2vec2.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn.functional as F
def pad_to_multiple(x, multiple, dim=-1, value=0):
# Inspired from https://github.com/lucidrains/local-attention/blob/master/local_attention/local_attention.py#L41
if x is None:
return None, 0
tsz = x.size(dim)
m = tsz / multiple
remainder = math.ceil(m) * multiple - tsz
if m.is_integer():
return x, 0
pad_offset = (0,) * (-1 - dim) * 2
return F.pad(x, (*pad_offset, 0, remainder), value=value), remainder
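# Minimal usage sketch (illustrative); assumes torch is installed alongside this module.
if __name__ == "__main__":
    import torch

    x = torch.zeros(2, 10)
    padded, remainder = pad_to_multiple(x, multiple=4, dim=-1)
    assert padded.shape == (2, 12) and remainder == 2  # 10 -> 12, padded by 2 on the right
    same, remainder = pad_to_multiple(x, multiple=5, dim=-1)
    assert same is x and remainder == 0  # already a multiple: returned unchanged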
| EXA-1-master | exa/libraries/fairseq/fairseq/models/wav2vec/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import logging
import math
from typing import Optional, Tuple
from omegaconf import II
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GumbelVectorQuantizer,
KmeansVectorQuantizer,
TransposeLast,
)
from fairseq.tasks import FairseqTask
from fairseq.utils import buffered_arange
logger = logging.getLogger(__name__)
AGGREGATOR_CHOICES = ChoiceEnum(["cnn", "gru"])
PROJECT_FEATURES_CHOICES = ChoiceEnum(["none", "same", "new"])
ACTIVATION_CHOICES = ChoiceEnum(["relu", "gelu"])
VQ_TYPE_CHOICES = ChoiceEnum(["none", "gumbel", "kmeans"])
@dataclass
class Wav2VecConfig(FairseqDataclass):
prediction_steps: int = field(
default=12, metadata={"help": "number of steps ahead to predict"}
)
sample_distance: Optional[int] = field(
default=None,
metadata={
"help": "sample distance from target. does not work properly with cross-sampling"
},
)
cross_sample_negatives: int = field(
default=0, metadata={"help": "num of cross sampled negatives"}
)
num_negatives: int = field(
default=10, metadata={"help": "num of sampled negatives"}
)
conv_feature_layers: str = field(
default="[(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1), (512, 1, 1)]",
metadata={
"help": "convolutional feature extraction layers [(dim, kernel_size, stride), ...]"
},
)
conv_aggregator_layers: str = field(
default="[(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)]",
metadata={
"help": "convolutional aggregator layers [(dim, kernel_size, stride), ...]"
},
)
dropout: float = field(
default=0.0, metadata={"help": "dropout to apply within the model"}
)
dropout_features: float = field(
default=0.0, metadata={"help": "dropout to apply to the features"}
)
dropout_agg: float = field(
default=0.0, metadata={"help": "dropout to apply after aggregation step"}
)
aggregator: AGGREGATOR_CHOICES = field(
default="cnn", metadata={"help": "type of aggregator to use"}
)
gru_dim: int = field(default=512, metadata={"help": "GRU dimensionality"})
no_conv_bias: bool = field(
default=False, metadata={"help": "if set, does not learn bias for conv layers"}
)
agg_zero_pad: bool = field(
default=False,
metadata={"help": "if set, zero pads in aggregator instead of repl pad"},
)
skip_connections_feat: bool = field(
default=False,
metadata={"help": "if set, adds skip connections to the feature extractor"},
)
skip_connections_agg: bool = field(
default=True,
metadata={"help": "if set, adds skip connections to the aggregator"},
)
residual_scale: float = field(
default=0.5, metadata={"help": "scales residual by sqrt(value)"}
)
log_compression: bool = field(
default=True,
metadata={"help": "if set, adds a log compression to feature extractor"},
)
balanced_classes: bool = field(
default=False,
metadata={"help": "if set, loss is scaled to balance for number of negatives"},
)
project_features: PROJECT_FEATURES_CHOICES = field(
default="none",
metadata={
"help": "if not none, features are projected using the (same or new) aggregator"
},
)
non_affine_group_norm: bool = field(
default=False, metadata={"help": "if set, group norm is not affine"}
)
offset: str = field(
default="auto",
metadata={
"help": "if set to 'auto', it is computed automatically from the receptive field, else set to int value"
},
)
activation: ACTIVATION_CHOICES = field(
default="relu",
        metadata={"help": "type of activation function to use"},
)
vq_type: VQ_TYPE_CHOICES = field(
default="none", metadata={"help": "which type of quantizer to use"}
)
vq_vars: int = field(
default=320,
metadata={"help": "project to this many vector quantized variables per group"},
)
vq_groups: int = field(
default=2, metadata={"help": "number of groups of latent variables"}
)
vq_dim: int = field(
default=0,
metadata={
"help": "uses this dimensionality for quantized vectors. 0 to use model dim // groups"
},
)
vq_depth: int = field(
default=1, metadata={"help": "number of layers for vq weight projection"}
)
combine_groups: bool = field(
default=False, metadata={"help": "if set, variables are shared among groups"}
)
vq_temp: Tuple[float, float, float] = field(
default=(2.0, 0.5, 0.999995),
metadata={
"help": "temperature for latent variable sampling with gumbel softmax. should be a tuple of 3 values (start, end, decay)"
},
)
vq_gamma: float = field(
default=0.25,
metadata={"help": "gamma parameter for kmeans style vector quantization"},
)
infonce: bool = II("criterion.infonce")
@register_model("wav2vec", dataclass=Wav2VecConfig)
class Wav2VecModel(BaseFairseqModel):
@classmethod
def build_model(cls, cfg: Wav2VecConfig, task: FairseqTask):
"""Build a new model instance."""
model = Wav2VecModel(cfg)
logger.info(model)
return model
def __init__(self, cfg: Wav2VecConfig):
super().__init__()
self.prediction_steps = cfg.prediction_steps
offset = cfg.offset
if cfg.activation == "relu":
activation = nn.ReLU()
elif cfg.activation == "gelu":
activation = nn.GELU()
else:
raise Exception("unknown activation " + cfg.activation)
feature_enc_layers = eval(cfg.conv_feature_layers)
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
log_compression=cfg.log_compression,
skip_connections=cfg.skip_connections_feat,
residual_scale=cfg.residual_scale,
non_affine_group_norm=cfg.non_affine_group_norm,
activation=activation,
)
embed = feature_enc_layers[-1][0]
self.vector_quantizer = None
if cfg.vq_type == "gumbel":
self.vector_quantizer = GumbelVectorQuantizer(
dim=embed,
num_vars=cfg.vq_vars,
temp=cfg.vq_temp,
groups=cfg.vq_groups,
combine_groups=cfg.combine_groups,
vq_dim=cfg.vq_dim if cfg.vq_dim > 0 else embed,
time_first=False,
activation=activation,
weight_proj_depth=cfg.vq_depth,
weight_proj_factor=2,
)
elif cfg.vq_type == "kmeans":
self.vector_quantizer = KmeansVectorQuantizer(
dim=embed,
num_vars=cfg.vq_vars,
groups=cfg.vq_groups,
combine_groups=cfg.combine_groups,
vq_dim=cfg.vq_dim if cfg.vq_dim > 0 else embed,
time_first=False,
gamma=cfg.vq_gamma,
)
else:
assert (
cfg.vq_type == "none" or cfg.vq_type is None
), "Unknown quantizer type"
if cfg.offset == "auto":
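            # walk the conv feature-extractor layers to estimate its receptive
            # field (rin, in samples) and total stride (jin); the prediction
            # offset is that receptive field expressed in output feature frames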
jin = 0
rin = 0
for _, k, stride in feature_enc_layers:
if rin == 0:
rin = k
rin = rin + (k - 1) * jin
if jin == 0:
jin = stride
else:
jin *= stride
offset = math.ceil(rin / jin)
offset = int(offset)
def make_aggregator():
if cfg.aggregator == "cnn":
agg_layers = eval(cfg.conv_aggregator_layers)
agg_dim = agg_layers[-1][0]
feature_aggregator = ConvAggegator(
conv_layers=agg_layers,
embed=embed,
dropout=cfg.dropout,
skip_connections=cfg.skip_connections_agg,
residual_scale=cfg.residual_scale,
non_affine_group_norm=cfg.non_affine_group_norm,
conv_bias=not cfg.no_conv_bias,
zero_pad=cfg.agg_zero_pad,
activation=activation,
)
elif cfg.aggregator == "gru":
agg_dim = cfg.gru_dim
feature_aggregator = nn.Sequential(
TransposeLast(),
nn.GRU(
input_size=embed,
hidden_size=agg_dim,
num_layers=1,
dropout=cfg.dropout,
),
TransposeLast(deconstruct_idx=0),
)
else:
raise Exception("unknown aggregator type " + cfg.aggregator)
return feature_aggregator, agg_dim
self.feature_aggregator, agg_dim = make_aggregator()
self.wav2vec_predictions = Wav2VecPredictionsModel(
in_dim=agg_dim,
out_dim=embed,
prediction_steps=cfg.prediction_steps,
n_negatives=cfg.num_negatives,
cross_sample_negatives=cfg.cross_sample_negatives,
sample_distance=cfg.sample_distance,
dropout=cfg.dropout,
offset=offset,
balanced_classes=cfg.balanced_classes,
infonce=cfg.infonce,
)
self.dropout_feats = nn.Dropout(p=cfg.dropout_features)
self.dropout_agg = nn.Dropout(p=cfg.dropout_agg)
if cfg.project_features == "none":
self.project_features = None
elif cfg.project_features == "same":
self.project_features = self.feature_aggregator
elif cfg.project_features == "new":
self.project_features, _ = make_aggregator()
def forward(self, source):
result = {}
features = self.feature_extractor(source)
if self.vector_quantizer:
q_res = self.vector_quantizer(features)
features = q_res["x"]
for k in q_res.keys():
if k != "x":
result[k] = q_res[k]
x = self.dropout_feats(features)
x = self.feature_aggregator(x)
x = self.dropout_agg(x)
if self.project_features is not None:
features = self.project_features(features)
x, targets = self.wav2vec_predictions(x, features)
result["cpc_logits"] = x
result["cpc_targets"] = targets
return result
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
def max_positions(self):
"""Maximum length supported by the model."""
return sys.maxsize
def get_logits(self, net_output):
logits = net_output["cpc_logits"]
return logits
def get_targets(self, sample, net_output):
t = net_output["cpc_targets"]
if isinstance(t, tuple):
t = t[0]
return t.contiguous()
def get_target_weights(self, targets, net_output):
targets = net_output["cpc_targets"]
if isinstance(targets, tuple) and targets[-1] is not None:
return targets[-1]
return None
def get_extra_losses(self, net_output):
loss = None
if "prob_perplexity" in net_output:
loss = net_output["num_vars"] - net_output["prob_perplexity"]
elif "kmeans_loss" in net_output:
loss = net_output["kmeans_loss"]
return loss
def norm_block(is_layer_norm, dim, affine=True):
if is_layer_norm:
mod = nn.Sequential(
TransposeLast(),
Fp32LayerNorm(dim, elementwise_affine=affine),
TransposeLast(),
)
else:
mod = Fp32GroupNorm(1, dim, affine=affine)
return mod
class ConvFeatureExtractionModel(nn.Module):
def __init__(
self,
conv_layers,
dropout,
log_compression,
skip_connections,
residual_scale,
non_affine_group_norm,
activation,
):
super().__init__()
def block(n_in, n_out, k, stride):
return nn.Sequential(
nn.Conv1d(n_in, n_out, k, stride=stride, bias=False),
nn.Dropout(p=dropout),
norm_block(
is_layer_norm=False, dim=n_out, affine=not non_affine_group_norm
),
activation,
)
in_d = 1
self.conv_layers = nn.ModuleList()
for dim, k, stride in conv_layers:
self.conv_layers.append(block(in_d, dim, k, stride))
in_d = dim
self.log_compression = log_compression
self.skip_connections = skip_connections
self.residual_scale = math.sqrt(residual_scale)
def forward(self, x):
# BxT -> BxCxT
x = x.unsqueeze(1)
for conv in self.conv_layers:
residual = x
x = conv(x)
if self.skip_connections and x.size(1) == residual.size(1):
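                # strided conv shrinks the time axis; subsample the residual so
                # the two tensors line up before adding them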
tsz = x.size(2)
r_tsz = residual.size(2)
residual = residual[..., :: r_tsz // tsz][..., :tsz]
x = (x + residual) * self.residual_scale
if self.log_compression:
x = x.abs()
x = x + 1
x = x.log()
return x
class ZeroPad1d(nn.Module):
def __init__(self, pad_left, pad_right):
super().__init__()
self.pad_left = pad_left
self.pad_right = pad_right
def forward(self, x):
return F.pad(x, (self.pad_left, self.pad_right))
class ConvAggegator(nn.Module):
def __init__(
self,
conv_layers,
embed,
dropout,
skip_connections,
residual_scale,
non_affine_group_norm,
conv_bias,
zero_pad,
activation,
):
super().__init__()
def block(n_in, n_out, k, stride):
# padding dims only really make sense for stride = 1
ka = k // 2
kb = ka - 1 if k % 2 == 0 else ka
pad = (
ZeroPad1d(ka + kb, 0) if zero_pad else nn.ReplicationPad1d((ka + kb, 0))
)
return nn.Sequential(
pad,
nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias),
nn.Dropout(p=dropout),
norm_block(False, n_out, affine=not non_affine_group_norm),
activation,
)
in_d = embed
self.conv_layers = nn.ModuleList()
self.residual_proj = nn.ModuleList()
for dim, k, stride in conv_layers:
if in_d != dim and skip_connections:
self.residual_proj.append(nn.Conv1d(in_d, dim, 1, bias=False))
else:
self.residual_proj.append(None)
self.conv_layers.append(block(in_d, dim, k, stride))
in_d = dim
self.conv_layers = nn.Sequential(*self.conv_layers)
self.skip_connections = skip_connections
self.residual_scale = math.sqrt(residual_scale)
def forward(self, x):
for rproj, conv in zip(self.residual_proj, self.conv_layers):
residual = x
x = conv(x)
if self.skip_connections:
if rproj is not None:
residual = rproj(residual)
x = (x + residual) * self.residual_scale
return x
class Wav2VecPredictionsModel(nn.Module):
def __init__(
self,
in_dim,
out_dim,
prediction_steps,
n_negatives,
cross_sample_negatives,
sample_distance,
dropout,
offset,
balanced_classes,
infonce,
):
super().__init__()
self.n_negatives = n_negatives
self.cross_sample_negatives = cross_sample_negatives
self.sample_distance = sample_distance
self.project_to_steps = nn.ConvTranspose2d(
in_dim, out_dim, (1, prediction_steps)
)
self.dropout = nn.Dropout(p=dropout)
self.offset = offset
self.balanced_classes = balanced_classes
self.infonce = infonce
def sample_negatives(self, y):
bsz, fsz, tsz = y.shape
y = y.transpose(0, 1) # BCT -> CBT
y = y.contiguous().view(fsz, -1) # CBT => C(BxT)
cross_high = tsz * bsz
high = tsz if self.sample_distance is None else min(tsz, self.sample_distance)
assert high > 1
neg_idxs = torch.randint(low=0, high=high, size=(bsz, self.n_negatives * tsz))
with torch.no_grad():
if self.n_negatives > 0:
tszs = (
buffered_arange(tsz)
.unsqueeze(-1)
.expand(-1, self.n_negatives)
.flatten()
)
neg_idxs = torch.randint(
low=0, high=high - 1, size=(bsz, self.n_negatives * tsz)
)
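                # bump indices that would hit the positive timestep so that the
                # positive is never drawn as its own negative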
neg_idxs[neg_idxs >= tszs] += 1
if self.cross_sample_negatives > 0:
tszs = (
buffered_arange(tsz)
.unsqueeze(-1)
.expand(-1, self.cross_sample_negatives)
.flatten()
)
cross_neg_idxs = torch.randint(
low=0,
high=cross_high - 1,
size=(bsz, self.cross_sample_negatives * tsz),
)
cross_neg_idxs[cross_neg_idxs >= tszs] += 1
if self.n_negatives > 0:
for i in range(1, bsz):
neg_idxs[i] += i * high
else:
neg_idxs = cross_neg_idxs
if self.cross_sample_negatives > 0 and self.n_negatives > 0:
neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)
negs = y[..., neg_idxs.view(-1)]
negs = negs.view(
fsz, bsz, self.n_negatives + self.cross_sample_negatives, tsz
).permute(
2, 1, 0, 3
) # to NxBxCxT
return negs
def forward(self, x, y):
x = x.unsqueeze(-1)
x = self.project_to_steps(x) # BxCxTxS
x = self.dropout(x)
negatives = self.sample_negatives(y)
y = y.unsqueeze(0)
targets = torch.cat([y, negatives], dim=0) # Copies x B x C x T
copies = targets.size(0)
bsz, dim, tsz, steps = x.shape
steps = min(steps, tsz - self.offset)
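        # flat score buffer: one entry per (step, valid timestep, copy, batch)
        # combination; later steps cover fewer timesteps, which the subtracted
        # triangular term accounts for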
predictions = x.new(
bsz * copies * (tsz - self.offset + 1) * steps
- ((steps + 1) * steps // 2) * copies * bsz
)
if self.infonce:
labels = predictions.new_full(
(predictions.shape[0] // copies,), 0, dtype=torch.long
)
else:
labels = torch.zeros_like(predictions)
weights = (
torch.full_like(labels, 1 / self.n_negatives)
if self.balanced_classes and not self.infonce
else None
)
start = end = 0
for i in range(steps):
offset = i + self.offset
end = start + (tsz - offset) * bsz * copies
if self.infonce:
predictions[start:end] = torch.einsum(
"bct,nbct->tbn", x[..., :-offset, i], targets[..., offset:]
).flatten()
else:
pos_num = (end - start) // copies
predictions[start:end] = torch.einsum(
"bct,nbct->nbt", x[..., :-offset, i], targets[..., offset:]
).flatten()
labels[start : start + pos_num] = 1.0
if weights is not None:
weights[start : start + pos_num] = 1.0
start = end
assert end == predictions.numel(), "{} != {}".format(end, predictions.numel())
if self.infonce:
predictions = predictions.view(-1, copies)
else:
if weights is not None:
labels = (labels, weights)
return predictions, labels
| EXA-1-master | exa/libraries/fairseq/fairseq/models/wav2vec/wav2vec.py |
import argparse
import logging
import torch.nn as nn
import fairseq.checkpoint_utils
from fairseq.models import (
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import TransformerDecoder
from fairseq.models.roberta import model as roberta
logger = logging.getLogger(__name__)
@register_model("roberta_enc_dec")
class RobertaEncDecModel(FairseqEncoderDecoderModel):
@staticmethod
def add_args(parser):
parser.add_argument(
"--pretrained-mlm-checkpoint",
default=None,
type=str,
metavar="PRETRAINED",
help="path to pretrained mlm checkpoint",
)
parser.add_argument(
"--pretrained-decoder", action="store_true", help="reload decoder"
)
parser.add_argument(
"--hack-layernorm-embedding",
action="store_true",
help="hack to reload old models trained with encoder-normalize-before=False (no equivalent to encoder-normalize-before=False and layernorm_embedding=False",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--share-all-embeddings",
action="store_true",
help="share encoder, decoder and output embeddings"
" (requires shared dictionary and embed dim)",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present
base_enc_dec_architecture(args)
if args.pretrained_mlm_checkpoint:
arg_overrides = None
if args.hack_layernorm_embedding:
arg_overrides = {"layernorm_embedding": False}
loaded = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[args.pretrained_mlm_checkpoint], arg_overrides=arg_overrides
)
([roberta_enc], _cfg, _task) = loaded
else:
# Do we need to edit untie_weights here ?
share_in_out = (
args.share_decoder_input_output_embed or args.share_all_embeddings
)
args.untie_weights_roberta = not share_in_out
if args.hack_layernorm_embedding:
args.layernorm_embedding = False
args.encoder_normalize_before = False
roberta_enc = roberta.RobertaModel.build_model(args, task)
return cls.from_roberta(roberta_enc, args, task.source_dictionary)
@staticmethod
def from_roberta(roberta_enc: roberta.RobertaModel, args, dictionary):
encoder = roberta_enc.encoder.sentence_encoder
vocab_size, embed_dim = encoder.embed_tokens.weight.shape
if args.share_all_embeddings:
lm_head = roberta_enc.encoder.lm_head
assert encoder.embed_tokens.weight is lm_head.weight, (
"Can't use --share-all-embeddings with a model "
"that was pretraiend with --untie-weights-roberta_enc"
)
else:
lm_head = roberta.RobertaLMHead(
embed_dim, vocab_size, roberta_enc.args.activation_fn
)
dec_embs = nn.Embedding(vocab_size, embed_dim, dictionary.pad())
if args.share_all_embeddings or args.share_decoder_input_output_embed:
            # Note: I wasn't able to use Embedding's _weight parameter to achieve this sharing.
dec_embs.weight = lm_head.weight
decoder = TransformerDecoder(
RobertaEncDecModel.read_args_from_roberta(roberta_enc.args),
dictionary,
dec_embs,
no_encoder_attn=False,
output_projection=lm_head,
)
if getattr(args, "pretrained_decoder", False):
decoder_dict = encoder.state_dict()
# TODO: hide setting "encoder_attn" layers behind a flag.
for k, w in list(decoder_dict.items()):
if ".self_attn" in k:
k_enc_attn = k.replace(".self_attn", ".encoder_attn")
decoder_dict[k_enc_attn] = w.detach().clone()
for k, w in lm_head.state_dict().items():
decoder_dict["output_projection." + k] = w
missing_keys, unexpected_keys = decoder.load_state_dict(
decoder_dict, strict=False
)
# missing_keys = [m for m in missing_keys if ".encoder_attn" not in m]
assert not missing_keys and not unexpected_keys, (
"Failed to load state dict. "
f"Missing keys: {missing_keys}. "
f"Unexpected keys: {unexpected_keys}."
)
if args.share_all_embeddings:
assert decoder.output_projection.weight is decoder.embed_tokens.weight
assert encoder.embed_tokens.weight is decoder.embed_tokens.weight
elif args.share_decoder_input_output_embed:
assert decoder.output_projection.weight is decoder.embed_tokens.weight
assert encoder.embed_tokens.weight is not decoder.embed_tokens.weight
else:
assert decoder.output_projection.weight is not decoder.embed_tokens.weight
assert encoder.embed_tokens.weight is not decoder.embed_tokens.weight
return RobertaEncDecModel(encoder, decoder)
@staticmethod
def read_args_from_roberta(roberta_args: argparse.Namespace):
# TODO: this would become easier if encoder/decoder where using a similar
# TransformerConfig object
args = argparse.Namespace(**vars(roberta_args))
attr_map = [
("encoder_attention_heads", "decoder_attention_heads"),
("encoder_embed_dim", "decoder_embed_dim"),
("encoder_embed_dim", "decoder_output_dim"),
("encoder_normalize_before", "decoder_normalize_before"),
("encoder_layers_to_keep", "decoder_layers_to_keep"),
("encoder_ffn_embed_dim", "decoder_ffn_embed_dim"),
("encoder_layerdrop", "decoder_layerdrop"),
("encoder_layers", "decoder_layers"),
("encoder_learned_pos", "decoder_learned_pos"),
# should this be set from here ?
("max_positions", "max_target_positions"),
]
for k1, k2 in attr_map:
setattr(args, k2, getattr(roberta_args, k1))
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = not roberta_args.untie_weights_roberta
return args
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
super().upgrade_state_dict_named(state_dict, name)
old_keys = list(state_dict.keys())
        # drop encoder.lm_head and remap legacy key names before upgrading children modules
for k in old_keys:
if k.startswith(prefix + "encoder.lm_head"):
state_dict.pop(k)
continue
new_k = k
new_k = new_k.replace(".sentence_encoder.", ".")
new_k = new_k.replace("decoder.lm_head.", "decoder.output_projection.")
if k == new_k:
continue
# print(k, "->", new_k)
state_dict[new_k] = state_dict.pop(k)
@register_model_architecture("roberta_enc_dec", "roberta_enc_dec")
def base_enc_dec_architecture(args):
args.hack_layernorm_embedding = getattr(args, "hack_layernorm_embedding", False)
args.pretrained_mlm_checkpoint = getattr(args, "pretrained_mlm_checkpoint", None)
args.pretrained_decoder = getattr(args, "pretrained_decoder", None)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
roberta.base_architecture(args)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/roberta/enc_dec.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
GottBERT: a pure German Language Model
"""
from fairseq.models import register_model
from .hub_interface import RobertaHubInterface
from .model import RobertaModel
@register_model("gottbert")
class GottbertModel(RobertaModel):
@classmethod
def hub_models(cls):
return {
"gottbert-base": "https://dl.gottbert.de/fairseq/models/gottbert-base.tar.gz",
}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="hf_byte_bpe",
bpe_vocab="vocab.json",
bpe_merges="merges.txt",
bpe_add_prefix_space=False,
**kwargs
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
bpe_vocab=bpe_vocab,
bpe_merges=bpe_merges,
bpe_add_prefix_space=bpe_add_prefix_space,
**kwargs,
)
return RobertaHubInterface(x["args"], x["task"], x["models"][0])
| EXA-1-master | exa/libraries/fairseq/fairseq/models/roberta/model_gottbert.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Unsupervised Cross-lingual Representation Learning at Scale
"""
from fairseq.models import register_model
from .hub_interface import RobertaHubInterface
from .model import RobertaModel
@register_model("xlmr")
class XLMRModel(RobertaModel):
@classmethod
def hub_models(cls):
return {
"xlmr.base": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz",
"xlmr.large": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz",
"xlmr.xl": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xl.tar.gz",
"xlmr.xxl": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xxl.tar.gz",
}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="sentencepiece",
**kwargs
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
**kwargs,
)
return RobertaHubInterface(x["args"], x["task"], x["models"][0])
| EXA-1-master | exa/libraries/fairseq/fairseq/models/roberta/model_xlmr.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
from typing import List
import torch
def align_bpe_to_words(roberta, bpe_tokens: torch.LongTensor, other_tokens: List[str]):
"""
Helper to align GPT-2 BPE to other tokenization formats (e.g., spaCy).
Args:
roberta (RobertaHubInterface): RoBERTa instance
bpe_tokens (torch.LongTensor): GPT-2 BPE tokens of shape `(T_bpe)`
other_tokens (List[str]): other tokens of shape `(T_words)`
Returns:
        List[List[int]]: for each of *other_tokens*, the indices of the corresponding *bpe_tokens*.
"""
assert bpe_tokens.dim() == 1
assert bpe_tokens[0] == 0
def clean(text):
return text.strip()
# remove whitespaces to simplify alignment
bpe_tokens = [roberta.task.source_dictionary.string([x]) for x in bpe_tokens]
bpe_tokens = [
clean(roberta.bpe.decode(x) if x not in {"<s>", ""} else x) for x in bpe_tokens
]
other_tokens = [clean(str(o)) for o in other_tokens]
# strip leading <s>
bpe_tokens = bpe_tokens[1:]
assert "".join(bpe_tokens) == "".join(other_tokens)
# create alignment from every word to a list of BPE tokens
alignment = []
bpe_toks = filter(lambda item: item[1] != "", enumerate(bpe_tokens, start=1))
j, bpe_tok = next(bpe_toks)
for other_tok in other_tokens:
bpe_indices = []
while True:
if other_tok.startswith(bpe_tok):
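                    # the current BPE token is a prefix of the remaining word:
                    # record it and advance to the next BPE token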
bpe_indices.append(j)
other_tok = other_tok[len(bpe_tok) :]
try:
j, bpe_tok = next(bpe_toks)
except StopIteration:
j, bpe_tok = None, None
elif bpe_tok.startswith(other_tok):
# other_tok spans multiple BPE tokens
bpe_indices.append(j)
bpe_tok = bpe_tok[len(other_tok) :]
other_tok = ""
else:
raise Exception('Cannot align "{}" and "{}"'.format(other_tok, bpe_tok))
if other_tok == "":
break
assert len(bpe_indices) > 0
alignment.append(bpe_indices)
assert len(alignment) == len(other_tokens)
return alignment
def align_features_to_words(roberta, features, alignment):
"""
Align given features to words.
Args:
roberta (RobertaHubInterface): RoBERTa instance
features (torch.Tensor): features to align of shape `(T_bpe x C)`
alignment: alignment between BPE tokens and words returned by
func:`align_bpe_to_words`.
"""
assert features.dim() == 2
bpe_counts = Counter(j for bpe_indices in alignment for j in bpe_indices)
assert bpe_counts[0] == 0 # <s> shouldn't be aligned
denom = features.new([bpe_counts.get(j, 1) for j in range(len(features))])
weighted_features = features / denom.unsqueeze(-1)
output = [weighted_features[0]]
largest_j = -1
for bpe_indices in alignment:
output.append(weighted_features[bpe_indices].sum(dim=0))
largest_j = max(largest_j, *bpe_indices)
for j in range(largest_j + 1, len(features)):
output.append(weighted_features[j])
output = torch.stack(output)
assert torch.all(torch.abs(output.sum(dim=0) - features.sum(dim=0)) < 1e-4)
return output
def spacy_nlp():
if getattr(spacy_nlp, "_nlp", None) is None:
try:
from spacy.lang.en import English
spacy_nlp._nlp = English()
except ImportError:
raise ImportError("Please install spacy with: pip install spacy")
return spacy_nlp._nlp
def spacy_tokenizer():
if getattr(spacy_tokenizer, "_tokenizer", None) is None:
try:
nlp = spacy_nlp()
spacy_tokenizer._tokenizer = nlp.Defaults.create_tokenizer(nlp)
except ImportError:
raise ImportError("Please install spacy with: pip install spacy")
return spacy_tokenizer._tokenizer
| EXA-1-master | exa/libraries/fairseq/fairseq/models/roberta/alignment_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .hub_interface import * # noqa
from .model import * # noqa
from .enc_dec import * # noqa
from .model_camembert import * # noqa
from .model_gottbert import * # noqa
from .model_xlmr import * # noqa
| EXA-1-master | exa/libraries/fairseq/fairseq/models/roberta/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
RoBERTa: A Robustly Optimized BERT Pretraining Approach.
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, TransformerEncoder
from fairseq.modules import LayerNorm
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import safe_getattr, safe_hasattr
from .hub_interface import RobertaHubInterface
logger = logging.getLogger(__name__)
@register_model("roberta")
class RobertaModel(FairseqEncoderModel):
@classmethod
def hub_models(cls):
return {
"roberta.base": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.base.tar.gz",
"roberta.large": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.tar.gz",
"roberta.large.mnli": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.mnli.tar.gz",
"roberta.large.wsc": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.wsc.tar.gz",
}
def __init__(self, args, encoder):
super().__init__(encoder)
self.args = args
# We follow BERT's random weight initialization
self.apply(init_bert_params)
self.classification_heads = nn.ModuleDict()
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--encoder-layers", type=int, metavar="L", help="num encoder layers"
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="H",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="F",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="A",
help="num encoder attention heads",
)
parser.add_argument(
"--activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--pooler-activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use for pooler layer",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN",
)
parser.add_argument(
"--pooler-dropout",
type=float,
metavar="D",
help="dropout probability in the masked_lm pooler layers",
)
parser.add_argument(
"--max-positions", type=int, help="number of positional embeddings to learn"
)
parser.add_argument(
"--load-checkpoint-heads",
action="store_true",
help="(re-)register and load heads when loading checkpoints",
)
parser.add_argument(
"--untie-weights-roberta",
action="store_true",
help="Untie weights between embeddings and classifiers in RoBERTa",
)
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument(
"--encoder-layerdrop",
type=float,
metavar="D",
default=0,
help="LayerDrop probability for encoder",
)
parser.add_argument(
"--encoder-layers-to-keep",
default=None,
help="which layers to *keep* when pruning as a comma-separated list",
)
        # args for "Training with Quantization Noise for Extreme Model Compression" (Fan*, Stock* et al., 2020)
parser.add_argument(
"--quant-noise-pq",
type=float,
metavar="D",
default=0,
help="iterative PQ quantization noise at training time",
)
parser.add_argument(
"--quant-noise-pq-block-size",
type=int,
metavar="D",
default=8,
help="block size of quantization noise at training time",
)
parser.add_argument(
"--quant-noise-scalar",
type=float,
metavar="D",
default=0,
help="scalar quantization noise and scalar quantization at training time",
)
# args for "Better Fine-Tuning by Reducing Representational Collapse" (Aghajanyan et al. 2020)
parser.add_argument(
"--spectral-norm-classification-head",
action="store_true",
default=False,
help="Apply spectral normalization on the classification head",
)
# args for Fully Sharded Data Parallel (FSDP) training
parser.add_argument(
"--min-params-to-wrap",
type=int,
metavar="D",
default=DEFAULT_MIN_PARAMS_TO_WRAP,
help=(
"minimum number of params for a layer to be wrapped with FSDP() when "
"training with --ddp-backend=fully_sharded. Smaller values will "
"improve memory efficiency, but may make torch.distributed "
"communication less efficient due to smaller input sizes. This option "
"is set to 0 (i.e., always wrap) when --checkpoint-activations or "
"--offload-activations are passed."
),
)
# args for AdaPruning
        # In short, it adds regularization for the multi-head attention modules and feed-forward networks
# For more details, please refer to the paper https://openreview.net/forum?id=_CMSV7FTzGI
parser.add_argument(
"--mha-reg-scale-factor",
type=float,
metavar="D",
default=0.0,
help="scaling factor for regularization term in adptive pruning, recommendation is 0.000375",
)
parser.add_argument(
"--ffn-reg-scale-factor",
type=float,
metavar="D",
default=0.0,
help="scaling factor for regularization term in adptive pruning, recommendation is 0.000375",
)
parser.add_argument(
"--mha-heads-to-keep",
type=int,
metavar="D",
default=-1,
help="number of heads to keep in each multi-head attention module, -1 means keeping all heads",
)
parser.add_argument(
"--ffn-blocks-to-remove",
type=int,
metavar="D",
default=-1,
help="number of feedforward blocks to remove in each transformer layer, -1 means keeping all ffn blocks",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
from omegaconf import OmegaConf
if OmegaConf.is_config(args):
OmegaConf.set_struct(args, False)
# make sure all arguments are present
base_architecture(args)
if not safe_hasattr(args, "max_positions"):
if not safe_hasattr(args, "tokens_per_sample"):
args.tokens_per_sample = task.max_positions()
args.max_positions = args.tokens_per_sample
encoder = RobertaEncoder(args, task.source_dictionary)
if OmegaConf.is_config(args):
OmegaConf.set_struct(args, True)
return cls(args, encoder)
def forward(
self,
src_tokens,
features_only=False,
return_all_hiddens=False,
classification_head_name=None,
**kwargs,
):
if classification_head_name is not None:
features_only = True
x, extra = self.encoder(src_tokens, features_only, return_all_hiddens, **kwargs)
if classification_head_name is not None:
x = self.classification_heads[classification_head_name](x)
return x, extra
def _get_adaptive_head_loss(self):
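        # L1 penalty over per-head q/k/v projection weights and biases, used by
        # adaptive pruning to push whole attention heads towards zero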
norm_loss = 0
scaling = float(self.args.mha_reg_scale_factor)
for layer in self.encoder.sentence_encoder.layers:
norm_loss_layer = 0
for i in range(layer.self_attn.num_heads):
start_idx = i * layer.self_attn.head_dim
end_idx = (i + 1) * layer.self_attn.head_dim
norm_loss_layer += scaling * (
torch.sum(
torch.abs(
layer.self_attn.q_proj.weight[
start_idx:end_idx,
]
)
)
+ torch.sum(
torch.abs(layer.self_attn.q_proj.bias[start_idx:end_idx])
)
)
norm_loss_layer += scaling * (
torch.sum(
torch.abs(
layer.self_attn.k_proj.weight[
start_idx:end_idx,
]
)
)
+ torch.sum(
torch.abs(layer.self_attn.k_proj.bias[start_idx:end_idx])
)
)
norm_loss_layer += scaling * (
torch.sum(
torch.abs(
layer.self_attn.v_proj.weight[
start_idx:end_idx,
]
)
)
+ torch.sum(
torch.abs(layer.self_attn.v_proj.bias[start_idx:end_idx])
)
)
norm_loss += norm_loss_layer
return norm_loss
def _get_adaptive_ffn_loss(self):
ffn_scale_factor = float(self.args.ffn_reg_scale_factor)
filter_loss = 0
for layer in self.encoder.sentence_encoder.layers:
filter_loss += torch.sum(
torch.abs(layer.fc1.weight * ffn_scale_factor)
) + torch.sum(torch.abs(layer.fc2.weight * ffn_scale_factor))
filter_loss += torch.sum(
torch.abs(layer.fc1.bias * ffn_scale_factor)
) + torch.sum(torch.abs(layer.fc2.bias * ffn_scale_factor))
return filter_loss
def get_normalized_probs(self, net_output, log_probs, sample=None):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output[0].float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
def register_classification_head(
self, name, num_classes=None, inner_dim=None, **kwargs
):
"""Register a classification head."""
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
"and inner_dim {} (prev: {})".format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = RobertaClassificationHead(
input_dim=self.args.encoder_embed_dim,
inner_dim=inner_dim or self.args.encoder_embed_dim,
num_classes=num_classes,
activation_fn=self.args.pooler_activation_fn,
pooler_dropout=self.args.pooler_dropout,
q_noise=self.args.quant_noise_pq,
qn_block_size=self.args.quant_noise_pq_block_size,
do_spectral_norm=self.args.spectral_norm_classification_head,
)
@property
def supported_targets(self):
return {"self"}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="gpt2",
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
**kwargs,
)
logger.info(x["args"])
return RobertaHubInterface(x["args"], x["task"], x["models"][0])
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
# rename decoder -> encoder before upgrading children modules
for k in list(state_dict.keys()):
if k.startswith(prefix + "decoder"):
new_k = prefix + "encoder" + k[len(prefix + "decoder") :]
state_dict[new_k] = state_dict[k]
del state_dict[k]
# rename emb_layer_norm -> layernorm_embedding
for k in list(state_dict.keys()):
if ".emb_layer_norm." in k:
new_k = k.replace(".emb_layer_norm.", ".layernorm_embedding.")
state_dict[new_k] = state_dict[k]
del state_dict[k]
# upgrade children modules
super().upgrade_state_dict_named(state_dict, name)
# Handle new classification heads present in the state dict.
current_head_names = (
[]
if not hasattr(self, "classification_heads")
else self.classification_heads.keys()
)
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + "classification_heads."):
continue
head_name = k[len(prefix + "classification_heads.") :].split(".")[0]
num_classes = state_dict[
prefix + "classification_heads." + head_name + ".out_proj.weight"
].size(0)
inner_dim = state_dict[
prefix + "classification_heads." + head_name + ".dense.weight"
].size(0)
if getattr(self.args, "load_checkpoint_heads", False):
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
logger.warning(
"deleting classification head ({}) from checkpoint "
"not present in current model: {}".format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes
!= self.classification_heads[head_name].out_proj.out_features
or inner_dim
!= self.classification_heads[head_name].dense.out_features
):
logger.warning(
"deleting classification head ({}) from checkpoint "
"with different dimensions than current model: {}".format(
head_name, k
)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, "classification_heads"):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + "classification_heads." + k not in state_dict:
logger.info("Overwriting " + prefix + "classification_heads." + k)
state_dict[prefix + "classification_heads." + k] = v
# adapt data2vec models
if (
"encoder._ema" in state_dict
and "encoder.lm_head.weight" not in state_dict
):
lm_state = self.encoder.lm_head.state_dict()
for k, v in lm_state.items():
state_dict["encoder.lm_head." + k] = v
for k in list(state_dict.keys()):
if k.startswith("encoder.regression_head") or k == "encoder._ema":
del state_dict[k]
class RobertaLMHead(nn.Module):
"""Head for masked language modeling."""
def __init__(self, embed_dim, output_dim, activation_fn, weight=None):
super().__init__()
self.dense = nn.Linear(embed_dim, embed_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.layer_norm = LayerNorm(embed_dim)
if weight is None:
weight = nn.Linear(embed_dim, output_dim, bias=False).weight
self.weight = weight
self.bias = nn.Parameter(torch.zeros(output_dim))
def forward(self, features, masked_tokens=None, **kwargs):
# Only project the masked tokens while training,
# saves both memory and computation
if masked_tokens is not None:
features = features[masked_tokens, :]
x = self.dense(features)
x = self.activation_fn(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = F.linear(x, self.weight) + self.bias
return x
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim,
inner_dim,
num_classes,
activation_fn,
pooler_dropout,
q_noise=0,
qn_block_size=8,
do_spectral_norm=False,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = apply_quant_noise_(
nn.Linear(inner_dim, num_classes), q_noise, qn_block_size
)
if do_spectral_norm:
if q_noise != 0:
raise NotImplementedError(
"Attempting to use Spectral Normalization with Quant Noise. This is not officially supported"
)
self.out_proj = torch.nn.utils.spectral_norm(self.out_proj)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class RobertaEncoder(FairseqEncoder):
"""RoBERTa encoder."""
def __init__(self, args, dictionary):
super().__init__(dictionary)
# set any missing default values
base_architecture(args)
self.args = args
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
embed_tokens = self.build_embedding(
len(dictionary), args.encoder_embed_dim, dictionary.pad()
)
self.sentence_encoder = self.build_encoder(args, dictionary, embed_tokens)
self.lm_head = self.build_lm_head(
embed_dim=args.encoder_embed_dim,
output_dim=len(dictionary),
activation_fn=args.activation_fn,
weight=(
self.sentence_encoder.embed_tokens.weight
if not args.untie_weights_roberta
else None
),
)
def build_embedding(self, vocab_size, embedding_dim, padding_idx):
return nn.Embedding(vocab_size, embedding_dim, padding_idx)
def build_encoder(self, args, dictionary, embed_tokens):
encoder = TransformerEncoder(args, dictionary, embed_tokens)
encoder.apply(init_bert_params)
return encoder
def build_lm_head(self, embed_dim, output_dim, activation_fn, weight):
return RobertaLMHead(embed_dim, output_dim, activation_fn, weight)
def forward(
self,
src_tokens,
features_only=False,
return_all_hiddens=False,
masked_tokens=None,
**unused,
):
"""
Args:
src_tokens (LongTensor): input tokens of shape `(batch, src_len)`
features_only (bool, optional): skip LM head and just return
features. If True, the output will be of shape
`(batch, src_len, embed_dim)`.
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
Returns:
tuple:
- the LM output of shape `(batch, src_len, vocab)`
- a dictionary of additional data, where 'inner_states'
is a list of hidden states. Note that the hidden
                  states have shape `(src_len, batch, embed_dim)`.
"""
x, extra = self.extract_features(
src_tokens, return_all_hiddens=return_all_hiddens
)
if not features_only:
x = self.output_layer(x, masked_tokens=masked_tokens)
return x, extra
def extract_features(self, src_tokens, return_all_hiddens=False, **kwargs):
encoder_out = self.sentence_encoder(
src_tokens,
return_all_hiddens=return_all_hiddens,
token_embeddings=kwargs.get("token_embeddings", None),
)
# T x B x C -> B x T x C
features = encoder_out["encoder_out"][0].transpose(0, 1)
inner_states = encoder_out["encoder_states"] if return_all_hiddens else None
return features, {"inner_states": inner_states}
def output_layer(self, features, masked_tokens=None, **unused):
return self.lm_head(features, masked_tokens)
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.args.max_positions
@register_model_architecture("roberta", "roberta")
def base_architecture(args):
args.encoder_layers = safe_getattr(args, "encoder_layers", 12)
args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 3072)
args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 12)
args.dropout = safe_getattr(args, "dropout", 0.1)
args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
args.activation_dropout = safe_getattr(args, "activation_dropout", 0.0)
args.pooler_dropout = safe_getattr(args, "pooler_dropout", 0.0)
args.max_source_positions = safe_getattr(args, "max_positions", 512)
args.no_token_positional_embeddings = safe_getattr(
args, "no_token_positional_embeddings", False
)
# BERT has a few structural differences compared to the original Transformer
args.encoder_learned_pos = safe_getattr(args, "encoder_learned_pos", True)
args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", True)
args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", True)
args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
args.encoder_normalize_before = safe_getattr(
args, "encoder_normalize_before", False
)
args.pooler_activation_fn = safe_getattr(args, "pooler_activation_fn", "tanh")
args.untie_weights_roberta = safe_getattr(args, "untie_weights_roberta", False)
# Adaptive input config
args.adaptive_input = safe_getattr(args, "adaptive_input", False)
# LayerDrop config
args.encoder_layerdrop = safe_getattr(args, "encoder_layerdrop", 0.0)
args.encoder_layers_to_keep = safe_getattr(args, "encoder_layers_to_keep", None)
# Quantization noise config
args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0)
# R4F config
args.spectral_norm_classification_head = safe_getattr(
args, "spectral_norm_classification_head", False
)
@register_model_architecture("roberta", "roberta_prenorm")
def roberta_prenorm_architecture(args):
args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", False)
args.encoder_normalize_before = safe_getattr(args, "encoder_normalize_before", True)
base_architecture(args)
@register_model_architecture("roberta", "roberta_base")
def roberta_base_architecture(args):
base_architecture(args)
@register_model_architecture("roberta", "roberta_large")
def roberta_large_architecture(args):
args.encoder_layers = safe_getattr(args, "encoder_layers", 24)
args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 16)
base_architecture(args)
@register_model_architecture("roberta", "xlm")
def xlm_architecture(args):
args.encoder_layers = safe_getattr(args, "encoder_layers", 16)
args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 1280)
args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 1280 * 4)
args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 16)
base_architecture(args)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/roberta/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
CamemBERT: a Tasty French Language Model
"""
from fairseq.models import register_model
from .hub_interface import RobertaHubInterface
from .model import RobertaModel
@register_model("camembert")
class CamembertModel(RobertaModel):
@classmethod
def hub_models(cls):
return {
"camembert": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz",
"camembert.v0": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz",
"camembert-base": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz",
"camembert-large": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-large.tar.gz",
"camembert-base-ccnet": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-ccnet.tar.gz",
"camembert-base-ccnet-4gb": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-ccnet-4gb.tar.gz",
"camembert-base-wikipedia-4gb": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-wikipedia-4gb.tar.gz",
"camembert-base-oscar-4gb": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-oscar-4gb.tar.gz",
}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="sentencepiece",
**kwargs
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
**kwargs,
)
return RobertaHubInterface(x["args"], x["task"], x["models"][0])
| EXA-1-master | exa/libraries/fairseq/fairseq/models/roberta/model_camembert.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import encoders
class RobertaHubInterface(nn.Module):
"""A simple PyTorch Hub interface to RoBERTa.
Usage: https://github.com/pytorch/fairseq/tree/main/examples/roberta
"""
def __init__(self, cfg, task, model):
super().__init__()
self.cfg = cfg
self.task = task
self.model = model
self.bpe = encoders.build_bpe(cfg.bpe)
# this is useful for determining the device
self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float))
@property
def device(self):
return self._float_tensor.device
def encode(
self, sentence: str, *addl_sentences, no_separator=False
) -> torch.LongTensor:
"""
BPE-encode a sentence (or multiple sentences).
Every sequence begins with a beginning-of-sentence (`<s>`) symbol.
Every sentence ends with an end-of-sentence (`</s>`) and we use an
extra end-of-sentence (`</s>`) as a separator.
Example (single sentence): `<s> a b c </s>`
Example (sentence pair): `<s> d e f </s> </s> 1 2 3 </s>`
The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE
requires leading spaces. For example::
>>> roberta.encode('Hello world').tolist()
[0, 31414, 232, 2]
>>> roberta.encode(' world').tolist()
[0, 232, 2]
>>> roberta.encode('world').tolist()
[0, 8331, 2]
"""
bpe_sentence = "<s> " + self.bpe.encode(sentence) + " </s>"
for s in addl_sentences:
bpe_sentence += " </s>" if not no_separator else ""
bpe_sentence += " " + self.bpe.encode(s) + " </s>"
tokens = self.task.source_dictionary.encode_line(
bpe_sentence, append_eos=False, add_if_not_exist=False
)
return tokens.long()
def decode(self, tokens: torch.LongTensor):
assert tokens.dim() == 1
tokens = tokens.numpy()
if tokens[0] == self.task.source_dictionary.bos():
tokens = tokens[1:] # remove <s>
eos_mask = tokens == self.task.source_dictionary.eos()
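        # two consecutive </s> tokens mark a sentence boundary (see encode())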
doc_mask = eos_mask[1:] & eos_mask[:-1]
sentences = np.split(tokens, doc_mask.nonzero()[0] + 1)
sentences = [
self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences
]
if len(sentences) == 1:
return sentences[0]
return sentences
def extract_features(
self, tokens: torch.LongTensor, return_all_hiddens: bool = False
) -> torch.Tensor:
if tokens.dim() == 1:
tokens = tokens.unsqueeze(0)
if tokens.size(-1) > self.model.max_positions():
raise ValueError(
"tokens exceeds maximum length: {} > {}".format(
tokens.size(-1), self.model.max_positions()
)
)
features, extra = self.model(
tokens.to(device=self.device),
features_only=True,
return_all_hiddens=return_all_hiddens,
)
if return_all_hiddens:
# convert from T x B x C -> B x T x C
inner_states = extra["inner_states"]
return [inner_state.transpose(0, 1) for inner_state in inner_states]
else:
return features # just the last layer's features
def register_classification_head(
self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs
):
self.model.register_classification_head(
name, num_classes=num_classes, embedding_size=embedding_size, **kwargs
)
def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False):
features = self.extract_features(tokens.to(device=self.device))
logits = self.model.classification_heads[head](features)
if return_logits:
return logits
return F.log_softmax(logits, dim=-1)
def extract_features_aligned_to_words(
self, sentence: str, return_all_hiddens: bool = False
) -> torch.Tensor:
"""Extract RoBERTa features, aligned to spaCy's word-level tokenizer."""
from fairseq.models.roberta import alignment_utils
from spacy.tokens import Doc
nlp = alignment_utils.spacy_nlp()
tokenizer = alignment_utils.spacy_tokenizer()
# tokenize both with GPT-2 BPE and spaCy
bpe_toks = self.encode(sentence)
spacy_toks = tokenizer(sentence)
spacy_toks_ws = [t.text_with_ws for t in tokenizer(sentence)]
alignment = alignment_utils.align_bpe_to_words(self, bpe_toks, spacy_toks_ws)
# extract features and align them
features = self.extract_features(
bpe_toks, return_all_hiddens=return_all_hiddens
)
features = features.squeeze(0)
aligned_feats = alignment_utils.align_features_to_words(
self, features, alignment
)
# wrap in spaCy Doc
doc = Doc(
nlp.vocab,
words=["<s>"] + [x.text for x in spacy_toks] + ["</s>"],
spaces=[True]
+ [x.endswith(" ") for x in spacy_toks_ws[:-1]]
+ [True, False],
)
assert len(doc) == aligned_feats.size(0)
doc.user_token_hooks["vector"] = lambda token: aligned_feats[token.i]
return doc
def fill_mask(self, masked_input: str, topk: int = 5):
masked_token = "<mask>"
assert (
masked_token in masked_input and masked_input.count(masked_token) == 1
), "Please add one {0} token for the input, eg: 'He is a {0} guy'".format(
masked_token
)
text_spans = masked_input.split(masked_token)
text_spans_bpe = (
(" {0} ".format(masked_token))
.join([self.bpe.encode(text_span.rstrip()) for text_span in text_spans])
.strip()
)
tokens = self.task.source_dictionary.encode_line(
"<s> " + text_spans_bpe + " </s>",
append_eos=False,
add_if_not_exist=False,
)
masked_index = (tokens == self.task.mask_idx).nonzero(as_tuple=False)
if tokens.dim() == 1:
tokens = tokens.unsqueeze(0)
with utils.model_eval(self.model):
features, extra = self.model(
tokens.long().to(device=self.device),
features_only=False,
return_all_hiddens=False,
)
logits = features[0, masked_index, :].squeeze()
prob = logits.softmax(dim=0)
values, index = prob.topk(k=topk, dim=0)
topk_predicted_token_bpe = self.task.source_dictionary.string(index)
topk_filled_outputs = []
for index, predicted_token_bpe in enumerate(
topk_predicted_token_bpe.split(" ")
):
predicted_token = self.bpe.decode(predicted_token_bpe)
# Quick hack to fix https://github.com/pytorch/fairseq/issues/1306
if predicted_token_bpe.startswith("\u2581"):
predicted_token = " " + predicted_token
if " {0}".format(masked_token) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(
" {0}".format(masked_token), predicted_token
),
values[index].item(),
predicted_token,
)
)
else:
topk_filled_outputs.append(
(
masked_input.replace(masked_token, predicted_token),
values[index].item(),
predicted_token,
)
)
return topk_filled_outputs
def disambiguate_pronoun(self, sentence: str) -> bool:
"""
Usage::
>>> disambiguate_pronoun('The _trophy_ would not fit in the brown suitcase because [it] was too big.')
True
>>> disambiguate_pronoun('The trophy would not fit in the brown suitcase because [it] was too big.')
'The trophy'
"""
assert hasattr(
self.task, "disambiguate_pronoun"
), "roberta.disambiguate_pronoun() requires a model trained with the WSC task."
with utils.model_eval(self.model):
return self.task.disambiguate_pronoun(
self.model, sentence, use_cuda=self.device.type == "cuda"
)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/roberta/hub_interface.py |
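# Usage sketch for the RobertaHubInterface methods above: a minimal, hedged example
# assuming the public `pytorch/fairseq` torch.hub entry point and the `roberta.base`
# checkpoint can be downloaded; it is not part of the repository file above.
import torch

roberta = torch.hub.load("pytorch/fairseq", "roberta.base")
roberta.eval()

# fill_mask() requires exactly one <mask> token and returns (filled_text, score, token) tuples.
print(roberta.fill_mask("The capital of France is <mask>.", topk=3))

# extract_features() returns the last layer's features with shape (1, num_bpe_tokens, 768).
tokens = roberta.encode("Hello world!")
features = roberta.extract_features(tokens)
print(features.shape)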
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
# automatically import any Python files in the models/huggingface/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("fairseq.models.huggingface." + model_name)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/huggingface/__init__.py |
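# Standalone sketch of the auto-import pattern used in the __init__.py above: scan a
# package directory and import every module so that registration decorators run at
# import time. The package name and function below are illustrative only; nothing
# here is specific to fairseq.
import importlib
import os


def import_all_models(package_dir: str, package_name: str) -> None:
    for file in os.listdir(package_dir):
        if file.startswith(("_", ".")):
            continue
        path = os.path.join(package_dir, file)
        if file.endswith(".py") or os.path.isdir(path):
            module_name = file[: -len(".py")] if file.endswith(".py") else file
            importlib.import_module(package_name + "." + module_name)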
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from typing import Dict, List, Optional
import torch
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
logger = logging.getLogger(__name__)
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model("hf_gpt2")
class HuggingFaceGPT2LanguageModel(FairseqLanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--embed-dim', type=int, metavar='N',
help='embedding dimension')
parser.add_argument('--num-attention-heads', type=int, metavar='N',
help='num attention heads')
parser.add_argument('--num-layers', type=int, metavar='N',
help='num layers')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability for all fully connected layers '
'in the embeddings, encoder, and pooler')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
default_architecture(args)
return cls(HuggingFaceGPT2Decoder(args, task))
class HuggingFaceGPT2Decoder(FairseqIncrementalDecoder):
def __init__(self, args, task):
try:
from transformers import GPT2Config, GPT2LMHeadModel
except ImportError:
raise ImportError(
"\n\nPlease install huggingface/transformers with:"
"\n\n pip install transformers"
)
super().__init__(task.target_dictionary)
config = GPT2Config(
vocab_size=len(task.target_dictionary),
n_positions=args.max_target_positions + 1,
n_ctx=args.max_target_positions,
n_embd=args.embed_dim,
n_layer=args.num_layers,
n_head=args.num_attention_heads,
resid_pdrop=args.dropout,
embd_pdrop=args.dropout,
attn_pdrop=args.attention_dropout,
layer_norm_epsilon=1e-6,
)
self.model = GPT2LMHeadModel(config)
# set zero embedding for padding symbol
self.pad_idx = task.target_dictionary.pad()
self.model.transformer.wte.weight.data[self.pad_idx].zero_()
self.model.transformer.wpe.weight.data[0].zero_()
def forward(
self,
prev_output_tokens,
src_lengths=None,
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
encoder_out=None,
):
features = self.extract_features(prev_output_tokens, incremental_state)
lm_logits = self.model.lm_head(features)
return (lm_logits,)
def extract_features(
self,
prev_output_tokens,
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
):
if incremental_state:
            past = self.get_incremental_state(incremental_state, "past")
else:
past = None
# don't attend to padding symbols
attention_mask = prev_output_tokens.ne(self.pad_idx).int()
# set position ids to exclude padding symbols
position_ids = attention_mask * (
torch.arange(1, 1 + prev_output_tokens.size(1))
.to(prev_output_tokens)
.repeat(prev_output_tokens.size(0), 1)
)
outputs = self.model.transformer(
input_ids=prev_output_tokens,
past=past,
attention_mask=attention_mask,
position_ids=position_ids,
)
last_hidden_states = outputs[0]
if incremental_state:
self.set_incremental_state(incremental_state, "past", outputs[1])
return last_hidden_states
def max_positions(self):
return self.model.config.n_positions - 1
@register_model_architecture("hf_gpt2", "hf_gpt2")
def default_architecture(args):
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
args.embed_dim = getattr(args, "embed_dim", 768)
args.num_attention_heads = getattr(args, "num_attention_heads", 12)
args.num_layers = getattr(args, "num_layers", 12)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
@register_model_architecture("hf_gpt2", "hf_gpt2_medium")
def hf_gpt2_medium(args):
args.embed_dim = getattr(args, "embed_dim", 1024)
args.num_attention_heads = getattr(args, "num_attention_heads", 16)
args.num_layers = getattr(args, "num_layers", 24)
default_architecture(args)
@register_model_architecture("hf_gpt2", "hf_gpt2_large")
def hf_gpt2_large(args):
args.embed_dim = getattr(args, "embed_dim", 1280)
args.num_attention_heads = getattr(args, "num_attention_heads", 20)
args.num_layers = getattr(args, "num_layers", 36)
default_architecture(args)
@register_model_architecture("hf_gpt2", "hf_gpt2_xl")
def hf_gpt2_xl(args):
args.embed_dim = getattr(args, "embed_dim", 1600)
args.num_attention_heads = getattr(args, "num_attention_heads", 25)
args.num_layers = getattr(args, "num_layers", 48)
default_architecture(args)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/huggingface/hf_gpt2.py |
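# Self-contained sketch of the getattr-based defaulting used by the architecture
# functions above (default_architecture, hf_gpt2_medium, ...): a named preset sets a
# few attributes first, then the base function fills in only the attributes that are
# still missing, so user-supplied values always take precedence over the defaults.
from argparse import Namespace


def base_arch(args):
    args.embed_dim = getattr(args, "embed_dim", 768)
    args.num_layers = getattr(args, "num_layers", 12)


def big_arch(args):
    args.embed_dim = getattr(args, "embed_dim", 1024)
    args.num_layers = getattr(args, "num_layers", 24)
    base_arch(args)


args = Namespace(num_layers=6)  # e.g. overridden on the command line
big_arch(args)
print(args.embed_dim, args.num_layers)  # 1024 6: the preset fills embed_dim, the user value wins for num_layers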
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .speech_dlm import * # noqa
from .hub_interface import * # noqa
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_dlm/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Optional
from fairseq import utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding
from .modules.speech_dlm_decoder import CrossChannelTransformerDecoder
from omegaconf import II
DEFAULT_MAX_TARGET_POSITIONS = 1024
logger = logging.getLogger(__name__)
@dataclass
class SpeechDLMConfig(FairseqDataclass):
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="relu", metadata={"help": "activation function to use"}
)
dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
attention_dropout: float = field(
default=0.0, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN."}
)
relu_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN."}
)
decoder_embed_dim: int = field(
default=512, metadata={"help": "decoder embedding dimension"}
)
decoder_output_dim: int = field(
default=512, metadata={"help": "decoder output dimension"}
)
decoder_input_dim: int = field(
default=512, metadata={"help": "decoder input dimension"}
)
decoder_ffn_embed_dim: int = field(
default=2048, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num decoder layers"})
decoder_cross_layers: int = field(
default=-1, metadata={"help": "num self cross attention decoder layers"}
)
decoder_attention_heads: int = field(
default=8, metadata={"help": "num decoder attention heads"}
)
decoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each decoder block"}
)
no_decoder_final_norm: bool = field(
default=False,
metadata={"help": "don't add an extra layernorm after the last decoder block"},
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "LayerDrop probability for decoder"}
)
decoder_layers_to_keep: Optional[str] = field(
default=None,
metadata={
"help": "which layers to *keep* when pruning as a comma-separated list"
},
)
layernorm_embedding: bool = field(
default=False, metadata={"help": "add layernorm to embedding"}
)
no_scale_embedding: bool = field(
default=False, metadata={"help": "if True, dont scale embeddings"}
)
checkpoint_activations: bool = field(
default=False, metadata={"help": "checkpoint activations at each layer"}
)
offload_activations: bool = field(
default=False,
metadata={"help": "move checkpointed activations to CPU after they are used."},
)
quant_noise_pq: float = field(
default=0.0,
metadata={"help": "iterative PQ quantization noise at training time"},
)
quant_noise_pq_block_size: int = field(
default=8,
metadata={"help": "block size of quantization noise at training time"},
)
# TODO common var add to parent
quant_noise_scalar: float = field(
default=0.0,
metadata={
"help": "scalar quantization noise and scalar quantization at training time"
},
)
add_bos_token: bool = II("task.add_bos_token")
tokens_per_sample: int = II("task.tokens_per_sample")
max_target_positions: Optional[int] = II("task.max_target_positions")
tpu: bool = II("common.tpu")
duration_prediction: str = II("task.duration_prediction")
delayed_duration_target: str = II("task.delayed_duration_target")
main_and_cross_weights: str = II("criterion.main_and_cross_weights")
@register_model("speech_dlm", dataclass=SpeechDLMConfig)
class SpeechDLM(FairseqLanguageModel):
"""Spoken Unit-based Dialogue Language Model model (SpeechDLM) as described
in the paper: https://arxiv.org/pdf/2203.16502.pdf
"""
def __init__(self, decoder):
super().__init__(decoder)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_lm_architecture(args)
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if args.decoder_cross_layers < 0:
args.decoder_cross_layers = args.decoder_layers
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
# Assert all dictionary to be the same
assert all(
task.source_dictionaries[channel] == task.source_dictionary
for channel in task.channels
), "Source dictionaries of all channels are expected to be the same!!!"
assert all(
task.target_dictionaries[channel] == task.target_dictionary
for channel in task.channels
), "Target dictionaries of all channels are expected to be the same!!!"
# Build the unit embeddings
embed_tokens = cls.build_embedding(
args, task.source_dictionary, args.decoder_input_dim
)
decoder = CrossChannelTransformerDecoder(
args,
task.target_dictionary,
embed_tokens,
channels=task.channels,
no_encoder_attn=True,
)
return cls(decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
embed_tokens = Embedding(len(dictionary), embed_dim, dictionary.pad())
return embed_tokens
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
**kwargs,
):
"""
Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model
file. Downloads and caches the pre-trained model file if needed.
The base implementation returns a
:class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to
generate translations or sample from language models. The underlying
:class:`~fairseq.models.FairseqModel` can be accessed via the
*generator.models* attribute.
        This function returns a :class:`MultichannelGeneratorHubInterface` object,
which allows generation in multiple channels with a multichannel model.
Args:
model_name_or_path (str): either the name of a pre-trained model to
load or a path/URL to a pre-trained model state dict
checkpoint_file (str, optional): colon-separated list of checkpoint
files in the model archive to ensemble (default: 'model.pt')
data_name_or_path (str, optional): point args.data to the archive
at the given path/URL. Can start with '.' or './' to reuse the
model archive path.
"""
from fairseq import hub_utils
from .hub_interface import MultichannelGeneratorHubInterface
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
**kwargs,
)
logger.info(x["args"])
return MultichannelGeneratorHubInterface(x["args"], x["task"], x["models"])
@property
def supported_targets(self):
return {"next", "edge", "duration"}
def base_lm_architecture(args):
# backward compatibility for older model checkpoints
if hasattr(args, "decoder_final_norm"):
args.no_decoder_final_norm = not args.decoder_final_norm
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_cross_layers = getattr(args, "decoder_cross_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
args.add_bos_token = getattr(args, "add_bos_token", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# Model training is not stable without this
args.decoder_normalize_before = True
args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
@register_model_architecture("speech_dlm", "speech_dlm_big")
def speech_dlm_big(args):
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_cross_layers = getattr(args, "decoder_cross_layers", 12)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
base_lm_architecture(args)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_dlm/speech_dlm.py |
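# Minimal usage sketch of the SpeechDLM.from_pretrained() entry point defined above.
# The checkpoint directory, file name and channel names ("unitA"/"unitB") are
# hypothetical placeholders; substitute those shipped with an actual SpeechDLM/dGSLM
# release.
from fairseq.models.speech_dlm import SpeechDLM

speech_dlm = SpeechDLM.from_pretrained(
    model_name_or_path="/path/to/speech_dlm_checkpoints",  # hypothetical path
    checkpoint_file="speech_dlm_base.pt",  # hypothetical file name
    data_name_or_path="/path/to/speech_dlm_data",  # hypothetical path
)
speech_dlm.eval()

# Inputs are dictionaries over channels of space-separated unit strings.
prompt = {
    "unitA": "7 376 376 133 178",
    "unitB": "7 7 7 7 7",
}
continuation = speech_dlm.sample(prompt, sampling=True, sampling_topk=50, max_len_b=30)
print(continuation)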
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
from typing import Any, Dict, Iterator, List
import torch
from fairseq import utils
from omegaconf import open_dict
from torch import nn
from tqdm import tqdm
from fairseq.hub_utils import GeneratorHubInterface
logger = logging.getLogger(__name__)
class MultichannelGeneratorHubInterface(GeneratorHubInterface):
"""Pytorch Hub interface for generating sequences from a pre-trained
multichannel language model.
"""
def __init__(self, cfg, task, models):
super().__init__(cfg, task, models)
self.cfg = cfg
self.task = task
self.models = nn.ModuleList(models)
self.src_dicts = task.source_dictionaries
self.tgt_dicts = task.target_dictionaries
self.channels = task.channels
# optimize model for generation
for model in self.models:
model.prepare_for_inference_(cfg)
def sample(
self,
sentences: List[Dict[str, str]],
beam: int = 1,
verbose: bool = False,
**kwargs
) -> List[str]:
if isinstance(sentences, dict):
return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0]
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs)
return [self.decode(hypos[0]["tokens"]) for hypos in batched_hypos]
def score(self, sentences: List[Dict[str, str]], **kwargs):
raise NotImplementedError(
"MultichannelGeneratorHubInterface doesn't support score() method"
)
def generate(
self,
tokenized_sentences: List[Dict[str, torch.LongTensor]],
beam: int = 5,
verbose: bool = False,
skip_invalid_size_inputs=False,
inference_step_args=None,
**kwargs
) -> List[List[Dict[str, torch.Tensor]]]:
if isinstance(tokenized_sentences, dict):
return self.generate(
[tokenized_sentences], beam=beam, verbose=verbose, **kwargs
)[0]
# build generator using current args as well as any kwargs
gen_args = copy.deepcopy(self.cfg.generation)
with open_dict(gen_args):
gen_args.beam = beam
for k, v in kwargs.items():
setattr(gen_args, k, v)
generator = self.task.build_generator(self.models, gen_args)
inference_step_args = inference_step_args or {}
results = []
for batch in tqdm(
self._build_batches(tokenized_sentences, skip_invalid_size_inputs)
):
batch = utils.apply_to_sample(lambda t: t.to(self.device), batch)
translations = self.task.inference_step(
generator, self.models, batch, **inference_step_args
)
for id, hypos in zip(batch["id"].tolist(), translations):
# The output of the generator is supposed to be a tensor of size (bsz x max_len x n_channels)
# So we need to convert it to dictionary form
for i in range(len(hypos)):
hypos[i]["tokens"] = {
channel: hypos[i]["tokens"][..., j]
for j, channel in enumerate(self.channels)
}
results.append((id, hypos))
# sort output to match input order
outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
if verbose:
def getarg(name, default):
return getattr(gen_args, name, getattr(self.cfg, name, default))
for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs):
src_str_with_unk = {
channel: self.string(source_tokens[channel], channel)
for channel in source_tokens
}
logger.info("S\t{}".format(src_str_with_unk))
for hypo in target_hypotheses:
hypo_str = self.decode(hypo["tokens"])
logger.info("H\t{}\t{}".format(hypo["score"], hypo_str))
# hypo["positional_scores"]: T x n_channels
pos_scores = {}
for c, channel in enumerate(source_tokens):
pos_scores[channel] = " ".join(
map(
lambda x: "{:.4f}".format(x),
hypo["positional_scores"][:, c].tolist(),
)
)
logger.info("P\t{}".format(pos_scores))
return outputs
def encode(self, sentence: Dict[str, str]) -> Dict[str, torch.LongTensor]:
assert isinstance(
sentence, dict
), "Input sentence is expected to be a dictionary over channels"
assert set(sentence.keys()) == set(
self.channels
), "Mismatch between input sentence keys and model channels ({} vs {})".format(
set(sentence.keys()), set(self.channels)
)
encoded_sentence = {}
for channel in sentence:
sentence_channel = sentence[channel]
sentence_channel = self.tokenize(sentence_channel)
sentence_channel = self.apply_bpe(sentence_channel)
sentence_channel = self.binarize(sentence_channel, channel)
encoded_sentence[channel] = sentence_channel
sentence_size = encoded_sentence[self.channels[0]].size()
assert all(
encoded_sentence[channel].size() == sentence_size
for channel in encoded_sentence
), "Input tensors are expected to have the same size in all channels"
return encoded_sentence
def decode(self, tokens: Dict[str, torch.LongTensor]) -> Dict[str, str]:
assert isinstance(
tokens, dict
), "Input tokens are expected to be a dictionary over channels"
assert set(tokens.keys()) == set(
self.channels
), "Mismatch between input tokens keys and model channels ({} vs {})".format(
set(tokens.keys()), set(self.channels)
)
decoded_sentence = {}
for channel in tokens:
tokens_channel = tokens[channel]
sentence_channel = self.string(tokens_channel, channel)
sentence_channel = self.remove_bpe(sentence_channel)
sentence_channel = self.detokenize(sentence_channel)
decoded_sentence[channel] = sentence_channel
return decoded_sentence
def binarize(self, sentence: str, channel: str) -> torch.LongTensor:
return (
self.src_dicts[channel].encode_line(sentence, add_if_not_exist=False).long()
)
def string(self, tokens: torch.LongTensor, channel: str) -> str:
return self.tgt_dicts[channel].string(tokens)
def _build_batches(
self, tokens: List[Dict[str, List[int]]], skip_invalid_size_inputs: bool
) -> Iterator[Dict[str, Any]]:
lengths = torch.LongTensor([next(iter(d.values())).numel() for d in tokens])
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.build_dataset_for_inference(tokens, lengths),
max_tokens=self.cfg.dataset.max_tokens,
max_sentences=self.cfg.dataset.batch_size,
max_positions=self.max_positions,
ignore_invalid_inputs=skip_invalid_size_inputs,
disable_iterator_cache=True,
).next_epoch_itr(shuffle=False)
return batch_iterator
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_dlm/hub_interface.py |
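# Small standalone sketch of the per-channel dictionary contract enforced by
# encode()/decode() above: inputs must carry exactly the model's channels and all
# channel tensors must have the same length. The channel names are illustrative only.
import torch

channels = ["unitA", "unitB"]
sample = {
    "unitA": torch.LongTensor([10, 11, 12, 2]),
    "unitB": torch.LongTensor([20, 21, 22, 2]),
}

assert set(sample.keys()) == set(channels), "inputs must cover exactly the model channels"
first_size = sample[channels[0]].size()
assert all(sample[c].size() == first_size for c in sample), "all channels must have equal length"

# _build_batches() derives one length per example from any channel, since they all match.
lengths = torch.LongTensor([next(iter(sample.values())).numel()])
print(lengths)  # tensor([4])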
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import FairseqIncrementalDecoder
from fairseq.modules import (
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
)
from .speech_dlm_decoder_layer import (
CrossChannelTransformerDecoderLayer,
StandardTransformerDecoderLayer,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
class CrossChannelTransformerDecoder(FairseqIncrementalDecoder):
"""
Cross-channel Transformer Decoder Block for parallel spoken dialogue units
as described in the paper: https://arxiv.org/pdf/2203.16502.pdf;
consisting of *args.decoder_layers* layers. Each layer is a
:class:`StandardTransformerDecoderLayer` or
:class:`CrossChannelTransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
channels (list): list of channel names (string)
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, dictionary, embed_tokens, channels, no_encoder_attn=False):
self.args = args
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.decoder_layerdrop = args.decoder_layerdrop
self.share_input_output_embed = args.share_decoder_input_output_embed
self.channels = channels
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = args.decoder_output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
if args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
self.project_in_dim = (
nn.Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
self.max_target_positions,
embed_dim,
self.padding_idx,
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
self.cross_self_attention = getattr(args, "cross_self_attention", False)
assert 0 <= args.decoder_cross_layers <= args.decoder_layers, (
"The number of cross-channel attention decoder layers must be non-negative"
f"and not exceeds the number of decoder layers (found {args.decoder_cross_layers})"
)
if self.decoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_decoder_layer(args, no_encoder_attn)
if i < args.decoder_layers - args.decoder_cross_layers
else self.build_cross_decoder_layer(args, no_encoder_attn)
for i in range(args.decoder_layers)
]
)
self.num_layers = len(self.layers)
self.non_cross_layers = args.decoder_layers - args.decoder_cross_layers
if args.decoder_normalize_before and not getattr(
args, "no_decoder_final_norm", False
):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
self.project_out_dim = (
nn.Linear(embed_dim, self.output_embed_dim, bias=False)
if embed_dim != self.output_embed_dim
else None
)
self.output_projection = None
self.is_cross_prediction = bool(
float(args.main_and_cross_weights.split(",")[1]) != 0
)
self.n_output_projections = (
1 if not self.is_cross_prediction else len(self.channels)
)
if self.share_input_output_embed:
# Output projection is a list of projections
# where the first proj is for the main-channel,
            # then rolls in a circular way.
# For example: if the main channel has index i
# the second proj is for channel i+1 (mod N_channels), etc.
self.output_projection = nn.ModuleList(
[
nn.Linear(
embed_tokens.weight.shape[1], # embed_dim
embed_tokens.weight.shape[0], # n_dictionaries
bias=False,
)
for _ in range(self.n_output_projections)
]
)
# Only share the main-channel projection
self.output_projection[0].weight = embed_tokens.weight
for i in range(1, self.n_output_projections):
nn.init.normal_(
self.output_projection[i].weight,
mean=0,
std=embed_tokens.weight.shape[1] ** -0.5,
)
else:
self.output_projection = nn.ModuleList(
[
nn.Linear(self.output_embed_dim, len(dictionary), bias=False)
for _ in range(self.n_output_projections)
]
)
for i in range(self.n_output_projections):
nn.init.normal_(
self.output_projection[i].weight,
mean=0,
std=self.output_embed_dim**-0.5,
)
self.output_duration_prediction = (
None
if str(args.duration_prediction).lower() == "false"
else nn.ModuleList(
[
nn.Linear(self.output_embed_dim, 1)
for _ in range(self.n_output_projections)
]
)
)
def build_decoder_layer(self, args, no_encoder_attn=False):
layer = StandardTransformerDecoderLayer(args, no_encoder_attn)
if getattr(args, "checkpoint_activations", False):
offload_to_cpu = getattr(args, "offload_activations", False)
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
return layer
def build_cross_decoder_layer(self, args, no_encoder_attn=False):
layer = CrossChannelTransformerDecoderLayer(args, no_encoder_attn)
if getattr(args, "checkpoint_activations", False):
offload_to_cpu = getattr(args, "offload_activations", False)
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
return layer
def forward(
self,
prev_output_tokens: Dict[str, Tensor],
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[
List[Dict[str, Dict[str, Optional[Tensor]]]]
] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
# return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (dict[str, LongTensor]): previous decoder outputs,
dictionary over all channels with the values being the tensors
of shape `(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): list of dictionaries used for storing state
during :ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output, dict over channels of tensors
of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
x = self.output_layer(x)
return x, extra
def extract_features(
self,
prev_output_tokens: Dict[str, Tensor],
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[
List[Dict[str, Dict[str, Optional[Tensor]]]]
] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
return self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
"""
A scriptable subclass of this class has an extract_features method and calls
super().extract_features, but super() is not supported in torchscript. A copy of
this function is made to be used in the subclass instead.
"""
def extract_features_scriptable(
self,
prev_output_tokens: Dict[str, Tensor],
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[
List[Dict[str, Dict[str, Optional[Tensor]]]]
] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
The core function of *forward* but only return features.
The input (prev_output_tokens) is a dictionary over all channels,
expected to have the following form:
{
'channel1' : Tensor((batch x tgt_len)),
'channel2' : Tensor((batch x tgt_len)),
}
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features, dict over channels of tensors
of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
if alignment_layer is None:
alignment_layer = self.num_layers - 1
x_list = []
for i, channel in enumerate(self.channels):
# embed positions
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
prev_output_tokens[channel],
incremental_state=incremental_state[i]
if incremental_state is not None
else None,
)
if incremental_state is not None:
prev_output_tokens[channel] = prev_output_tokens[channel][:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_tokens(prev_output_tokens[channel])
if self.project_in_dim is not None:
x = self.project_in_dim(x)
x = self.embed_scale * x
if self.quant_noise is not None:
x = self.quant_noise(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
x_list.append(x)
self_attn_padding_mask: Optional[Tensor] = None
if (
self.cross_self_attention
or prev_output_tokens[self.channels[0]].eq(self.padding_idx).any()
):
self_attn_padding_mask = prev_output_tokens[self.channels[0]].eq(
self.padding_idx
)
# decoder layers
        attn: Optional[Dict[str, Tensor]] = None
inner_states: List[Optional[Dict[str, Tensor]]] = [
{channel: x_list[i] for i, channel in enumerate(self.channels)}
]
for idx, layer in enumerate(self.layers):
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x_list[0])
else:
self_attn_mask = None
# need to change to tensor for the checkpoint activation to work
if isinstance(x_list, list):
x_list = torch.stack(x_list)
x_list, layer_attn_list, _ = layer(
x_list,
encoder_out["encoder_out"][0]
if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0)
else None,
encoder_out["encoder_padding_mask"][0]
if (
encoder_out is not None
and len(encoder_out["encoder_padding_mask"]) > 0
)
else None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(
{channel: x_list[i] for i, channel in enumerate(self.channels)}
)
if idx == alignment_layer and all(
layer_attn is not None for layer_attn in layer_attn_list
):
attn = {
channel: layer_attn_list[i].float().to(x_list[0])
for i, channel in enumerate(self.channels)
}
# change back from tensor to list
if not isinstance(x_list, list):
x_list = list(torch.unbind(x_list))
if attn is not None:
for channel in attn:
if alignment_heads is not None:
attn[channel] = attn[channel][:alignment_heads]
# average probabilities over heads
attn[channel] = attn[channel].mean(dim=0)
for i, x in enumerate(x_list):
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
x_list[i] = x
x = {channel: x_list[i] for i, channel in enumerate(self.channels)}
return x, {"attn": [attn], "inner_states": inner_states}
def output_layer(self, features):
"""Project features to the vocabulary size.
Return a dictionary of the form:
{
'input-channel': {
'predicted-channel': token prediction tensor of shape `(batch, tgt_len, vocab)`,
}
}
if duration_prediction is enabled
{
'input-channel': {
'predicted-channel': {
'pred_token': token prediction tensor of shape `(batch, tgt_len, vocab)`,
'pred_duration': duration prediction tensor
}
}
}
"""
# project back to size of vocabulary
if self.output_duration_prediction is None:
if self.is_cross_prediction:
return {
channel: {
pred_channel: self.output_projection[j - i](features[channel])
for j, pred_channel in enumerate(self.channels)
}
for i, channel in enumerate(self.channels)
}
else:
return {
channel: {channel: self.output_projection[0](features[channel])}
for i, channel in enumerate(self.channels)
}
else:
if self.is_cross_prediction:
return {
channel: {
pred_channel: {
"pred_token": self.output_projection[j - i](
features[channel]
),
"pred_duration": self.output_duration_prediction[j - i](
features[channel]
),
}
for j, pred_channel in enumerate(self.channels)
}
for i, channel in enumerate(self.channels)
}
else:
return {
channel: {
channel: {
"pred_token": self.output_projection[0](features[channel]),
"pred_duration": self.output_duration_prediction[0](
features[channel]
),
}
}
for i, channel in enumerate(self.channels)
}
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
# self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
)
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim]
def get_normalized_probs_scriptable(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
logits_dict = net_output[0]
out_dict = {}
for channel in logits_dict:
out_dict[channel] = {}
for pred_channel in logits_dict[channel]:
if isinstance(logits_dict[channel][pred_channel], dict):
pred_token_logits = logits_dict[channel][pred_channel]["pred_token"]
else:
pred_token_logits = logits_dict[channel][pred_channel]
if log_probs:
out = utils.log_softmax(
pred_token_logits, dim=-1, onnx_trace=self.onnx_trace
)
else:
out = utils.softmax(
pred_token_logits, dim=-1, onnx_trace=self.onnx_trace
)
if isinstance(logits_dict[channel][pred_channel], dict):
out_dict[channel][pred_channel] = {
"pred_token": out,
"pred_duration": logits_dict[channel][pred_channel][
"pred_duration"
].float(),
} # move to float32 to avoid inf loss
else:
out_dict[channel][pred_channel] = out
return out_dict
def reorder_incremental_state_scripting(
self,
incremental_state: List[Dict[str, Dict[str, Optional[Tensor]]]],
new_order: Tensor,
):
"""Main entry point for reordering the incremental state.
Due to limitations in TorchScript, we call this function in
:class:`fairseq.sequence_generator.SequenceGenerator` instead of
calling :func:`reorder_incremental_state` directly.
"""
for module in self.modules():
if hasattr(module, "reorder_incremental_state"):
for i, incremental_state_channel in enumerate(incremental_state):
result = module.reorder_incremental_state(
incremental_state_channel, new_order
)
if result is not None:
incremental_state[i] = result
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_dlm/modules/speech_dlm_decoder.py |
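# Standalone sketch of the circular projection indexing used in output_layer() above:
# with n channels and n output projections, output_projection[j - i] relies on
# Python's negative list indexing so that input channel i uses projection 0 for
# itself and projection k for the channel k steps ahead (modulo n).
channels = ["A", "B", "C"]
projections = ["proj0 (main)", "proj1 (+1)", "proj2 (+2)"]

for i, channel in enumerate(channels):
    for j, pred_channel in enumerate(channels):
        print(f"input {channel} -> predict {pred_channel}: uses {projections[j - i]}")
# e.g. input B -> predict C uses proj1 (+1), and input C -> predict A uses proj1 (+1).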
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_dlm/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Tuple, Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.modules import LayerNorm, MultiheadAttention
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor
class CrossChannelTransformerDecoderLayer(nn.Module):
"""Cross-Attention Transformer Decoder Layer block as described
in the paper: https://arxiv.org/pdf/2203.16502.pdf
Composed of a Multi-head Self Attention block followed by a
Multi-head Cross-Attention block which attends to the self-attention
outputs of the other channels. The weights of the attention blocks
in all channels are shared.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False
):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.quant_noise = getattr(args, "quant_noise_pq", 0)
self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8)
        # This cross_self_attention is used for encoder-decoder systems;
        # it's not the cross-channel attention (defined below as cross_channel_attn).
self.cross_self_attention = getattr(args, "cross_self_attention", False)
self.self_attn = self.build_self_attention(
self.embed_dim,
args,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
self.cross_channel_attn = self.build_cross_channel_attention(
self.embed_dim,
args,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
self.activation_fn = utils.get_activation_fn(
activation=str(args.activation_fn)
if getattr(args, "activation_fn", None) is not None
else "relu"
)
activation_dropout_p = getattr(args, "activation_dropout", 0) or 0
if activation_dropout_p == 0:
# for backwards compatibility with models that use args.relu_dropout
activation_dropout_p = getattr(args, "relu_dropout", 0) or 0
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = args.decoder_normalize_before
# use layerNorm rather than FusedLayerNorm for exporting.
# char_inputs can be used to determint this.
# TODO remove this once we update apex with the fix
export = getattr(args, "char_inputs", False)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.cross_channel_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.fc1 = self.build_fc1(
self.embed_dim,
args.decoder_ffn_embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.fc2 = self.build_fc2(
args.decoder_ffn_embed_dim,
self.embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_self_attention(
self, embed_dim, args, add_bias_kv=False, add_zero_attn=False
):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not getattr(args, "cross_self_attention", False),
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def build_cross_channel_attention(
self, embed_dim, args, add_bias_kv=False, add_zero_attn=False
):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=False,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def build_encoder_attention(self, embed_dim, args):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
kdim=getattr(args, "encoder_embed_dim", None),
vdim=getattr(args, "encoder_embed_dim", None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def residual_connection(self, x, residual):
return residual + x
def forward(
self,
x_list_tensor: List[torch.Tensor],
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
incremental_state: Optional[
List[Dict[str, Dict[str, Optional[Tensor]]]]
] = None,
prev_self_attn_state: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
prev_attn_state: Optional[List[torch.Tensor]] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
):
"""
Args:
x_list_tensor (List[Tensor]): list of input tensors in different channels,
each tensor is of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor, optional): binary
ByteTensor of shape `(batch, src_len)` where padding
elements are indicated by ``1``.
incremental_state (optional): list of incremental_state dictionaries over
different channels (sequence generation mode)
prev_self_attn_state (List[Tuple[Tensor, Tensor]], optional): list of tuples
(self_attn_state, cross_channel_attn_state) over different channels
need_attn (bool, optional): return attention weights
need_head_weights (bool, optional): return attention weights
for each head (default: return average over heads).
Returns:
list of encoded output of shape `(seq_len, batch, embed_dim)`
"""
n_channels = len(x_list_tensor)
if need_head_weights:
need_attn = True
# incremental_state is a list of dictionaries over different channels
if incremental_state is not None:
assert isinstance(incremental_state, list)
assert len(incremental_state) == n_channels
# prev_self_attn_state is a list of tuples (self_attn_state, cross_channel_attn_state) over different channels
if prev_self_attn_state is not None:
assert isinstance(prev_self_attn_state, list)
assert len(prev_self_attn_state) == n_channels
for prev_self_attn_state_channel in prev_self_attn_state:
assert isinstance(prev_self_attn_state_channel, tuple)
assert len(prev_self_attn_state_channel) == 2
# Backup for other channels & cross channel attention
self_attn_mask_orin = self_attn_mask
self_attn_padding_mask_orin = self_attn_padding_mask
x_list = []
attn_list = []
for i, x in enumerate(x_list_tensor):
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if prev_self_attn_state is not None:
prev_key, prev_value = prev_self_attn_state[i][0][:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state[i][0]) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[i][0][2]
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state[i], saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(
incremental_state[i] if incremental_state is not None else None
)
if self.cross_self_attention and not (
incremental_state is not None
and _self_attn_input_buffer is not None
and "prev_key" in _self_attn_input_buffer
):
if self_attn_mask_orin is not None:
assert encoder_out is not None
self_attn_mask = torch.cat(
(
x.new_zeros(x.size(0), encoder_out.size(0)),
self_attn_mask_orin,
),
dim=1,
)
if self_attn_padding_mask_orin is not None:
if encoder_padding_mask is None:
assert encoder_out is not None
encoder_padding_mask = self_attn_padding_mask_orin.new_zeros(
encoder_out.size(1), encoder_out.size(0)
)
self_attn_padding_mask = torch.cat(
(encoder_padding_mask, self_attn_padding_mask_orin), dim=1
)
assert encoder_out is not None
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
x, attn = self.self_attn(
query=x,
key=y,
value=y,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state[i]
if incremental_state is not None
else None,
need_weights=False,
attn_mask=self_attn_mask,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
if prev_attn_state is not None:
prev_key, prev_value = prev_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_attn_state[2]
assert incremental_state is not None
self.encoder_attn._set_input_buffer(
incremental_state[i], saved_state
)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state[i]
if incremental_state is not None
else None,
static_kv=True,
need_weights=need_attn or (not self.training and self.need_attn),
need_head_weights=need_head_weights,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
x_list.append(x)
attn_list.append(attn)
# Store attentions & new x(s) (bc the old x(s) are used in other channels)
x_list_new = []
# Here comes the cross channel attention
for i, x in enumerate(x_list):
residual = x
if self.normalize_before:
x = self.cross_channel_attn_layer_norm(x)
if prev_self_attn_state is not None:
prev_key, prev_value = prev_self_attn_state[i][1][:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state[i][1]) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[i][1][2]
assert incremental_state is not None
self.cross_channel_attn._set_input_buffer(
incremental_state[i], saved_state
)
            # The cross-channel attention attends to the concatenation of the
            # self-attention outputs from the other channels
if len(x_list) > 1:
x_other = torch.cat(
[x_list[(i + j) % len(x_list)] for j in range(1, len(x_list))],
dim=0,
)
else:
                # Fall back to self-attention when there is only one channel
x_other = x_list[i]
x, attn = self.cross_channel_attn(
query=x,
key=x_other,
value=x_other,
key_padding_mask=self_attn_padding_mask_orin,
incremental_state=incremental_state[i]
if incremental_state is not None
else None,
need_weights=False,
attn_mask=self_attn_mask_orin,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.cross_channel_attn_layer_norm(x)
x_list_new.append(x)
x_list = x_list_new
for i, x in enumerate(x_list):
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
x_list[i] = x
# Trick for the checkpoint activation
x_list_tensor = torch.stack(x_list)
if self.onnx_trace and incremental_state is not None:
self_and_cross_attn_state_list = []
for i in range(n_channels):
self_and_cross_attn_state = []
for self_attn_module in [self.self_attn, self.cross_channel_attn]:
saved_state = self_attn_module._get_input_buffer(
incremental_state[i]
)
assert saved_state is not None
if self_attn_padding_mask is not None:
self_attn_module_state = [
saved_state["prev_key"],
saved_state["prev_value"],
saved_state["prev_key_padding_mask"],
]
else:
self_attn_module_state = [
saved_state["prev_key"],
saved_state["prev_value"],
]
self_and_cross_attn_state.append(self_attn_module_state)
self_and_cross_attn_state_list.append(tuple(self_and_cross_attn_state))
return x_list_tensor, attn_list, self_and_cross_attn_state_list
return x_list_tensor, attn_list, None
def make_generation_fast_(self, need_attn: bool = False, **kwargs):
self.need_attn = need_attn
# Rewrite fairseq.modules.TransformerDecoderLayer
# to be compatible with checkpoint_activations
# (avoid forwarding model multiple times)
class StandardTransformerDecoderLayer(nn.Module):
"""Rewrite fairseq.modules.TransformerDecoderLayer to avoid forwarding
model multiple times and be compatible with checkpoint_activations.
The input is expected to be a list of tensors from different channels,
    each of which is forwarded to the same model (shared attention weights).
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False
):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.quant_noise = getattr(args, "quant_noise_pq", 0)
self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8)
self.cross_self_attention = getattr(args, "cross_self_attention", False)
self.self_attn = self.build_self_attention(
self.embed_dim,
args,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
self.activation_fn = utils.get_activation_fn(
activation=str(args.activation_fn)
if getattr(args, "activation_fn", None) is not None
else "relu"
)
activation_dropout_p = getattr(args, "activation_dropout", 0) or 0
if activation_dropout_p == 0:
# for backwards compatibility with models that use args.relu_dropout
activation_dropout_p = getattr(args, "relu_dropout", 0) or 0
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = args.decoder_normalize_before
# use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determine this.
# TODO remove this once we update apex with the fix
export = getattr(args, "char_inputs", False)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.fc1 = self.build_fc1(
self.embed_dim,
args.decoder_ffn_embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.fc2 = self.build_fc2(
args.decoder_ffn_embed_dim,
self.embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_self_attention(
self, embed_dim, args, add_bias_kv=False, add_zero_attn=False
):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not getattr(args, "cross_self_attention", False),
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def build_encoder_attention(self, embed_dim, args):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
kdim=getattr(args, "encoder_embed_dim", None),
vdim=getattr(args, "encoder_embed_dim", None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def residual_connection(self, x, residual):
return residual + x
def forward(
self,
x_list_tensor: List[torch.Tensor],
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
incremental_state: Optional[
List[Dict[str, Dict[str, Optional[Tensor]]]]
] = None,
prev_self_attn_state: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
prev_attn_state: Optional[List[torch.Tensor]] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
):
"""
Args:
x_list_tensor (List[Tensor]): list of input tensors in different channels,
each tensor is of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor, optional): binary
ByteTensor of shape `(batch, src_len)` where padding
elements are indicated by ``1``.
incremental_state (optional): list of incremental_state dictionaries over
different channels (sequence generation mode)
            prev_self_attn_state (optional): list of self-attention states
                (one per channel)
need_attn (bool, optional): return attention weights
need_head_weights (bool, optional): return attention weights
for each head (default: return average over heads).
Returns:
list of encoded output of shape `(seq_len, batch, embed_dim)`
"""
n_channels = len(x_list_tensor)
if need_head_weights:
need_attn = True
# incremental_state is a list of dictionaries over different channels
if incremental_state is not None:
assert isinstance(incremental_state, list)
assert len(incremental_state) == n_channels
# prev_self_attn_state is a list of self_attn_state over different channels
if prev_self_attn_state is not None:
assert isinstance(prev_self_attn_state, list)
assert len(prev_self_attn_state) == n_channels
x_list = []
attn_list = []
for i, x in enumerate(x_list_tensor):
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if prev_self_attn_state is not None:
prev_key, prev_value = prev_self_attn_state[i][:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state[i]) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state[i], saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(
                incremental_state[i]
)
if self.cross_self_attention and not (
incremental_state is not None
and _self_attn_input_buffer is not None
and "prev_key" in _self_attn_input_buffer
):
if self_attn_mask is not None:
assert encoder_out is not None
self_attn_mask = torch.cat(
(x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask),
dim=1,
)
if self_attn_padding_mask is not None:
if encoder_padding_mask is None:
assert encoder_out is not None
encoder_padding_mask = self_attn_padding_mask.new_zeros(
encoder_out.size(1), encoder_out.size(0)
)
self_attn_padding_mask = torch.cat(
(encoder_padding_mask, self_attn_padding_mask), dim=1
)
assert encoder_out is not None
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
x, attn = self.self_attn(
query=x,
key=y,
value=y,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state[i]
if incremental_state is not None
else None,
need_weights=False,
attn_mask=self_attn_mask,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
if prev_attn_state is not None:
prev_key, prev_value = prev_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_attn_state[2]
assert incremental_state is not None
                    self.encoder_attn._set_input_buffer(incremental_state[i], saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state[i]
if incremental_state is not None
else None,
static_kv=True,
need_weights=need_attn or (not self.training and self.need_attn),
need_head_weights=need_head_weights,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
x_list.append(x)
attn_list.append(attn)
        # Stack per-channel outputs into a single tensor (trick needed for activation checkpointing)
x_list_tensor = torch.stack(x_list)
if self.onnx_trace and incremental_state is not None:
self_attn_state_list = []
for i in range(n_channels):
saved_state = self.self_attn._get_input_buffer(incremental_state[i])
assert saved_state is not None
if self_attn_padding_mask is not None:
self_attn_state = [
saved_state["prev_key"],
saved_state["prev_value"],
saved_state["prev_key_padding_mask"],
]
else:
self_attn_state = [
saved_state["prev_key"],
saved_state["prev_value"],
]
self_attn_state_list.append(self_attn_state)
return x_list_tensor, attn_list, self_attn_state_list
return x_list_tensor, attn_list, None
def make_generation_fast_(self, need_attn: bool = False, **kwargs):
self.need_attn = need_attn
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_dlm/modules/speech_dlm_decoder_layer.py |
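The per-channel forward pass above follows the standard pre-/post-norm residual pattern: self-attention, optional encoder attention, then a two-layer feed-forward block. The sketch below isolates just that feed-forward sub-block (fc1 / activation / fc2 plus final_layer_norm); it is a minimal illustration with made-up dimensions and without dropout or quant-noise, and the name MiniFFNBlock is not part of fairseq.

import torch
import torch.nn as nn

class MiniFFNBlock(nn.Module):
    """Pre-/post-norm residual FFN sub-block, mirroring fc1/fc2 + final_layer_norm above."""

    def __init__(self, embed_dim: int = 16, ffn_dim: int = 64, normalize_before: bool = True):
        super().__init__()
        self.normalize_before = normalize_before
        self.layer_norm = nn.LayerNorm(embed_dim)
        self.fc1 = nn.Linear(embed_dim, ffn_dim)
        self.fc2 = nn.Linear(ffn_dim, embed_dim)
        self.activation = nn.ReLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        residual = x
        if self.normalize_before:
            x = self.layer_norm(x)
        x = self.fc2(self.activation(self.fc1(x)))
        x = residual + x  # residual_connection(x, residual)
        if not self.normalize_before:
            x = self.layer_norm(x)
        return x

# MiniFFNBlock()(torch.randn(7, 2, 16)) keeps the (seq_len, batch, embed_dim) shape.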
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, List, Optional
from omegaconf.listconfig import ListConfig
from omegaconf.dictconfig import DictConfig
import torch
import torch.nn as nn
from fairseq.models import FairseqIncrementalDecoder
from torch import Tensor
from fairseq.ngram_repeat_block import NGramRepeatBlock
from .multichannel_search import ContiguousMultichannelBeamSearch
from fairseq.models.speech_dlm import SpeechDLM
class MultichannelSequenceGenerator(nn.Module):
def __init__(
self,
models,
tgt_dicts,
beam_size=1,
max_len_a=0,
max_len_b=200,
min_len=1,
normalize_scores=True,
len_penalty=1.0,
unk_penalty=0.0,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
search_strategy=None,
eos=None,
symbols_to_strip_from_output=None,
lm_model=None,
lm_weight=1.0,
duration_temperature=1.0,
):
"""Generate multi-channel parallel units with the SpeechDLM model
as described in the paper: https://arxiv.org/pdf/2203.16502.pdf;
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models,
currently support fairseq.models.TransformerModel for scripting
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
duration_temperature (float, optional): rate of the duration prediction,
higher rate induces a faster generated wav (default: 1.0)
"""
super().__init__()
if isinstance(models, MultichannelEnsembleModel):
self.model = models
else:
self.model = MultichannelEnsembleModel(models)
self.tgt_dicts = tgt_dicts
self.pad = list(tgt_dicts.values())[0].pad()
self.unk = list(tgt_dicts.values())[0].unk()
self.eos = list(tgt_dicts.values())[0].eos() if eos is None else eos
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None
else {self.eos}
)
self.channels = list(tgt_dicts.keys())
self.n_channels = len(self.channels)
self.vocab_sizes = [len(tgt_dicts[channel]) for channel in self.channels]
# the max beam size is the dictionary size - 1, since we never select pad
max_possible_beam_size = 1
for i in self.vocab_sizes:
max_possible_beam_size *= i - 1
self.beam_size = min(beam_size, max_possible_beam_size)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
if isinstance(temperature, (int, float)):
temperature = {channel: temperature for channel in self.channels}
elif isinstance(temperature, ListConfig) or isinstance(temperature, list):
temperature = {
channel: temperature[i] for i, channel in enumerate(self.channels)
}
assert isinstance(temperature, DictConfig) or isinstance(
temperature, dict
), f"temperature: expected dict, but found {type(temperature)}"
self.temperature = temperature
self.match_source_len = match_source_len
if no_repeat_ngram_size > 0:
self.repeat_ngram_blocker = NGramRepeatBlock(no_repeat_ngram_size)
else:
self.repeat_ngram_blocker = None
for channel in temperature:
assert temperature[channel] > 0, "--temperature must be greater than 0"
if search_strategy is None:
self.search = ContiguousMultichannelBeamSearch(tgt_dicts)
else:
self.search = search_strategy
# We only need to set src_lengths in LengthConstrainedBeamSearch.
# As a module attribute, setting it would break in multithread
# settings when the model is shared.
self.should_set_src_lengths = (
hasattr(self.search, "needs_src_lengths") and self.search.needs_src_lengths
)
self.model.eval()
self.lm_model = lm_model
self.lm_weight = lm_weight
if self.lm_model is not None:
self.lm_model.eval()
self.duration_prediction = bool(
str(getattr(models[0].decoder.args, "duration_prediction", "false")).lower()
== "true"
)
self.delayed_duration = bool(
str(
getattr(models[0].decoder.args, "delayed_duration_target", "false")
).lower()
== "true"
)
self.duration_temperature = duration_temperature
def cuda(self):
self.model.cuda()
return self
@torch.no_grad()
def forward(
self,
sample: Dict[str, Dict[str, Tensor]], # TODO: Modify this
prefix_tokens: Optional[Dict[str, Tensor]] = None,
bos_token: Optional[int] = None,
):
"""Generate a batch of translations.
Args:
sample (dict): batch
prefix_tokens (dict of torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, prefix_tokens, bos_token=bos_token)
@torch.no_grad()
def generate(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs):
"""Generate translations. Match the api of other fairseq generators.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (dict of torch.LongTensor, optional): force decoder to begin
with these tokens
constraints (torch.LongTensor, optional): force decoder to include
the list of constraints
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, **kwargs)
def _generate(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Dict[str, Tensor]] = None,
constraints: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
"""
Here sample is expected to have the following form
{
'id': index,
'net_input': {
'src_tokens': {
'channel1' : tensor((batch x src_length)),
'channel2' : tensor((batch x src_length)),
},
...
},
}
and prefix_tokens
{
'channel1' : tensor((batch x prefix_length)),
'channel2' : tensor((batch x prefix_length)),
}
"""
if self.model.is_speech_dlm:
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[{} for _ in range(self.n_channels)],
)
for i in range(self.model.models_size)
],
)
else:
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(self.model.models_size)
],
)
net_input = sample["net_input"]
# Convert from dict to tensor form
# shape of src_tokens : (bsz x src_len x n_channels)
src_tokens = torch.stack(
[net_input["src_tokens"][channel] for channel in self.channels], dim=-1
)
prefix_tokens = torch.stack(
[prefix_tokens[channel] for channel in self.channels], dim=-1
)
        # the source length is the number of tokens excluding EOS and pad
src_lengths = (
(src_tokens[..., 0].ne(self.eos) & src_tokens[..., 0].ne(self.pad))
.long()
.sum(dim=1)
)
# bsz: total number of sentences in beam
# Note that src_tokens may have more than 2 dimensions (i.e. audio features)
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
if constraints is not None and not self.search.supports_constraints:
raise NotImplementedError(
"Target-side constraints were provided, but search method doesn't support them"
)
# Initialize constraints, when active
self.search.init_constraints(constraints, beam_size)
max_len: int = -1
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
# exclude the EOS marker
self.model.max_decoder_positions() - 1,
)
assert (
self.min_len <= max_len
), "min_len cannot be larger than max_len, please adjust these!"
# compute the encoder output for each beam
encoder_outs = self.model.forward_encoder(net_input)
# placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
# ensure encoder_outs is a List.
assert encoder_outs is not None
# initialize buffers
# cumulative scores of hypotheses
scores = (
torch.zeros(bsz * beam_size, max_len + 1, self.n_channels)
.to(src_tokens)
.float()
) # +1 for eos; pad is never chosen for scoring
tokens = (
torch.zeros(bsz * beam_size, max_len + 2, self.n_channels)
.to(src_tokens)
.long()
.fill_(self.pad)
) # +2 for eos and pad
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn: Optional[Tensor] = None
# A list that indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = (
torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
) # forward and backward-compatible False mask
# list of completed sentences
finalized = torch.jit.annotate(
List[List[Dict[str, Tensor]]],
[torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
        )  # contains lists of dictionaries of information about the hypotheses being finalized at each step
finished = [
False for i in range(bsz)
] # a boolean array indicating if the sentence at the index is finished or not
num_remaining_sent = bsz # number of sentences remaining
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (
(torch.arange(0, bsz) * beam_size)
.unsqueeze(1)
.type_as(tokens)
.to(src_tokens.device)
)
cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device)
reorder_state: Optional[Tensor] = None
batch_idxs: Optional[Tensor] = None
original_batch_idxs: Optional[Tensor] = None
if "id" in sample and isinstance(sample["id"], Tensor):
original_batch_idxs = sample["id"]
else:
original_batch_idxs = torch.arange(0, bsz).type_as(tokens)
if self.duration_prediction:
dur_counter = torch.ones(bsz * beam_size, self.n_channels).to(src_tokens)
            # track the indices where dur_counter was just refilled from dur_preds
dur_counter_jump_indices = None
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
batch_idxs
)
reorder_state.view(-1, beam_size).add_(
corr.unsqueeze(-1) * beam_size
)
original_batch_idxs = original_batch_idxs[batch_idxs]
self.model.reorder_incremental_state(incremental_states, reorder_state)
encoder_outs = self.model.reorder_encoder_out(
encoder_outs, reorder_state
)
input_tokens = {
channel: tokens[:, : step + 1, i]
for i, channel in enumerate(self.channels)
}
lprobs_dict, avg_attn_scores = self.model.forward_decoder(
input_tokens,
encoder_outs,
incremental_states,
self.temperature,
)
            # Because the vocabulary sizes differ across channels, we cannot concatenate the lprobs into a single tensor
if not self.duration_prediction:
lprobs_list = list(lprobs_dict.values())
else:
lprobs_list = [
net_output["pred_token"] for net_output in lprobs_dict.values()
]
                # predicted durations (non-positive values are floored to 1 below)
dur_preds = (
torch.stack(
[
net_output["pred_duration"]
for net_output in lprobs_dict.values()
]
)
.squeeze(-1)
.T
)
dur_preds = dur_preds / self.duration_temperature
dur_preds = dur_preds.round().long()
dur_preds[dur_preds < 1] = 1
                # dur_preds & dur_counter need to be modified when there is no edge (current token equals the previous one)
if step > 0:
non_edge_indices = tokens[:, step, :] == tokens[:, step - 1, :]
if self.delayed_duration:
dur_preds[non_edge_indices] = 1
else:
if dur_counter_jump_indices is not None:
dur_counter[dur_counter_jump_indices & non_edge_indices] = 2
# update dur_counter
if step > 0:
if self.delayed_duration:
dur_counter -= (
(dur_counter == 1)
| (tokens[:, step, :] == tokens[:, step - 1, :])
).int()
dur_counter[dur_counter < 0] = 0
else:
dur_counter -= (
tokens[:, step, :] == tokens[:, step - 1, :]
).int()
dur_counter[dur_counter < 1] = 1
                # whether to copy the previous token (i.e. if the counter is still on)
                # and get the new duration
if self.delayed_duration:
dur_counter_jump_indices = dur_counter == 0
dur_counter[dur_counter_jump_indices] = dur_preds[
dur_counter_jump_indices
]
# whether to copy previous token in this step
copy_prev_token = dur_counter != 1
if self.delayed_duration is False:
dur_counter_jump_indices = dur_counter == 1
dur_counter[dur_counter_jump_indices] = dur_preds[
dur_counter_jump_indices
]
# else:
# dur_counter[dur_counter==0] = dur_preds[dur_counter==0] - 1
# copy_prev_token = (dur_counter > 0)
if self.lm_model is not None:
assert False, "Currently not supported in multichannelLM case"
for i in range(self.n_channels):
lprobs_list[i][lprobs_list[i] != lprobs_list[i]] = torch.tensor(
-math.inf
).to(lprobs_list[i])
lprobs_list[i][:, self.pad] = -math.inf # never select pad
lprobs_list[i][:, self.unk] -= self.unk_penalty # apply unk penalty
# handle max length constraint
if step >= max_len:
lprobs_list[i][:, : self.eos] = -math.inf
lprobs_list[i][:, self.eos + 1 :] = -math.inf
else:
lprobs_list[i][
:, self.eos
] = -math.inf # quick fix for short generation
# handle prefix tokens (possibly with different lengths)
if (
prefix_tokens is not None
and step < prefix_tokens.size(1)
and step < max_len
):
(
lprobs_list[i],
tokens[..., i],
scores[..., i],
) = self._prefix_tokens(
step,
lprobs_list[i],
scores[..., i],
tokens[..., i],
prefix_tokens[..., i],
beam_size,
)
if self.duration_prediction:
                        # Can copy the previous token if the prefix token is padding or unk (1-channel conditioned case)
can_copy_mask = (
prefix_tokens[:, step, i].eq(self.pad)
| prefix_tokens[:, step, i].eq(self.unk)
).repeat_interleave(beam_size)
copy_prev_token[:, i] &= can_copy_mask
elif step < self.min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs_list[i][:, self.eos] = -math.inf
if self.duration_prediction:
if step < max_len:
for j in range(copy_prev_token.size(0)):
if copy_prev_token[j, i]:
prev_token = tokens[j, step, i]
lprobs_list[i][j, :prev_token] = -math.inf
lprobs_list[i][j, prev_token + 1 :] = -math.inf
# lprobs_list[i][j, prev_token] = 0.
# dur_counter[j,i] -= 1
# else:
# prev_token = tokens[j, step, i]
# if not (lprobs_list[i][j,:].ne(-math.inf).nonzero() == prev_token).all():
# lprobs_list[i][j, prev_token] = -math.inf
# dur_counter[j,i] = 0.
            # Record attention scores; only supported when avg_attn_scores is a Tensor
if avg_attn_scores is not None:
if attn is None:
attn = torch.empty(
bsz * beam_size, avg_attn_scores.size(1), max_len + 2
).to(scores)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs_list[0])
eos_bbsz_idx = torch.empty(0).to(
tokens
) # indices of hypothesis ending with eos (finished sentences)
eos_scores = torch.empty(0).to(
scores
) # scores of hypothesis ending with eos (finished sentences)
if self.should_set_src_lengths:
self.search.set_src_lengths(src_lengths)
if self.repeat_ngram_blocker is not None:
for i in range(self.n_channels):
lprobs_list[i] = self.repeat_ngram_blocker(
tokens, lprobs_list[i], bsz, beam_size, step
)
# Shape: (batch, cand_size)
cand_scores, cand_indices, cand_beams = self.search.step(
step,
[
lprobs_list[i].view(bsz, -1, self.vocab_sizes[i])
for i in range(self.n_channels)
],
scores.view(bsz, beam_size, -1, self.n_channels)[:, :, :step, :],
tokens[:, : step + 1],
original_batch_idxs,
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
# Shape of eos_mask: (batch size, beam size)
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask = torch.any(eos_mask, dim=-1, keepdim=False)
eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
# only consider eos when it's among the top beam_size indices
# Now we know what beam item(s) to finish
            # Shape: 1d tensor of absolute-numbered (bbsz) indices of hypotheses ending in EOS
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents: List[int] = []
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.stack(
[
torch.masked_select(
cand_scores[:, :beam_size, i], mask=eos_mask[:, :beam_size]
)
for i in range(self.n_channels)
],
dim=-1,
)
finalized_sents = self.finalize_hypos(
step,
eos_bbsz_idx,
eos_scores,
tokens,
scores,
finalized,
finished,
beam_size,
attn,
src_lengths,
max_len,
)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if self.search.stop_on_max_len and step >= max_len:
break
            assert step < max_len, f"{step} < {max_len}"
# Remove finalized sentences (ones for which {beam_size}
# finished hypotheses have been generated) from the batch.
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = torch.ones(
bsz, dtype=torch.bool, device=cand_indices.device
)
batch_mask[finalized_sents] = False
# TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
batch_idxs = torch.arange(
bsz, device=cand_indices.device
).masked_select(batch_mask)
# Choose the subset of the hypothesized constraints that will continue
self.search.prune_sentences(batch_idxs)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, -1, self.n_channels
)
tokens = tokens.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, -1, self.n_channels
)
if self.duration_prediction:
dur_counter = dur_counter.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, self.n_channels
)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, attn.size(1), -1
)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
# Rewrite the operator since the element wise or is not supported in torchscript.
eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just
# the hypos with the smallest values in active_mask.
# {active_hypos} indicates which {beam_size} hypotheses
# from the list of {2 * beam_size} candidates were
# selected. Shapes: (batch size, beam size)
new_cands_to_ignore, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False
)
# update cands_to_ignore to ignore any finalized hypos.
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
# Make sure there is at least one active item for each sentence in the batch.
assert (~cands_to_ignore).any(dim=1).all()
# update cands_to_ignore to ignore any finalized hypos
# {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
# can be selected more than once).
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
active_bbsz_idx = active_bbsz_idx.view(-1)
# active_scores = torch.stack([
# torch.gather(cand_scores[...,0], dim=1, index=active_hypos)
# for i in range(self.n_channels)
# ], dim = -1)
# active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
# Set the tokens for each beam (can select the same row more than once)
tokens[:, : step + 1] = torch.index_select(
tokens[:, : step + 1], dim=0, index=active_bbsz_idx
)
# Select the next token for each of them
for i in range(self.n_channels):
tokens.view(bsz, beam_size, -1, self.n_channels)[
:, :, step + 1, i
] = torch.gather(cand_indices[..., i], dim=1, index=active_hypos)
if step > 0:
scores[:, :step] = torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx
)
for i in range(self.n_channels):
scores.view(bsz, beam_size, -1, self.n_channels)[
:, :, step, i
] = torch.gather(cand_scores[..., i], dim=1, index=active_hypos)
if self.duration_prediction:
dur_counter = torch.index_select(
dur_counter, dim=0, index=active_bbsz_idx
)
# Update constraints based on which candidates were selected for the next beam
self.search.update_constraints(active_hypos)
# copy attention for active hypotheses
if attn is not None:
attn[:, :, : step + 2] = torch.index_select(
attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
)
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
scores = torch.tensor(
[float(elem["score"].item()) for elem in finalized[sent]]
)
_, sorted_scores_indices = torch.sort(scores, descending=True)
finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
finalized[sent] = torch.jit.annotate(
List[Dict[str, Tensor]], finalized[sent]
)
return finalized
def _prefix_tokens(
self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int
):
"""Handle prefix tokens"""
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
# used for 1-channel generation, do not force the unk token (i.e. unk tokens are changed)
prefix_mask &= prefix_toks.ne(self.unk)
# zeroing the copying tokens
# if step > 0:
# copy_mask = (prefix_tokens[:, step] == prefix_tokens[:, step-1]).unsqueeze(-1).repeat(1, beam_size).view(-1)
# prefix_lprobs[copy_mask & prefix_mask] = 0.
lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# shouldn't stop at unk token
unk_mask = prefix_toks.eq(self.unk)
if len(lprobs[unk_mask]) > 0:
# otherwise it won't assign to lprobs,
# see: https://discuss.pytorch.org/t/how-to-mask-and-assign-a-value-to-tensor/18437
copy_lprobs = lprobs[unk_mask][:, :]
copy_lprobs[:, self.eos] = -math.inf
lprobs[unk_mask] = copy_lprobs
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[
:, 0, 1 : step + 1
]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
# copy tokens, scores and lprobs from the first beam to all beams
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return lprobs, tokens, scores
def replicate_first_beam(self, tensor, mask, beam_size: int):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
def finalize_hypos(
self,
step: int,
bbsz_idx,
eos_scores,
tokens,
scores,
finalized: List[List[Dict[str, Tensor]]],
finished: List[bool],
beam_size: int,
attn: Optional[Tensor],
src_lengths,
max_len: int,
):
"""Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.
A sentence is finalized when {beam_size} finished items have been collected for it.
Returns number of sentences (not beam items) being finalized.
These will be removed from the batch and not processed further.
Args:
bbsz_idx (Tensor):
"""
assert bbsz_idx.numel() == eos_scores.size(0)
# clone relevant token and attention tensors.
# tokens is (batch * beam, max_len). So the index_select
# gets the newly EOS rows, then selects cols 1..{step + 2}
tokens_clone = tokens.index_select(0, bbsz_idx)[
:, 1 : step + 2
] # skip the first index, which is EOS
tokens_clone[:, step] = self.eos
attn_clone = (
attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
if attn is not None
else None
)
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
pos_scores[:, step, :] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
# cum_unfin records which sentences in the batch are finished.
# It helps match indexing between (a) the original sentences
# in the batch and (b) the current, possibly-reduced set of
# sentences.
cum_unfin: List[int] = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
# The keys here are of the form "{sent}_{unfin_idx}", where
# "unfin_idx" is the index in the current (possibly reduced)
# list of sentences, and "sent" is the index in the original,
# unreduced batch
# set() is not supported in script export
sents_seen: Dict[str, Optional[Tensor]] = {}
# For every finished beam item
for i in range(bbsz_idx.size()[0]):
idx = bbsz_idx[i]
score = eos_scores[i].sum()
# sentence index in the current (possibly reduced) batch
unfin_idx = idx // beam_size
# sentence index in the original (unreduced) batch
sent = unfin_idx + cum_unfin[unfin_idx]
# Cannot create dict for key type '(int, int)' in torchscript.
# The workaround is to cast int to string
seen = str(sent.item()) + "_" + str(unfin_idx.item())
if seen not in sents_seen:
sents_seen[seen] = None
if self.match_source_len and step > src_lengths[unfin_idx]:
score = torch.tensor(-math.inf).to(score)
# An input sentence (among those in a batch) is finished when
# beam_size hypotheses have been collected for it
if len(finalized[sent]) < beam_size:
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = torch.empty(0)
finalized[sent].append(
{
"tokens": tokens_clone[i],
"score": score,
"attention": hypo_attn, # src_len x tgt_len
"alignment": torch.empty(0),
"positional_scores": pos_scores[i],
}
)
newly_finished: List[int] = []
for seen in sents_seen.keys():
# check termination conditions for this sentence
sent: int = int(float(seen.split("_")[0]))
unfin_idx: int = int(float(seen.split("_")[1]))
if not finished[sent] and self.is_finished(
step, unfin_idx, max_len, len(finalized[sent]), beam_size
):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
def is_finished(
self,
step: int,
unfin_idx: int,
max_len: int,
finalized_sent_len: int,
beam_size: int,
):
"""
Check whether decoding for a sentence is finished, which
occurs when the list of finalized sentences has reached the
beam size, or when we reach the maximum length.
"""
assert finalized_sent_len <= beam_size
if finalized_sent_len == beam_size or step == max_len:
return True
return False
class MultichannelEnsembleModel(nn.Module):
"""A wrapper around an ensemble of SpeechDLM models."""
def __init__(self, models):
super().__init__()
self.models_size = len(models)
# method '__len__' is not supported in ModuleList for torch script
self.single_model = models[0]
self.models = nn.ModuleList(models)
self.has_incremental: bool = False
if all(
hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder)
for m in models
):
self.has_incremental = True
if isinstance(models[0], SpeechDLM):
self.is_speech_dlm = True
# Otherwise it's a multi-channel language model (without cross-prediction outputs)
else:
self.is_speech_dlm = False
if getattr(models[0].decoder.args, "duration_prediction", False):
self.is_duration_prediction = True
else:
self.is_duration_prediction = False
def forward(self):
pass
def has_encoder(self):
return hasattr(self.single_model, "encoder")
def has_incremental_states(self):
return self.has_incremental
def max_decoder_positions(self):
return min([m.max_decoder_positions() for m in self.models])
@torch.jit.export
def forward_encoder(self, net_input: Dict[str, Tensor]):
if not self.has_encoder():
return None
return [model.encoder.forward_torchscript(net_input) for model in self.models]
@torch.jit.export
def forward_decoder(
self,
tokens,
encoder_outs: List[Dict[str, List[Tensor]]],
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
temperature: Dict[str, float] = 1.0,
):
if isinstance(temperature, (float, int)):
temperature = {channel: temperature for channel in tokens}
log_probs = {channel: [] for channel in tokens}
avg_attn: Optional[Tensor] = None
encoder_out: Optional[Dict[str, List[Tensor]]] = None
for i, model in enumerate(self.models):
if self.has_encoder():
encoder_out = encoder_outs[i]
# decode each model
if self.has_incremental_states():
decoder_out = model.decoder.forward(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i],
)
else:
decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out)
attn: Optional[Tensor] = None
decoder_len = len(decoder_out)
if decoder_len > 1 and decoder_out[1] is not None:
if isinstance(decoder_out[1], Tensor):
attn = decoder_out[1]
else:
attn_holder = decoder_out[1]["attn"]
if isinstance(attn_holder, Tensor):
attn = attn_holder
elif attn_holder is not None:
attn = attn_holder[0]
if attn is not None:
attn = attn[:, -1, :]
if self.is_speech_dlm:
if self.is_duration_prediction:
decoder_out_divided_by_temperature = {
channel_src: {
channel_pred: {
"pred_token": decoder_out[0][channel_src][channel_pred][
"pred_token"
][:, -1:, :].div_(temperature[channel_pred]),
"pred_duration": decoder_out[0][channel_src][
channel_pred
]["pred_duration"][:, -1:, :],
}
for channel_pred in decoder_out[0][channel_src]
}
for channel_src in decoder_out[0]
}
else:
decoder_out_divided_by_temperature = {
channel_src: {
channel_pred: decoder_out[0][channel_src][channel_pred][
:, -1:, :
].div_(temperature[channel_pred])
for channel_pred in decoder_out[0][channel_src]
}
for channel_src in decoder_out[0]
}
else:
decoder_out_divided_by_temperature = {
channel: decoder_out[0][channel][:, -1:, :].div_(
temperature[channel]
)
for channel in decoder_out[0]
}
decoder_out_tuple = (
decoder_out_divided_by_temperature,
None if decoder_len <= 1 else decoder_out[1],
)
probs = model.get_normalized_probs(
decoder_out_tuple, log_probs=True, sample=None
)
if self.is_speech_dlm:
if self.is_duration_prediction:
probs = {
channel: {
"pred_token": probs[channel][channel]["pred_token"][
:, -1, :
],
"pred_duration": probs[channel][channel]["pred_duration"][
:, -1, :
],
}
for channel in probs
}
else:
probs = {
channel: probs[channel][channel][:, -1, :] for channel in probs
}
else:
probs = {channel: probs[channel][:, -1, :] for channel in probs}
if self.models_size == 1:
return probs, attn
for channel in probs:
log_probs[channel].append(probs[channel])
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs = {}
for channel in log_probs:
avg_probs[channel] = torch.logsumexp(
torch.stack(log_probs[channel], dim=0), dim=0
) - math.log(self.models_size)
if avg_attn is not None:
avg_attn.div_(self.models_size)
return avg_probs, avg_attn
@torch.jit.export
def reorder_encoder_out(
self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order
):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
new_outs: List[Dict[str, List[Tensor]]] = []
if not self.has_encoder():
return new_outs
for i, model in enumerate(self.models):
assert encoder_outs is not None
new_outs.append(
model.encoder.reorder_encoder_out(encoder_outs[i], new_order)
)
return new_outs
@torch.jit.export
def reorder_incremental_state(
self,
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
new_order,
):
if not self.has_incremental_states():
return
for i, model in enumerate(self.models):
model.decoder.reorder_incremental_state_scripting(
incremental_states[i], new_order
)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_dlm/sequence_generator/multichannel_sequence_generator.py |
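When duration prediction is enabled, the generator above keeps a per-channel counter: while the counter is above one it forces the previous unit to be copied (by masking the log-probabilities down to that single unit), and once the counter runs out a fresh unit is sampled and the counter is refilled from the predicted duration. The toy function below shows the equivalent expansion for a single channel when the units and durations are already known; duration_copy_rollout is an illustrative name and a simplification of the incremental logic above, not a fairseq API.

import torch

def duration_copy_rollout(units: torch.Tensor, durations: torch.Tensor) -> torch.Tensor:
    """Expand discrete units by their predicted durations,
    e.g. units=[7, 3], durations=[3, 2] -> [7, 7, 7, 3, 3]."""
    durations = durations.round().long().clamp(min=1)  # non-positive predictions are floored to 1, as above
    return torch.repeat_interleave(units, durations)

# duration_copy_rollout(torch.tensor([7, 3]), torch.tensor([3.0, 2.0]))
# -> tensor([7, 7, 7, 3, 3])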
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .multichannel_sequence_generator import * # noqa
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_dlm/sequence_generator/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional
import torch
import torch.nn as nn
from torch import Tensor
class MultichannelSearch(nn.Module):
def __init__(self, tgt_dicts):
super().__init__()
tgt_dict = list(tgt_dicts.values())[0]
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
for tgt_dict in tgt_dicts.values():
assert self.pad == tgt_dict.pad()
assert self.unk == tgt_dict.unk()
assert self.eos == tgt_dict.eos()
self.vocab_sizes = {channel: len(tgt_dicts[channel]) for channel in tgt_dicts}
self.src_lengths = torch.tensor(-1)
self.supports_constraints = False
self.stop_on_max_len = False
def step(
self, step, lprobs, scores, prev_output_tokens=None, original_batch_idxs=None
):
"""Take a single search step.
Args:
step: the current search step, starting at 0
lprobs: dictionary of channels {channel : (bsz x input_beam_size x vocab_size_channel)}
the model's log-probabilities over the vocabulary at the current step
scores: {channel : (bsz x input_beam_size x step)}
the historical model scores of each hypothesis up to this point
prev_output_tokens: {channel : (bsz x step)}
                the previously generated output tokens
            original_batch_idxs: (bsz)
                the tensor with the batch indices, in the range [0, bsz);
                this is useful in case a re-ordering has been applied
                and we need to know the original indices
Return: A tuple of (scores, indices, beams) where:
scores: {channel : (bsz x output_beam_size)}
the scores of the chosen elements; output_beam_size can be
larger than input_beam_size, e.g., we may return
2*input_beam_size to account for EOS
indices: {channel : (bsz x output_beam_size)}
the indices of the chosen elements
beams: (bsz x output_beam_size)
the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
"""
raise NotImplementedError
@torch.jit.export
def set_src_lengths(self, src_lengths):
self.src_lengths = src_lengths
@torch.jit.export
def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
"""Initialize constraint states for constrained decoding (if supported).
Args:
batch_constraints: (torch.Tensor, optional)
the list of constraints, in packed form
beam_size: (int)
the beam size
        """
pass
def prune_sentences(self, batch_idxs: Tensor):
"""
Removes constraint states for completed sentences (if supported).
This is called from sequence_generator._generate() when sentences are
deleted from the batch.
Args:
batch_idxs: Indices of *sentences* whose constraint state should be *kept*.
"""
pass
def update_constraints(self, active_hypos: Tensor):
"""
Updates the constraint states by selecting the beam items that are retained.
This is called at each time step of sequence_generator._generate() when
the set of 2 * {beam_size} candidate hypotheses are reduced to the beam size.
Args:
active_hypos: (batch size, beam size)
list of integers denoting, for each sentence, which beam candidate items
should be kept.
"""
pass
def unravel_index(index, shape):
out = []
for dim in reversed(shape):
out.append(index % dim)
index = index // dim
return torch.stack(tuple(reversed(out)), dim=-1)
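# Illustrative self-check (not part of the original fairseq file): unravel_index
# converts flat indices (e.g. produced by topk over a flattened tensor) back into
# per-dimension coordinates. Wrapped in a helper so importing the module stays
# side-effect free; the name _unravel_index_demo is made up.
def _unravel_index_demo():
    flat = torch.tensor([5, 7])           # flat indices into a (2, 4) grid
    coords = unravel_index(flat, (2, 4))  # row = idx // 4, col = idx % 4
    assert torch.equal(coords, torch.tensor([[1, 1], [1, 3]]))
    return coords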
def topk_sum(lprobs_list, k):
"""
lprobs_list = [lprobs_1,...,lprobs_n], where:
lprobs_1 : (batch_size x beam_size x vocab_1)
...
lprobs_n : (batch_size x beam_size x vocab_n)
    Return: a tuple (topk_values, topk_indices, topk_beams) where:
        - topk_values : (batch_size x k)
            values of the top-k sums of the form:
            lprobs_1[bsz, beam_idx, vocab_1_idx] + ... + lprobs_n[bsz, beam_idx, vocab_n_idx]
        - topk_indices : (batch_size x k x n)
            the chosen [vocab_1_idx, ..., vocab_n_idx] for each candidate
        - topk_beams : (batch_size x k)
            the beam index of each candidate
"""
# Reduce all lprobs to k candidates first to reduce later complexity
# We may assume that k << vocab
lprobs_topk_list = []
lprobs_topk_indices_list = []
for lprobs in lprobs_list:
k_i = min(k, lprobs.size(-1))
topk_values, topk_indices = torch.topk(lprobs, k=k_i)
# topk_values : (batch_size x beam_size x k_i)
# topk_indices : (batch_size x beam_size x k_i)
lprobs_topk_list.append(topk_values)
lprobs_topk_indices_list.append(topk_indices)
# Compute all possible sums
sum_lprobs_topk = lprobs_topk_list[0]
for i in range(1, len(lprobs_topk_list)):
unsqueezed_lprobs = lprobs_topk_list[i]
for _ in range(i):
unsqueezed_lprobs = unsqueezed_lprobs.unsqueeze(-2)
sum_lprobs_topk = sum_lprobs_topk.unsqueeze(-1) + unsqueezed_lprobs
# sum_lprobs : (batch_size x beam_size x k_1 x ... x k_n)
# Get the top k sums and the (transformed indices)
topk_sum_values, topk_sum_indices = torch.topk(
sum_lprobs_topk.view(sum_lprobs_topk.size(0), -1), k=k
)
# topk_sum_values : (batch_size x k)
# topk_sum_indices : (batch_size x k)
topk_sum_indices = unravel_index(topk_sum_indices, tuple(sum_lprobs_topk.shape[1:]))
# topk_sum_indices : (batch_size x k x n+1)
# Convert the transformed indices to the true indices
for i_batch in range(topk_sum_indices.size(0)):
for i_cand in range(topk_sum_indices.size(1)):
i_beam, *transformed_vocab_indices = topk_sum_indices[i_batch, i_cand]
true_vocab_indices = [i_beam]
for j, transformed_vocab_j_idx in enumerate(transformed_vocab_indices):
true_vocab_j_idx = lprobs_topk_indices_list[j][
i_batch, i_beam, transformed_vocab_j_idx
]
true_vocab_indices.append(true_vocab_j_idx)
topk_sum_indices[i_batch, i_cand] = torch.tensor(true_vocab_indices)
topk_sum_beams = topk_sum_indices[:, :, 0]
topk_sum_indices = topk_sum_indices[:, :, 1:]
return topk_sum_values, topk_sum_indices, topk_sum_beams
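# Illustrative shape check (not part of the original fairseq file): topk_sum
# scores every (beam, vocab_1, ..., vocab_n) combination by the sum of its
# per-channel log-probabilities and keeps the k best. The helper name
# _topk_sum_demo and the toy sizes are made up.
def _topk_sum_demo(bsz: int = 1, beam_size: int = 2, k: int = 3):
    lprobs_list = [
        torch.log_softmax(torch.randn(bsz, beam_size, 5), dim=-1),  # channel 1, vocab size 5
        torch.log_softmax(torch.randn(bsz, beam_size, 4), dim=-1),  # channel 2, vocab size 4
    ]
    values, indices, beams = topk_sum(lprobs_list, k=k)
    assert values.shape == (bsz, k)                     # best k summed scores
    assert indices.shape == (bsz, k, len(lprobs_list))  # one vocab index per channel
    assert beams.shape == (bsz, k)                      # originating beam of each candidate
    return values, indices, beams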
class MultichannelBeamSearch(MultichannelSearch):
def __init__(self, tgt_dicts):
super().__init__(tgt_dicts)
self.constraint_states = None
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores: Optional[Dict[str, Tensor]],
prev_output_tokens: Optional[Dict[str, Tensor]] = None,
original_batch_idxs: Optional[Tensor] = None,
):
channels = list(lprobs.keys())
bsz, beam_size, _ = lprobs[channels[0]].size()
lprobs_list = []
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
for channel in channels:
lprobs_list.append(lprobs[channel][:, ::beam_size, :].contiguous())
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
for channel in channels:
lprobs_list.append(
lprobs[channel] + scores[channel][:, :, step - 1].unsqueeze(-1)
)
topk_sum_values, topk_sum_indices, topk_sum_beams = topk_sum(
lprobs_list, k=beam_size * 2
)
beams_buf = topk_sum_beams
scores_buf = {}
indices_buf = {}
for i, channel in enumerate(channels):
indices_buf[channel] = topk_sum_indices[:, :, i]
scores_buf[channel] = (
torch.tensor(
[
lprobs_list[i][i_batch, i_beam, i_index]
for i_batch in range(bsz)
for i_beam, i_index in zip(
beams_buf[i_batch], indices_buf[channel][i_batch]
)
]
)
.view(bsz, -1)
.to(lprobs_list[i].device)
)
# At this point, beams_buf and indices_buf are single-dim and contain relative indices
return scores_buf, indices_buf, beams_buf
class ContiguousMultichannelBeamSearch(MultichannelSearch):
def __init__(self, tgt_dicts):
super().__init__(tgt_dicts)
self.constraint_states = None
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores: Optional[Tensor],
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
n_channels = len(lprobs)
bsz, beam_size, _ = lprobs[0].size()
lprobs_list = []
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
for i in range(n_channels):
lprobs_list.append(lprobs[i][:, ::beam_size, :].contiguous())
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
for i in range(n_channels):
lprobs_list.append(lprobs[i] + scores[:, :, step - 1, i].unsqueeze(-1))
topk_sum_values, topk_sum_indices, topk_sum_beams = topk_sum(
lprobs_list, k=beam_size * 2
)
beams_buf = topk_sum_beams
indices_buf = topk_sum_indices
scores_buf = (
torch.tensor(
[
lprobs_list[i][i_batch, i_beam, i_index]
for i in range(len(lprobs_list))
for i_batch in range(bsz)
for i_beam, i_index in zip(
beams_buf[i_batch], indices_buf[i_batch, :, i]
)
]
)
.view(len(lprobs_list), bsz, -1)
.permute(1, 2, 0)
.to(lprobs_list[0].device)
)
# At this point, beams_buf and indices_buf are single-dim and contain relative indices
return scores_buf, indices_buf, beams_buf
class ContiguousMultichannelSampling(MultichannelSearch):
sampling_topk: int
sampling_topp: float
def __init__(self, tgt_dicts, sampling_topk=-1, sampling_topp=-1.0):
super().__init__(tgt_dicts)
self.sampling_topk = sampling_topk
self.sampling_topp = sampling_topp
def _sample_topp(self, lprobs):
"""Sample among the smallest set of elements whose cumulative probability mass exceeds p.
See `"The Curious Case of Neural Text Degeneration"
(Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_.
Args:
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
Return: A tuple of (trimed_probs, truncated_indices) where:
trimed_probs: (bsz x input_beam_size x ?)
the model's probabilities over the elements selected to sample from. The
width of the third dimension is determined by top-P.
truncated_indices: (bsz x input_beam_size x ?)
the indices of the chosen elements.
"""
probs = lprobs.exp_()
# sort the last dimension (vocab dimension) in descending order
sorted_probs, sorted_indices = probs.sort(descending=True)
# compute a mask to indicate the words to be included in the top-P set.
cumsum_probs = sorted_probs.cumsum(dim=2)
mask = cumsum_probs.lt(self.sampling_topp)
# note that mask was computed by 'lt'. One more word needs to be included
# so that the cumulative probability mass can exceed p.
cumsum_mask = mask.cumsum(dim=2)
last_included = cumsum_mask[:, :, -1:]
last_included.clamp_(0, mask.size()[2] - 1)
mask = mask.scatter_(2, last_included, 1)
# truncate unnecessary dims.
max_dim = last_included.max()
truncated_mask = mask[:, :, : max_dim + 1]
truncated_probs = sorted_probs[:, :, : max_dim + 1]
truncated_indices = sorted_indices[:, :, : max_dim + 1]
# trim the words that are not in top-P by setting their probabilities
# to 0, so that they would not be sampled later.
trim_mask = ~truncated_mask
trimed_probs = truncated_probs.masked_fill_(trim_mask, 0)
return trimed_probs, truncated_indices
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
n_channels = len(lprobs)
bsz, beam_size, vocab_size = lprobs[0].size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
for i in range(n_channels):
lprobs[i] = lprobs[i][:, ::beam_size, :].contiguous()
probs = []
top_indices = []
for i in range(n_channels):
if self.sampling_topp > 0:
# only sample from the smallest set of words whose cumulative probability mass exceeds p
probs_i, top_indices_i = self._sample_topp(lprobs[i])
elif self.sampling_topk > 0:
# only sample from top-k candidates
lprobs[i], top_indices_i = lprobs[i].topk(
min(self.sampling_topk, lprobs[i].size(-1))
)
probs_i = lprobs[i].exp_()
else:
probs_i = lprobs[i].exp_()
# dummy data to be consistent with true branch for type check
top_indices_i = torch.empty(0).to(probs_i)
probs.append(probs_i)
top_indices.append(top_indices_i)
# sample
indices_buf = []
for i in range(n_channels):
if step == 0:
indices_buf.append(
torch.multinomial(
probs[i].view(bsz, -1),
beam_size,
replacement=True,
).view(bsz, beam_size)
)
else:
indices_buf.append(
torch.multinomial(
probs[i].view(bsz * beam_size, -1),
1,
replacement=True,
).view(bsz, beam_size)
)
if step == 0:
for i in range(n_channels):
# expand to beam size
probs[i] = probs[i].expand(bsz, beam_size, -1)
# gather scores
scores_buf = []
for i in range(n_channels):
scores_buf.append(
torch.gather(probs[i], dim=2, index=indices_buf[i].unsqueeze(-1))
)
scores_buf[i] = scores_buf[i].log_().view(bsz, -1)
# remap indices if using top-k or top-P sampling
if self.sampling_topk > 0 or self.sampling_topp > 0:
for i in range(n_channels):
indices_buf[i] = torch.gather(
top_indices[i].expand(bsz, beam_size, -1),
dim=2,
index=indices_buf[i].unsqueeze(-1),
).squeeze(2)
if step == 0:
beams_buf = indices_buf[0].new_zeros(bsz, beam_size)
else:
beams_buf = torch.arange(0, beam_size).to(indices_buf[0]).repeat(bsz, 1)
# make scores cumulative
for i in range(n_channels):
scores_buf[i].add_(
torch.gather(scores[:, :, step - 1, i], dim=1, index=beams_buf)
)
scores_buf = torch.stack(scores_buf, dim=-1)
indices_buf = torch.stack(indices_buf, dim=-1)
return scores_buf, indices_buf, beams_buf
| EXA-1-master | exa/libraries/fairseq/fairseq/models/speech_dlm/sequence_generator/multichannel_search.py |
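The _sample_topp method above implements nucleus (top-p) sampling: it keeps the smallest prefix of the probability-sorted vocabulary whose cumulative mass exceeds p, including one extra word so that the threshold is actually crossed. Below is a standalone sketch of the same idea on a single 1-D probability vector; nucleus_trim is an illustrative name, not a fairseq API.

import torch

def nucleus_trim(probs: torch.Tensor, p: float):
    """Keep the smallest set of words whose cumulative probability exceeds p.
    Returns (trimmed_probs, kept_vocab_indices) for a 1-D probability vector."""
    sorted_probs, sorted_idx = probs.sort(descending=True)
    keep = sorted_probs.cumsum(dim=0).lt(p)
    last_included = keep.long().sum().clamp(max=keep.numel() - 1)
    keep[last_included] = True  # include one more word so the kept mass exceeds p
    return sorted_probs[keep], sorted_idx[keep]

# nucleus_trim(torch.tensor([0.5, 0.3, 0.15, 0.05]), p=0.7)
# -> (tensor([0.5000, 0.3000]), tensor([0, 1]))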
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
from typing import Dict
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from fairseq.data.audio.audio_utils import (
TTSSpectrogram,
get_fourier_basis,
get_mel_filters,
get_window,
)
from fairseq.data.audio.speech_to_text_dataset import S2TDataConfig
from fairseq.models import BaseFairseqModel, register_model
from fairseq.models.text_to_speech.codehifigan import CodeGenerator as CodeHiFiGANModel
from fairseq.models.text_to_speech.hifigan import Generator as HiFiGANModel
from fairseq.models.text_to_speech.hub_interface import VocoderHubInterface
logger = logging.getLogger(__name__)
class PseudoInverseMelScale(torch.nn.Module):
def __init__(self, n_stft, n_mels, sample_rate, f_min, f_max) -> None:
super(PseudoInverseMelScale, self).__init__()
self.n_mels = n_mels
basis = get_mel_filters(sample_rate, (n_stft - 1) * 2, n_mels, f_min, f_max)
basis = torch.pinverse(basis) # F x F_mel
self.register_buffer("basis", basis)
def forward(self, melspec: torch.Tensor) -> torch.Tensor:
# pack batch
shape = melspec.shape # B_1 x ... x B_K x F_mel x T
n_mels, time = shape[-2], shape[-1]
melspec = melspec.view(-1, n_mels, time)
freq, _ = self.basis.size() # F x F_mel
assert self.n_mels == n_mels, (self.n_mels, n_mels)
specgram = self.basis.matmul(melspec).clamp(min=0)
# unpack batch
specgram = specgram.view(shape[:-2] + (freq, time))
return specgram
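# Illustrative shape sketch (not part of the original fairseq file): the
# pseudo-inverse mel filterbank maps an (n_mels x T) mel spectrogram back to a
# linear spectrogram with n_fft // 2 + 1 frequency bins. The helper name and the
# sample rate / f_min / f_max values are made up.
def _pseudo_inverse_mel_demo(n_fft: int = 1024, n_mels: int = 80, n_frames: int = 50):
    inv_mel = PseudoInverseMelScale(
        n_stft=n_fft // 2 + 1, n_mels=n_mels, sample_rate=22050, f_min=0.0, f_max=8000.0
    )
    mel = torch.rand(2, n_mels, n_frames)  # (batch, n_mels, frames)
    spec = inv_mel(mel)                    # -> (batch, n_fft // 2 + 1, frames)
    assert spec.shape == (2, n_fft // 2 + 1, n_frames)
    return spec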
class GriffinLim(torch.nn.Module):
def __init__(
self,
n_fft: int,
win_length: int,
hop_length: int,
n_iter: int,
window_fn=torch.hann_window,
):
super(GriffinLim, self).__init__()
self.transform = TTSSpectrogram(
n_fft, win_length, hop_length, return_phase=True
)
basis = get_fourier_basis(n_fft)
basis = torch.pinverse(n_fft / hop_length * basis).T[:, None, :]
basis *= get_window(window_fn, n_fft, win_length)
self.register_buffer("basis", basis)
self.n_fft = n_fft
self.win_length = win_length
self.hop_length = hop_length
self.n_iter = n_iter
self.tiny = 1.1754944e-38
@classmethod
def get_window_sum_square(
cls, n_frames, hop_length, win_length, n_fft, window_fn=torch.hann_window
) -> torch.Tensor:
w_sq = get_window(window_fn, n_fft, win_length) ** 2
n = n_fft + hop_length * (n_frames - 1)
x = torch.zeros(n, dtype=torch.float32)
for i in range(n_frames):
ofst = i * hop_length
x[ofst : min(n, ofst + n_fft)] += w_sq[: max(0, min(n_fft, n - ofst))]
return x
def inverse(self, magnitude: torch.Tensor, phase) -> torch.Tensor:
x = torch.cat(
[magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1
)
x = F.conv_transpose1d(x, self.basis, stride=self.hop_length)
win_sum_sq = self.get_window_sum_square(
magnitude.shape[-1],
hop_length=self.hop_length,
win_length=self.win_length,
n_fft=self.n_fft,
).to(magnitude.device)
# remove modulation effects
approx_nonzero_indices = win_sum_sq > self.tiny
x[:, :, approx_nonzero_indices] /= win_sum_sq[approx_nonzero_indices]
x *= self.n_fft / self.hop_length
x = x[:, :, self.n_fft // 2 :]
x = x[:, :, : -self.n_fft // 2 :]
return x
def forward(self, specgram: torch.Tensor) -> torch.Tensor:
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*specgram.shape)))
angles = torch.from_numpy(angles).to(specgram)
_specgram = specgram.view(-1, specgram.shape[-2], specgram.shape[-1])
waveform = self.inverse(_specgram, angles).squeeze(1)
for _ in range(self.n_iter):
_, angles = self.transform(waveform)
waveform = self.inverse(_specgram, angles).squeeze(1)
return waveform.squeeze(0)
class GriffinLimVocoder(nn.Module):
def __init__(
self,
sample_rate,
win_size,
hop_size,
n_fft,
n_mels,
f_min,
f_max,
window_fn,
spec_bwd_max_iter=32,
fp16=False,
):
super().__init__()
self.inv_mel_transform = PseudoInverseMelScale(
n_stft=n_fft // 2 + 1,
n_mels=n_mels,
sample_rate=sample_rate,
f_min=f_min,
f_max=f_max,
)
self.gl_transform = GriffinLim(
n_fft=n_fft,
win_length=win_size,
hop_length=hop_size,
window_fn=window_fn,
n_iter=spec_bwd_max_iter,
)
if fp16:
self.half()
self.inv_mel_transform.half()
self.gl_transform.half()
else:
self.float()
self.inv_mel_transform.float()
self.gl_transform.float()
def forward(self, x):
# x: (B x) T x D -> (B x) 1 x T
# NOTE: batched forward produces noisier waveform. recommend running
# one utterance at a time
self.eval()
x = x.exp().transpose(-1, -2)
x = self.inv_mel_transform(x)
x = self.gl_transform(x)
return x
@classmethod
def from_data_cfg(cls, args, data_cfg: S2TDataConfig):
feat_cfg = data_cfg.config["features"]
window_fn = getattr(torch, feat_cfg["window_fn"] + "_window")
return cls(
sample_rate=feat_cfg["sample_rate"],
win_size=int(feat_cfg["win_len_t"] * feat_cfg["sample_rate"]),
hop_size=int(feat_cfg["hop_len_t"] * feat_cfg["sample_rate"]),
n_fft=feat_cfg["n_fft"],
n_mels=feat_cfg["n_mels"],
f_min=feat_cfg["f_min"],
f_max=feat_cfg["f_max"],
window_fn=window_fn,
spec_bwd_max_iter=args.spec_bwd_max_iter,
fp16=args.fp16,
)
class HiFiGANVocoder(nn.Module):
def __init__(
self, checkpoint_path: str, model_cfg: Dict[str, str], fp16: bool = False
) -> None:
super().__init__()
self.model = HiFiGANModel(model_cfg)
state_dict = torch.load(checkpoint_path)
self.model.load_state_dict(state_dict["generator"])
if fp16:
self.model.half()
logger.info(f"loaded HiFiGAN checkpoint from {checkpoint_path}")
def forward(self, x: torch.Tensor) -> torch.Tensor:
# (B x) T x D -> (B x) 1 x T
model = self.model.eval()
if len(x.shape) == 2:
return model(x.unsqueeze(0).transpose(1, 2)).detach().squeeze(0)
else:
return model(x.transpose(-1, -2)).detach()
@classmethod
def from_data_cfg(cls, args, data_cfg: S2TDataConfig):
vocoder_cfg = data_cfg.vocoder
assert vocoder_cfg.get("type", "griffin_lim") == "hifigan"
with open(vocoder_cfg["config"]) as f:
model_cfg = json.load(f)
return cls(vocoder_cfg["checkpoint"], model_cfg, fp16=args.fp16)
@register_model("CodeHiFiGANVocoder")
class CodeHiFiGANVocoder(BaseFairseqModel):
def __init__(
self, checkpoint_path: str, model_cfg: Dict[str, str], fp16: bool = False
) -> None:
super().__init__()
self.model = CodeHiFiGANModel(model_cfg)
if torch.cuda.is_available():
state_dict = torch.load(checkpoint_path)
else:
state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
self.model.load_state_dict(state_dict["generator"])
self.model.eval()
if fp16:
self.model.half()
self.model.remove_weight_norm()
logger.info(f"loaded CodeHiFiGAN checkpoint from {checkpoint_path}")
def forward(self, x: Dict[str, torch.Tensor], dur_prediction=False) -> torch.Tensor:
assert "code" in x
x["dur_prediction"] = dur_prediction
# remove invalid code
mask = x["code"] >= 0
x["code"] = x["code"][mask].unsqueeze(dim=0)
if "f0" in x:
f0_up_ratio = x["f0"].size(1) // x["code"].size(1)
mask = mask.unsqueeze(2).repeat(1, 1, f0_up_ratio).view(-1, x["f0"].size(1))
x["f0"] = x["f0"][mask].unsqueeze(dim=0)
return self.model(**x).detach().squeeze()
@classmethod
def from_data_cfg(cls, args, data_cfg):
vocoder_cfg = data_cfg.vocoder
assert vocoder_cfg is not None, "vocoder not specified in the data config"
with open(vocoder_cfg["config"]) as f:
model_cfg = json.load(f)
return cls(vocoder_cfg["checkpoint"], model_cfg, fp16=args.fp16)
@classmethod
def hub_models(cls):
base_url = "http://dl.fbaipublicfiles.com/fairseq/vocoder"
model_ids = [
"unit_hifigan_mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj_dur",
"unit_hifigan_mhubert_vp_en_es_fr_it3_400k_layer11_km1000_es_css10_dur",
"unit_hifigan_HK_layer12.km2500_frame_TAT-TTS",
]
return {i: f"{base_url}/{i}.tar.gz" for i in model_ids}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
config="config.json",
fp16: bool = False,
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
config_yaml=config,
fp16=fp16,
is_vocoder=True,
**kwargs,
)
with open(f"{x['args']['data']}/{config}") as f:
vocoder_cfg = json.load(f)
assert len(x["args"]["model_path"]) == 1, "Too many vocoder models in the input"
vocoder = CodeHiFiGANVocoder(x["args"]["model_path"][0], vocoder_cfg)
return VocoderHubInterface(vocoder_cfg, vocoder)
def get_vocoder(args, data_cfg: S2TDataConfig):
if args.vocoder == "griffin_lim":
return GriffinLimVocoder.from_data_cfg(args, data_cfg)
elif args.vocoder == "hifigan":
return HiFiGANVocoder.from_data_cfg(args, data_cfg)
elif args.vocoder == "code_hifigan":
return CodeHiFiGANVocoder.from_data_cfg(args, data_cfg)
else:
raise ValueError("Unknown vocoder")
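def _code_hifigan_hub_example():
    # Illustrative usage sketch, not part of the original module and not
    # executed on import: loading one of the released unit-vocoder checkpoints
    # listed in CodeHiFiGANVocoder.hub_models() returns a VocoderHubInterface
    # whose predict() maps a string of discrete unit ids to a waveform.
    # The unit string below is a made-up placeholder.
    vocoder = CodeHiFiGANVocoder.from_pretrained(
        "unit_hifigan_mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj_dur"
    )
    wav, sr = vocoder.predict("1 1 42 42 42 7", dur_prediction=True)
    return wav, sr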
| EXA-1-master | exa/libraries/fairseq/fairseq/models/text_to_speech/vocoder.py |
from argparse import Namespace
import torch
import torch.nn as nn
from fairseq.models.text_to_speech.fastspeech2 import VariancePredictor
from fairseq.models.text_to_speech.hifigan import Generator
class CodeGenerator(Generator):
def __init__(self, cfg):
super().__init__(cfg)
self.dict = nn.Embedding(cfg["num_embeddings"], cfg["embedding_dim"])
self.multispkr = cfg.get("multispkr", None)
self.embedder = cfg.get("embedder_params", None)
if self.multispkr and not self.embedder:
self.spkr = nn.Embedding(cfg.get("num_speakers", 200), cfg["embedding_dim"])
elif self.embedder:
self.spkr = nn.Linear(cfg.get("embedder_dim", 256), cfg["embedding_dim"])
self.dur_predictor = None
if cfg.get("dur_predictor_params", None):
self.dur_predictor = VariancePredictor(
Namespace(**cfg["dur_predictor_params"])
)
self.f0 = cfg.get("f0", None)
n_f0_bin = cfg.get("f0_quant_num_bin", 0)
self.f0_quant_embed = (
None if n_f0_bin <= 0 else nn.Embedding(n_f0_bin, cfg["embedding_dim"])
)
@staticmethod
def _upsample(signal, max_frames):
if signal.dim() == 3:
bsz, channels, cond_length = signal.size()
elif signal.dim() == 2:
signal = signal.unsqueeze(2)
bsz, channels, cond_length = signal.size()
else:
signal = signal.view(-1, 1, 1)
bsz, channels, cond_length = signal.size()
signal = signal.unsqueeze(3).repeat(1, 1, 1, max_frames // cond_length)
# padding would be needed here if max_frames were not an exact multiple
# of the condition length; that case is not supported
remainder = (max_frames - signal.shape[2] * signal.shape[3]) // signal.shape[3]
if remainder > 0:
raise NotImplementedError(
"Padding condition signal - misalignment between condition features."
)
signal = signal.view(bsz, channels, max_frames)
return signal
def forward(self, **kwargs):
x = self.dict(kwargs["code"]).transpose(1, 2)
if self.dur_predictor and kwargs.get("dur_prediction", False):
assert x.size(0) == 1, "only support single sample"
log_dur_pred = self.dur_predictor(x.transpose(1, 2))
dur_out = torch.clamp(
torch.round((torch.exp(log_dur_pred) - 1)).long(), min=1
)
# B x C x T
x = torch.repeat_interleave(x, dur_out.view(-1), dim=2)
if self.f0:
if self.f0_quant_embed:
kwargs["f0"] = self.f0_quant_embed(kwargs["f0"].long()).transpose(1, 2)
else:
kwargs["f0"] = kwargs["f0"].unsqueeze(1)
if x.shape[-1] < kwargs["f0"].shape[-1]:
x = self._upsample(x, kwargs["f0"].shape[-1])
elif x.shape[-1] > kwargs["f0"].shape[-1]:
kwargs["f0"] = self._upsample(kwargs["f0"], x.shape[-1])
x = torch.cat([x, kwargs["f0"]], dim=1)
if self.multispkr:
assert (
"spkr" in kwargs
), 'require "spkr" input for multispeaker CodeHiFiGAN vocoder'
spkr = self.spkr(kwargs["spkr"]).transpose(1, 2)
spkr = self._upsample(spkr, x.shape[-1])
x = torch.cat([x, spkr], dim=1)
for k, feat in kwargs.items():
if k in ["spkr", "code", "f0", "dur_prediction"]:
continue
feat = self._upsample(feat, x.shape[-1])
x = torch.cat([x, feat], dim=1)
return super().forward(x)
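def _dur_expansion_demo():
    # Illustrative sketch (not part of the original module): the duration
    # prediction path above rounds exp(log_dur) - 1 to integer frame counts
    # (at least 1) and repeats each unit embedding accordingly. The predicted
    # values below are made up for the example.
    log_dur_pred = torch.tensor([[0.0, 1.0, 2.0]])
    dur_out = torch.clamp(torch.round(torch.exp(log_dur_pred) - 1).long(), min=1)
    x = torch.arange(3, dtype=torch.float32).view(1, 1, 3)  # B x C x T
    expanded = torch.repeat_interleave(x, dur_out.view(-1), dim=2)
    assert expanded.shape == (1, 1, int(dur_out.sum()))
    return expanded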
| EXA-1-master | exa/libraries/fairseq/fairseq/models/text_to_speech/codehifigan.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
from torch import nn
from torch.nn import functional as F
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import LSTMCellWithZoneOut, LocationAttention
logger = logging.getLogger(__name__)
def encoder_init(m):
if isinstance(m, nn.Conv1d):
nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu"))
class Tacotron2Encoder(FairseqEncoder):
def __init__(self, args, src_dict, embed_speaker):
super().__init__(src_dict)
self.padding_idx = src_dict.pad()
self.embed_speaker = embed_speaker
self.spk_emb_proj = None
if embed_speaker is not None:
self.spk_emb_proj = nn.Linear(
args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim
)
self.embed_tokens = nn.Embedding(
len(src_dict), args.encoder_embed_dim, padding_idx=self.padding_idx
)
assert args.encoder_conv_kernel_size % 2 == 1
self.convolutions = nn.ModuleList(
nn.Sequential(
nn.Conv1d(
args.encoder_embed_dim,
args.encoder_embed_dim,
kernel_size=args.encoder_conv_kernel_size,
padding=((args.encoder_conv_kernel_size - 1) // 2),
),
nn.BatchNorm1d(args.encoder_embed_dim),
nn.ReLU(),
nn.Dropout(args.encoder_dropout),
)
for _ in range(args.encoder_conv_layers)
)
self.lstm = nn.LSTM(
args.encoder_embed_dim,
args.encoder_embed_dim // 2,
num_layers=args.encoder_lstm_layers,
batch_first=True,
bidirectional=True,
)
self.apply(encoder_init)
def forward(self, src_tokens, src_lengths=None, speaker=None, **kwargs):
x = self.embed_tokens(src_tokens)
x = x.transpose(1, 2).contiguous() # B x T x C -> B x C x T
for conv in self.convolutions:
x = conv(x)
x = x.transpose(1, 2).contiguous() # B x C x T -> B x T x C
src_lengths = src_lengths.cpu().long()
x = nn.utils.rnn.pack_padded_sequence(x, src_lengths, batch_first=True)
x = self.lstm(x)[0]
x = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)[0]
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if self.embed_speaker is not None:
bsz, seq_len, _ = x.size()
emb = self.embed_speaker(speaker).expand(bsz, seq_len, -1)
x = self.spk_emb_proj(torch.cat([x, emb], dim=2))
return {
"encoder_out": [x], # B x T x C
"encoder_padding_mask": encoder_padding_mask, # B x T
}
class Prenet(nn.Module):
def __init__(self, in_dim, n_layers, n_units, dropout):
super().__init__()
self.layers = nn.ModuleList(
nn.Sequential(nn.Linear(in_dim if i == 0 else n_units, n_units), nn.ReLU())
for i in range(n_layers)
)
self.dropout = dropout
def forward(self, x):
for layer in self.layers:
x = F.dropout(layer(x), p=self.dropout) # always applies dropout
return x
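def _prenet_dropout_demo():
    # Illustrative sketch (not part of the original module): F.dropout
    # defaults to training=True, so the prenet above keeps dropping units even
    # in eval mode; two passes over the same input therefore differ, which is
    # the standard Tacotron 2 behaviour at inference time.
    prenet = Prenet(in_dim=80, n_layers=2, n_units=256, dropout=0.5).eval()
    x = torch.randn(4, 10, 80)
    assert not torch.allclose(prenet(x), prenet(x))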
class Postnet(nn.Module):
def __init__(self, in_dim, n_channels, kernel_size, n_layers, dropout):
super(Postnet, self).__init__()
self.convolutions = nn.ModuleList()
assert kernel_size % 2 == 1
for i in range(n_layers):
cur_layers = (
[
nn.Conv1d(
in_dim if i == 0 else n_channels,
n_channels if i < n_layers - 1 else in_dim,
kernel_size=kernel_size,
padding=((kernel_size - 1) // 2),
),
nn.BatchNorm1d(n_channels if i < n_layers - 1 else in_dim),
]
+ ([nn.Tanh()] if i < n_layers - 1 else [])
+ [nn.Dropout(dropout)]
)
nn.init.xavier_uniform_(
cur_layers[0].weight,
torch.nn.init.calculate_gain("tanh" if i < n_layers - 1 else "linear"),
)
self.convolutions.append(nn.Sequential(*cur_layers))
def forward(self, x):
x = x.transpose(1, 2) # B x T x C -> B x C x T
for conv in self.convolutions:
x = conv(x)
return x.transpose(1, 2)
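def _postnet_residual_demo():
    # Illustrative sketch (not part of the original module): the postnet
    # predicts a residual correction, so callers add its output back to the
    # coarse spectrogram (see `feat_out + self.postnet(feat_out)` in the
    # decoder below). Shapes are B x T x C in and out.
    postnet = Postnet(in_dim=80, n_channels=512, kernel_size=5, n_layers=5, dropout=0.5)
    mel = torch.randn(2, 100, 80)
    refined = mel + postnet(mel)
    assert refined.shape == mel.shape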
def decoder_init(m):
if isinstance(m, torch.nn.Conv1d):
nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("tanh"))
class Tacotron2Decoder(FairseqIncrementalDecoder):
def __init__(self, args, src_dict):
super().__init__(None)
self.args = args
self.n_frames_per_step = args.n_frames_per_step
self.out_dim = args.output_frame_dim * args.n_frames_per_step
self.prenet = Prenet(
self.out_dim, args.prenet_layers, args.prenet_dim, args.prenet_dropout
)
# take prev_context, prev_frame, (speaker embedding) as input
self.attention_lstm = LSTMCellWithZoneOut(
args.zoneout,
args.prenet_dim + args.encoder_embed_dim,
args.decoder_lstm_dim,
)
# take attention_lstm output, attention_state, encoder_out as input
self.attention = LocationAttention(
args.attention_dim,
args.encoder_embed_dim,
args.decoder_lstm_dim,
(1 + int(args.attention_use_cumprob)),
args.attention_conv_dim,
args.attention_conv_kernel_size,
)
# take attention_lstm output, context, (gated_latent) as input
self.lstm = nn.ModuleList(
LSTMCellWithZoneOut(
args.zoneout,
args.encoder_embed_dim + args.decoder_lstm_dim,
args.decoder_lstm_dim,
)
for i in range(args.decoder_lstm_layers)
)
proj_in_dim = args.encoder_embed_dim + args.decoder_lstm_dim
self.feat_proj = nn.Linear(proj_in_dim, self.out_dim)
self.eos_proj = nn.Linear(proj_in_dim, 1)
self.postnet = Postnet(
self.out_dim,
args.postnet_conv_dim,
args.postnet_conv_kernel_size,
args.postnet_layers,
args.postnet_dropout,
)
self.ctc_proj = None
if getattr(args, "ctc_weight", 0.0) > 0.0:
self.ctc_proj = nn.Linear(self.out_dim, len(src_dict))
self.apply(decoder_init)
def _get_states(self, incremental_state, enc_out):
bsz, in_len, _ = enc_out.size()
alstm_h = self.get_incremental_state(incremental_state, "alstm_h")
if alstm_h is None:
alstm_h = enc_out.new_zeros(bsz, self.args.decoder_lstm_dim)
alstm_c = self.get_incremental_state(incremental_state, "alstm_c")
if alstm_c is None:
alstm_c = enc_out.new_zeros(bsz, self.args.decoder_lstm_dim)
lstm_h = self.get_incremental_state(incremental_state, "lstm_h")
if lstm_h is None:
lstm_h = [
enc_out.new_zeros(bsz, self.args.decoder_lstm_dim)
for _ in range(self.args.decoder_lstm_layers)
]
lstm_c = self.get_incremental_state(incremental_state, "lstm_c")
if lstm_c is None:
lstm_c = [
enc_out.new_zeros(bsz, self.args.decoder_lstm_dim)
for _ in range(self.args.decoder_lstm_layers)
]
attn_w = self.get_incremental_state(incremental_state, "attn_w")
if attn_w is None:
attn_w = enc_out.new_zeros(bsz, in_len)
attn_w_cum = self.get_incremental_state(incremental_state, "attn_w_cum")
if attn_w_cum is None:
attn_w_cum = enc_out.new_zeros(bsz, in_len)
return alstm_h, alstm_c, lstm_h, lstm_c, attn_w, attn_w_cum
def _get_init_attn_c(self, enc_out, enc_mask):
bsz = enc_out.size(0)
if self.args.init_attn_c == "zero":
return enc_out.new_zeros(bsz, self.args.encoder_embed_dim)
elif self.args.init_attn_c == "avg":
enc_w = (~enc_mask).type(enc_out.type())
enc_w = enc_w / enc_w.sum(dim=1, keepdim=True)
return torch.sum(enc_out * enc_w.unsqueeze(2), dim=1)
else:
raise ValueError(f"{self.args.init_attn_c} not supported")
def forward(
self,
prev_output_tokens,
encoder_out=None,
incremental_state=None,
target_lengths=None,
**kwargs,
):
enc_mask = encoder_out["encoder_padding_mask"]
enc_out = encoder_out["encoder_out"][0]
in_len = enc_out.size(1)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:, :]
bsz, out_len, _ = prev_output_tokens.size()
prenet_out = self.prenet(prev_output_tokens)
(alstm_h, alstm_c, lstm_h, lstm_c, attn_w, attn_w_cum) = self._get_states(
incremental_state, enc_out
)
attn_ctx = self._get_init_attn_c(enc_out, enc_mask)
attn_out = enc_out.new_zeros(bsz, in_len, out_len)
feat_out = enc_out.new_zeros(bsz, out_len, self.out_dim)
eos_out = enc_out.new_zeros(bsz, out_len)
for t in range(out_len):
alstm_in = torch.cat((attn_ctx, prenet_out[:, t, :]), dim=1)
alstm_h, alstm_c = self.attention_lstm(alstm_in, (alstm_h, alstm_c))
attn_state = attn_w.unsqueeze(1)
if self.args.attention_use_cumprob:
attn_state = torch.stack((attn_w, attn_w_cum), dim=1)
attn_ctx, attn_w = self.attention(enc_out, enc_mask, alstm_h, attn_state)
attn_w_cum = attn_w_cum + attn_w
attn_out[:, :, t] = attn_w
for i, cur_lstm in enumerate(self.lstm):
if i == 0:
lstm_in = torch.cat((attn_ctx, alstm_h), dim=1)
else:
lstm_in = torch.cat((attn_ctx, lstm_h[i - 1]), dim=1)
lstm_h[i], lstm_c[i] = cur_lstm(lstm_in, (lstm_h[i], lstm_c[i]))
proj_in = torch.cat((attn_ctx, lstm_h[-1]), dim=1)
feat_out[:, t, :] = self.feat_proj(proj_in)
eos_out[:, t] = self.eos_proj(proj_in).squeeze(1)
self.attention.clear_cache()
self.set_incremental_state(incremental_state, "alstm_h", alstm_h)
self.set_incremental_state(incremental_state, "alstm_c", alstm_c)
self.set_incremental_state(incremental_state, "lstm_h", lstm_h)
self.set_incremental_state(incremental_state, "lstm_c", lstm_c)
self.set_incremental_state(incremental_state, "attn_w", attn_w)
self.set_incremental_state(incremental_state, "attn_w_cum", attn_w_cum)
post_feat_out = feat_out + self.postnet(feat_out)
eos_out = eos_out.view(bsz, out_len, 1)
return post_feat_out, eos_out, {"attn": attn_out, "feature_out": feat_out}
@register_model("tacotron_2")
class Tacotron2Model(FairseqEncoderDecoderModel):
"""
Implementation for https://arxiv.org/pdf/1712.05884.pdf
"""
@staticmethod
def add_args(parser):
# encoder
parser.add_argument("--encoder-dropout", type=float)
parser.add_argument("--encoder-embed-dim", type=int)
parser.add_argument("--encoder-conv-layers", type=int)
parser.add_argument("--encoder-conv-kernel-size", type=int)
parser.add_argument("--encoder-lstm-layers", type=int)
# decoder
parser.add_argument("--attention-dim", type=int)
parser.add_argument("--attention-conv-dim", type=int)
parser.add_argument("--attention-conv-kernel-size", type=int)
parser.add_argument("--prenet-dropout", type=float)
parser.add_argument("--prenet-layers", type=int)
parser.add_argument("--prenet-dim", type=int)
parser.add_argument("--postnet-dropout", type=float)
parser.add_argument("--postnet-layers", type=int)
parser.add_argument("--postnet-conv-dim", type=int)
parser.add_argument("--postnet-conv-kernel-size", type=int)
parser.add_argument("--init-attn-c", type=str)
parser.add_argument("--attention-use-cumprob", action="store_true")
parser.add_argument("--zoneout", type=float)
parser.add_argument("--decoder-lstm-layers", type=int)
parser.add_argument("--decoder-lstm-dim", type=int)
parser.add_argument("--output-frame-dim", type=int)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._num_updates = 0
@classmethod
def build_model(cls, args, task):
embed_speaker = task.get_speaker_embeddings(args)
encoder = Tacotron2Encoder(args, task.src_dict, embed_speaker)
decoder = Tacotron2Decoder(args, task.src_dict)
return cls(encoder, decoder)
def forward_encoder(self, src_tokens, src_lengths, **kwargs):
return self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self._num_updates = num_updates
@register_model_architecture("tacotron_2", "tacotron_2")
def base_architecture(args):
# encoder
args.encoder_dropout = getattr(args, "encoder_dropout", 0.5)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_conv_layers = getattr(args, "encoder_conv_layers", 3)
args.encoder_conv_kernel_size = getattr(args, "encoder_conv_kernel_size", 5)
args.encoder_lstm_layers = getattr(args, "encoder_lstm_layers", 1)
# decoder
args.attention_dim = getattr(args, "attention_dim", 128)
args.attention_conv_dim = getattr(args, "attention_conv_dim", 32)
args.attention_conv_kernel_size = getattr(args, "attention_conv_kernel_size", 15)
args.prenet_dropout = getattr(args, "prenet_dropout", 0.5)
args.prenet_layers = getattr(args, "prenet_layers", 2)
args.prenet_dim = getattr(args, "prenet_dim", 256)
args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
args.init_attn_c = getattr(args, "init_attn_c", "zero")
args.attention_use_cumprob = getattr(args, "attention_use_cumprob", True)
args.zoneout = getattr(args, "zoneout", 0.1)
args.decoder_lstm_layers = getattr(args, "decoder_lstm_layers", 2)
args.decoder_lstm_dim = getattr(args, "decoder_lstm_dim", 1024)
args.output_frame_dim = getattr(args, "output_frame_dim", 80)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/text_to_speech/tacotron2.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .tacotron2 import * # noqa
from .tts_transformer import * # noqa
from .fastspeech2 import * # noqa
from .vocoder import * # noqa
| EXA-1-master | exa/libraries/fairseq/fairseq/models/text_to_speech/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
from torch import nn
from fairseq import utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.text_to_speech.hub_interface import TTSHubInterface
from fairseq.models.text_to_speech.tacotron2 import Postnet
from fairseq.modules import (
FairseqDropout,
LayerNorm,
MultiheadAttention,
PositionalEmbedding,
)
logger = logging.getLogger(__name__)
def model_init(m):
if isinstance(m, nn.Conv1d):
nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu"))
def Embedding(num_embeddings, embedding_dim, padding_idx=None):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
return m
class PositionwiseFeedForward(nn.Module):
def __init__(self, in_dim, hidden_dim, kernel_size, dropout):
super().__init__()
self.ffn = nn.Sequential(
nn.Conv1d(
in_dim,
hidden_dim,
kernel_size=kernel_size,
padding=(kernel_size - 1) // 2,
),
nn.ReLU(),
nn.Conv1d(
hidden_dim,
in_dim,
kernel_size=kernel_size,
padding=(kernel_size - 1) // 2,
),
)
self.layer_norm = LayerNorm(in_dim)
self.dropout = self.dropout_module = FairseqDropout(
p=dropout, module_name=self.__class__.__name__
)
def forward(self, x):
# B x T x C
residual = x
x = self.ffn(x.transpose(1, 2)).transpose(1, 2)
x = self.dropout(x)
return self.layer_norm(x + residual)
class FFTLayer(torch.nn.Module):
def __init__(
self, embed_dim, n_heads, hidden_dim, kernel_size, dropout, attention_dropout
):
super().__init__()
self.self_attn = MultiheadAttention(
embed_dim, n_heads, dropout=attention_dropout, self_attention=True
)
self.layer_norm = LayerNorm(embed_dim)
self.ffn = PositionwiseFeedForward(
embed_dim, hidden_dim, kernel_size, dropout=dropout
)
def forward(self, x, padding_mask=None):
# B x T x C
residual = x
x = x.transpose(0, 1)
x, _ = self.self_attn(
query=x, key=x, value=x, key_padding_mask=padding_mask, need_weights=False
)
x = x.transpose(0, 1)
x = self.layer_norm(x + residual)
return self.ffn(x)
class LengthRegulator(nn.Module):
def forward(self, x, durations):
# x: B x T x C
out_lens = durations.sum(dim=1)
max_len = out_lens.max()
bsz, seq_len, dim = x.size()
out = x.new_zeros((bsz, max_len, dim))
for b in range(bsz):
indices = []
for t in range(seq_len):
indices.extend([t] * utils.item(durations[b, t]))
indices = torch.tensor(indices, dtype=torch.long).to(x.device)
out_len = utils.item(out_lens[b])
out[b, :out_len] = x[b].index_select(0, indices)
return out, out_lens
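def _length_regulator_demo():
    # Illustrative sketch (not part of the original module): with durations
    # [2, 1, 3] a 3-frame encoder output expands to 6 decoder frames, frame i
    # being copied durations[i] times.
    x = torch.arange(3, dtype=torch.float32).view(1, 3, 1)  # B x T x C
    durations = torch.tensor([[2, 1, 3]])
    out, out_lens = LengthRegulator()(x, durations)
    assert out.shape == (1, 6, 1) and int(out_lens[0]) == 6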
class VariancePredictor(nn.Module):
def __init__(self, args):
super().__init__()
self.conv1 = nn.Sequential(
nn.Conv1d(
args.encoder_embed_dim,
args.var_pred_hidden_dim,
kernel_size=args.var_pred_kernel_size,
padding=(args.var_pred_kernel_size - 1) // 2,
),
nn.ReLU(),
)
self.ln1 = nn.LayerNorm(args.var_pred_hidden_dim)
self.dropout_module = FairseqDropout(
p=args.var_pred_dropout, module_name=self.__class__.__name__
)
self.conv2 = nn.Sequential(
nn.Conv1d(
args.var_pred_hidden_dim,
args.var_pred_hidden_dim,
kernel_size=args.var_pred_kernel_size,
padding=1,
),
nn.ReLU(),
)
self.ln2 = nn.LayerNorm(args.var_pred_hidden_dim)
self.proj = nn.Linear(args.var_pred_hidden_dim, 1)
def forward(self, x):
# Input: B x T x C; Output: B x T
x = self.conv1(x.transpose(1, 2)).transpose(1, 2)
x = self.dropout_module(self.ln1(x))
x = self.conv2(x.transpose(1, 2)).transpose(1, 2)
x = self.dropout_module(self.ln2(x))
return self.proj(x).squeeze(dim=2)
class VarianceAdaptor(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
self.length_regulator = LengthRegulator()
self.duration_predictor = VariancePredictor(args)
self.pitch_predictor = VariancePredictor(args)
self.energy_predictor = VariancePredictor(args)
n_bins, steps = self.args.var_pred_n_bins, self.args.var_pred_n_bins - 1
self.pitch_bins = torch.linspace(args.pitch_min, args.pitch_max, steps)
self.embed_pitch = Embedding(n_bins, args.encoder_embed_dim)
self.energy_bins = torch.linspace(args.energy_min, args.energy_max, steps)
self.embed_energy = Embedding(n_bins, args.encoder_embed_dim)
def get_pitch_emb(self, x, tgt=None, factor=1.0):
out = self.pitch_predictor(x)
bins = self.pitch_bins.to(x.device)
if tgt is None:
out = out * factor
emb = self.embed_pitch(torch.bucketize(out, bins))
else:
emb = self.embed_pitch(torch.bucketize(tgt, bins))
return out, emb
def get_energy_emb(self, x, tgt=None, factor=1.0):
out = self.energy_predictor(x)
bins = self.energy_bins.to(x.device)
if tgt is None:
out = out * factor
emb = self.embed_energy(torch.bucketize(out, bins))
else:
emb = self.embed_energy(torch.bucketize(tgt, bins))
return out, emb
def forward(
self,
x,
padding_mask,
durations=None,
pitches=None,
energies=None,
d_factor=1.0,
p_factor=1.0,
e_factor=1.0,
):
# x: B x T x C
log_dur_out = self.duration_predictor(x)
dur_out = torch.clamp(
torch.round((torch.exp(log_dur_out) - 1) * d_factor).long(), min=0
)
dur_out.masked_fill_(padding_mask, 0)
pitch_out, pitch_emb = self.get_pitch_emb(x, pitches, p_factor)
x = x + pitch_emb
energy_out, energy_emb = self.get_energy_emb(x, energies, e_factor)
x = x + energy_emb
x, out_lens = self.length_regulator(
x, dur_out if durations is None else durations
)
return x, out_lens, log_dur_out, pitch_out, energy_out
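def _pitch_bucketize_demo():
    # Illustrative sketch (not part of the original module): the adaptor above
    # turns a continuous pitch prediction into embedding indices by
    # bucketizing it against n_bins - 1 boundaries, so indices always fall in
    # [0, n_bins - 1]. The bin range and pitch values below are made up.
    n_bins = 8  # the default config uses 256
    bins = torch.linspace(-2.0, 2.0, n_bins - 1)
    pitch_out = torch.tensor([[-3.0, 0.0, 5.0]])
    indices = torch.bucketize(pitch_out, bins)
    pitch_emb = nn.Embedding(n_bins, 4)(indices)  # B x T x 4, added to x
    assert int(indices.max()) <= n_bins - 1 and pitch_emb.shape == (1, 3, 4)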
class FastSpeech2Encoder(FairseqEncoder):
def __init__(self, args, src_dict, embed_speaker):
super().__init__(src_dict)
self.args = args
self.padding_idx = src_dict.pad()
self.n_frames_per_step = args.n_frames_per_step
self.out_dim = args.output_frame_dim * args.n_frames_per_step
self.embed_speaker = embed_speaker
self.spk_emb_proj = None
if embed_speaker is not None:
self.spk_emb_proj = nn.Linear(
args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim
)
self.dropout_module = FairseqDropout(
p=args.dropout, module_name=self.__class__.__name__
)
self.embed_tokens = Embedding(
len(src_dict), args.encoder_embed_dim, padding_idx=self.padding_idx
)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, args.encoder_embed_dim, self.padding_idx
)
self.pos_emb_alpha = nn.Parameter(torch.ones(1))
self.dec_pos_emb_alpha = nn.Parameter(torch.ones(1))
self.encoder_fft_layers = nn.ModuleList(
FFTLayer(
args.encoder_embed_dim,
args.encoder_attention_heads,
args.fft_hidden_dim,
args.fft_kernel_size,
dropout=args.dropout,
attention_dropout=args.attention_dropout,
)
for _ in range(args.encoder_layers)
)
self.var_adaptor = VarianceAdaptor(args)
self.decoder_fft_layers = nn.ModuleList(
FFTLayer(
args.decoder_embed_dim,
args.decoder_attention_heads,
args.fft_hidden_dim,
args.fft_kernel_size,
dropout=args.dropout,
attention_dropout=args.attention_dropout,
)
for _ in range(args.decoder_layers)
)
self.out_proj = nn.Linear(args.decoder_embed_dim, self.out_dim)
self.postnet = None
if args.add_postnet:
self.postnet = Postnet(
self.out_dim,
args.postnet_conv_dim,
args.postnet_conv_kernel_size,
args.postnet_layers,
args.postnet_dropout,
)
self.apply(model_init)
def forward(
self,
src_tokens,
src_lengths=None,
speaker=None,
durations=None,
pitches=None,
energies=None,
**kwargs,
):
x = self.embed_tokens(src_tokens)
enc_padding_mask = src_tokens.eq(self.padding_idx)
x += self.pos_emb_alpha * self.embed_positions(enc_padding_mask)
x = self.dropout_module(x)
for layer in self.encoder_fft_layers:
x = layer(x, enc_padding_mask)
if self.embed_speaker is not None:
bsz, seq_len, _ = x.size()
emb = self.embed_speaker(speaker).expand(bsz, seq_len, -1)
x = self.spk_emb_proj(torch.cat([x, emb], dim=2))
x, out_lens, log_dur_out, pitch_out, energy_out = self.var_adaptor(
x, enc_padding_mask, durations, pitches, energies
)
dec_padding_mask = lengths_to_padding_mask(out_lens)
x += self.dec_pos_emb_alpha * self.embed_positions(dec_padding_mask)
for layer in self.decoder_fft_layers:
x = layer(x, dec_padding_mask)
x = self.out_proj(x)
x_post = None
if self.postnet is not None:
x_post = x + self.postnet(x)
return x, x_post, out_lens, log_dur_out, pitch_out, energy_out
@register_model("fastspeech2")
class FastSpeech2Model(FairseqEncoderModel):
"""
Implementation for https://arxiv.org/abs/2006.04558
"""
NON_AUTOREGRESSIVE = True
@classmethod
def hub_models(cls):
base_url = "http://dl.fbaipublicfiles.com/fairseq/s2"
model_ids = [
"fastspeech2-en-ljspeech",
"fastspeech2-en-200_speaker-cv4",
]
return {i: f"{base_url}/{i}.tar.gz" for i in model_ids}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
config_yaml="config.yaml",
vocoder: str = "griffin_lim",
fp16: bool = False,
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
config_yaml=config_yaml,
vocoder=vocoder,
fp16=fp16,
**kwargs,
)
return TTSHubInterface(x["args"], x["task"], x["models"][0])
@staticmethod
def add_args(parser):
parser.add_argument("--dropout", type=float)
parser.add_argument("--output-frame-dim", type=int)
parser.add_argument("--speaker-embed-dim", type=int)
# FFT blocks
parser.add_argument("--fft-hidden-dim", type=int)
parser.add_argument("--fft-kernel-size", type=int)
parser.add_argument("--attention-dropout", type=float)
parser.add_argument("--encoder-layers", type=int)
parser.add_argument("--encoder-embed-dim", type=int)
parser.add_argument("--encoder-attention-heads", type=int)
parser.add_argument("--decoder-layers", type=int)
parser.add_argument("--decoder-embed-dim", type=int)
parser.add_argument("--decoder-attention-heads", type=int)
# variance predictor
parser.add_argument("--var-pred-n-bins", type=int)
parser.add_argument("--var-pred-hidden-dim", type=int)
parser.add_argument("--var-pred-kernel-size", type=int)
parser.add_argument("--var-pred-dropout", type=float)
# postnet
parser.add_argument("--add-postnet", action="store_true")
parser.add_argument("--postnet-dropout", type=float)
parser.add_argument("--postnet-layers", type=int)
parser.add_argument("--postnet-conv-dim", type=int)
parser.add_argument("--postnet-conv-kernel-size", type=int)
def __init__(self, encoder, args, src_dict):
super().__init__(encoder)
self._num_updates = 0
out_dim = args.output_frame_dim * args.n_frames_per_step
self.ctc_proj = None
if getattr(args, "ctc_weight", 0.0) > 0.0:
self.ctc_proj = nn.Linear(out_dim, len(src_dict))
@classmethod
def build_model(cls, args, task):
embed_speaker = task.get_speaker_embeddings(args)
encoder = FastSpeech2Encoder(args, task.src_dict, embed_speaker)
return cls(encoder, args, task.src_dict)
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self._num_updates = num_updates
def get_normalized_probs(self, net_output, log_probs, sample=None):
logits = self.ctc_proj(net_output[0])
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
@register_model_architecture("fastspeech2", "fastspeech2")
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.2)
args.output_frame_dim = getattr(args, "output_frame_dim", 80)
args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 64)
# FFT blocks
args.fft_hidden_dim = getattr(args, "fft_hidden_dim", 1024)
args.fft_kernel_size = getattr(args, "fft_kernel_size", 9)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.encoder_layers = getattr(args, "encoder_layers", 4)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2)
args.decoder_layers = getattr(args, "decoder_layers", 4)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2)
# variance predictor
args.var_pred_n_bins = getattr(args, "var_pred_n_bins", 256)
args.var_pred_hidden_dim = getattr(args, "var_pred_hidden_dim", 256)
args.var_pred_kernel_size = getattr(args, "var_pred_kernel_size", 3)
args.var_pred_dropout = getattr(args, "var_pred_dropout", 0.5)
# postnet
args.add_postnet = getattr(args, "add_postnet", False)
args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/text_to_speech/fastspeech2.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv1d, ConvTranspose1d
from torch.nn.utils import remove_weight_norm, weight_norm
LRELU_SLOPE = 0.1
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def get_padding(kernel_size, dilation=1):
return (kernel_size * dilation - dilation) // 2
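def _get_padding_demo():
    # Illustrative sketch (not part of the original module): with this padding
    # a stride-1 convolution keeps the time dimension unchanged for odd kernel
    # sizes, which the residual blocks below rely on.
    conv = Conv1d(1, 1, kernel_size=7, dilation=2, padding=get_padding(7, 2))
    assert conv(torch.randn(1, 1, 100)).shape[-1] == 100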
class ResBlock(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock, self).__init__()
self.convs1 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2]),
)
),
]
)
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=1,
padding=get_padding(kernel_size, 1),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=1,
padding=get_padding(kernel_size, 1),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=1,
padding=get_padding(kernel_size, 1),
)
),
]
)
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for layer in self.convs1:
remove_weight_norm(layer)
for layer in self.convs2:
remove_weight_norm(layer)
class Generator(torch.nn.Module):
def __init__(self, cfg):
super(Generator, self).__init__()
self.num_kernels = len(cfg["resblock_kernel_sizes"])
self.num_upsamples = len(cfg["upsample_rates"])
self.conv_pre = weight_norm(
Conv1d(
cfg.get("model_in_dim", 80),
cfg["upsample_initial_channel"],
7,
1,
padding=3,
)
)
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(
zip(cfg["upsample_rates"], cfg["upsample_kernel_sizes"])
):
self.ups.append(
weight_norm(
ConvTranspose1d(
cfg["upsample_initial_channel"] // (2**i),
cfg["upsample_initial_channel"] // (2 ** (i + 1)),
k,
u,
padding=(k - u) // 2,
)
)
)
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = cfg["upsample_initial_channel"] // (2 ** (i + 1))
for k, d in zip(
cfg["resblock_kernel_sizes"], cfg["resblock_dilation_sizes"]
):
self.resblocks.append(ResBlock(ch, k, d))
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x):
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.ups[i](x)
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i * self.num_kernels + j](x)
else:
xs += self.resblocks[i * self.num_kernels + j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print("Removing weight norm...")
for layer in self.ups:
remove_weight_norm(layer)
for layer in self.resblocks:
layer.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
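def _generator_smoke_test():
    # Illustrative smoke test; the config values below are made up and do not
    # correspond to an official fairseq/HiFi-GAN checkpoint. The upsample
    # rates multiply to the hop size, so 10 input frames become
    # 10 * 8 * 8 * 2 * 2 waveform samples.
    toy_cfg = {
        "resblock_kernel_sizes": [3, 7, 11],
        "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        "upsample_rates": [8, 8, 2, 2],
        "upsample_kernel_sizes": [16, 16, 4, 4],
        "upsample_initial_channel": 128,
    }
    net = Generator(toy_cfg)
    wav = net(torch.randn(1, 80, 10))  # B x model_in_dim x T
    assert wav.shape == (1, 1, 10 * 8 * 8 * 2 * 2)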
| EXA-1-master | exa/libraries/fairseq/fairseq/models/text_to_speech/hifigan.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List, Optional
import torch
from torch import nn
from fairseq import utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.models.text_to_speech.hub_interface import TTSHubInterface
from fairseq.models.text_to_speech.tacotron2 import Postnet, Prenet
from fairseq.modules import (
FairseqDropout,
LayerNorm,
PositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
)
logger = logging.getLogger(__name__)
def encoder_init(m):
if isinstance(m, nn.Conv1d):
nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu"))
def Embedding(num_embeddings, embedding_dim):
m = nn.Embedding(num_embeddings, embedding_dim)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
return m
class TTSTransformerEncoder(FairseqEncoder):
def __init__(self, args, src_dict, embed_speaker):
super().__init__(src_dict)
self.padding_idx = src_dict.pad()
self.embed_speaker = embed_speaker
self.spk_emb_proj = None
if embed_speaker is not None:
self.spk_emb_proj = nn.Linear(
args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim
)
self.dropout_module = FairseqDropout(
p=args.dropout, module_name=self.__class__.__name__
)
self.embed_tokens = nn.Embedding(
len(src_dict), args.encoder_embed_dim, padding_idx=self.padding_idx
)
assert args.encoder_conv_kernel_size % 2 == 1
self.prenet = nn.ModuleList(
nn.Sequential(
nn.Conv1d(
args.encoder_embed_dim,
args.encoder_embed_dim,
kernel_size=args.encoder_conv_kernel_size,
padding=((args.encoder_conv_kernel_size - 1) // 2),
),
nn.BatchNorm1d(args.encoder_embed_dim),
nn.ReLU(),
nn.Dropout(args.encoder_dropout),
)
for _ in range(args.encoder_conv_layers)
)
self.prenet_proj = nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, args.encoder_embed_dim, self.padding_idx
)
self.pos_emb_alpha = nn.Parameter(torch.ones(1))
self.transformer_layers = nn.ModuleList(
TransformerEncoderLayer(args)
for _ in range(args.encoder_transformer_layers)
)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(args.encoder_embed_dim)
else:
self.layer_norm = None
self.apply(encoder_init)
def forward(self, src_tokens, src_lengths=None, speaker=None, **kwargs):
x = self.embed_tokens(src_tokens)
x = x.transpose(1, 2).contiguous() # B x T x C -> B x C x T
for conv in self.prenet:
x = conv(x)
x = x.transpose(1, 2).contiguous() # B x C x T -> B x T x C
x = self.prenet_proj(x)
padding_mask = src_tokens.eq(self.padding_idx)
positions = self.embed_positions(padding_mask)
x += self.pos_emb_alpha * positions
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
for layer in self.transformer_layers:
x = layer(x, padding_mask)
if self.layer_norm is not None:
x = self.layer_norm(x)
if self.embed_speaker is not None:
seq_len, bsz, _ = x.size()
emb = self.embed_speaker(speaker).transpose(0, 1)
emb = emb.expand(seq_len, bsz, -1)
x = self.spk_emb_proj(torch.cat([x, emb], dim=2))
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [padding_mask]
if padding_mask.any()
else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": [], # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
def decoder_init(m):
if isinstance(m, torch.nn.Conv1d):
nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("tanh"))
class TTSTransformerDecoder(FairseqIncrementalDecoder):
def __init__(self, args, src_dict, padding_idx=1):
super().__init__(None)
self._future_mask = torch.empty(0)
self.args = args
self.padding_idx = src_dict.pad() if src_dict else padding_idx
self.n_frames_per_step = args.n_frames_per_step
self.out_dim = args.output_frame_dim * args.n_frames_per_step
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.embed_positions = PositionalEmbedding(
args.max_target_positions, args.decoder_embed_dim, self.padding_idx
)
self.pos_emb_alpha = nn.Parameter(torch.ones(1))
self.prenet = nn.Sequential(
Prenet(
self.out_dim, args.prenet_layers, args.prenet_dim, args.prenet_dropout
),
nn.Linear(args.prenet_dim, args.decoder_embed_dim),
)
self.n_transformer_layers = args.decoder_transformer_layers
self.transformer_layers = nn.ModuleList(
TransformerDecoderLayer(args) for _ in range(self.n_transformer_layers)
)
if args.decoder_normalize_before:
self.layer_norm = LayerNorm(args.decoder_embed_dim)
else:
self.layer_norm = None
self.feat_proj = nn.Linear(args.decoder_embed_dim, self.out_dim)
self.eos_proj = nn.Linear(args.decoder_embed_dim, 1)
self.postnet = Postnet(
self.out_dim,
args.postnet_conv_dim,
args.postnet_conv_kernel_size,
args.postnet_layers,
args.postnet_dropout,
)
self.ctc_proj = None
if getattr(args, "ctc_weight", 0.0) > 0.0:
self.ctc_proj = nn.Linear(self.out_dim, len(src_dict))
self.apply(decoder_init)
def extract_features(
self,
prev_outputs,
encoder_out=None,
incremental_state=None,
target_lengths=None,
speaker=None,
**kwargs,
):
alignment_layer = self.n_transformer_layers - 1
self_attn_padding_mask = lengths_to_padding_mask(target_lengths)
positions = self.embed_positions(
self_attn_padding_mask, incremental_state=incremental_state
)
if incremental_state is not None:
prev_outputs = prev_outputs[:, -1:, :]
self_attn_padding_mask = self_attn_padding_mask[:, -1:]
if positions is not None:
positions = positions[:, -1:]
x = self.prenet(prev_outputs)
x += self.pos_emb_alpha * positions
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
if not self_attn_padding_mask.any():
self_attn_padding_mask = None
attn: Optional[torch.Tensor] = None
inner_states: List[Optional[torch.Tensor]] = [x]
for idx, transformer_layer in enumerate(self.transformer_layers):
if incremental_state is None:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = transformer_layer(
x,
encoder_out["encoder_out"][0]
if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0)
else None,
encoder_out["encoder_padding_mask"][0]
if (
encoder_out is not None
and len(encoder_out["encoder_padding_mask"]) > 0
)
else None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
# average probabilities over heads, transpose to
# (B, src_len, tgt_len)
attn = attn.mean(dim=0).transpose(2, 1)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, {"attn": attn, "inner_states": inner_states}
def forward(
self,
prev_output_tokens,
encoder_out=None,
incremental_state=None,
target_lengths=None,
speaker=None,
**kwargs,
):
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
target_lengths=target_lengths,
speaker=speaker,
**kwargs,
)
attn = extra["attn"]
feat_out = self.feat_proj(x)
bsz, seq_len, _ = x.size()
eos_out = self.eos_proj(x)
post_feat_out = feat_out + self.postnet(feat_out)
return (
post_feat_out,
eos_out,
{
"attn": attn,
"feature_out": feat_out,
"inner_states": extra["inner_states"],
},
)
def get_normalized_probs(self, net_output, log_probs, sample):
logits = self.ctc_proj(net_output[2]["feature_out"])
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
# `self._future_mask.device != tensor.device` does not work in TorchScript; this is a workaround
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
)
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim]
@register_model("tts_transformer")
class TTSTransformerModel(FairseqEncoderDecoderModel):
"""
Implementation for https://arxiv.org/pdf/1809.08895.pdf
"""
@classmethod
def hub_models(cls):
base_url = "http://dl.fbaipublicfiles.com/fairseq/s2"
model_ids = [
"tts_transformer-en-ljspeech",
"tts_transformer-en-200_speaker-cv4",
"tts_transformer-es-css10",
"tts_transformer-fr-cv7_css10",
"tts_transformer-ru-cv7_css10",
"tts_transformer-zh-cv7_css10",
"tts_transformer-ar-cv7_css10",
"tts_transformer-tr-cv7_css10",
"tts_transformer-vi-cv7",
]
return {i: f"{base_url}/{i}.tar.gz" for i in model_ids}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
config_yaml="config.yaml",
vocoder: str = "griffin_lim",
fp16: bool = False,
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
config_yaml=config_yaml,
vocoder=vocoder,
fp16=fp16,
**kwargs,
)
return TTSHubInterface(x["args"], x["task"], x["models"][0])
@staticmethod
def add_args(parser):
parser.add_argument("--dropout", type=float)
parser.add_argument("--output-frame-dim", type=int)
parser.add_argument("--speaker-embed-dim", type=int)
# encoder prenet
parser.add_argument("--encoder-dropout", type=float)
parser.add_argument("--encoder-conv-layers", type=int)
parser.add_argument("--encoder-conv-kernel-size", type=int)
# encoder transformer layers
parser.add_argument("--encoder-transformer-layers", type=int)
parser.add_argument("--encoder-embed-dim", type=int)
parser.add_argument("--encoder-ffn-embed-dim", type=int)
parser.add_argument("--encoder-normalize-before", action="store_true")
parser.add_argument("--encoder-attention-heads", type=int)
parser.add_argument("--attention-dropout", type=float)
parser.add_argument("--activation-dropout", "--relu-dropout", type=float)
parser.add_argument("--activation-fn", type=str, default="relu")
# decoder prenet
parser.add_argument("--prenet-dropout", type=float)
parser.add_argument("--prenet-layers", type=int)
parser.add_argument("--prenet-dim", type=int)
# decoder postnet
parser.add_argument("--postnet-dropout", type=float)
parser.add_argument("--postnet-layers", type=int)
parser.add_argument("--postnet-conv-dim", type=int)
parser.add_argument("--postnet-conv-kernel-size", type=int)
# decoder transformer layers
parser.add_argument("--decoder-transformer-layers", type=int)
parser.add_argument("--decoder-embed-dim", type=int)
parser.add_argument("--decoder-ffn-embed-dim", type=int)
parser.add_argument("--decoder-normalize-before", action="store_true")
parser.add_argument("--decoder-attention-heads", type=int)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._num_updates = 0
@classmethod
def build_model(cls, args, task):
embed_speaker = task.get_speaker_embeddings(args)
encoder = TTSTransformerEncoder(args, task.src_dict, embed_speaker)
decoder = TTSTransformerDecoder(args, task.src_dict)
return cls(encoder, decoder)
def forward_encoder(self, src_tokens, src_lengths, speaker=None, **kwargs):
return self.encoder(
src_tokens, src_lengths=src_lengths, speaker=speaker, **kwargs
)
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self._num_updates = num_updates
@register_model_architecture("tts_transformer", "tts_transformer")
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.output_frame_dim = getattr(args, "output_frame_dim", 80)
args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 64)
# encoder prenet
args.encoder_dropout = getattr(args, "encoder_dropout", 0.5)
args.encoder_conv_layers = getattr(args, "encoder_conv_layers", 3)
args.encoder_conv_kernel_size = getattr(args, "encoder_conv_kernel_size", 5)
# encoder transformer layers
args.encoder_transformer_layers = getattr(args, "encoder_transformer_layers", 6)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(
args, "encoder_ffn_embed_dim", 4 * args.encoder_embed_dim
)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
# decoder prenet
args.prenet_dropout = getattr(args, "prenet_dropout", 0.5)
args.prenet_layers = getattr(args, "prenet_layers", 2)
args.prenet_dim = getattr(args, "prenet_dim", 256)
# decoder postnet
args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
# decoder transformer layers
args.decoder_transformer_layers = getattr(args, "decoder_transformer_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", 4 * args.decoder_embed_dim
)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/text_to_speech/tts_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
from pathlib import Path
from typing import Dict, Optional, Tuple
import torch
import torch.nn as nn
logger = logging.getLogger(__name__)
class TTSHubInterface(nn.Module):
def __init__(self, cfg, task, model):
super().__init__()
self.cfg = cfg
self.task = task
self.model = model
self.model.eval()
self.update_cfg_with_data_cfg(self.cfg, self.task.data_cfg)
self.generator = self.task.build_generator([self.model], self.cfg)
@classmethod
def phonemize(
cls,
text: str,
lang: Optional[str],
phonemizer: Optional[str] = None,
preserve_punct: bool = False,
to_simplified_zh: bool = False,
):
if to_simplified_zh:
import hanziconv
text = hanziconv.HanziConv.toSimplified(text)
if phonemizer == "g2p":
import g2p_en
g2p = g2p_en.G2p()
if preserve_punct:
return " ".join("|" if p == " " else p for p in g2p(text))
else:
res = [{",": "sp", ";": "sp"}.get(p, p) for p in g2p(text)]
return " ".join(p for p in res if p.isalnum())
if phonemizer == "g2pc":
import g2pc
g2p = g2pc.G2pC()
return " ".join([w[3] for w in g2p(text)])
elif phonemizer == "ipa":
assert lang is not None
import phonemizer
from phonemizer.separator import Separator
lang_map = {"en": "en-us", "fr": "fr-fr"}
return phonemizer.phonemize(
text,
backend="espeak",
language=lang_map.get(lang, lang),
separator=Separator(word="| ", phone=" "),
)
else:
return text
@classmethod
def tokenize(cls, text: str, tkn_cfg: Dict[str, str]):
sentencepiece_model = tkn_cfg.get("sentencepiece_model", None)
if sentencepiece_model is not None:
assert Path(sentencepiece_model).exists()
import sentencepiece as sp
spm = sp.SentencePieceProcessor()
spm.Load(sentencepiece_model)
return " ".join(spm.Encode(text, out_type=str))
else:
return text
@classmethod
def update_cfg_with_data_cfg(cls, cfg, data_cfg):
cfg["task"].vocoder = data_cfg.vocoder.get("type", "griffin_lim")
@classmethod
def get_model_input(
cls, task, text: str, speaker: Optional[int] = None, verbose: bool = False
):
phonemized = cls.phonemize(
text,
task.data_cfg.hub.get("lang", None),
task.data_cfg.hub.get("phonemizer", None),
task.data_cfg.hub.get("preserve_punct", False),
task.data_cfg.hub.get("to_simplified_zh", False),
)
tkn_cfg = task.data_cfg.bpe_tokenizer
tokenized = cls.tokenize(phonemized, tkn_cfg)
if verbose:
logger.info(f"text: {text}")
logger.info(f"phonemized: {phonemized}")
logger.info(f"tokenized: {tokenized}")
spk = task.data_cfg.hub.get("speaker", speaker)
n_speakers = len(task.speaker_to_id or {})
if spk is None and n_speakers > 0:
spk = random.randint(0, n_speakers - 1)
if spk is not None:
spk = max(0, min(spk, n_speakers - 1))
if verbose:
logger.info(f"speaker: {spk}")
spk = None if spk is None else torch.Tensor([[spk]]).long()
src_tokens = task.src_dict.encode_line(tokenized, add_if_not_exist=False).view(
1, -1
)
src_lengths = torch.Tensor([len(tokenized.split())]).long()
return {
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
"prev_output_tokens": None,
},
"target_lengths": None,
"speaker": spk,
}
@classmethod
def get_prediction(cls, task, model, generator, sample) -> Tuple[torch.Tensor, int]:
prediction = generator.generate(model, sample)
return prediction[0]["waveform"], task.sr
def predict(
self, text: str, speaker: Optional[int] = None, verbose: bool = False
) -> Tuple[torch.Tensor, int]:
sample = self.get_model_input(self.task, text, speaker, verbose=verbose)
return self.get_prediction(self.task, self.model, self.generator, sample)
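def _tts_hub_usage_example():
    # Illustrative usage sketch, not executed on import: TTSHubInterface is
    # what FastSpeech2Model.from_pretrained / TTSTransformerModel.from_pretrained
    # return, so a typical flow looks like the following. The model id is one
    # of the entries listed in those models' hub_models(); the import is done
    # lazily here to avoid a circular module-level import.
    from fairseq.models.text_to_speech.fastspeech2 import FastSpeech2Model
    tts = FastSpeech2Model.from_pretrained(
        "fastspeech2-en-ljspeech", vocoder="griffin_lim"
    )
    wav, sr = tts.predict("Hello, this is a test run.", verbose=True)
    return wav, sr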
class VocoderHubInterface(nn.Module):
"""Vocoder interface to run vocoder models through hub. Currently we only support unit vocoder"""
def __init__(self, cfg, model):
super().__init__()
self.vocoder = model
self.vocoder.eval()
self.sr = 16000
self.multispkr = self.vocoder.model.multispkr
if self.multispkr:
logger.info("multi-speaker vocoder")
self.num_speakers = cfg.get(
"num_speakers",
200,
)  # follows the 200-speaker default used in codehifigan
def get_model_input(
self,
text: str,
speaker: Optional[int] = -1,
):
units = list(map(int, text.strip().split()))
x = {
"code": torch.LongTensor(units).view(1, -1),
}
if speaker is None:  # `not speaker` would also discard a valid speaker id of 0
speaker = -1
if self.multispkr:
assert (
speaker < self.num_speakers
), f"invalid --speaker-id ({speaker}) with total #speakers = {self.num_speakers}"
spk = random.randint(0, self.num_speakers - 1) if speaker == -1 else speaker
x["spkr"] = torch.LongTensor([spk]).view(1, 1)
return x
def get_prediction(self, sample, dur_prediction: Optional[bool] = True):
wav = self.vocoder(sample, dur_prediction)
return wav, self.sr
def predict(
self,
text: str,
speaker: Optional[int] = None,
dur_prediction: Optional[bool] = True,
):
sample = self.get_model_input(text, speaker)
return self.get_prediction(sample, dur_prediction)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/text_to_speech/hub_interface.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.modules.transformer_layer import TransformerEncoderLayer
from typing import Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.modules import LayerNorm
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor
class Adapter(nn.Module):
def __init__(self, cfg, red_fac=2):
super(Adapter, self).__init__()
self.cfg = cfg
self.embed_dim = cfg.encoder_embed_dim
self.quant_noise = getattr(cfg, "quant_noise_pq", 0)
self.quant_noise_block_size = getattr(cfg, "quant_noise_pq_block_size", 8) or 8
self.activation_fn = utils.get_activation_fn(
activation=getattr(cfg, "activation_fn", "relu") or "relu"
)
self.fc1 = quant_noise(
nn.Linear(self.embed_dim, self.embed_dim // red_fac),
p=self.quant_noise,
block_size=self.quant_noise_block_size,
)
self.fc2 = quant_noise(
nn.Linear(self.embed_dim // red_fac, self.embed_dim),
p=self.quant_noise,
block_size=self.quant_noise_block_size,
)
activation_dropout_p = getattr(cfg, "activation_dropout", 0) or 0
if activation_dropout_p == 0:
# for backwards compatibility with models that use cfg.relu_dropout
activation_dropout_p = getattr(cfg, "relu_dropout", 0) or 0
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
def forward(self, x):
x = self.activation_fn(self.fc1(x))
if not hasattr(self.cfg, "adapter_dropout") or self.cfg.adapter_dropout:
x = self.activation_dropout_module(x)
x = self.fc2(x)
return x
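def _adapter_demo():
    # Minimal sketch (not part of the original module): the adapter above is a
    # bottleneck MLP mapping embed_dim -> embed_dim // red_fac -> embed_dim.
    # Only encoder_embed_dim is set on the config; the remaining options fall
    # back to the getattr defaults used in __init__.
    from argparse import Namespace
    cfg = Namespace(encoder_embed_dim=16)
    adapter = Adapter(cfg, red_fac=2)
    x = torch.randn(5, 2, 16)  # T x B x C, as used in lang_adapter below
    assert adapter(x).shape == x.shape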
class XMODTransformerEncoderLayerBase(TransformerEncoderLayer):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*cfg.encoder.normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
"""
def __init__(self, cfg):
super().__init__(cfg)
if hasattr(cfg, "adapter_modules") and cfg.adapter_modules:
export = getattr(cfg, "export", False)
if cfg.adapter_layer_norm:
self.adapter_layer_norm = LayerNorm(self.embed_dim, export=export)
self.adapter_modules = nn.ModuleDict(dict())
if hasattr(self.cfg, "bottleneck"):
bottleneck = self.cfg.bottleneck
else:
bottleneck = 2
for language in cfg.languages:
self.adapter_modules[str(language)] = Adapter(cfg, red_fac=bottleneck)
def lang_adapter(self, lang_id, x):
# If language adapters exist, pass the input through them
if hasattr(self.cfg, "adapter_modules") and self.cfg.adapter_modules:
if lang_id is None:
lang_id = ["en_XX"] * x.shape[1]
d_langs = [lang_id[0]]
lang_lengths = [1]
for lang in lang_id[1:]:
if lang == d_langs[-1]:
lang_lengths[-1] += 1
else:
d_langs.append(lang)
lang_lengths.append(1)
if (
not hasattr(self.cfg, "ln_before_adapter")
or not self.cfg.ln_before_adapter
):
residual = x
if self.cfg.adapter_layer_norm:
x = self.adapter_layer_norm(x)
elif self.cfg.adapter_reuse_layer_norm:
x = self.final_layer_norm(x)
if hasattr(self.cfg, "ln_before_adapter") and self.cfg.ln_before_adapter:
residual = x
split_x = torch.split(x, lang_lengths, 1)
x_ = []
for i, (lang, s_x) in enumerate(zip(d_langs, split_x)):
lang = lang.replace("_rom", "").replace("_zaw", "")
x_.append(self.adapter_modules[str(lang)](s_x))
x = torch.cat(x_, 1)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
return x
def forward(
self,
x,
encoder_padding_mask: Optional[Tensor],
attn_mask: Optional[Tensor] = None,
lang_id: Optional[list] = None,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, seq_len)` where padding elements are indicated by ``1``.
attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,
where `tgt_len` is the length of output and `src_len` is the
length of input, though here both are equal to `seq_len`.
`attn_mask[tgt_i, src_j] = 1` means that when calculating the
embedding for `tgt_i`, we exclude (mask out) `src_j`. This is
useful for strided self-attention.
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
# anything in original attn_mask = 1, becomes -1e8
# anything in original attn_mask = 0, becomes 0
# Note that we cannot use -inf here, because at some edge cases,
# the attention weight (before softmax) for some padded element in query
# will become -inf, which results in NaN in model parameters
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
need_weights=False,
attn_mask=attn_mask,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
x = self.lang_adapter(lang_id, x)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x
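# Hedged sketch (illustration only, not fairseq code): how lang_adapter() above
# groups the per-sentence language ids into runs of identical languages before
# torch.split along the batch dimension. The language list and tensor sizes are
# assumptions used only to show the run-length encoding.
def _lang_run_length_demo():
    import torch

    lang_id = ["en_XX", "en_XX", "fr_XX", "fr_XX", "fr_XX", "ar_AR"]
    d_langs, lang_lengths = [lang_id[0]], [1]
    for lang in lang_id[1:]:
        if lang == d_langs[-1]:
            lang_lengths[-1] += 1
        else:
            d_langs.append(lang)
            lang_lengths.append(1)
    # d_langs == ["en_XX", "fr_XX", "ar_AR"], lang_lengths == [2, 3, 1]
    x = torch.randn(7, len(lang_id), 16)          # (seq_len, batch, embed_dim)
    chunks = torch.split(x, lang_lengths, dim=1)  # one chunk per language run
    assert [c.shape[1] for c in chunks] == lang_lengths
    return d_langs, lang_lengths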
| EXA-1-master | exa/libraries/fairseq/fairseq/models/xmod/transformer_layer_xmod.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .model import * # noqa
from .transformer_layer_xmod import * # noqa
| EXA-1-master | exa/libraries/fairseq/fairseq/models/xmod/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from ..roberta.model_xlmr import XLMRModel
from fairseq.models.xmod.transformer_layer_xmod import XMODTransformerEncoderLayerBase
from ..roberta.model import base_architecture, RobertaEncoder
from fairseq.models.transformer import TransformerEncoder
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from typing import Optional
from fairseq.models.xmod.hub_interface import XMODHubInterface
import torch
from fairseq.distributed import fsdp_wrap
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8)
@register_model("xmod")
class XMODModel(XLMRModel):
@classmethod
def hub_models(cls):
return {
"xmod.base": "https://dl.fbaipublicfiles.com/fairseq/models/xmod/xmod.base.81.1M.tar.gz",
"xmod.large.prenorm": "https://dl.fbaipublicfiles.com/fairseq/models/xmod/xmod.large.prenorm.81.500k.tar.gz",
"xmod.base.13.125k": "https://dl.fbaipublicfiles.com/fairseq/models/xmod/xmod.base.13.125k.tar.gz",
"xmod.base.30.125k": "https://dl.fbaipublicfiles.com/fairseq/models/xmod/xmod.base.30.125k.tar.gz",
"xmod.base.30.195k": "https://dl.fbaipublicfiles.com/fairseq/models/xmod/xmod.base.30.195k.tar.gz",
"xmod.base.60.125k": "https://dl.fbaipublicfiles.com/fairseq/models/xmod/xmod.base.60.125k.tar.gz",
"xmod.base.60.265k": "https://dl.fbaipublicfiles.com/fairseq/models/xmod/xmod.base.60.265k.tar.gz",
"xmod.base.75.125k": "https://dl.fbaipublicfiles.com/fairseq/models/xmod/xmod.base.75.125k.tar.gz",
"xmod.base.75.269k": "https://dl.fbaipublicfiles.com/fairseq/models/xmod/xmod.base.75.269k.tar.gz",
}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="sentencepiece",
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
**kwargs,
)
return XMODHubInterface(x["args"], x["task"], x["models"][0])
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
from omegaconf import OmegaConf
if OmegaConf.is_config(args):
OmegaConf.set_struct(args, False)
# make sure all arguments are present
base_architecture(args)
if not hasattr(args, "max_positions"):
if not hasattr(args, "tokens_per_sample"):
args.tokens_per_sample = task.max_positions()
args.max_positions = args.tokens_per_sample
encoder = XMODEncoder(args, task.source_dictionary)
if OmegaConf.is_config(args):
OmegaConf.set_struct(args, True)
return cls(args, encoder)
def forward(
self,
src_tokens,
features_only=False,
return_all_hiddens=False,
classification_head_name=None,
lang_id=None,
**kwargs,
):
if classification_head_name is not None:
features_only = True
x, extra = self.encoder(
src_tokens, features_only, return_all_hiddens, lang_id=lang_id, **kwargs
)
if classification_head_name is not None:
x = self.classification_heads[classification_head_name](x)
return x, extra
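# Hedged usage sketch (not executed here, requires downloading a checkpoint):
# loading a released X-MOD model through the archive map in hub_models() and
# extracting features with an explicit language id. The "xmod.base" key comes
# from hub_models() above; the example sentence and the single-element
# lang_id list are assumptions for illustration.
def _xmod_hub_usage_demo():
    import torch

    xmod = XMODModel.from_pretrained("xmod.base")     # resolved via hub_models()
    xmod.eval()
    tokens = xmod.encode("Hello world!")               # LongTensor of BPE ids
    with torch.no_grad():
        feats = xmod.extract_features(tokens, lang_id=["en_XX"])
    return feats.shape                                 # (1, seq_len, embed_dim)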
class XMODEncoder(RobertaEncoder):
"""XMOD encoder."""
def build_encoder(self, args, dictionary, embed_tokens):
encoder = XMODTransformerEncoder(args, dictionary, embed_tokens)
encoder.apply(init_bert_params)
return encoder
def forward(
self,
src_tokens,
features_only=False,
return_all_hiddens=False,
masked_tokens=None,
lang_id=None,
**unused,
):
"""
Args:
src_tokens (LongTensor): input tokens of shape `(batch, src_len)`
features_only (bool, optional): skip LM head and just return
features. If True, the output will be of shape
`(batch, src_len, embed_dim)`.
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
Returns:
tuple:
- the LM output of shape `(batch, src_len, vocab)`
- a dictionary of additional data, where 'inner_states'
is a list of hidden states. Note that the hidden
states have shape `(src_len, batch, vocab)`.
"""
x, extra = self.extract_features(
src_tokens, return_all_hiddens=return_all_hiddens, lang_id=lang_id
)
if not features_only:
x = self.output_layer(x, masked_tokens=masked_tokens)
return x, extra
def extract_features(
self, src_tokens, return_all_hiddens=False, lang_id=None, **kwargs
):
encoder_out = self.sentence_encoder(
src_tokens,
return_all_hiddens=return_all_hiddens,
lang_id=lang_id,
token_embeddings=kwargs.get("token_embeddings", None),
)
# T x B x C -> B x T x C
features = encoder_out["encoder_out"][0].transpose(0, 1)
inner_states = encoder_out["encoder_states"] if return_all_hiddens else None
return features, {"inner_states": inner_states}
class XMODTransformerEncoder(TransformerEncoder):
def build_encoder_layer(self, cfg):
layer = XMODTransformerEncoderLayerBase(cfg)
checkpoint = cfg.checkpoint_activations
if checkpoint:
offload_to_cpu = cfg.offload_activations
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
lang_id=None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
return self.forward_scriptable(
src_tokens,
src_lengths,
return_all_hiddens,
token_embeddings,
lang_id=lang_id,
)
# TorchScript doesn't support super() method so that the scriptable Subclass
# can't access the base class model in Torchscript.
# Current workaround is to add a helper function with different name and
# call the helper function from scriptable Subclass.
def forward_scriptable(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
lang_id=None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
has_pads = src_tokens.device.type == "xla" or encoder_padding_mask.any()
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
# account for padding while computing the representation
if has_pads:
x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states = []
if return_all_hiddens:
encoder_states.append(x)
# encoder layers
for layer in self.layers:
x = layer(
x,
encoder_padding_mask=encoder_padding_mask if has_pads else None,
lang_id=lang_id,
)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
# The Pytorch Mobile lite interpreter does not supports returning NamedTuple in
# `forward` so we use a dictionary instead.
# TorchScript does not support mixed values so the values are all lists.
# The empty list is equivalent to None.
src_lengths = (
src_tokens.ne(self.padding_idx)
.sum(dim=1, dtype=torch.int32)
.reshape(-1, 1)
.contiguous()
)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [encoder_embedding], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [src_lengths],
}
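# Hedged sketch (illustration only): how the padding handling in
# forward_scriptable above zeroes out embeddings at padded positions before the
# transformer layers run. padding_idx=1 and the token ids are assumptions.
def _padding_mask_demo():
    import torch

    padding_idx = 1
    src_tokens = torch.tensor([[5, 6, 7, 1, 1]])            # B x T, last two are pads
    encoder_padding_mask = src_tokens.eq(padding_idx)        # B x T bool mask
    x = torch.randn(1, 5, 8)                                 # B x T x C embeddings
    x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
    assert torch.all(x[0, 3:] == 0)                          # padded rows are zeroed
    return encoder_padding_mask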
@register_model_architecture("xmod", "xmod_base_13")
def xmod_base_13_architecture(args):
args.ffn_modules = getattr(args, "ffn_modules", False)
args.adapter_modules = getattr(args, "adapter_modules", True)
args.adapter_layer_norm = getattr(args, "adapter_layer_norm", False)
args.adapter_reuse_layer_norm = getattr(args, "adapter_reuse_layer_norm", True)
args.ln_before_adapter = getattr(args, "ln_before_adapter", True)
args.languages = getattr(
args,
"languages",
[
"ar_AR",
"en_XX",
"fi_FI",
"fr_XX",
"hi_IN",
"id_ID",
"ka_GE",
"ko_KR",
"ru_RU",
"sw_KE",
"ta_IN",
"th_TH",
"vi_VN",
],
)
base_architecture(args)
@register_model_architecture("xmod", "xmod_base_30")
def xmod_base_30_architecture(args):
args.ffn_modules = getattr(args, "ffn_modules", False)
args.adapter_modules = getattr(args, "adapter_modules", True)
args.adapter_layer_norm = getattr(args, "adapter_layer_norm", False)
args.adapter_reuse_layer_norm = getattr(args, "adapter_reuse_layer_norm", True)
args.ln_before_adapter = getattr(args, "ln_before_adapter", True)
args.languages = getattr(
args,
"languages",
[
"ar_AR",
"cs_CZ",
"en_XX",
"eu_ES",
"fi_FI",
"fr_XX",
"hi_IN",
"hr_HR",
"hu_HU",
"hy_AM",
"id_ID",
"it_IT",
"ka_GE",
"ko_KR",
"lt_LT",
"ml_IN",
"mn_MN",
"ms_MY",
"pl_PL",
"ro_RO",
"ru_RU",
"si_LK",
"sk_SK",
"sq_AL",
"sv_SE",
"sw_KE",
"ta_IN",
"th_TH",
"tl_XX",
"vi_VN",
],
)
base_architecture(args)
@register_model_architecture("xmod", "xmod_base_60")
def xmod_base_60_architecture(args):
args.ffn_modules = getattr(args, "ffn_modules", False)
args.adapter_modules = getattr(args, "adapter_modules", True)
args.adapter_layer_norm = getattr(args, "adapter_layer_norm", False)
args.adapter_reuse_layer_norm = getattr(args, "adapter_reuse_layer_norm", True)
args.ln_before_adapter = getattr(args, "ln_before_adapter", True)
args.languages = getattr(
args,
"languages",
[
"af_ZA",
"am_ET",
"ar_AR",
"be_BY",
"bn_IN",
"ca_ES",
"cs_CZ",
"cy_GB",
"da_DK",
"en_XX",
"eo_EO",
"et_EE",
"eu_ES",
"fa_IR",
"fi_FI",
"fr_XX",
"ga_IE",
"gl_ES",
"gu_IN",
"ha_NG",
"hi_IN",
"hr_HR",
"hu_HU",
"hy_AM",
"id_ID",
"is_IS",
"it_IT",
"ka_GE",
"ko_KR",
"ku_TR",
"la_VA",
"lt_LT",
"lv_LV",
"mk_MK",
"ml_IN",
"mn_MN",
"ms_MY",
"ne_NP",
"nl_XX",
"no_XX",
"pl_PL",
"ps_AF",
"pt_XX",
"ro_RO",
"ru_RU",
"sa_IN",
"sd_PK",
"si_LK",
"sk_SK",
"sl_SI",
"so_SO",
"sq_AL",
"sr_RS",
"sv_SE",
"sw_KE",
"ta_IN",
"te_IN",
"th_TH",
"tl_XX",
"vi_VN",
],
)
base_architecture(args)
@register_model_architecture("xmod", "xmod_base_75")
def xmod_base_75_architecture(args):
args.ffn_modules = getattr(args, "ffn_modules", False)
args.adapter_modules = getattr(args, "adapter_modules", True)
args.adapter_layer_norm = getattr(args, "adapter_layer_norm", False)
args.adapter_reuse_layer_norm = getattr(args, "adapter_reuse_layer_norm", True)
args.ln_before_adapter = getattr(args, "ln_before_adapter", True)
args.languages = getattr(
args,
"languages",
[
"af_ZA",
"am_ET",
"ar_AR",
"as_IN",
"be_BY",
"bn_IN",
"br_FR",
"bs_BA",
"ca_ES",
"cs_CZ",
"cy_GB",
"da_DK",
"en_XX",
"eo_EO",
"et_EE",
"eu_ES",
"fa_IR",
"fi_FI",
"fr_XX",
"fy_NL",
"ga_IE",
"gd_GB",
"gl_ES",
"gu_IN",
"ha_NG",
"hi_IN",
"hr_HR",
"hu_HU",
"hy_AM",
"id_ID",
"is_IS",
"it_IT",
"jv_ID",
"ka_GE",
"kn_IN",
"ko_KR",
"ku_TR",
"la_VA",
"lt_LT",
"lv_LV",
"mg_MG",
"mk_MK",
"ml_IN",
"mn_MN",
"mr_IN",
"ms_MY",
"ne_NP",
"nl_XX",
"no_XX",
"om_KE",
"or_IN",
"pa_IN",
"pl_PL",
"ps_AF",
"pt_XX",
"ro_RO",
"ru_RU",
"sa_IN",
"sd_PK",
"si_LK",
"sk_SK",
"sl_SI",
"so_SO",
"sq_AL",
"sr_RS",
"su_ID",
"sv_SE",
"sw_KE",
"ta_IN",
"te_IN",
"th_TH",
"tl_XX",
"vi_VN",
"xh_ZA",
"yi_DE",
],
)
base_architecture(args)
@register_model_architecture("xmod", "xmod_base")
def xmod_base_architecture(args):
args.ffn_modules = getattr(args, "ffn_modules", False)
args.adapter_modules = getattr(args, "adapter_modules", True)
args.adapter_layer_norm = getattr(args, "adapter_layer_norm", False)
args.adapter_reuse_layer_norm = getattr(args, "adapter_reuse_layer_norm", True)
args.ln_before_adapter = getattr(args, "ln_before_adapter", True)
args.languages = getattr(
args,
"languages",
[
"en_XX",
"id_ID",
"vi_VN",
"ru_RU",
"fa_IR",
"sv_SE",
"ja_XX",
"fr_XX",
"de_DE",
"ro_RO",
"ko_KR",
"hu_HU",
"es_XX",
"fi_FI",
"uk_UA",
"da_DK",
"pt_XX",
"no_XX",
"th_TH",
"pl_PL",
"bg_BG",
"nl_XX",
"zh_CN",
"he_IL",
"el_GR",
"it_IT",
"sk_SK",
"hr_HR",
"tr_TR",
"ar_AR",
"cs_CZ",
"lt_LT",
"hi_IN",
"zh_TW",
"ca_ES",
"ms_MY",
"sl_SI",
"lv_LV",
"ta_IN",
"bn_IN",
"et_EE",
"az_AZ",
"sq_AL",
"sr_RS",
"kk_KZ",
"ka_GE",
"tl_XX",
"ur_PK",
"is_IS",
"hy_AM",
"ml_IN",
"mk_MK",
"be_BY",
"la_VA",
"te_IN",
"eu_ES",
"gl_ES",
"mn_MN",
"kn_IN",
"ne_NP",
"sw_KE",
"si_LK",
"mr_IN",
"af_ZA",
"gu_IN",
"cy_GB",
"eo_EO",
"km_KH",
"ky_KG",
"uz_UZ",
"ps_AF",
"pa_IN",
"ga_IE",
"ha_NG",
"am_ET",
"lo_LA",
"ku_TR",
"so_SO",
"my_MM",
"or_IN",
"sa_IN",
],
)
base_architecture(args)
@register_model_architecture("xmod", "xmod_large_prenorm")
def xmod_large_prenorm_architecture(args):
args.ffn_modules = getattr(args, "ffn_modules", False)
args.adapter_modules = getattr(args, "adapter_modules", True)
args.adapter_layer_norm = getattr(args, "adapter_layer_norm", True)
args.adapter_reuse_layer_norm = getattr(args, "adapter_reuse_layer_norm", False)
args.ln_before_adapter = getattr(args, "ln_before_adapter", False)
# args.bottleneck = getattr(args, "bottleneck", 8)
args.bottleneck = getattr(args, "bottleneck", 4)
args.languages = getattr(
args,
"languages",
[
"en_XX",
"id_ID",
"vi_VN",
"ru_RU",
"fa_IR",
"sv_SE",
"ja_XX",
"fr_XX",
"de_DE",
"ro_RO",
"ko_KR",
"hu_HU",
"es_XX",
"fi_FI",
"uk_UA",
"da_DK",
"pt_XX",
"no_XX",
"th_TH",
"pl_PL",
"bg_BG",
"nl_XX",
"zh_CN",
"he_IL",
"el_GR",
"it_IT",
"sk_SK",
"hr_HR",
"tr_TR",
"ar_AR",
"cs_CZ",
"lt_LT",
"hi_IN",
"zh_TW",
"ca_ES",
"ms_MY",
"sl_SI",
"lv_LV",
"ta_IN",
"bn_IN",
"et_EE",
"az_AZ",
"sq_AL",
"sr_RS",
"kk_KZ",
"ka_GE",
"tl_XX",
"ur_PK",
"is_IS",
"hy_AM",
"ml_IN",
"mk_MK",
"be_BY",
"la_VA",
"te_IN",
"eu_ES",
"gl_ES",
"mn_MN",
"kn_IN",
"ne_NP",
"sw_KE",
"si_LK",
"mr_IN",
"af_ZA",
"gu_IN",
"cy_GB",
"eo_EO",
"km_KH",
"ky_KG",
"uz_UZ",
"ps_AF",
"pa_IN",
"ga_IE",
"ha_NG",
"am_ET",
"lo_LA",
"ku_TR",
"so_SO",
"my_MM",
"or_IN",
"sa_IN",
],
)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.encoder_layers = getattr(args, "encoder_layers", 24)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
base_architecture(args)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/xmod/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.models.roberta.hub_interface import RobertaHubInterface
import torch
import torch.nn.functional as F
class XMODHubInterface(RobertaHubInterface):
def extract_features(
self,
tokens: torch.LongTensor,
return_all_hiddens: bool = False,
lang_id=None,
) -> torch.Tensor:
if tokens.dim() == 1:
tokens = tokens.unsqueeze(0)
if tokens.size(-1) > self.model.max_positions():
raise ValueError(
"tokens exceeds maximum length: {} > {}".format(
tokens.size(-1), self.model.max_positions()
)
)
features, extra = self.model(
tokens.to(device=self.device),
features_only=True,
return_all_hiddens=return_all_hiddens,
lang_id=lang_id,
)
if return_all_hiddens:
# convert from T x B x C -> B x T x C
inner_states = extra["inner_states"]
return [inner_state.transpose(0, 1) for inner_state in inner_states]
else:
return features # just the last layer's features
def predict(
self,
head: str,
tokens: torch.LongTensor,
return_logits: bool = False,
lang_id=None,
):
features = self.extract_features(tokens.to(device=self.device), lang_id=lang_id)
logits = self.model.classification_heads[head](features)
if return_logits:
return logits
return F.log_softmax(logits, dim=-1)
| EXA-1-master | exa/libraries/fairseq/fairseq/models/xmod/hub_interface.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a network across multiple GPUs.
"""
from fairseq.dataclass.configs import FairseqConfig
from fairseq.distributed import utils as distributed_utils
from fairseq.trainer import Trainer
try:
from fairseq.model_parallel.megatron.mpu import (
get_data_parallel_rank,
get_data_parallel_world_size,
get_model_parallel_src_rank,
get_cuda_rng_tracker,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
class MegatronTrainer(Trainer):
"""Main class for model parallel with data parallel training."""
def __init__(self, cfg: FairseqConfig, task, model, criterion, **kwargs):
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install the megatron submodule:"
"\n\n git submodule update --init "
"fairseq/model_parallel/megatron"
)
super().__init__(cfg, task, model, criterion, **kwargs)
def clip_grad_norm(self, clip_norm):
def _aggregate_model_parallel_grad_norm(total_norm):
total_norm = total_norm**2
distributed_utils.all_reduce(
total_norm, group=distributed_utils.get_model_parallel_group()
)
total_norm = total_norm**0.5
return total_norm
return self.optimizer.clip_grad_norm(
clip_norm,
aggregate_norm_fn=_aggregate_model_parallel_grad_norm,
)
def save_checkpoint(self, filename, extra_state):
"""Save all training state in a checkpoint file."""
extra_state["rng_tracker_states"] = get_cuda_rng_tracker().get_states()
super().save_checkpoint(filename, extra_state)
def load_checkpoint(
self,
filename,
reset_optimizer=False,
reset_lr_scheduler=False,
optimizer_overrides=None,
reset_meters=False,
):
extra_state = super().load_checkpoint(
filename,
reset_optimizer=reset_optimizer,
reset_lr_scheduler=reset_lr_scheduler,
optimizer_overrides=optimizer_overrides,
reset_meters=reset_meters,
)
if extra_state is not None and "rng_tracker_states" in extra_state:
get_cuda_rng_tracker().set_states(extra_state["rng_tracker_states"])
return extra_state
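# Hedged sketch (illustration only, no distributed setup): the arithmetic behind
# _aggregate_model_parallel_grad_norm in clip_grad_norm above. Each model-parallel
# rank holds the gradient norm of its own parameter shard; the global norm is the
# square root of the sum of squared per-rank norms, which is exactly what
# square -> all_reduce(SUM) -> sqrt computes. The per-rank values are assumptions.
def _grad_norm_aggregation_demo():
    import torch

    per_rank_norms = torch.tensor([3.0, 4.0])          # assumed shard norms
    total = (per_rank_norms**2).sum() ** 0.5            # emulates the all_reduce + sqrt
    assert torch.isclose(total, torch.tensor(5.0))
    return total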
| EXA-1-master | exa/libraries/fairseq/fairseq/model_parallel/megatron_trainer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import criterions, models, modules # noqa
| EXA-1-master | exa/libraries/fairseq/fairseq/model_parallel/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("fairseq.model_parallel.models." + model_name)
| EXA-1-master | exa/libraries/fairseq/fairseq/model_parallel/models/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch.nn as nn
from fairseq.model_parallel.modules import (
ModelParallelTransformerDecoderLayer,
ModelParallelTransformerEncoderLayer,
)
from fairseq.models import register_model
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
TransformerModel,
)
try:
from fairseq.model_parallel.megatron.mpu import (
VocabParallelEmbedding,
copy_to_model_parallel_region,
gather_from_model_parallel_region,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
logger = logging.getLogger(__name__)
@register_model("model_parallel_transformer")
class ModelParallelTransformerModel(TransformerModel):
"""
Model parallel Transformer model.
"""
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install the megatron submodule:"
"\n\n git submodule update --init "
"fairseq/model_parallel/megatron"
)
dictionary.pad_to_multiple_(args.model_parallel_size * 8)
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
def _vocab_init(tensor, **kwargs):
nn.init.normal_(tensor, mean=0, std=num_embeddings**-0.5)
nn.init.constant_(tensor[1], 0)
emb = VocabParallelEmbedding(
num_embeddings, embed_dim, padding_idx, init_method=_vocab_init
)
# if provided, load from preloaded dictionaries
if path:
raise NotImplementedError(
"Loading of embedding from path is not supported for model parallel"
)
return emb
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return ModelParallelTransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return ModelParallelTransformerDecoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, "no_cross_attention", False),
)
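# Hedged sketch (illustration only): why build_embedding above pads the dictionary
# to a multiple of model_parallel_size * 8. VocabParallelEmbedding shards the
# vocabulary evenly across model-parallel ranks, so the padded size must divide
# cleanly. The sizes below are assumptions chosen for the arithmetic.
def _vocab_padding_demo():
    model_parallel_size = 4
    vocab_size = 50001
    multiple = model_parallel_size * 8                           # 32
    padded = ((vocab_size + multiple - 1) // multiple) * multiple
    assert padded == 50016 and padded % model_parallel_size == 0
    return padded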
class ModelParallelTransformerEncoder(TransformerEncoder):
"""
Model parallel Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`ModelParallelTransformerEncoderLayer`.
"""
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
if args.no_final_layer_norm:
self.layer_norm = None
def build_encoder_layer(self, args):
return ModelParallelTransformerEncoderLayer(args)
class ModelParallelTransformerDecoder(TransformerDecoder):
"""
Model Parallel Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`ModelParallelTransformerDecoderLayer`.
"""
def build_decoder_layer(self, args, no_encoder_attn=False):
return ModelParallelTransformerDecoderLayer(args, no_encoder_attn)
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
if not self.share_input_output_embed:
raise NotImplementedError(
"Model parallel training currently requires --share-decoder-input-output-embed"
)
features = copy_to_model_parallel_region(features)
# project back to size of vocabulary
x = self.output_projection(features)
if getattr(self.args, "criterion") != "vocab_parallel_cross_entropy":
x = gather_from_model_parallel_region(x).contiguous()
return x
| EXA-1-master | exa/libraries/fairseq/fairseq/model_parallel/models/transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from fairseq.model_parallel.models.transformer import ModelParallelTransformerDecoder
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer_lm import TransformerLanguageModel
try:
from fairseq.model_parallel.megatron.mpu import VocabParallelEmbedding
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model("model_parallel_transformer_lm")
class ModelParallelTransformerLanguageModel(TransformerLanguageModel):
@staticmethod
def add_args(parser):
TransformerLanguageModel.add_args(parser)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install the megatron submodule:"
"\n\n git submodule update --init "
"fairseq/model_parallel/megatron"
)
# make sure all arguments are present in older models
base_lm_architecture(args)
task.source_dictionary.pad_to_multiple_(args.model_parallel_size * 8)
task.target_dictionary.pad_to_multiple_(args.model_parallel_size * 8)
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
if args.character_embeddings:
raise NotImplementedError(
"Character embeddings is not supported for model parallel"
)
elif args.adaptive_input:
raise NotImplementedError(
"Adaptive input is not supported for model parallel"
)
else:
embed_tokens = cls.build_embedding(
args, task.source_dictionary, args.decoder_input_dim
)
decoder = ModelParallelTransformerDecoder(
args,
task.target_dictionary,
embed_tokens,
no_encoder_attn=True,
)
return cls(decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
def _vocab_init(tensor, **kwargs):
nn.init.normal_(tensor, mean=0, std=embed_dim**-0.5)
nn.init.constant_(tensor[1], 0)
embed_tokens = VocabParallelEmbedding(
len(dictionary), embed_dim, dictionary.pad(), init_method=_vocab_init
)
return embed_tokens
def base_lm_architecture(args):
# backward compatibility for older model checkpoints
if hasattr(args, "no_tie_adaptive_proj"):
# previous models defined --no-tie-adaptive-proj, so use the existence of
# that option to determine if this is an "old" model checkpoint
args.no_decoder_final_norm = True # old models always set this to True
if args.no_tie_adaptive_proj is False:
args.tie_adaptive_proj = True
if hasattr(args, "decoder_final_norm"):
args.no_decoder_final_norm = not args.decoder_final_norm
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.relu_dropout = getattr(args, "relu_dropout", 0.0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
# Model training is not stable without this
args.decoder_normalize_before = True
args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.character_embeddings = getattr(args, "character_embeddings", False)
args.character_filters = getattr(
args,
"character_filters",
"[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]",
)
args.character_embedding_dim = getattr(args, "character_embedding_dim", 4)
args.char_embedder_highway_layers = getattr(args, "char_embedder_highway_layers", 2)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4)
args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0.0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0.0)
args.add_bos_token = getattr(args, "add_bos_token", False)
@register_model_architecture("model_parallel_transformer_lm", "transformer_lm_megatron")
def transformer_lm_megatron(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 3072)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 3072 * 4)
args.decoder_layers = getattr(args, "decoder_layers", 72)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 32)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
base_lm_architecture(args)
@register_model_architecture(
"model_parallel_transformer_lm", "transformer_lm_megatron_11b"
)
def transformer_lm_megatron_11b(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 3072)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 3072 * 6)
args.decoder_layers = getattr(args, "decoder_layers", 72)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 32)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
base_lm_architecture(args)
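# Hedged back-of-the-envelope (illustration only): a rough parameter count for the
# transformer_lm_megatron_11b preset above, counting only the attention and FFN
# weight matrices per decoder layer (biases, layer norms and embeddings ignored).
def _megatron_11b_param_estimate():
    d, ffn, layers = 3072, 3072 * 6, 72
    attn = 4 * d * d               # q, k, v and output projections
    feedforward = 2 * d * ffn      # fc1 and fc2
    total = layers * (attn + feedforward)
    return total                   # ~10.9e9 weights, i.e. roughly the advertised 11B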
| EXA-1-master | exa/libraries/fairseq/fairseq/model_parallel/models/transformer_lm.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .model import * # noqa
| EXA-1-master | exa/libraries/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.model_parallel.models.pipeline_parallel_transformer.layers import (
Embedding,
TransformerDecoderEmbedding,
TransformerDecoderLayer,
TransformerDecoderOutputLayer,
TransformerEncoderEmbedding,
TransformerEncoderLayer,
TransformerEncoderLayerNorm,
)
from fairseq.models import (
BaseFairseqModel,
FairseqDecoder,
FairseqEncoder,
register_model,
register_model_architecture,
)
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.models.transformer import (
base_architecture,
transformer_iwslt_de_en,
transformer_wmt_en_de_big,
)
from fairseq.modules import SinusoidalPositionalEmbedding
logger = logging.getLogger(__name__)
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
TORCH_PIPE = False
RPC_INIT = False
def import_pipe():
global TORCH_PIPE
global RPC_INIT
try:
from torch.distributed.pipeline.sync import Pipe # noqa
global Pipe
from torch.distributed.pipeline.sync.utils import partition_model
global partition_model
from torch.distributed import rpc
import tempfile
TORCH_PIPE = True
# Initialize single process RPC agent since TORCH_PIPE requires
# RRef. RRef depends on RPC being initialized and as a result we initialize
# RPC with a single node.
tmpfile = tempfile.NamedTemporaryFile()
if not RPC_INIT:
rpc.init_rpc(
name="worker",
rank=0,
world_size=1,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method="file://{}".format(tmpfile.name),
),
)
RPC_INIT = True
logger.info("Using torch pipe")
except ImportError:
try:
from fairscale.nn import Pipe # noqa
logger.info("Using fairscale pipe")
except ImportError:
raise ImportError("Please install fairscale with: pip install fairscale")
@register_model("pipeline_parallel_transformer")
class PipelineParallelTransformerModel(BaseFairseqModel):
def __init__(self, encoder, decoder, balance, devices, chunks, checkpoint):
import_pipe()
super().__init__()
assert isinstance(encoder, FairseqEncoder)
assert isinstance(decoder, FairseqDecoder)
encoder_module_list = (
[encoder.embedding_layer]
+ list(encoder.encoder_layers)
+ [encoder.final_layer_norm]
)
self.num_encoder_modules = len(encoder_module_list)
decoder_module_list = (
[decoder.embedding_layer]
+ list(decoder.decoder_layers)
+ [decoder.decoder_output_layer]
)
self.num_decoder_modules = len(decoder_module_list)
module_list = encoder_module_list + decoder_module_list
self.devices = devices
if TORCH_PIPE:
self.model = Pipe(
partition_model(nn.Sequential(*module_list), balance, devices),
chunks=chunks,
checkpoint=checkpoint,
)
else:
self.model = Pipe(
nn.Sequential(*module_list),
balance=balance,
devices=devices,
chunks=chunks,
checkpoint=checkpoint,
)
self.encoder_max_positions = self.max_positions_helper(
encoder.embedding_layer, "max_source_positions"
)
self.decoder_max_positions = self.max_positions_helper(
decoder.embedding_layer, "max_target_positions"
)
self.adaptive_softmax = getattr(decoder, "adaptive_softmax", None)
# Note: To be populated during inference
self.encoder = None
self.decoder = None
def forward(self, src_tokens, src_lengths, prev_output_tokens):
if self.training:
input_lst = [src_tokens, src_lengths, prev_output_tokens]
input = tuple(i.to(self.devices[0], non_blocking=True) for i in input_lst)
if TORCH_PIPE:
return self.model(input).local_value()
else:
return self.model(input)
else:
assert self.encoder is not None and self.decoder is not None, (
"encoder and decoder need to be initialized by "
+ "calling the `prepare_for_inference_()` method"
)
# inference path: encoder and decoder were split out of the Pipe by
# prepare_for_inference_(), so call them directly on the inputs
encoder_output_tuple = self.encoder(src_tokens, src_lengths)
return self.decoder(prev_output_tokens, encoder_out=encoder_output_tuple)
def prepare_for_inference_(self, cfg):
if self.encoder is not None and self.decoder is not None:
logger.info("Encoder and Decoder already initialized")
return
encoder_module_list = []
decoder_module_list = []
module_count = 0
for partition in self.model.partitions:
for module in partition:
if module_count < self.num_encoder_modules:
encoder_module_list.append(module)
else:
decoder_module_list.append(module)
module_count += 1
self.model = None
self.encoder = TransformerEncoder(
cfg.distributed_training, None, None, encoder_module_list
)
self.decoder = TransformerDecoder(
cfg.distributed_training,
None,
None,
decoder_module_list=decoder_module_list,
)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion')
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--num-embedding-chunks', type=int, metavar='N', default=1,
help='Number of embedding layer chunks (enables more even distribution '
'of optimizer states across data parallel nodes '
'when using optimizer state sharding and '
'a big embedding vocabulary)')
# fmt: on
@classmethod
def build_model_base(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if not hasattr(args, "max_source_positions"):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if not hasattr(args, "max_target_positions"):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim, path=None, num_embed_chunks=1):
assert embed_dim % num_embed_chunks == 0, (
f"Number of embedding chunks = {num_embed_chunks} should be "
+ f"divisible by the embedding dimension = {embed_dim}"
)
assert path is None or num_embed_chunks == 1, (
"Loading embedding from a path with number of embedding chunks > 1"
+ " is not yet supported"
)
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
# if provided, load from preloaded dictionaries
if path:
emb = Embedding(num_embeddings, embed_dim, padding_idx)
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
else:
embed_chunk_dim = embed_dim // num_embed_chunks
emb = nn.ModuleList()
for i in range(num_embed_chunks):
emb.append(Embedding(num_embeddings, embed_chunk_dim, padding_idx))
return emb
num_embed_chunks = args.num_embedding_chunks
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = build_embedding(
src_dict,
args.encoder_embed_dim,
args.encoder_embed_path,
num_embed_chunks,
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
assert args.share_decoder_input_output_embed or num_embed_chunks == 1, (
"Not sharing decoder I/O embeddings is not yet supported with number of "
+ "embedding chunks > 1"
)
encoder_embed_tokens = build_embedding(
src_dict,
args.encoder_embed_dim,
args.encoder_embed_path,
num_embed_chunks,
)
decoder_embed_tokens = build_embedding(
tgt_dict,
args.decoder_embed_dim,
args.decoder_embed_path,
num_embed_chunks,
)
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return (encoder, decoder)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(args, tgt_dict, embed_tokens)
@classmethod
def build_model(cls, args, task):
encoder, decoder = cls.build_model_base(args, task)
return PipelineParallelTransformerModel(
encoder=encoder,
decoder=decoder,
balance=utils.eval_str_list(args.pipeline_balance, type=int),
devices=utils.eval_str_list(args.pipeline_devices, type=int),
chunks=args.pipeline_chunks,
checkpoint=args.pipeline_checkpoint,
)
def output_layer(self, features, **kwargs):
"""Project features to the default output size (typically vocabulary size)."""
return self.decoder.output_layer(features, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
return (self.encoder_max_positions, self.decoder_max_positions)
def max_positions_helper(
self, embedding_layer, max_positions_field="max_source_positions"
):
"""Maximum input length supported by the encoder or decoder."""
if embedding_layer.embed_positions is None:
return getattr(embedding_layer, max_positions_field)
return min(
getattr(embedding_layer, max_positions_field),
embedding_layer.embed_positions.max_positions,
)
def get_normalized_probs(self, net_output, log_probs, sample=None):
"""Get normalized probabilities (or log probs) from a net's output."""
if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None:
if sample is not None:
assert "target" in sample
target = sample["target"]
else:
target = None
out = self.adaptive_softmax.get_log_prob(net_output, target=target)
return out.exp_() if not log_probs else out
# A Pipe() module returns a tuple of tensors as the output.
# In this case, the tuple has one element - the output tensor of logits
logits = net_output if isinstance(net_output, torch.Tensor) else net_output[0]
if log_probs:
return utils.log_softmax(logits, dim=-1, onnx_trace=False)
else:
return utils.softmax(logits, dim=-1, onnx_trace=False)
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return self.decoder_max_positions
def load_state_dict(self, state_dict, strict=True, model_cfg=None):
"""Copies parameters and buffers from *state_dict* into this module and
its descendants.
Overrides the method in :class:`nn.Module`. Compared with that method
this additionally "upgrades" *state_dicts* from old checkpoints.
"""
self.upgrade_state_dict(state_dict)
is_regular_transformer = not any("model.partitions" in k for k in state_dict)
if is_regular_transformer:
state_dict = self.convert_to_pipeline_parallel_state_dict(state_dict)
return super().load_state_dict(state_dict, strict)
def convert_to_pipeline_parallel_state_dict(self, state_dict):
new_state_dict = self.state_dict()
encoder_layer_idx = 0
decoder_layer_idx = 0
encoder_key_suffixes = [
"self_attn.k_proj.weight",
"self_attn.k_proj.bias",
"self_attn.v_proj.weight",
"self_attn.v_proj.bias",
"self_attn.q_proj.weight",
"self_attn.q_proj.bias",
"self_attn.out_proj.weight",
"self_attn.out_proj.bias",
"self_attn_layer_norm.weight",
"self_attn_layer_norm.bias",
"fc1.weight",
"fc1.bias",
"fc2.weight",
"fc2.bias",
"final_layer_norm.weight",
"final_layer_norm.bias",
]
decoder_key_suffixes = [
"self_attn.k_proj.weight",
"self_attn.k_proj.bias",
"self_attn.v_proj.weight",
"self_attn.v_proj.bias",
"self_attn.q_proj.weight",
"self_attn.q_proj.bias",
"self_attn.out_proj.weight",
"self_attn.out_proj.bias",
"self_attn_layer_norm.weight",
"self_attn_layer_norm.bias",
"encoder_attn.k_proj.weight",
"encoder_attn.k_proj.bias",
"encoder_attn.v_proj.weight",
"encoder_attn.v_proj.bias",
"encoder_attn.q_proj.weight",
"encoder_attn.q_proj.bias",
"encoder_attn.out_proj.weight",
"encoder_attn.out_proj.bias",
"encoder_attn_layer_norm.weight",
"encoder_attn_layer_norm.bias",
"fc1.weight",
"fc1.bias",
"fc2.weight",
"fc2.bias",
"final_layer_norm.weight",
"final_layer_norm.bias",
]
for pid, partition in enumerate(self.model.partitions):
logger.info(f"Begin Partition {pid}")
for mid, module in enumerate(partition):
# fmt: off
if isinstance(module, TransformerEncoderEmbedding):
new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['encoder.embed_tokens.weight']
new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['encoder.embed_positions._float_tensor']
if isinstance(module, TransformerEncoderLayer):
for suffix in encoder_key_suffixes:
new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'encoder.layers.{encoder_layer_idx}.{suffix}']
encoder_layer_idx += 1
if isinstance(module, TransformerDecoderLayer):
for suffix in decoder_key_suffixes:
new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'decoder.layers.{decoder_layer_idx}.{suffix}']
decoder_layer_idx += 1
if isinstance(module, TransformerEncoderLayerNorm):
if 'encoder.layer_norm.weight' in state_dict:
new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.weight'] = state_dict['encoder.layer_norm.weight']
new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.bias'] = state_dict['encoder.layer_norm.bias']
if isinstance(module, TransformerDecoderEmbedding):
new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['decoder.embed_tokens.weight']
new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['decoder.embed_positions._float_tensor']
if isinstance(module, TransformerDecoderOutputLayer):
new_state_dict[f'model.partitions.{pid}.{mid}.output_projection.weight'] = state_dict['decoder.output_projection.weight']
# fmt: on
return new_state_dict
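# Hedged sketch (illustration only): the embedding chunking behind the
# --num-embedding-chunks option handled in build_model_base above. The vocabulary
# embedding is split into equal-width chunks held in a ModuleList, and the
# encoder embedding layer concatenates the chunk outputs back to the full
# embed_dim. All sizes below are assumptions.
def _embedding_chunk_demo():
    import torch
    import torch.nn as nn

    vocab, embed_dim, num_chunks, padding_idx = 100, 512, 4, 1
    chunk_dim = embed_dim // num_chunks
    emb = nn.ModuleList(
        nn.Embedding(vocab, chunk_dim, padding_idx) for _ in range(num_chunks)
    )
    tokens = torch.randint(0, vocab, (2, 7))                   # B x T
    out = torch.cat([e(tokens) for e in emb], dim=-1)           # B x T x embed_dim
    assert out.shape == (2, 7, embed_dim)
    return out.shape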
class TransformerEncoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens, encoder_module_list=None):
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
import_pipe()
self.use_pipeline = encoder_module_list is not None
if not self.use_pipeline:
self.embedding_layer = TransformerEncoderEmbedding(args, embed_tokens)
self.encoder_layers = nn.Sequential(
*[TransformerEncoderLayer(args) for i in range(args.encoder_layers)]
)
if isinstance(embed_tokens, nn.ModuleList):
emb_dim = sum(e.embedding_dim for e in embed_tokens)
else:
emb_dim = embed_tokens.embedding_dim
self.final_layer_norm = TransformerEncoderLayerNorm(args, emb_dim)
else:
encoder_balance = utils.eval_str_list(
args.pipeline_encoder_balance, type=int
)
encoder_devices = utils.eval_str_list(
args.pipeline_encoder_devices, type=int
)
assert sum(encoder_balance) == len(encoder_module_list), (
f"Sum of encoder_balance={encoder_balance} is not equal "
+ f"to num_encoder_modules={len(encoder_module_list)}"
)
if TORCH_PIPE:
self.model = Pipe(
module=partition_model(
nn.Sequential(*encoder_module_list),
encoder_balance,
encoder_devices,
),
chunks=args.pipeline_chunks,
checkpoint=args.pipeline_checkpoint,
)
else:
self.model = Pipe(
module=nn.Sequential(*encoder_module_list),
balance=encoder_balance,
devices=encoder_devices,
chunks=args.pipeline_chunks,
checkpoint=args.pipeline_checkpoint,
)
def forward(self, src_tokens, src_lengths):
"""
Args:
input_tuple(
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
)
Returns:
output_tuple(
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- prev_output_tokens
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
)
"""
dummy_prev_output_tokens = torch.zeros(
1, dtype=src_tokens.dtype, device=src_tokens.device
)
input_tuple = (src_tokens, src_lengths, dummy_prev_output_tokens)
if self.use_pipeline:
input_tuple = tuple(i.to(self.model.devices[0]) for i in input_tuple)
if TORCH_PIPE:
encoder_out = self.model(input_tuple).local_value()
else:
encoder_out = self.model(input_tuple)
else:
encoder_embed_output_tuple = self.embedding_layer(input_tuple)
encoder_layers_output = self.encoder_layers(encoder_embed_output_tuple)
encoder_out = self.final_layer_norm(encoder_layers_output)
# first element is the encoder output
# second element is the encoder padding mask
# the remaining elements of EncoderOut are not computed by
# the PipelineParallelTransformer
return EncoderOut(encoder_out[0], encoder_out[1], None, None, None, None)
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if encoder_out.encoder_out is not None:
encoder_out = encoder_out._replace(
encoder_out=encoder_out.encoder_out.index_select(1, new_order)
)
if encoder_out.encoder_padding_mask is not None:
encoder_out = encoder_out._replace(
encoder_padding_mask=encoder_out.encoder_padding_mask.index_select(
0, new_order
)
)
if encoder_out.encoder_embedding is not None:
encoder_out = encoder_out._replace(
encoder_embedding=encoder_out.encoder_embedding.index_select(
0, new_order
)
)
if encoder_out.encoder_states is not None:
for idx, state in enumerate(encoder_out.encoder_states):
encoder_out.encoder_states[idx] = state.index_select(1, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embedding_layer.embed_positions is None:
return self.embedding_layer.max_source_positions
return min(
self.embedding_layer.max_source_positions,
self.embedding_layer.embed_positions.max_positions,
)
class TransformerDecoder(FairseqDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
args,
dictionary,
embed_tokens,
no_encoder_attn=False,
decoder_module_list=None,
):
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
import_pipe()
self.use_pipeline = decoder_module_list is not None
if not self.use_pipeline:
self.embedding_layer = TransformerDecoderEmbedding(args, embed_tokens)
self.decoder_layers = nn.Sequential(
*[
TransformerDecoderLayer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
]
)
self.decoder_output_layer = TransformerDecoderOutputLayer(
args, embed_tokens, dictionary
)
else:
decoder_balance = utils.eval_str_list(
args.pipeline_decoder_balance, type=int
)
decoder_devices = utils.eval_str_list(
args.pipeline_decoder_devices, type=int
)
assert sum(decoder_balance) == len(decoder_module_list), (
f"Sum of decoder_balance={decoder_balance} is not equal "
+ f"to num_decoder_modules={len(decoder_module_list)}"
)
if TORCH_PIPE:
self.model = Pipe(
module=partition_model(
nn.Sequential(*decoder_module_list),
decoder_balance,
decoder_devices,
),
chunks=args.pipeline_chunks,
checkpoint=args.pipeline_checkpoint,
)
else:
self.model = Pipe(
module=nn.Sequential(*decoder_module_list),
balance=decoder_balance,
devices=decoder_devices,
chunks=args.pipeline_chunks,
checkpoint=args.pipeline_checkpoint,
)
def forward(
self,
prev_output_tokens,
encoder_out=None,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
input_tuple = (
encoder_out.encoder_out,
encoder_out.encoder_padding_mask,
prev_output_tokens,
)
if self.use_pipeline:
input_tuple = tuple(i.to(self.model.devices[0]) for i in input_tuple)
if TORCH_PIPE:
return (self.model(input_tuple).local_value(),)
else:
return (self.model(input_tuple),)
else:
embed_layer_output = self.embedding_layer(input_tuple)
state = self.decoder_layers(embed_layer_output)
return (self.decoder_output_layer(state),)
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embedding_layer.embed_positions is None:
return self.embedding_layer.max_target_positions
return min(
self.embedding_layer.max_target_positions,
self.embedding_layer.embed_positions.max_positions,
)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
for i in range(len(self.layers)):
# update layer norms
layer_norm_map = {
"0": "self_attn_layer_norm",
"1": "encoder_attn_layer_norm",
"2": "final_layer_norm",
}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
if k in state_dict:
state_dict[
"{}.layers.{}.{}.{}".format(name, i, new, m)
] = state_dict[k]
del state_dict[k]
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
@register_model_architecture(
"pipeline_parallel_transformer", "transformer_iwslt_de_en_pipeline_parallel"
)
def transformer_iwslt_de_en_dist(args):
transformer_iwslt_de_en(args)
@register_model_architecture(
"pipeline_parallel_transformer", "transformer_wmt_en_de_big_pipeline_parallel"
)
def transformer_wmt_en_de_big_dist(args):
transformer_wmt_en_de_big(args)
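# Minimal standalone sketch (an illustrative helper, not fairseq API) of how a pipeline
# balance list splits a module stack across devices; the assert mirrors the
# sum(decoder_balance) == len(decoder_module_list) check in __init__ above, and the
# balance/device values are made up.
import torch.nn as nn
def split_by_balance(modules, balance):
    assert sum(balance) == len(modules), "balance must cover every module exactly once"
    partitions, start = [], 0
    for num_layers in balance:
        # partition i gets the next `num_layers` consecutive modules
        partitions.append(nn.Sequential(*modules[start : start + num_layers]))
        start += num_layers
    return partitions
if __name__ == "__main__":
    layers = [nn.Linear(8, 8) for _ in range(6)]
    # e.g. a decoder balance of [2, 4]: 2 layers on device 0, 4 layers on device 1
    for device_id, part in enumerate(split_by_balance(layers, [2, 4])):
        print(f"device {device_id}: {len(part)} modules")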
| EXA-1-master | exa/libraries/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.modules import (
AdaptiveSoftmax,
LayerNorm,
MultiheadAttention,
PositionalEmbedding,
)
EncoderOut = namedtuple(
"TransformerEncoderOut",
[
"encoder_out", # T x B x C
"encoder_padding_mask", # B x T
"encoder_embedding", # B x T x C
"encoder_states", # List[T x B x C]
],
)
class TransformerEncoderEmbedding(nn.Module):
"""Encoder Embedding + Positional Embedding"""
def __init__(self, args, embed_tokens):
super().__init__()
self.dropout = args.dropout
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
if isinstance(embed_tokens, nn.ModuleList):
self.padding_idx = embed_tokens[0].padding_idx
embed_dim = sum(e.embedding_dim for e in embed_tokens)
else:
self.padding_idx = embed_tokens.padding_idx
embed_dim = embed_tokens.embedding_dim
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
args.max_source_positions,
embed_dim,
self.padding_idx,
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
def forward(self, input):
# embed tokens and positions
src_tokens = input[0]
prev_output_tokens = input[2]
if isinstance(self.embed_tokens, nn.ModuleList):
x_embed_list = []
for embed_tokens_part in self.embed_tokens:
x_embed_list.append(embed_tokens_part(src_tokens))
embedded = torch.cat(x_embed_list, dim=-1)
else:
embedded = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * embedded
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if self.layernorm_embedding:
x = self.layernorm_embedding(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
return (x, encoder_padding_mask, prev_output_tokens)
class TransformerEncoderLayerNorm(nn.Module):
"""
    Layer norm at the end of all encoder layers if
    args.encoder_normalize_before = True
"""
def __init__(self, args, embed_dim):
super().__init__()
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(self, input):
x = input[0]
encoder_padding_mask = input[1]
prev_output_tokens = input[2]
if self.layer_norm:
x = self.layer_norm(x)
# keeping track of the incremental_state is not supported yet
return (x, encoder_padding_mask, prev_output_tokens)
class TransformerDecoderEmbedding(nn.Module):
"""Decoder Embedding + Positional Embedding"""
def __init__(self, args, embed_tokens):
super().__init__()
self.dropout = args.dropout
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = (
sum(e.embedding_dim for e in embed_tokens)
if isinstance(embed_tokens, nn.ModuleList)
else embed_tokens.embedding_dim
)
embed_dim = args.decoder_embed_dim
self.output_embed_dim = args.decoder_output_dim
padding_idx = (
embed_tokens[0].padding_idx
if isinstance(embed_tokens, nn.ModuleList)
else embed_tokens.padding_idx
)
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
args.max_target_positions,
embed_dim,
padding_idx,
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
def forward(self, input):
mt_task = False
if isinstance(input, tuple):
if len(input) == 3:
encoder_out = input[0]
encoder_padding_mask = input[1]
prev_output_tokens = input[2]
incremental_state = None # Hardcoding to avoid passing of None objects
mt_task = True
else:
# HACK for now, need to fix (TODO sidgoyal)
prev_output_tokens = input[0]
# discard "src_lengths"
encoder_out = None
encoder_padding_mask = None
incremental_state = None
else:
prev_output_tokens = input
encoder_out = None
encoder_padding_mask = None
incremental_state = None
positions = (
self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
if isinstance(self.embed_tokens, nn.ModuleList):
x_embed_list = []
for embed_tokens_part in self.embed_tokens:
x_embed_list.append(embed_tokens_part(prev_output_tokens))
x = self.embed_scale * torch.cat(x_embed_list, dim=-1)
else:
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
if mt_task:
return (x, encoder_out, encoder_padding_mask)
return x
class TransformerDecoderOutputLayer(nn.Module):
def __init__(self, args, embed_tokens, dictionary):
super().__init__()
self.share_input_output_embed = args.share_decoder_input_output_embed
self.embed_tokens = embed_tokens
self.output_embed_dim = args.decoder_output_dim
embed_dim = args.decoder_embed_dim
self.project_out_dim = (
Linear(embed_dim, self.output_embed_dim, bias=False)
if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights
else None
)
self.adaptive_softmax = None
if args.adaptive_softmax_cutoff is not None:
assert not isinstance(embed_tokens, nn.ModuleList)
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif not self.share_input_output_embed:
self.embed_tokens = nn.Parameter(
torch.Tensor(len(dictionary), self.output_embed_dim)
)
nn.init.normal_(
self.embed_tokens, mean=0, std=self.output_embed_dim**-0.5
)
if args.decoder_normalize_before and not getattr(
args, "no_decoder_final_norm", False
):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(self, input, apply_final_proj=True):
if isinstance(input, tuple):
x = input[0]
else:
x = input
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
if apply_final_proj:
x = self.output_layer(x)
return x
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
if self.share_input_output_embed:
if isinstance(self.embed_tokens, nn.ModuleList):
output = None
for i, emb in enumerate(self.embed_tokens):
sidx = i * emb.embedding_dim
eidx = (i + 1) * emb.embedding_dim
if output is None:
output = F.linear(features[:, :, sidx:eidx], emb.weight)
else:
output += F.linear(features[:, :, sidx:eidx], emb.weight)
return output
else:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_tokens)
else:
return features
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
"""
def __init__(self, args):
super().__init__()
self.embed_dim = args.encoder_embed_dim
self.self_attn = MultiheadAttention(
self.embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, "activation_fn", "relu")
)
self.activation_dropout = getattr(args, "activation_dropout", 0)
if self.activation_dropout == 0:
# for backwards compatibility with models that use args.relu_dropout
self.activation_dropout = getattr(args, "relu_dropout", 0)
self.normalize_before = args.encoder_normalize_before
self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layer_norms.{}.{}".format(name, old, m)
if k in state_dict:
state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(self, input):
"""
Args:
input (Tuple):
input[0] (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
input[1] (ByteTensor/FloatTensor): encoder padding mask -
binary ByteTensor of shape `(batch, src_len)` where padding elements
are indicated by ``1``.
input[2] (LongTensor): previous decoder outputs of shape
                    `(batch, tgt_len)`, for teacher forcing
Returns:
output (Tuple):
                output[0] (Tensor): encoded output of shape `(seq_len, batch, embed_dim)`
output[1] (ByteTensor/FloatTensor): encoder padding mask
output[2] (LongTensor): previous decoder outputs
"""
x = input[0]
encoder_padding_mask = input[1]
prev_output_tokens = input[2]
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
x, _ = self.self_attn(
query=x, key=x, value=x, key_padding_mask=encoder_padding_mask
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
return (x, encoder_padding_mask, prev_output_tokens)
def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return layer_norm(x)
else:
return x
class TransformerDecoderLayer(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False
):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.self_attn = MultiheadAttention(
embed_dim=self.embed_dim,
num_heads=args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=True,
)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, "activation_fn", "relu")
)
self.activation_dropout = getattr(args, "activation_dropout", 0)
if self.activation_dropout == 0:
# for backwards compatibility with models that use args.relu_dropout
self.activation_dropout = getattr(args, "relu_dropout", 0)
self.normalize_before = args.decoder_normalize_before
# use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determine this.
# TODO remove this once we update apex with the fix
export = getattr(args, "char_inputs", False)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = MultiheadAttention(
self.embed_dim,
args.decoder_attention_heads,
kdim=getattr(args, "encoder_embed_dim", None),
vdim=getattr(args, "encoder_embed_dim", None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(self, input):
"""
Args:
input (Tuple):
input[0] (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
input[1] (Tensor): encoder output of shape `(batch, src_len, embed_dim)`
input[2] (ByteTensor/FloatTensor): encoder padding mask -
binary ByteTensor of shape `(batch, src_len)` where padding elements
are indicated by ``1``.
Returns:
output (Tuple):
                output[0] (Tensor): decoded output of shape `(seq_len, batch, embed_dim)`
                output[1] (Tensor): encoder output
                output[2] (ByteTensor/FloatTensor): encoder padding mask
"""
# Note: incremental state is not yet supported
mt_task = False
if isinstance(input, tuple):
x = input[0]
encoder_out = input[1]
encoder_padding_mask = input[2]
incremental_state = None
mt_task = True
else:
x = input
encoder_out = None
encoder_padding_mask = None
incremental_state = None
if incremental_state is None:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
# TODO: add back prev_self_attn_state, prev_attn_state,
# self_attn_padding_mask
prev_self_attn_state = None
prev_attn_state = None
self_attn_padding_mask = None
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
if prev_self_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_self_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.self_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
if self.encoder_attn is not None:
residual = x
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)
if prev_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=(not self.training and self.need_attn),
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
if mt_task:
return (x, encoder_out, encoder_padding_mask)
return x
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(
utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return layer_norm(x)
else:
return x
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
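# Minimal standalone sketch of the causal mask built by buffered_future_mask above,
# using plain torch.full in place of utils.fill_with_neg_inf: the strictly-upper-
# triangular -inf entries stop position t from attending to positions > t once the
# mask is added to the attention scores.
if __name__ == "__main__":
    dim = 4
    future_mask = torch.triu(torch.full((dim, dim), float("-inf")), 1)
    print(future_mask)
    # tensor([[0., -inf, -inf, -inf],
    #         [0., 0., -inf, -inf],
    #         [0., 0., 0., -inf],
    #         [0., 0., 0., 0.]])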
| EXA-1-master | exa/libraries/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/layers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .model import * # noqa
| EXA-1-master | exa/libraries/fairseq/fairseq/model_parallel/models/roberta/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
RoBERTa: A Robustly Optimized BERT Pretraining Approach.
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.model_parallel.models.transformer import ModelParallelTransformerEncoder
from fairseq.models import register_model, register_model_architecture
from fairseq.models.roberta import (
roberta_base_architecture,
roberta_prenorm_architecture,
RobertaEncoder,
RobertaModel,
)
from fairseq.modules import LayerNorm
try:
from fairseq.model_parallel.megatron.mpu import (
copy_to_model_parallel_region,
gather_from_model_parallel_region,
ColumnParallelLinear,
VocabParallelEmbedding,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
logger = logging.getLogger(__name__)
@register_model("model_parallel_roberta")
class ModelParallelRobertaModel(RobertaModel):
def __init__(self, args, encoder):
super().__init__(args, encoder)
self.classification_heads = nn.ModuleDict()
@staticmethod
def add_args(parser):
RobertaModel.add_args(parser)
parser.add_argument(
"--no-final-layer-norm",
action="store_true",
help=(
"don't add final layernorm (only applicable when "
"--encoder-normalize-before=True"
),
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present
base_architecture(args)
task.source_dictionary.pad_to_multiple_(args.model_parallel_size * 8)
task.target_dictionary.pad_to_multiple_(args.model_parallel_size * 8)
if not hasattr(args, "max_positions"):
args.max_positions = args.tokens_per_sample
if getattr(args, "untie_weights_roberta", False):
raise NotImplementedError(
"--untie-weights-roberta is not supported in model parallel mode"
)
encoder = ModelParallelRobertaEncoder(args, task.source_dictionary)
return cls(args, encoder)
def forward(
self,
src_tokens,
features_only=False,
return_all_hiddens=False,
classification_head_name=None,
**kwargs
):
if classification_head_name is not None:
features_only = True
x, extra = self.encoder(src_tokens, features_only, return_all_hiddens, **kwargs)
if classification_head_name is not None:
x = self.classification_heads[classification_head_name](x)
return x, extra
def register_classification_head(
self, name, num_classes=None, inner_dim=None, **kwargs
):
"""Register a classification head."""
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
"and inner_dim {} (prev: {})".format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = ModelParallelRobertaClassificationHead(
self.args.encoder_embed_dim,
inner_dim or self.args.encoder_embed_dim,
num_classes,
self.args.pooler_activation_fn,
self.args.pooler_dropout,
)
class ModelParallelRobertaLMHead(nn.Module):
"""Head for masked language modeling."""
def __init__(self, embed_dim, output_dim, activation_fn, weight=None):
super().__init__()
self.dense = ColumnParallelLinear(embed_dim, embed_dim, gather_output=True)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.layer_norm = LayerNorm(embed_dim)
if weight is None:
weight = nn.Linear(embed_dim, output_dim, bias=False).weight
self.weight = weight
self.bias = nn.Parameter(torch.zeros(output_dim))
def forward(self, features, masked_tokens=None, **kwargs):
# Only project the unmasked tokens while training,
# saves both memory and computation
if masked_tokens is not None:
features = features[masked_tokens, :]
x = self.dense(features)
x = self.activation_fn(x)
x = self.layer_norm(x)
x = copy_to_model_parallel_region(x)
# project back to size of vocabulary with bias
x = F.linear(x, self.weight)
x = gather_from_model_parallel_region(x).contiguous()
x = x + self.bias
return x
class ModelParallelRobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout
):
super().__init__()
self.dense = ColumnParallelLinear(input_dim, inner_dim, gather_output=True)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class ModelParallelRobertaEncoder(RobertaEncoder):
"""RoBERTa encoder."""
def __init__(self, args, dictionary):
super().__init__(args, dictionary)
assert not self.args.untie_weights_roberta
def build_embedding(self, vocab_size, embedding_dim, padding_idx):
return VocabParallelEmbedding(vocab_size, embedding_dim, padding_idx)
def build_encoder(self, args, dictionary, embed_tokens):
return ModelParallelTransformerEncoder(args, dictionary, embed_tokens)
def build_lm_head(self, embed_dim, output_dim, activation_fn, weight):
return ModelParallelRobertaLMHead(embed_dim, output_dim, activation_fn, weight)
@register_model_architecture("model_parallel_roberta", "model_parallel_roberta")
def base_architecture(args):
args.no_final_layer_norm = getattr(args, "no_final_layer_norm", False)
# model parallel RoBERTa defaults to "Pre-LN" formulation
roberta_prenorm_architecture(args)
# earlier versions of model parallel RoBERTa removed the final layer norm
@register_model_architecture("model_parallel_roberta", "model_parallel_roberta_v1")
def model_parallel_roberta_v1_architecture(args):
args.no_final_layer_norm = getattr(args, "no_final_layer_norm", True)
base_architecture(args)
@register_model_architecture(
"model_parallel_roberta", "model_parallel_roberta_postnorm"
)
def model_parallel_roberta_postnorm_architecture(args):
# the original BERT/RoBERTa uses the "Post-LN" formulation
roberta_base_architecture(args)
@register_model_architecture("model_parallel_roberta", "model_parallel_roberta_base")
def model_parallel_roberta_base_architecture(args):
base_architecture(args)
@register_model_architecture("model_parallel_roberta", "model_parallel_roberta_large")
def model_parallel_roberta_large_architecture(args):
args.encoder_layers = getattr(args, "encoder_layers", 24)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
base_architecture(args)
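# Minimal arithmetic sketch (an illustrative helper, not fairseq API) of the dictionary
# padding done in build_model above: the vocabulary is padded up to a multiple of
# model_parallel_size * 8 so that VocabParallelEmbedding can shard it evenly across
# model-parallel workers. The vocabulary size below is just an example.
def padded_vocab_size(vocab_size, model_parallel_size, multiple=8):
    step = model_parallel_size * multiple
    return ((vocab_size + step - 1) // step) * step
if __name__ == "__main__":
    # e.g. a 50,265-token vocabulary with 2 model-parallel workers
    print(padded_vocab_size(50265, 2))  # 50272 -> 25136 embedding rows per worker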
| EXA-1-master | exa/libraries/fairseq/fairseq/model_parallel/models/roberta/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
try:
from fairseq.model_parallel.megatron.mpu import (
ColumnParallelLinear,
RowParallelLinear,
get_cuda_rng_tracker,
get_model_parallel_world_size,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
@with_incremental_state
class ModelParallelMultiheadAttention(nn.Module):
"""Model parallel Multi-headed attention.
This performs the Multi-headed attention over multiple gpus.
See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
self_attention=False,
encoder_decoder_attention=False,
):
super().__init__()
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install the megatron submodule:"
"\n\n git submodule update --init "
"fairseq/model_parallel/megatron"
)
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.model_parallel_size = get_model_parallel_world_size()
self.num_heads_partition = num_heads // self.model_parallel_size
assert (
self.num_heads_partition * self.model_parallel_size == num_heads
), "Number of heads must be divisible by model parallel size"
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim**-0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert (
not self.self_attention or self.qkv_same_dim
), "Self-attention requires query, key and value to be of the same size"
self.k_proj = ColumnParallelLinear(
self.kdim, embed_dim, bias=bias, gather_output=False
)
self.v_proj = ColumnParallelLinear(
self.vdim, embed_dim, bias=bias, gather_output=False
)
self.q_proj = ColumnParallelLinear(
embed_dim, embed_dim, bias=bias, gather_output=False
)
self.out_proj = RowParallelLinear(
embed_dim, embed_dim, bias=bias, input_is_parallel=True
)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
**unused_kwargs,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
"""
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
is_tpu = query.device.type == "xla"
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads_partition, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(
bsz * self.num_heads_partition, -1, self.head_dim
)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(
bsz * self.num_heads_partition, -1, self.head_dim
)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = (
ModelParallelMultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
)
saved_state["prev_key"] = k.view(
bsz, self.num_heads_partition, -1, self.head_dim
)
saved_state["prev_value"] = v.view(
bsz, self.num_heads_partition, -1, self.head_dim
)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
src_len = k.size(1)
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [
bsz * self.num_heads_partition,
tgt_len,
src_len,
]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(
bsz, self.num_heads_partition, tgt_len, src_len
)
if not is_tpu:
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view(
bsz * self.num_heads_partition, tgt_len, src_len
)
attn_weights_float = utils.softmax(attn_weights, dim=-1)
attn_weights = attn_weights_float.type_as(attn_weights)
with get_cuda_rng_tracker().fork():
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [
bsz * self.num_heads_partition,
tgt_len,
self.head_dim,
]
embed_dim_partition = embed_dim // self.model_parallel_size
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim_partition)
attn = self.out_proj(attn)
        # Return attn_weights as None to keep the return type the same as the
        # single-GPU multihead attention. This will be deprecated.
attn_weights: Optional[Tensor] = None
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(batch_size, src_len - prev_key_padding_mask.size(1))
if prev_key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(batch_size, src_len - key_padding_mask.size(1))
if key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
def reorder_incremental_state(
self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
if input_buffer[k] is not None:
input_buffer[k] = input_buffer[k].index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
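# Minimal standalone sketch (plain torch, a single model-parallel partition) of the
# shape flow in forward() above: q/k/v are reshaped to
# (bsz * num_heads_partition, seq_len, head_dim), scores are masked with the key
# padding mask, then softmax and a second bmm give the per-head outputs. All sizes
# below are made up for illustration.
if __name__ == "__main__":
    bsz, heads_partition, tgt_len, src_len, head_dim = 2, 4, 5, 7, 16
    q = torch.randn(bsz * heads_partition, tgt_len, head_dim) * head_dim**-0.5
    k = torch.randn(bsz * heads_partition, src_len, head_dim)
    v = torch.randn(bsz * heads_partition, src_len, head_dim)
    attn_weights = torch.bmm(q, k.transpose(1, 2))  # (bsz*heads, tgt_len, src_len)
    key_padding_mask = torch.zeros(bsz, src_len, dtype=torch.bool)
    key_padding_mask[:, -2:] = True  # pretend the last two source positions are pads
    attn_weights = (
        attn_weights.view(bsz, heads_partition, tgt_len, src_len)
        .masked_fill(key_padding_mask.unsqueeze(1).unsqueeze(2), float("-inf"))
        .view(bsz * heads_partition, tgt_len, src_len)
    )
    attn = torch.bmm(attn_weights.softmax(dim=-1), v)  # (bsz*heads, tgt_len, head_dim)
    print(attn.shape)  # torch.Size([8, 5, 16])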
| EXA-1-master | exa/libraries/fairseq/fairseq/model_parallel/modules/multihead_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .multihead_attention import ModelParallelMultiheadAttention
from .transformer_layer import (
ModelParallelTransformerEncoderLayer,
ModelParallelTransformerDecoderLayer,
)
__all__ = [
"ModelParallelMultiheadAttention",
"ModelParallelTransformerEncoderLayer",
"ModelParallelTransformerDecoderLayer",
]
| EXA-1-master | exa/libraries/fairseq/fairseq/model_parallel/modules/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.model_parallel.modules import ModelParallelMultiheadAttention
from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer
try:
from fairseq.model_parallel.megatron.mpu import (
ColumnParallelLinear,
RowParallelLinear,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
class ModelParallelTransformerEncoderLayer(TransformerEncoderLayer):
"""Encoder layer block over multiple gpus.
See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
"""
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
if q_noise > 0:
raise NotImplementedError
return ColumnParallelLinear(input_dim, output_dim, gather_output=False)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
if q_noise > 0:
raise NotImplementedError
return RowParallelLinear(input_dim, output_dim, input_is_parallel=True)
def build_self_attention(self, embed_dim, args, **unused_kwargs):
return ModelParallelMultiheadAttention(
embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
)
class ModelParallelTransformerDecoderLayer(TransformerDecoderLayer):
"""Decoder layer block.
See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
"""
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
if q_noise > 0:
raise NotImplementedError
return ColumnParallelLinear(input_dim, output_dim, gather_output=False)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
if q_noise > 0:
raise NotImplementedError
return RowParallelLinear(input_dim, output_dim, input_is_parallel=True)
def build_self_attention(self, embed_dim, args, **unused_kwargs):
return ModelParallelMultiheadAttention(
embed_dim=embed_dim,
num_heads=args.decoder_attention_heads,
dropout=args.attention_dropout,
self_attention=not getattr(args, "cross_self_attention", False),
)
def build_encoder_attention(self, embed_dim, args, **unused_kwargs):
return ModelParallelMultiheadAttention(
embed_dim=embed_dim,
num_heads=args.decoder_attention_heads,
kdim=getattr(args, "encoder_embed_dim", None),
vdim=getattr(args, "encoder_embed_dim", None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
)
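# Minimal standalone sketch (plain torch, no megatron) of why the blocks above pair
# ColumnParallelLinear(gather_output=False) with RowParallelLinear(input_is_parallel=True):
# splitting fc1 by output columns and fc2 by input rows, then summing the partial
# results, reproduces the unpartitioned feed-forward computation. Sizes are made up.
import torch
if __name__ == "__main__":
    torch.manual_seed(0)
    d_model, d_ffn, split = 8, 16, 8  # worker 0 owns the first `split` ffn units
    x = torch.randn(3, d_model)
    w1 = torch.randn(d_model, d_ffn)  # fc1 weight as a plain matrix
    w2 = torch.randn(d_ffn, d_model)  # fc2 weight as a plain matrix
    full = torch.relu(x @ w1) @ w2  # unpartitioned reference
    part0 = torch.relu(x @ w1[:, :split]) @ w2[:split]  # worker 0's contribution
    part1 = torch.relu(x @ w1[:, split:]) @ w2[split:]  # worker 1's contribution
    print(torch.allclose(full, part0 + part1, atol=1e-5))  # True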
| EXA-1-master | exa/libraries/fairseq/fairseq/model_parallel/modules/transformer_layer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from fairseq import utils
from fairseq.logging import metrics
from fairseq.criterions import FairseqCriterion, register_criterion
try:
from fairseq.model_parallel.megatron.mpu.cross_entropy import (
vocab_parallel_cross_entropy,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
@register_criterion("vocab_parallel_cross_entropy")
class VocabParallelCrossEntropyCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install the megatron submodule:"
"\n\n git submodule update --init "
"fairseq/model_parallel/megatron"
)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
target = sample["target"]
loss = vocab_parallel_cross_entropy(net_output[0].float(), target)
loss = (loss * (target != self.padding_idx)).sum()
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
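# Minimal arithmetic sketch of the reduction in reduce_metrics above: the summed
# cross-entropy (in nats) is divided by the token count and by ln(2) to report a
# per-token loss in bits, and the derived perplexity is 2 ** that value. The totals
# below are hypothetical, just to show the unit conversion.
if __name__ == "__main__":
    loss_sum, ntokens = 693.15, 100
    loss_bits = loss_sum / ntokens / math.log(2)
    print(round(loss_bits, 3))  # ~10.0 bits per token
    print(round(2**loss_bits, 1))  # ~1024.0, the reported perplexity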
| EXA-1-master | exa/libraries/fairseq/fairseq/model_parallel/criterions/vocab_parallel_cross_entropy.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
# automatically import any Python files in the criterions/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
module = file[: file.find(".py")]
importlib.import_module("fairseq.model_parallel.criterions." + module)
| EXA-1-master | exa/libraries/fairseq/fairseq/model_parallel/criterions/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import numpy as np
from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer
@dataclass
class BertScoreScorerConfig(FairseqDataclass):
bert_score_lang: str = field(default="en", metadata={"help": "BERTScore language"})
@register_scorer("bert_score", dataclass=BertScoreScorerConfig)
class BertScoreScorer(BaseScorer):
def __init__(self, cfg):
super(BertScoreScorer, self).__init__(cfg)
try:
import bert_score as _bert_score
except ImportError:
raise ImportError("Please install BERTScore: pip install bert-score")
self.cfg = cfg
self._bert_score = _bert_score
self.scores = None
def add_string(self, ref, pred):
self.ref.append(ref)
self.pred.append(pred)
def score(self, order=4):
_, _, self.scores = self._bert_score.score(
self.pred, self.ref, lang=self.cfg.bert_score_lang
)
self.scores = self.scores.numpy()
return np.mean(self.scores)
def result_string(self, order=4):
return f"BERTScore: {self.score():.4f}"
| EXA-1-master | exa/libraries/fairseq/fairseq/scoring/bertscore.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ctypes
import math
import sys
from dataclasses import dataclass, field
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer
from fairseq.scoring.tokenizer import EvaluationTokenizer
class BleuStat(ctypes.Structure):
_fields_ = [
("reflen", ctypes.c_size_t),
("predlen", ctypes.c_size_t),
("match1", ctypes.c_size_t),
("count1", ctypes.c_size_t),
("match2", ctypes.c_size_t),
("count2", ctypes.c_size_t),
("match3", ctypes.c_size_t),
("count3", ctypes.c_size_t),
("match4", ctypes.c_size_t),
("count4", ctypes.c_size_t),
]
@dataclass
class SacrebleuConfig(FairseqDataclass):
sacrebleu_tokenizer: EvaluationTokenizer.ALL_TOKENIZER_TYPES = field(
default="13a", metadata={"help": "tokenizer"}
)
sacrebleu_lowercase: bool = field(
default=False, metadata={"help": "apply lowercasing"}
)
sacrebleu_char_level: bool = field(
default=False, metadata={"help": "evaluate at character level"}
)
@register_scorer("sacrebleu", dataclass=SacrebleuConfig)
class SacrebleuScorer(BaseScorer):
def __init__(self, cfg):
super(SacrebleuScorer, self).__init__(cfg)
import sacrebleu
self.sacrebleu = sacrebleu
self.tokenizer = EvaluationTokenizer(
tokenizer_type=cfg.sacrebleu_tokenizer,
lowercase=cfg.sacrebleu_lowercase,
character_tokenization=cfg.sacrebleu_char_level,
)
def add_string(self, ref, pred):
self.ref.append(self.tokenizer.tokenize(ref))
self.pred.append(self.tokenizer.tokenize(pred))
def _score(self, order=4):
if order != 4:
raise NotImplementedError
# tokenization and lowercasing are performed by self.tokenizer instead.
return self.sacrebleu.corpus_bleu(self.pred, [self.ref], tokenize="none")
def score(self, order=4):
return self._score(order).score
def result_string(self, order=4):
return self._score(order).format()
@dataclass
class BleuConfig(FairseqDataclass):
pad: int = field(default=1, metadata={"help": "padding index"})
eos: int = field(default=2, metadata={"help": "eos index"})
unk: int = field(default=3, metadata={"help": "unk index"})
@register_scorer("bleu", dataclass=BleuConfig)
class Scorer(object):
def __init__(self, cfg):
self.stat = BleuStat()
self.pad = cfg.pad
self.eos = cfg.eos
self.unk = cfg.unk
try:
from fairseq import libbleu
except ImportError as e:
sys.stderr.write(
"ERROR: missing libbleu.so. run `pip install --editable .`\n"
)
raise e
self.C = ctypes.cdll.LoadLibrary(libbleu.__file__)
self.reset()
def reset(self, one_init=False):
if one_init:
self.C.bleu_one_init(ctypes.byref(self.stat))
else:
self.C.bleu_zero_init(ctypes.byref(self.stat))
def add(self, ref, pred):
if not isinstance(ref, torch.IntTensor):
raise TypeError("ref must be a torch.IntTensor (got {})".format(type(ref)))
if not isinstance(pred, torch.IntTensor):
raise TypeError("pred must be a torch.IntTensor(got {})".format(type(pred)))
# don't match unknown words
rref = ref.clone()
assert not rref.lt(0).any()
rref[rref.eq(self.unk)] = -999
rref = rref.contiguous().view(-1)
pred = pred.contiguous().view(-1)
self.C.bleu_add(
ctypes.byref(self.stat),
ctypes.c_size_t(rref.size(0)),
ctypes.c_void_p(rref.data_ptr()),
ctypes.c_size_t(pred.size(0)),
ctypes.c_void_p(pred.data_ptr()),
ctypes.c_int(self.pad),
ctypes.c_int(self.eos),
)
def score(self, order=4):
psum = sum(
math.log(p) if p > 0 else float("-Inf") for p in self.precision()[:order]
)
return self.brevity() * math.exp(psum / order) * 100
def precision(self):
def ratio(a, b):
return a / b if b > 0 else 0
return [
ratio(self.stat.match1, self.stat.count1),
ratio(self.stat.match2, self.stat.count2),
ratio(self.stat.match3, self.stat.count3),
ratio(self.stat.match4, self.stat.count4),
]
def brevity(self):
r = self.stat.reflen / self.stat.predlen
return min(1, math.exp(1 - r))
def result_string(self, order=4):
assert order <= 4, "BLEU scores for order > 4 aren't supported"
fmt = "BLEU{} = {:2.2f}, {:2.1f}"
for _ in range(1, order):
fmt += "/{:2.1f}"
fmt += " (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})"
bleup = [p * 100 for p in self.precision()[:order]]
return fmt.format(
order,
self.score(order=order),
*bleup,
self.brevity(),
self.stat.predlen / self.stat.reflen,
self.stat.predlen,
self.stat.reflen
)
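# Minimal usage sketch of SacrebleuScorer above, assuming sacrebleu is installed;
# the reference/hypothesis strings are made up and the default "13a" tokenizer is used.
if __name__ == "__main__":
    scorer = SacrebleuScorer(SacrebleuConfig())
    scorer.add_string("the cat sat on the mat", "the cat sat on a mat")
    print(scorer.result_string())  # corpus BLEU over everything added so far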
| EXA-1-master | exa/libraries/fairseq/fairseq/scoring/bleu.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from abc import ABC, abstractmethod
from fairseq import registry
from omegaconf import DictConfig
class BaseScorer(ABC):
def __init__(self, cfg):
self.cfg = cfg
self.ref = []
self.pred = []
def add_string(self, ref, pred):
self.ref.append(ref)
self.pred.append(pred)
@abstractmethod
def score(self) -> float:
pass
@abstractmethod
def result_string(self) -> str:
pass
_build_scorer, register_scorer, SCORER_REGISTRY, _ = registry.setup_registry(
"--scoring", default="bleu"
)
def build_scorer(choice, tgt_dict):
_choice = choice._name if isinstance(choice, DictConfig) else choice
if _choice == "bleu":
from fairseq.scoring import bleu
return bleu.Scorer(
bleu.BleuConfig(pad=tgt_dict.pad(), eos=tgt_dict.eos(), unk=tgt_dict.unk())
)
return _build_scorer(choice)
# automatically import any Python files in the current directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
module = file[: file.find(".py")]
importlib.import_module("fairseq.scoring." + module)
| EXA-1-master | exa/libraries/fairseq/fairseq/scoring/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer
from fairseq.scoring.tokenizer import EvaluationTokenizer
@dataclass
class WerScorerConfig(FairseqDataclass):
wer_tokenizer: EvaluationTokenizer.ALL_TOKENIZER_TYPES = field(
default="none", metadata={"help": "sacreBLEU tokenizer to use for evaluation"}
)
wer_remove_punct: bool = field(
default=False, metadata={"help": "remove punctuation"}
)
wer_char_level: bool = field(
default=False, metadata={"help": "evaluate at character level"}
)
wer_lowercase: bool = field(default=False, metadata={"help": "lowercasing"})
@register_scorer("wer", dataclass=WerScorerConfig)
class WerScorer(BaseScorer):
def __init__(self, cfg):
super().__init__(cfg)
self.reset()
try:
import editdistance as ed
except ImportError:
raise ImportError("Please install editdistance to use WER scorer")
self.ed = ed
self.tokenizer = EvaluationTokenizer(
tokenizer_type=self.cfg.wer_tokenizer,
lowercase=self.cfg.wer_lowercase,
punctuation_removal=self.cfg.wer_remove_punct,
character_tokenization=self.cfg.wer_char_level,
)
def reset(self):
self.distance = 0
self.ref_length = 0
def add_string(self, ref, pred):
ref_items = self.tokenizer.tokenize(ref).split()
pred_items = self.tokenizer.tokenize(pred).split()
self.distance += self.ed.eval(ref_items, pred_items)
self.ref_length += len(ref_items)
def result_string(self):
return f"WER: {self.score():.2f}"
def score(self):
return 100.0 * self.distance / self.ref_length if self.ref_length > 0 else 0
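# Minimal usage sketch of WerScorer above, assuming editdistance (and sacrebleu, for
# the evaluation tokenizer) are installed: one substituted word out of four -> 25% WER.
if __name__ == "__main__":
    scorer = WerScorer(WerScorerConfig())
    scorer.add_string("the cat sat down", "the cat sat up")
    print(scorer.result_string())  # WER: 25.00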
| EXA-1-master | exa/libraries/fairseq/fairseq/scoring/wer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unicodedata
import sacrebleu as sb
from fairseq.dataclass import ChoiceEnum
SACREBLEU_V2_ABOVE = int(sb.__version__[0]) >= 2
class EvaluationTokenizer(object):
"""A generic evaluation-time tokenizer, which leverages built-in tokenizers
in sacreBLEU (https://github.com/mjpost/sacrebleu). It additionally provides
lowercasing, punctuation removal and character tokenization, which are
applied after sacreBLEU tokenization.
Args:
tokenizer_type (str): the type of sacreBLEU tokenizer to apply.
lowercase (bool): lowercase the text.
punctuation_removal (bool): remove punctuation (based on unicode
category) from text.
character_tokenization (bool): tokenize the text to characters.
"""
SPACE = chr(32)
SPACE_ESCAPE = chr(9601)
_ALL_TOKENIZER_TYPES = (
sb.BLEU.TOKENIZERS
if SACREBLEU_V2_ABOVE
else ["none", "13a", "intl", "zh", "ja-mecab"]
)
ALL_TOKENIZER_TYPES = ChoiceEnum(_ALL_TOKENIZER_TYPES)
def __init__(
self,
tokenizer_type: str = "13a",
lowercase: bool = False,
punctuation_removal: bool = False,
character_tokenization: bool = False,
):
assert (
tokenizer_type in self._ALL_TOKENIZER_TYPES
), f"{tokenizer_type}, {self._ALL_TOKENIZER_TYPES}"
self.lowercase = lowercase
self.punctuation_removal = punctuation_removal
self.character_tokenization = character_tokenization
if SACREBLEU_V2_ABOVE:
self.tokenizer = sb.BLEU(tokenize=str(tokenizer_type)).tokenizer
else:
self.tokenizer = sb.tokenizers.TOKENIZERS[tokenizer_type]()
@classmethod
def remove_punctuation(cls, sent: str):
"""Remove punctuation based on Unicode category."""
return cls.SPACE.join(
t
for t in sent.split(cls.SPACE)
if not all(unicodedata.category(c)[0] == "P" for c in t)
)
def tokenize(self, sent: str):
tokenized = self.tokenizer(sent)
if self.punctuation_removal:
tokenized = self.remove_punctuation(tokenized)
if self.character_tokenization:
tokenized = self.SPACE.join(
list(tokenized.replace(self.SPACE, self.SPACE_ESCAPE))
)
if self.lowercase:
tokenized = tokenized.lower()
return tokenized
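# Minimal usage sketch, assuming sacrebleu is installed: sacreBLEU tokenization runs
# first, then the optional lowercasing / punctuation removal / character splitting.
if __name__ == "__main__":
    tokenizer = EvaluationTokenizer(
        tokenizer_type="13a", lowercase=True, punctuation_removal=True
    )
    print(tokenizer.tokenize("Hello, world!"))  # hello world
    char_tokenizer = EvaluationTokenizer(tokenizer_type="13a", character_tokenization=True)
    print(char_tokenizer.tokenize("ab cd"))  # a b ▁ c d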
| EXA-1-master | exa/libraries/fairseq/fairseq/scoring/tokenizer.py |