Columns: python_code (string, lengths 0 to 992k), repo_name (string, lengths 8 to 46), file_path (string, lengths 5 to 162)
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from collections.abc import Collection from dataclasses import dataclass, field from typing import List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class CosineLRScheduleConfig(FairseqDataclass): warmup_updates: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) warmup_init_lr: float = field( default=-1, metadata={ "help": "initial learning rate during warmup phase; default is cfg.lr" }, ) lr: List[float] = field( default=II("optimization.lr"), metadata={"help": "max learning rate, must be more than cfg.min_lr"}, ) min_lr: float = field(default=0.0, metadata={"help": "min learning rate"}) t_mult: float = field( default=1.0, metadata={"help": "factor to grow the length of each period"} ) lr_period_updates: float = field( default=-1, metadata={"help": "initial number of updates per period"} ) lr_shrink: float = field( default=0.1, metadata={"help": "shrink factor for annealing"} ) # This is not required, but is for convenience in inferring lr_period_updates max_update: int = II("optimization.max_update") @register_lr_scheduler("cosine", dataclass=CosineLRScheduleConfig) class CosineLRSchedule(FairseqLRScheduler): """Assign LR based on a cyclical schedule that follows the cosine function. See https://arxiv.org/pdf/1608.03983.pdf for details. We also support a warmup phase where we linearly increase the learning rate from some initial learning rate (``--warmup-init-lr``) until the configured max learning rate (``--lr``). During warmup:: lrs = torch.linspace(cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates) lr = lrs[update_num] After warmup:: lr = cfg.min_lr + 0.5*(cfg.lr - cfg.min_lr)*(1 + cos(t_curr / t_i)) where ``t_curr`` is current percentage of updates within the current period range and ``t_i`` is the current period range, which is scaled by ``t_mul`` after every iteration. """ def __init__(self, cfg: CosineLRScheduleConfig, fairseq_optimizer): super().__init__(cfg, fairseq_optimizer) if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1: raise ValueError( "Cannot use a fixed learning rate schedule with cosine." f" Consider --lr-scheduler=fixed instead. 
({cfg.lr})" ) self.max_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr assert ( self.max_lr > cfg.min_lr ), f"max_lr (={cfg.lr}) must be more than min_lr (={cfg.min_lr})" warmup_end_lr = self.max_lr if cfg.warmup_init_lr < 0: cfg.warmup_init_lr = cfg.min_lr self.t_mult = cfg.t_mult self.period = cfg.lr_period_updates if self.period <= 0: assert ( cfg.max_update > 0 ), "Either --max_update or --lr-period-updates must be set" self.period = cfg.max_update - cfg.warmup_updates if cfg.warmup_updates > 0: # linearly warmup for the first cfg.warmup_updates self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates else: self.lr_step = 1 self.warmup_updates = cfg.warmup_updates self.lr_shrink = cfg.lr_shrink # initial learning rate self.lr = cfg.warmup_init_lr self.optimizer.set_lr(self.lr) def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if num_updates < self.cfg.warmup_updates: self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step else: curr_updates = num_updates - self.cfg.warmup_updates if self.t_mult != 1: i = math.floor( math.log( 1 - curr_updates / self.period * (1 - self.t_mult), self.t_mult ) ) t_i = self.t_mult ** i * self.period t_curr = ( curr_updates - (1 - self.t_mult ** i) / (1 - self.t_mult) * self.period ) else: i = math.floor(curr_updates / self.period) t_i = self.period t_curr = curr_updates - (self.period * i) lr_shrink = self.lr_shrink ** i min_lr = self.cfg.min_lr * lr_shrink max_lr = self.max_lr * lr_shrink self.lr = min_lr + 0.5 * (max_lr - min_lr) * ( 1 + math.cos(math.pi * t_curr / t_i) ) self.optimizer.set_lr(self.lr) return self.lr
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/optim/lr_scheduler/cosine_lr_scheduler.py
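A minimal standalone sketch of the schedule that CosineLRSchedule.step_update() computes, restricted to the t_mult == 1 case; the hyper-parameter values below are illustrative assumptions, not defaults from the config above.

import math

# Illustrative values (assumptions), mirroring the config fields above.
warmup_updates = 100
warmup_init_lr = 1e-7
max_lr = 1e-3
min_lr = 1e-5
period = 1000        # lr_period_updates
lr_shrink = 0.1      # per-cycle shrink factor

def cosine_lr(num_updates):
    # Linear warmup, as in step_update() above.
    if num_updates < warmup_updates:
        lr_step = (max_lr - warmup_init_lr) / warmup_updates
        return warmup_init_lr + num_updates * lr_step
    # Cosine annealing with a fixed period (t_mult == 1) and per-cycle shrink.
    curr = num_updates - warmup_updates
    i = curr // period                 # index of the current cycle
    t_curr = curr - period * i         # position inside the cycle
    shrink = lr_shrink ** i
    lo, hi = min_lr * shrink, max_lr * shrink
    return lo + 0.5 * (hi - lo) * (1 + math.cos(math.pi * t_curr / period))

for step in (0, 50, 100, 600, 1100):
    print(step, f"{cosine_lr(step):.2e}")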
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections.abc import Collection from dataclasses import dataclass, field from typing import List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class StepLRScheduleConfig(FairseqDataclass): warmup_updates: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) warmup_init_lr: float = field( default=-1, metadata={ "help": "initial learning rate during warmup phase; default is cfg.lr" }, ) lr: List[float] = field( default=II("optimization.lr"), metadata={"help": "max learning rate, must be more than cfg.min_lr"}, ) min_lr: float = field(default=0.0, metadata={"help": "min learning rate"}) lr_deacy_period: int = field(default=25000, metadata={"help": "decay period"}) lr_decay: float = field(default=0.5, metadata={"help": "decay factor"}) @register_lr_scheduler("step", dataclass=StepLRScheduleConfig) class StepLRSchedule(FairseqLRScheduler): """Decay learning rate every k updates by a fixed factor """ def __init__(self, cfg: StepLRScheduleConfig, fairseq_optimizer): super().__init__(cfg, fairseq_optimizer) self.max_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr self.min_lr = cfg.min_lr self.lr_deacy_period = cfg.lr_deacy_period self.lr_decay = cfg.lr_decay self.warmup_updates = cfg.warmup_updates self.warmup_init_lr = ( cfg.warmup_init_lr if cfg.warmup_init_lr >= 0 else self.min_lr ) assert(self.lr_deacy_period > 0) assert(self.lr_decay <= 1) assert(self.min_lr >= 0) assert(self.max_lr > self.min_lr) if cfg.warmup_updates > 0: # linearly warmup for the first cfg.warmup_updates self.warmup_lr_step = ( (self.max_lr - self.warmup_init_lr) / self.warmup_updates ) else: self.warmup_lr_step = 1 # initial learning rate self.lr = self.warmup_init_lr self.optimizer.set_lr(self.lr) def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if num_updates < self.cfg.warmup_updates: self.lr = self.warmup_init_lr + num_updates * self.warmup_lr_step else: curr_updates = num_updates - self.cfg.warmup_updates lr_mult = self.lr_decay ** (curr_updates // self.lr_deacy_period) self.lr = max(self.max_lr * lr_mult, self.min_lr) self.optimizer.set_lr(self.lr) return self.lr
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/optim/lr_scheduler/step_lr_scheduler.py
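For comparison, a standalone sketch of the step decay with linear warmup that StepLRSchedule.step_update() implements; the numbers are assumptions chosen only to make the curve visible.

warmup_updates = 500
warmup_init_lr = 1e-7
max_lr = 5e-4
min_lr = 1e-6
decay_period = 25000   # the config field spelled lr_deacy_period above
decay_factor = 0.5

def step_lr(num_updates):
    if num_updates < warmup_updates:
        lr_step = (max_lr - warmup_init_lr) / warmup_updates
        return warmup_init_lr + num_updates * lr_step
    curr = num_updates - warmup_updates
    return max(max_lr * decay_factor ** (curr // decay_period), min_lr)

for step in (0, 500, 30000, 120000):
    print(step, f"{step_lr(step):.2e}")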
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass, field from typing import List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class TriangularLRScheduleConfig(FairseqDataclass): max_lr: float = field( default="???", metadata={"help": "max learning rate, must be more than cfg.lr"} ) lr_period_updates: float = field( default=5000, metadata={"help": "initial number of updates per period (cycle length)"}, ) lr_shrink: float = field( default=0.1, metadata={"help": "shrink factor for annealing"} ) shrink_min: bool = field( default=False, metadata={"help": "if set, also shrinks min lr"} ) lr: List[float] = II("optimization.lr") @register_lr_scheduler("triangular", dataclass=TriangularLRScheduleConfig) class TriangularLRSchedule(FairseqLRScheduler): """Assign LR based on a triangular cyclical schedule. See https://arxiv.org/pdf/1506.01186.pdf for details. """ def __init__(self, cfg: TriangularLRScheduleConfig, optimizer): super().__init__(cfg, optimizer) if len(cfg.lr) > 1: raise ValueError( "Cannot use a fixed learning rate schedule with triangular." " Consider --lr-scheduler=fixed instead." ) lr = cfg.lr[0] assert cfg.max_lr > lr, "max_lr must be more than lr" self.min_lr = lr self.max_lr = cfg.max_lr self.stepsize = cfg.lr_period_updates // 2 self.lr_shrink = cfg.lr_shrink self.shrink_min = cfg.shrink_min # initial learning rate self.lr = self.min_lr self.optimizer.set_lr(self.lr) def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" cycle = math.floor(num_updates / (2 * self.stepsize)) lr_shrink = self.lr_shrink ** cycle max_lr = self.max_lr * lr_shrink if self.shrink_min: min_lr = self.min_lr * lr_shrink else: min_lr = self.min_lr x = abs(num_updates / self.stepsize - 2 * (cycle + 1) + 1) self.lr = min_lr + (max_lr - min_lr) * max(0, (1 - x)) self.optimizer.set_lr(self.lr) return self.lr
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/optim/lr_scheduler/triangular_lr_scheduler.py
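The triangular schedule reduces to a few lines of arithmetic; this sketch mirrors step_update() above with assumed toy values.

min_lr, max_lr = 1e-5, 1e-3
period = 5000              # lr_period_updates (one full cycle)
stepsize = period // 2
lr_shrink, shrink_min = 0.1, False

def triangular_lr(num_updates):
    cycle = num_updates // (2 * stepsize)
    shrink = lr_shrink ** cycle
    hi = max_lr * shrink
    lo = min_lr * shrink if shrink_min else min_lr
    x = abs(num_updates / stepsize - 2 * (cycle + 1) + 1)
    return lo + (hi - lo) * max(0.0, 1.0 - x)

for step in (0, 2500, 5000, 7500):
    print(step, f"{triangular_lr(step):.2e}")   # min, max, min, max ...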
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional import torch from torch import Tensor @torch.jit.script def script_skip_tensor_list(x: List[Tensor], mask): res = [xi[mask] if xi.size(0) == mask.size(0) else xi[:, mask] for xi in x] outputs = [] for i, t in enumerate(res): if t.numel() != 0: outputs.append(t) else: outputs.append(x[i]) return outputs @torch.jit.script def script_skip_tensor(x: Tensor, mask): # None case if x.size(0) == 0: return x res = x[mask] if x.size(0) == mask.size(0) else x[:, mask] if res.numel() == 0: return x else: return res @torch.jit.script def expand_2d_or_3d_tensor(x, trg_dim: int, padding_idx: int): """ Expand 2D/3D tensor on dim=1 """ if x is None: return None assert x.dim() == 2 or x.dim() == 3 assert trg_dim >= x.size(1), (trg_dim, x.size()) if trg_dim == x.size(1): return x dims = [x.size(0), trg_dim - x.size(1)] if x.dim() == 3: dims.append(x.size(2)) x = torch.cat([x, torch.zeros(dims).to(x).fill_(padding_idx)], 1) return x @torch.jit.script def coalesce(x: Optional[Tensor], y: Tensor) -> Tensor: return x if x is not None else y @torch.jit.script def fill_tensors( x: Optional[Tensor], mask, y: Optional[Tensor], padding_idx: int ) -> Optional[Tensor]: """ Filling tensor x with y at masked positions (dim=0). """ if x is None or x.size()[0] == 0 or y is None: return x assert x.dim() == y.dim() and mask.size(0) == x.size(0) assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2)) n_selected = mask.sum() if n_selected == 0: return x assert n_selected == y.size(0) if n_selected == x.size(0): return y if x.size(1) < y.size(1): x = expand_2d_or_3d_tensor(x, y.size(1), padding_idx) x[mask] = y elif x.size(1) > y.size(1): x[mask] = torch.tensor(padding_idx).type_as(x) if x.dim() == 2: x[mask, : y.size(1)] = y else: x[mask, : y.size(1), :] = y else: x[mask] = y return x
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/model_utils.py
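The scripted helpers above mostly wrap a masked-assignment pattern; this toy example re-implements the 2-D, equal-width case of fill_tensors() with plain torch so the semantics are visible without importing fairseq.

import torch

padding_idx = 7
x = torch.full((4, 3), padding_idx)            # existing rows, all padding
mask = torch.tensor([True, False, True, False])
y = torch.tensor([[1, 2, 3], [4, 5, 6]])       # one new row per masked position

# Same effect as fill_tensors(x, mask, y, padding_idx) when widths match:
x[mask] = y
print(x)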
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Base classes for various fairseq models. """ import logging from argparse import Namespace from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.data import Dictionary from fairseq.dataclass.utils import ( convert_namespace_to_omegaconf, gen_parser_from_dataclass, ) from fairseq.models import FairseqDecoder, FairseqEncoder from omegaconf import DictConfig from torch import Tensor logger = logging.getLogger(__name__) def check_type(module, expected_type): if hasattr(module, "unwrapped_module"): assert isinstance(module.unwrapped_module, expected_type), \ f"{type(module.unwrapped_module)} != {expected_type}" else: assert isinstance(module, expected_type), f"{type(module)} != {expected_type}" class BaseFairseqModel(nn.Module): """Base class for fairseq models.""" def __init__(self): super().__init__() self._is_generation_fast = False @classmethod def add_args(cls, parser): """Add model-specific arguments to the parser.""" dc = getattr(cls, "__dataclass", None) if dc is not None: # do not set defaults so that settings defaults from various architectures still works gen_parser_from_dataclass(parser, dc(), delete_default=True) @classmethod def build_model(cls, args, task): """Build a new model instance.""" raise NotImplementedError("Model must implement the build_model method") def get_targets(self, sample, net_output): """Get targets from either the sample or the net's output.""" return sample["target"] def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """Get normalized probabilities (or log probs) from a net's output.""" return self.get_normalized_probs_scriptable(net_output, log_probs, sample) # TorchScript doesn't support super() method so that the scriptable Subclass # can't access the base class model in Torchscript. # Current workaround is to add a helper function with different name and # call the helper function from scriptable Subclass. def get_normalized_probs_scriptable( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """Scriptable helper function for get_normalized_probs in ~BaseFairseqModel""" if hasattr(self, "decoder"): return self.decoder.get_normalized_probs(net_output, log_probs, sample) elif torch.is_tensor(net_output): # syntactic sugar for simple models which don't have a decoder # (e.g., the classification tutorial) logits = net_output.float() if log_probs: return F.log_softmax(logits, dim=-1) else: return F.softmax(logits, dim=-1) raise NotImplementedError def extract_features(self, *args, **kwargs): """Similar to *forward* but only return features.""" return self(*args, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return None def load_state_dict( self, state_dict, strict=True, model_cfg: Optional[DictConfig] = None, args: Optional[Namespace] = None, ): """Copies parameters and buffers from *state_dict* into this module and its descendants. Overrides the method in :class:`nn.Module`. Compared with that method this additionally "upgrades" *state_dicts* from old checkpoints. 
""" if model_cfg is None and args is not None: logger.warn("using 'args' is deprecated, please update your code to use dataclass config") model_cfg = convert_namespace_to_omegaconf(args).model self.upgrade_state_dict(state_dict) from fairseq.checkpoint_utils import prune_state_dict new_state_dict = prune_state_dict(state_dict, model_cfg) return super().load_state_dict(new_state_dict, strict) def upgrade_state_dict(self, state_dict): """Upgrade old state dicts to work with newer code.""" self.upgrade_state_dict_named(state_dict, "") def upgrade_state_dict_named(self, state_dict, name): """Upgrade old state dicts to work with newer code. Args: state_dict (dict): state dictionary to upgrade, in place name (str): the state dict key corresponding to the current module """ assert state_dict is not None def do_upgrade(m, prefix): if len(prefix) > 0: prefix += "." for n, c in m.named_children(): name = prefix + n if hasattr(c, "upgrade_state_dict_named"): c.upgrade_state_dict_named(state_dict, name) elif hasattr(c, "upgrade_state_dict"): c.upgrade_state_dict(state_dict) do_upgrade(c, name) do_upgrade(self, name) def set_num_updates(self, num_updates): """State from trainer to pass along to model at every update.""" for m in self.modules(): if hasattr(m, "set_num_updates") and m != self: m.set_num_updates(num_updates) def prepare_for_inference_(self, cfg: DictConfig): """Prepare model for inference.""" kwargs = {} kwargs["beamable_mm_beam_size"] = ( None if getattr(cfg.generation, "no_beamable_mm", False) else getattr(cfg.generation, "beam", 5) ) kwargs["need_attn"] = getattr(cfg.generation, "print_alignment", False) if getattr(cfg.generation, "retain_dropout", False): kwargs["retain_dropout"] = cfg.generation.retain_dropout kwargs["retain_dropout_modules"] = cfg.generation.retain_dropout_modules self.make_generation_fast_(**kwargs) def make_generation_fast_(self, **kwargs): """ Legacy entry point to optimize model for faster generation. Prefer prepare_for_inference_. """ if self._is_generation_fast: return # only apply once self._is_generation_fast = True # remove weight norm from all modules in the network def apply_remove_weight_norm(module): try: nn.utils.remove_weight_norm(module) except (AttributeError, ValueError): # this module didn't have weight norm return self.apply(apply_remove_weight_norm) def apply_make_generation_fast_(module, prefix): if len(prefix) > 0: prefix += "." base_func = BaseFairseqModel.make_generation_fast_ for n, m in module.named_modules(): if ( m != self and hasattr(m, "make_generation_fast_") # don't call this implementation again, e.g., if # children modules also inherit from BaseFairseqModel and m.make_generation_fast_.__func__ is not base_func ): name = prefix + n m.make_generation_fast_(name=name, **kwargs) apply_make_generation_fast_(self, "") def train(mode=True): if mode: raise RuntimeError("cannot train after make_generation_fast") # this model should no longer be used for training self.eval() self.train = train def prepare_for_onnx_export_(self, **kwargs): """Make model exportable via ONNX trace.""" seen = set() def apply_prepare_for_onnx_export_(module): if ( module != self and hasattr(module, "prepare_for_onnx_export_") and module not in seen ): seen.add(module) module.prepare_for_onnx_export_(**kwargs) self.apply(apply_prepare_for_onnx_export_) @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", **kwargs, ): """ Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model file. 
Downloads and caches the pre-trained model file if needed. The base implementation returns a :class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to generate translations or sample from language models. The underlying :class:`~fairseq.models.FairseqModel` can be accessed via the *generator.models* attribute. Other models may override this to implement custom hub interfaces. Args: model_name_or_path (str): either the name of a pre-trained model to load or a path/URL to a pre-trained model state dict checkpoint_file (str, optional): colon-separated list of checkpoint files in the model archive to ensemble (default: 'model.pt') data_name_or_path (str, optional): point args.data to the archive at the given path/URL. Can start with '.' or './' to reuse the model archive path. """ from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), **kwargs, ) logger.info(x["args"]) return hub_utils.GeneratorHubInterface(x["args"], x["task"], x["models"]) @classmethod def hub_models(cls): return {} class FairseqEncoderDecoderModel(BaseFairseqModel): """Base class for encoder-decoder models. Args: encoder (FairseqEncoder): the encoder decoder (FairseqDecoder): the decoder """ def __init__(self, encoder, decoder): super().__init__() self.encoder = encoder self.decoder = decoder check_type(self.encoder, FairseqEncoder) check_type(self.decoder, FairseqDecoder) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. Then, feed the encoder output and previous decoder outputs (i.e., teacher forcing) to the decoder to produce the next outputs:: encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder(prev_output_tokens, encoder_out) Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, **kwargs ) return decoder_out def forward_decoder(self, prev_output_tokens, **kwargs): return self.decoder(prev_output_tokens, **kwargs) def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) features = self.decoder.extract_features( prev_output_tokens, encoder_out=encoder_out, **kwargs ) return features def output_layer(self, features, **kwargs): """Project features to the default output size (typically vocabulary size).""" return self.decoder.output_layer(features, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return (self.encoder.max_positions(), self.decoder.max_positions()) def max_decoder_positions(self): """Maximum length supported by the decoder.""" return self.decoder.max_positions() class FairseqModel(FairseqEncoderDecoderModel): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) utils.deprecation_warning( "FairseqModel is deprecated, please use FairseqEncoderDecoderModel " "or BaseFairseqModel instead", stacklevel=4, ) class FairseqMultiModel(BaseFairseqModel): """Base class for combining multiple encoder-decoder models.""" def __init__(self, encoders, decoders): super().__init__() assert encoders.keys() == decoders.keys() self.keys = list(encoders.keys()) for key in self.keys: check_type(encoders[key], FairseqEncoder) check_type(decoders[key], FairseqDecoder) self.models = nn.ModuleDict( { key: FairseqEncoderDecoderModel(encoders[key], decoders[key]) for key in self.keys } ) @staticmethod def build_shared_embeddings( dicts: Dict[str, Dictionary], langs: List[str], embed_dim: int, build_embedding: callable, pretrained_embed_path: Optional[str] = None, ): """ Helper function to build shared embeddings for a set of languages after checking that all dicts corresponding to those languages are equivalent. Args: dicts: Dict of lang_id to its corresponding Dictionary langs: languages that we want to share embeddings for embed_dim: embedding dimension build_embedding: callable function to actually build the embedding pretrained_embed_path: Optional path to load pretrained embeddings """ shared_dict = dicts[langs[0]] if any(dicts[lang] != shared_dict for lang in langs): raise ValueError( "--share-*-embeddings requires a joined dictionary: " "--share-encoder-embeddings requires a joined source " "dictionary, --share-decoder-embeddings requires a joined " "target dictionary, and --share-all-embeddings requires a " "joint source + target dictionary." ) return build_embedding(shared_dict, embed_dim, pretrained_embed_path) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): raise NotImplementedError def max_positions(self): """Maximum length supported by the model.""" return { key: ( self.models[key].encoder.max_positions(), self.models[key].decoder.max_positions(), ) for key in self.keys } def max_decoder_positions(self): """Maximum length supported by the decoder.""" return min(model.decoder.max_positions() for model in self.models.values()) @property def encoder(self): return self.models[self.keys[0]].encoder @property def decoder(self): return self.models[self.keys[0]].decoder def forward_decoder(self, prev_output_tokens, **kwargs): return self.decoder(prev_output_tokens, **kwargs) def load_state_dict( self, state_dict, strict=True, model_cfg=None, args: Optional[Namespace] = None, ): """Copies parameters and buffers from *state_dict* into this module and its descendants. Overrides the method in :class:`nn.Module`. 
Compared with that method this additionally "upgrades" *state_dicts* from old checkpoints. """ if model_cfg is None and args is not None: logger.warn("using 'args' is deprecated, please update your code to use dataclass config") model_cfg = convert_namespace_to_omegaconf(args).model self.upgrade_state_dict(state_dict) from fairseq.checkpoint_utils import prune_state_dict new_state_dict = prune_state_dict(state_dict, model_cfg) return super().load_state_dict(new_state_dict, strict) class FairseqLanguageModel(BaseFairseqModel): """Base class for decoder-only models. Args: decoder (FairseqDecoder): the decoder """ def __init__(self, decoder): super().__init__() self.decoder = decoder check_type(self.decoder, FairseqDecoder) def forward(self, src_tokens, **kwargs): """ Run the forward pass for a decoder-only model. Feeds a batch of tokens through the decoder to predict the next tokens. Args: src_tokens (LongTensor): tokens on which to condition the decoder, of shape `(batch, tgt_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` Returns: tuple: - the decoder's output of shape `(batch, seq_len, vocab)` - a dictionary with any model-specific outputs """ return self.decoder(src_tokens, **kwargs) def forward_decoder(self, prev_output_tokens, **kwargs): return self.decoder(prev_output_tokens, **kwargs) def extract_features(self, src_tokens, **kwargs): """ Similar to *forward* but only return features. Returns: tuple: - the decoder's features of shape `(batch, seq_len, embed_dim)` - a dictionary with any model-specific outputs """ return self.decoder.extract_features(src_tokens, **kwargs) def output_layer(self, features, **kwargs): """Project features to the default output size (typically vocabulary size).""" return self.decoder.output_layer(features, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return self.decoder.max_positions() def max_decoder_positions(self): """Maximum length supported by the decoder.""" return self.decoder.max_positions() @property def supported_targets(self): return {"future"} class FairseqEncoderModel(BaseFairseqModel): """Base class for encoder-only models. Args: encoder (FairseqEncoder): the encoder """ def __init__(self, encoder): super().__init__() self.encoder = encoder check_type(self.encoder, FairseqEncoder) def forward(self, src_tokens, src_lengths, **kwargs): """ Run the forward pass for a encoder-only model. Feeds a batch of tokens through the encoder to generate features. Args: src_tokens (LongTensor): input tokens of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` Returns: the encoder's output, typically of shape `(batch, src_len, features)` """ return self.encoder(src_tokens, src_lengths, **kwargs) def get_normalized_probs(self, net_output, log_probs, sample=None): """Get normalized probabilities (or log probs) from a net's output.""" encoder_out = net_output["encoder_out"] if torch.is_tensor(encoder_out): logits = encoder_out.float() if log_probs: return F.log_softmax(logits, dim=-1) else: return F.softmax(logits, dim=-1) raise NotImplementedError def max_positions(self): """Maximum length supported by the model.""" return self.encoder.max_positions()
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/fairseq_model.py
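A deliberately tiny BaseFairseqModel subclass, sketched to show how the base-class helpers above are meant to be used; the class name, layer sizes, and classification setup are assumptions for illustration only.

import torch
import torch.nn as nn
from fairseq.models import BaseFairseqModel

class TinyClassifier(BaseFairseqModel):
    def __init__(self, dim=16, num_classes=3):
        super().__init__()
        self.proj = nn.Linear(dim, num_classes)

    @classmethod
    def build_model(cls, args, task):
        # args/task are unused in this toy model
        return cls()

    def forward(self, features):
        return self.proj(features)

model = TinyClassifier()
logits = model(torch.randn(2, 16))
# forward() returns a plain tensor, so the base class takes the
# "simple model without a decoder" branch of get_normalized_probs_scriptable().
probs = model.get_normalized_probs(logits, log_probs=False)
print(probs.sum(dim=-1))   # each row sums to 1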
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, NamedTuple, Optional import torch import torch.nn as nn from torch import Tensor EncoderOut = NamedTuple( "EncoderOut", [ ("encoder_out", Tensor), # T x B x C ("encoder_padding_mask", Optional[Tensor]), # B x T ("encoder_embedding", Optional[Tensor]), # B x T x C ("encoder_states", Optional[List[Tensor]]), # List[T x B x C] ("src_tokens", Optional[Tensor]), # B x T ("src_lengths", Optional[Tensor]), # B x 1 ], ) class FairseqEncoder(nn.Module): """Base class for encoders.""" def __init__(self, dictionary): super().__init__() self.dictionary = dictionary def forward(self, src_tokens, src_lengths=None, **kwargs): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` """ raise NotImplementedError def forward_torchscript(self, net_input: Dict[str, Tensor]): """A TorchScript-compatible version of forward. Encoders which use additional arguments may want to override this method for TorchScript compatibility. """ if torch.jit.is_scripting(): return self.forward( src_tokens=net_input["src_tokens"], src_lengths=net_input["src_lengths"], ) else: return self.forward_non_torchscript(net_input) @torch.jit.unused def forward_non_torchscript(self, net_input: Dict[str, Tensor]): encoder_input = { k: v for k, v in net_input.items() if k != "prev_output_tokens" } return self.forward(**encoder_input) def reorder_encoder_out(self, encoder_out, new_order): """ Reorder encoder output according to `new_order`. Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: `encoder_out` rearranged according to `new_order` """ raise NotImplementedError def max_positions(self): """Maximum input length supported by the encoder.""" return 1e6 # an arbitrary large number def upgrade_state_dict_named(self, state_dict, name): """Upgrade old state dicts to work with newer code.""" return state_dict def set_num_updates(self, num_updates): """State from trainer to pass along to model at every update.""" def _apply(m): if hasattr(m, "set_num_updates") and m != self: m.set_num_updates(num_updates) self.apply(_apply)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/fairseq_encoder.py
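A minimal encoder sketch built on the FairseqEncoder base class above; the dummy dictionary and the mean-pooling forward pass are assumptions purely for illustration.

import torch
import torch.nn as nn
from fairseq.models import FairseqEncoder

class DummyDictionary:
    # Only what the encoder below actually touches (assumed stand-in).
    def __len__(self):
        return 100
    def pad(self):
        return 1

class MeanPoolEncoder(FairseqEncoder):
    def __init__(self, dictionary, embed_dim=32):
        super().__init__(dictionary)
        self.embed = nn.Embedding(len(dictionary), embed_dim, padding_idx=dictionary.pad())

    def forward(self, src_tokens, src_lengths=None, **kwargs):
        x = self.embed(src_tokens)              # B x T x C
        return {"encoder_out": x.mean(dim=1)}   # B x C

    def reorder_encoder_out(self, encoder_out, new_order):
        return {"encoder_out": encoder_out["encoder_out"].index_select(0, new_order)}

enc = MeanPoolEncoder(DummyDictionary())
out = enc(torch.randint(2, 100, (4, 7)))        # 4 sentences, 7 tokens each
print(out["encoder_out"].shape)                 # torch.Size([4, 32])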
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import math import os import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.models import ( CompositeEncoder, FairseqDecoder, FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( DownsampledMultiHeadAttention, FairseqDropout, GradMultiply, LayerNorm, LearnedPositionalEmbedding, LinearizedConvolution, ) logger = logging.getLogger(__name__) @register_model("fconv_self_att") class FConvModelSelfAtt(FairseqEncoderDecoderModel): @classmethod def hub_models(cls): return { "conv.stories.pretrained": { "path": "https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz", "checkpoint_file": "pretrained_checkpoint.pt", "tokenizer": "nltk", }, "conv.stories": { "path": "https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz", "checkpoint_file": "fusion_checkpoint.pt", "tokenizer": "nltk", "pretrained": "True", "pretrained_checkpoint": "./pretrained_checkpoint.pt", }, # Test set containing dictionaries "data.stories": "https://dl.fbaipublicfiles.com/fairseq/data/stories_test.tar.bz2", } def __init__(self, encoder, decoder, pretrained_encoder=None): super().__init__(encoder, decoder) self.encoder.num_attention_layers = sum( layer is not None for layer in decoder.attention ) self.pretrained_encoder = pretrained_encoder if self.pretrained_encoder is None: encoders = {"encoder": encoder} else: encoders = {"encoder": encoder, "pretrained": self.pretrained_encoder} # for fusion model, CompositeEncoder contains both pretrained and training encoders # these are forwarded and then combined in the decoder self.encoder = CompositeEncoder(encoders) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-layers', type=str, metavar='EXPR', help='encoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]') parser.add_argument('--self-attention', type=str, metavar='EXPR', help='decoder self-attention layers, ex: [True] + [False]*5') parser.add_argument('--multihead-attention-nheads', type=int, help='Number of heads to use in attention') parser.add_argument('--multihead-self-attention-nheads', type=int, help='Number of heads to use in self-attention') parser.add_argument('--encoder-attention', type=str, metavar='EXPR', help='encoder attention [True, ...]') parser.add_argument('--encoder-attention-nheads', type=int, help='Number of heads to use in encoder attention') parser.add_argument('--project-input', type=str, metavar='EXPR', help='Use projections in self-attention [True, ...]') parser.add_argument('--gated-attention', type=str, 
metavar='EXPR', help='Use GLU layers in self-attention projections [True, ...]') parser.add_argument('--downsample', type=str, metavar='EXPR', help='Use downsampling in self-attention [True, ...]') parser.add_argument('--pretrained-checkpoint', metavar='DIR', help='path to load checkpoint from pretrained model') parser.add_argument('--pretrained', type=str, metavar='EXPR', help='use pretrained model when training [True, ...]') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" trained_encoder, trained_decoder = None, None pretrained = eval(args.pretrained) if pretrained: logger.info("loading pretrained model") if not os.path.exists(args.pretrained_checkpoint): new_pretrained_checkpoint = os.path.join( args.data, args.pretrained_checkpoint ) if os.path.exists(new_pretrained_checkpoint): args.pretrained_checkpoint = new_pretrained_checkpoint trained_model = checkpoint_utils.load_model_ensemble( filenames=[args.pretrained_checkpoint], task=task, )[0][0] trained_decoder = list(trained_model.children())[1] trained_encoder = list(trained_model.children())[0] # freeze pretrained model for param in trained_decoder.parameters(): param.requires_grad = False for param in trained_encoder.parameters(): param.requires_grad = False encoder = FConvEncoder( task.source_dictionary, embed_dim=args.encoder_embed_dim, convolutions=eval(args.encoder_layers), dropout=args.dropout, max_positions=args.max_source_positions, attention=eval(args.encoder_attention), attention_nheads=args.encoder_attention_nheads, ) decoder = FConvDecoder( task.target_dictionary, embed_dim=args.decoder_embed_dim, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_out_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.max_target_positions, selfattention=eval(args.self_attention), attention_nheads=args.multihead_attention_nheads, selfattention_nheads=args.multihead_self_attention_nheads, project_input=eval(args.project_input), gated_attention=eval(args.gated_attention), downsample=eval(args.downsample), pretrained=pretrained, trained_decoder=trained_decoder, ) model = FConvModelSelfAtt(encoder, decoder, trained_encoder) return model @property def pretrained(self): return self.pretrained_encoder is not None class FConvEncoder(FairseqEncoder): """Convolutional encoder""" def __init__( self, dictionary, embed_dim=512, max_positions=1024, convolutions=((512, 3),) * 20, dropout=0.1, attention=False, attention_nheads=1, ): super().__init__(dictionary) self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.num_attention_layers = None num_embeddings = len(dictionary) self.padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, self.padding_idx, ) def expand_bool_array(val): if isinstance(val, bool): # expand True into [True, True, ...] 
and do the same with False return [val] * len(convolutions) return val attention = expand_bool_array(attention) in_channels = convolutions[0][0] self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.attention = nn.ModuleList() self.attproj = nn.ModuleList() for i, (out_channels, kernel_size) in enumerate(convolutions): self.projections.append( Linear(in_channels, out_channels) if in_channels != out_channels else None ) self.convolutions.append( ConvTBC(in_channels, out_channels * 2, kernel_size, dropout=dropout) ) self.attention.append( SelfAttention(out_channels, embed_dim, attention_nheads) if attention[i] else None ) in_channels = out_channels self.fc2 = Linear(in_channels, embed_dim) def forward(self, src_tokens, src_lengths): # embed tokens and positions x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens) x = self.dropout_module(x) input_embedding = x.transpose(0, 1) # project to size of convolution x = self.fc1(x) encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B if not encoder_padding_mask.any(): encoder_padding_mask = None # B x T x C -> T x B x C x = x.transpose(0, 1) # temporal convolutions for proj, conv, attention in zip( self.projections, self.convolutions, self.attention ): residual = x if proj is None else proj(x) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) x = self.dropout_module(x) padding_l = (conv.kernel_size[0] - 1) // 2 padding_r = conv.kernel_size[0] // 2 x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r)) x = conv(x) x = F.glu(x, dim=2) if attention is not None: x = attention(x) x = (x + residual) * math.sqrt(0.5) # T x B x C -> B x T x C x = x.transpose(1, 0) # project back to size of embedding x = self.fc2(x) if encoder_padding_mask is not None: encoder_padding_mask = encoder_padding_mask.t() # -> B x T x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) # scale gradients (this only affects backward, not forward) x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers)) # add output to input embedding for attention y = (x + input_embedding.transpose(0, 1)) * math.sqrt(0.5) return { "encoder_out": (x, y), "encoder_padding_mask": encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, new_order): encoder_out["encoder_out"] = tuple( eo.index_select(0, new_order) for eo in encoder_out["encoder_out"] ) if encoder_out["encoder_padding_mask"] is not None: encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(0, new_order) if "pretrained" in encoder_out: encoder_out["pretrained"]["encoder_out"] = tuple( eo.index_select(0, new_order) for eo in encoder_out["pretrained"]["encoder_out"] ) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return self.embed_positions.max_positions @with_incremental_state class FConvDecoder(FairseqDecoder): """Convolutional decoder""" def __init__( self, dictionary, embed_dim=512, out_embed_dim=256, max_positions=1024, convolutions=((512, 3),) * 8, attention=True, dropout=0.1, selfattention=False, attention_nheads=1, selfattention_nheads=1, project_input=False, gated_attention=False, downsample=False, pretrained=False, trained_decoder=None, ): super().__init__(dictionary) self.register_buffer("version", torch.Tensor([2])) self.pretrained = pretrained self.pretrained_decoder = trained_decoder self.dropout_module = FairseqDropout( dropout, 
module_name=self.__class__.__name__ ) self.need_attn = True in_channels = convolutions[0][0] def expand_bool_array(val): if isinstance(val, bool): # expand True into [True, True, ...] and do the same with False return [val] * len(convolutions) return val attention = expand_bool_array(attention) selfattention = expand_bool_array(selfattention) if not isinstance(attention, list) or len(attention) != len(convolutions): raise ValueError( "Attention is expected to be a list of booleans of " "length equal to the number of layers." ) num_embeddings = len(dictionary) padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, padding_idx, ) self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.attention = nn.ModuleList() self.selfattention = nn.ModuleList() self.attproj = nn.ModuleList() for i, (out_channels, kernel_size) in enumerate(convolutions): self.projections.append( Linear(in_channels, out_channels) if in_channels != out_channels else None ) self.convolutions.append( LinearizedConv1d( in_channels, out_channels * 2, kernel_size, padding=(kernel_size - 1), dropout=dropout, ) ) self.attention.append( DownsampledMultiHeadAttention( out_channels, embed_dim, attention_nheads, project_input=project_input, gated=False, downsample=False, ) if attention[i] else None ) self.attproj.append( Linear(out_channels, embed_dim, dropout=dropout) if attention[i] else None ) self.selfattention.append( SelfAttention( out_channels, embed_dim, selfattention_nheads, project_input=project_input, gated=gated_attention, downsample=downsample, ) if selfattention[i] else None ) in_channels = out_channels self.fc2 = Linear(in_channels, out_embed_dim) self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout) # model fusion if self.pretrained: # independent gates are learned from the concatenated input self.gate1 = nn.Sequential( Linear(out_embed_dim * 2, out_embed_dim), nn.Sigmoid() ) self.gate2 = nn.Sequential( Linear(out_embed_dim * 2, out_embed_dim), nn.Sigmoid() ) # pretrained and trained models are joined self.joining = nn.Sequential( Linear(out_embed_dim * 2, out_embed_dim * 2), LayerNorm(out_embed_dim * 2), nn.GLU(), Linear(out_embed_dim, out_embed_dim * 2), LayerNorm(out_embed_dim * 2), nn.GLU(), Linear(out_embed_dim, out_embed_dim), LayerNorm(out_embed_dim), ) # pretrained model contains an output layer that is nhid -> vocab size # but the models are combined in their hidden state # the hook stores the output of the pretrained model forward self.pretrained_outputs = {} def save_output(): def hook(a, b, output): self.pretrained_outputs["out"] = output return hook self.pretrained_decoder.fc2.register_forward_hook(save_output()) def forward(self, prev_output_tokens, encoder_out): trained_encoder_out = encoder_out["pretrained"] if self.pretrained else None encoder_out = encoder_out["encoder"]["encoder_out"] encoder_a, encoder_b = self._split_encoder_out(encoder_out) # embed positions positions = self.embed_positions(prev_output_tokens) # embed tokens and positions x = self.embed_tokens(prev_output_tokens) + positions x = self.dropout_module(x) target_embedding = x.transpose(0, 1) # project to size of convolution x = self.fc1(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # temporal convolutions avg_attn_scores = None for proj, conv, attention, selfattention, attproj in zip( self.projections, self.convolutions, 
self.attention, self.selfattention, self.attproj, ): residual = x if proj is None else proj(x) x = self.dropout_module(x) x = conv(x) x = F.glu(x, dim=2) # attention if attention is not None: r = x x, attn_scores = attention( attproj(x) + target_embedding, encoder_a, encoder_b ) x = x + r if not self.training and self.need_attn: if avg_attn_scores is None: avg_attn_scores = attn_scores else: avg_attn_scores.add_(attn_scores) if selfattention is not None: x = selfattention(x) x = (x + residual) * math.sqrt(0.5) # T x B x C -> B x T x C x = x.transpose(0, 1) # project back to size of vocabulary x = self.fc2(x) x = self.dropout_module(x) if not self.pretrained: x = self.fc3(x) # fusion gating if self.pretrained: trained_x, _ = self.pretrained_decoder.forward( prev_output_tokens, trained_encoder_out ) y = torch.cat([x, self.pretrained_outputs["out"]], dim=-1) gate1 = self.gate1(y) gate2 = self.gate2(y) gated_x1 = gate1 * x gated_x2 = gate2 * self.pretrained_outputs["out"] fusion = torch.cat([gated_x1, gated_x2], dim=-1) fusion = self.joining(fusion) fusion_output = self.fc3(fusion) return fusion_output, avg_attn_scores else: return x, avg_attn_scores def max_positions(self): """Maximum output length supported by the decoder.""" return self.embed_positions.max_positions def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def _split_encoder_out(self, encoder_out): """Split and transpose encoder outputs.""" # transpose only once to speed up attention layers encoder_a, encoder_b = encoder_out encoder_a = encoder_a.transpose(0, 1).contiguous() encoder_b = encoder_b.transpose(0, 1).contiguous() result = (encoder_a, encoder_b) return result class SelfAttention(nn.Module): def __init__( self, out_channels, embed_dim, num_heads, project_input=False, gated=False, downsample=False, ): super().__init__() self.attention = DownsampledMultiHeadAttention( out_channels, embed_dim, num_heads, dropout=0, bias=True, project_input=project_input, gated=gated, downsample=downsample, ) self.in_proj_q = Linear(out_channels, embed_dim) self.in_proj_k = Linear(out_channels, embed_dim) self.in_proj_v = Linear(out_channels, embed_dim) self.ln = LayerNorm(out_channels) def forward(self, x): residual = x query = self.in_proj_q(x) key = self.in_proj_k(x) value = self.in_proj_v(x) x, _ = self.attention( query, key, value, mask_future_timesteps=True, use_scalar_bias=True ) return self.ln(x + residual) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) m.weight.data.normal_(0, 0.1) return m def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx): m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) m.weight.data.normal_(0, 0.1) return m def Linear(in_features, out_features, dropout=0.0): """Weight-normalized Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features) m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features)) m.bias.data.zero_() return m def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs): """Weight-normalized Conv1d layer optimized for decoding""" m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) m.weight.data.normal_(mean=0, std=std) m.bias.data.zero_() return m def ConvTBC(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs): """Weight-normalized Conv1d layer""" from fairseq.modules import 
ConvTBC m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) m.weight.data.normal_(mean=0, std=std) m.bias.data.zero_() return m @register_model_architecture("fconv_self_att", "fconv_self_att") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_layers = getattr(args, "encoder_layers", "[(512, 3)] * 3") args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_layers = getattr(args, "decoder_layers", "[(512, 3)] * 8") args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) args.decoder_attention = getattr(args, "decoder_attention", "True") args.self_attention = getattr(args, "self_attention", "False") args.encoder_attention = getattr(args, "encoder_attention", "False") args.multihead_attention_nheads = getattr(args, "multihead_attention_nheads", 1) args.multihead_self_attention_nheads = getattr( args, "multihead_self_attention_nheads", 1 ) args.encoder_attention_nheads = getattr(args, "encoder_attention_nheads", 1) args.project_input = getattr(args, "project_input", "False") args.gated_attention = getattr(args, "gated_attention", "False") args.downsample = getattr(args, "downsample", "False") args.pretrained_checkpoint = getattr(args, "pretrained_checkpoint", "") args.pretrained = getattr(args, "pretrained", "False") @register_model_architecture("fconv_self_att", "fconv_self_att_wp") def fconv_self_att_wp(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_layers = getattr( args, "encoder_layers", "[(128, 3)] * 2 + [(512,3)] * 1" ) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_layers = getattr( args, "decoder_layers", "[(512, 4)] * 4 + [(768, 4)] * 2 + [(1024, 4)] * 1" ) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) args.self_attention = getattr(args, "self_attention", "True") args.multihead_self_attention_nheads = getattr( args, "multihead_self_attention_nheads", 4 ) args.project_input = getattr(args, "project_input", "True") args.gated_attention = getattr(args, "gated_attention", "True") args.downsample = getattr(args, "downsample", "True") base_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/fconv_self_att.py
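The fconv_self_att architectures above pass layer specs and per-layer attention flags around as strings; this standalone snippet shows how those strings are interpreted (same idea as the eval calls and the expand_bool_array helper in the model).

# Layer specs are eval'd into lists of (channels, kernel_size) pairs.
encoder_layers = eval("[(512, 3)] * 3")          # -> [(512, 3), (512, 3), (512, 3)]

# Per-layer flags such as --self-attention are also eval'd strings.
self_attention = eval("[True] + [False] * 2")    # attention only in the first layer

def expand_bool_array(val, num_layers):
    # Same idea as expand_bool_array() in the model: a bare bool applies everywhere.
    return [val] * num_layers if isinstance(val, bool) else val

print(expand_bool_array(False, len(encoder_layers)))   # [False, False, False]
print(expand_bool_array(self_attention, 3))            # passed through unchanged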
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq import utils from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.fconv import FConvDecoder from fairseq.utils import safe_hasattr @register_model("fconv_lm") class FConvLanguageModel(FairseqLanguageModel): def __init__(self, decoder): super().__init__(decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-layers", type=str, metavar="EXPR", help="decoder layers [(dim, kernel_size), ...]", ) parser.add_argument( "--decoder-out-embed-dim", type=int, metavar="N", help="decoder output embedding dimension", ) parser.add_argument( "--adaptive-softmax-cutoff", metavar="EXPR", help="comma separated list of adaptive softmax cutoff points. " "Must be used with adaptive_loss criterion", ) parser.add_argument( "--adaptive-softmax-dropout", type=float, metavar="D", help="sets adaptive softmax dropout for the tail projections", ) parser.add_argument( "--decoder-attention", type=str, metavar="EXPR", help="decoder attention [True, ...]", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_lm_architecture(args) if safe_hasattr(args, "max_target_positions") and not safe_hasattr( args, "tokens_per_sample" ): args.tokens_per_sample = args.max_target_positions decoder = FConvDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.tokens_per_sample, share_embed=False, positional_embeddings=False, adaptive_softmax_cutoff=( utils.eval_str_list(args.adaptive_softmax_cutoff, type=int) if args.criterion == "adaptive_loss" else None ), adaptive_softmax_dropout=args.adaptive_softmax_dropout, ) return FConvLanguageModel(decoder) @register_model_architecture("fconv_lm", "fconv_lm") def base_lm_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128) args.decoder_layers = getattr(args, "decoder_layers", "[(1268, 4)] * 13") args.decoder_attention = getattr(args, "decoder_attention", "False") args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) @register_model_architecture("fconv_lm", "fconv_lm_dauphin_wikitext103") def fconv_lm_dauphin_wikitext103(args): layers = "[(850, 6)] * 3" layers += " + [(850, 1)] * 1" layers += " + [(850, 5)] * 4" layers += " + [(850, 1)] * 1" layers += " + [(850, 4)] * 3" layers += " + [(1024, 4)] * 1" layers += " + [(2048, 4)] * 1" args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 280) args.decoder_layers = getattr(args, "decoder_layers", layers) args.decoder_attention = getattr(args, "decoder_attention", "False") args.adaptive_softmax_cutoff = getattr( args, "adaptive_softmax_cutoff", "10000,20000,200000" ) base_lm_architecture(args) @register_model_architecture("fconv_lm", "fconv_lm_dauphin_gbw") def fconv_lm_dauphin_gbw(args): layers = 
"[(512, 5)]" layers += " + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3" layers += " + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3" layers += " + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6" layers += " + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]" args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128) args.decoder_layers = getattr(args, "decoder_layers", layers) args.decoder_attention = getattr(args, "decoder_attention", "False") args.adaptive_softmax_cutoff = getattr( args, "adaptive_softmax_cutoff", "10000,50000,200000" ) base_lm_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/fconv_lm.py
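Likewise for fconv_lm: a quick standalone look at how a decoder layer spec string and an adaptive-softmax cutoff string end up being parsed (the cutoff parsing here only mirrors utils.eval_str_list in spirit).

layers = "[(1268, 4)] * 13"                      # default fconv_lm decoder spec
convs = eval(layers)
print(len(convs), convs[0])                      # 13 (1268, 4)

cutoff = "10000,20000,200000"                    # adaptive softmax buckets
print([int(c) for c in cutoff.split(",")])       # [10000, 20000, 200000]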
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq import utils from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.lstm import Embedding, LSTMDecoder DEFAULT_MAX_TARGET_POSITIONS = 1e5 @register_model("lstm_lm") class LSTMLanguageModel(FairseqLanguageModel): def __init__(self, decoder): super().__init__(decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-hidden-size', type=int, metavar='N', help='decoder hidden size') parser.add_argument('--decoder-layers', type=int, metavar='N', help='number of decoder layers') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='BOOL', help='decoder attention') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion') parser.add_argument('--residuals', default=False, action='store_true', help='applying residuals between LSTM layers') # Granular dropout settings (if not specified these default to --dropout) parser.add_argument('--decoder-dropout-in', type=float, metavar='D', help='dropout probability for decoder input embedding') parser.add_argument('--decoder-dropout-out', type=float, metavar='D', help='dropout probability for decoder output') parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true', help='share decoder input and output embeddings') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if getattr(args, "max_target_positions", None) is not None: max_target_positions = args.max_target_positions else: max_target_positions = getattr( args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS ) def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) embed_dict = utils.parse_embedding(embed_path) utils.print_embed_overlap(embed_dict, dictionary) return utils.load_embedding(embed_dict, dictionary, embed_tokens) pretrained_decoder_embed = None if args.decoder_embed_path: pretrained_decoder_embed = load_pretrained_embedding_from_file( args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim ) if args.share_decoder_input_output_embed: # double check all parameters combinations are valid if task.source_dictionary != task.target_dictionary: raise ValueError( "--share-decoder-input-output-embeddings requires a joint dictionary" ) if args.decoder_embed_dim != args.decoder_out_embed_dim: raise ValueError( "--share-decoder-input-output-embeddings requires " "--decoder-embed-dim to match --decoder-out-embed-dim" ) decoder = LSTMDecoder( dictionary=task.dictionary, embed_dim=args.decoder_embed_dim, 
hidden_size=args.decoder_hidden_size, out_embed_dim=args.decoder_out_embed_dim, num_layers=args.decoder_layers, dropout_in=args.decoder_dropout_in, dropout_out=args.decoder_dropout_out, attention=False, # decoder-only language model doesn't support attention encoder_output_units=0, pretrained_embed=pretrained_decoder_embed, share_input_output_embed=args.share_decoder_input_output_embed, adaptive_softmax_cutoff=( utils.eval_str_list(args.adaptive_softmax_cutoff, type=int) if args.criterion == "adaptive_loss" else None ), max_target_positions=max_target_positions, residuals=args.residuals, ) return cls(decoder) @register_model_architecture("lstm_lm", "lstm_lm") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_hidden_size = getattr( args, "decoder_hidden_size", args.decoder_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 1) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) args.decoder_attention = getattr(args, "decoder_attention", "0") args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout) args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.adaptive_softmax_cutoff = getattr( args, "adaptive_softmax_cutoff", "10000,50000,200000" ) args.residuals = getattr(args, "residuals", False)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/lstm_lm.py
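The lstm_lm architecture function above relies on the getattr idiom: user-supplied values win, everything else is defaulted, and the hidden size follows the embedding dimension when unset. Below is a minimal, stdlib-only sketch of that pattern; the helper name and the chosen override are illustrative and not part of fairseq.

from argparse import Namespace


def fill_lstm_lm_defaults(args):
    # Same idiom as base_architecture() above: keep user-provided values, default the rest.
    args.dropout = getattr(args, "dropout", 0.1)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
    args.decoder_hidden_size = getattr(
        args, "decoder_hidden_size", args.decoder_embed_dim
    )
    args.decoder_layers = getattr(args, "decoder_layers", 1)
    return args


if __name__ == "__main__":
    # Only decoder_embed_dim is overridden; hidden size follows it, layers default to 1.
    args = fill_lstm_lm_defaults(Namespace(decoder_embed_dim=256))
    print(args.decoder_embed_dim, args.decoder_hidden_size, args.decoder_layers)  # 256 256 1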
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import OrderedDict from fairseq import utils from fairseq.models import ( FairseqMultiModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, base_architecture, ) from fairseq.utils import safe_hasattr @register_model("multilingual_transformer") class MultilingualTransformerModel(FairseqMultiModel): """Train Transformer models for multiple language pairs simultaneously. Requires `--task multilingual_translation`. We inherit all arguments from TransformerModel and assume that all language pairs use a single Transformer architecture. In addition, we provide several options that are specific to the multilingual setting. Args: --share-encoder-embeddings: share encoder embeddings across all source languages --share-decoder-embeddings: share decoder embeddings across all target languages --share-encoders: share all encoder params (incl. embeddings) across all source languages --share-decoders: share all decoder params (incl. embeddings) across all target languages """ def __init__(self, encoders, decoders): super().__init__(encoders, decoders) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" TransformerModel.add_args(parser) parser.add_argument( "--share-encoder-embeddings", action="store_true", help="share encoder embeddings across languages", ) parser.add_argument( "--share-decoder-embeddings", action="store_true", help="share decoder embeddings across languages", ) parser.add_argument( "--share-encoders", action="store_true", help="share encoders across languages", ) parser.add_argument( "--share-decoders", action="store_true", help="share decoders across languages", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" from fairseq.tasks.multilingual_translation import MultilingualTranslationTask assert isinstance(task, MultilingualTranslationTask) # make sure all arguments are present in older models base_multilingual_architecture(args) if not safe_hasattr(args, "max_source_positions"): args.max_source_positions = 1024 if not safe_hasattr(args, "max_target_positions"): args.max_target_positions = 1024 src_langs = [lang_pair.split("-")[0] for lang_pair in task.model_lang_pairs] tgt_langs = [lang_pair.split("-")[1] for lang_pair in task.model_lang_pairs] if args.share_encoders: args.share_encoder_embeddings = True if args.share_decoders: args.share_decoder_embeddings = True def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb # build shared embeddings (if applicable) shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None if args.share_all_embeddings: if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise ValueError( "--share-all-embeddings not compatible with --decoder-embed-path" ) shared_encoder_embed_tokens = 
FairseqMultiModel.build_shared_embeddings( dicts=task.dicts, langs=task.langs, embed_dim=args.encoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.encoder_embed_path, ) shared_decoder_embed_tokens = shared_encoder_embed_tokens args.share_decoder_input_output_embed = True else: if args.share_encoder_embeddings: shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings( dicts=task.dicts, langs=src_langs, embed_dim=args.encoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.encoder_embed_path, ) if args.share_decoder_embeddings: shared_decoder_embed_tokens = FairseqMultiModel.build_shared_embeddings( dicts=task.dicts, langs=tgt_langs, embed_dim=args.decoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.decoder_embed_path, ) # encoders/decoders for each language lang_encoders, lang_decoders = {}, {} def get_encoder(lang): if lang not in lang_encoders: if shared_encoder_embed_tokens is not None: encoder_embed_tokens = shared_encoder_embed_tokens else: encoder_embed_tokens = build_embedding( task.dicts[lang], args.encoder_embed_dim, args.encoder_embed_path, ) lang_encoders[lang] = cls._get_module_class( True, args, task.dicts[lang], encoder_embed_tokens, src_langs ) return lang_encoders[lang] def get_decoder(lang): if lang not in lang_decoders: if shared_decoder_embed_tokens is not None: decoder_embed_tokens = shared_decoder_embed_tokens else: decoder_embed_tokens = build_embedding( task.dicts[lang], args.decoder_embed_dim, args.decoder_embed_path, ) lang_decoders[lang] = cls._get_module_class( False, args, task.dicts[lang], decoder_embed_tokens, tgt_langs ) return lang_decoders[lang] # shared encoders/decoders (if applicable) shared_encoder, shared_decoder = None, None if args.share_encoders: shared_encoder = get_encoder(src_langs[0]) if args.share_decoders: shared_decoder = get_decoder(tgt_langs[0]) encoders, decoders = OrderedDict(), OrderedDict() for lang_pair, src, tgt in zip(task.model_lang_pairs, src_langs, tgt_langs): encoders[lang_pair] = ( shared_encoder if shared_encoder is not None else get_encoder(src) ) decoders[lang_pair] = ( shared_decoder if shared_decoder is not None else get_decoder(tgt) ) return MultilingualTransformerModel(encoders, decoders) @classmethod def _get_module_class(cls, is_encoder, args, lang_dict, embed_tokens, langs): module_class = TransformerEncoder if is_encoder else TransformerDecoder return module_class(args, lang_dict, embed_tokens) def load_state_dict(self, state_dict, strict=True, model_cfg=None): state_dict_subset = state_dict.copy() for k, _ in state_dict.items(): assert k.startswith("models.") lang_pair = k.split(".")[1] if lang_pair not in self.models: del state_dict_subset[k] super().load_state_dict(state_dict_subset, strict=strict, model_cfg=model_cfg) @register_model_architecture("multilingual_transformer", "multilingual_transformer") def base_multilingual_architecture(args): base_architecture(args) args.share_encoder_embeddings = getattr(args, "share_encoder_embeddings", False) args.share_decoder_embeddings = getattr(args, "share_decoder_embeddings", False) args.share_encoders = getattr(args, "share_encoders", False) args.share_decoders = getattr(args, "share_decoders", False) @register_model_architecture( "multilingual_transformer", "multilingual_transformer_iwslt_de_en" ) def multilingual_transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) 
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
    args.encoder_layers = getattr(args, "encoder_layers", 6)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
    args.decoder_layers = getattr(args, "decoder_layers", 6)
    base_multilingual_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/multilingual_transformer.py
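build_model() above constructs at most one encoder per source language and one decoder per target language, then reuses them across all language pairs that mention that language. The sketch below reproduces only that caching pattern with stdlib containers; the factory callables and language pairs are hypothetical.

from collections import OrderedDict


def build_modules(lang_pairs, make_encoder, make_decoder):
    # Lazily build one module per language and reuse it across pairs,
    # mirroring get_encoder()/get_decoder() in MultilingualTransformerModel.build_model().
    lang_encoders, lang_decoders = {}, {}

    def get_encoder(lang):
        if lang not in lang_encoders:
            lang_encoders[lang] = make_encoder(lang)
        return lang_encoders[lang]

    def get_decoder(lang):
        if lang not in lang_decoders:
            lang_decoders[lang] = make_decoder(lang)
        return lang_decoders[lang]

    encoders, decoders = OrderedDict(), OrderedDict()
    for pair in lang_pairs:
        src, tgt = pair.split("-")
        encoders[pair] = get_encoder(src)
        decoders[pair] = get_decoder(tgt)
    return encoders, decoders


if __name__ == "__main__":
    encs, decs = build_modules(
        ["de-en", "fr-en"],
        make_encoder=lambda lang: f"encoder({lang})",
        make_decoder=lambda lang: f"decoder({lang})",
    )
    # Both pairs share the single English decoder object.
    print(decs["de-en"] is decs["fr-en"])  # True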
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Dict, List, Optional, Tuple

import torch.nn as nn
from fairseq import utils
from torch import Tensor


class FairseqDecoder(nn.Module):
    """Base class for decoders."""

    def __init__(self, dictionary):
        super().__init__()
        self.dictionary = dictionary
        self.onnx_trace = False
        self.adaptive_softmax = None

    def forward(self, prev_output_tokens, encoder_out=None, **kwargs):
        """
        Args:
            prev_output_tokens (LongTensor): shifted output tokens of shape
                `(batch, tgt_len)`, for teacher forcing
            encoder_out (dict, optional): output from the encoder, used for
                encoder-side attention

        Returns:
            tuple:
                - the decoder's output of shape `(batch, tgt_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        x, extra = self.extract_features(
            prev_output_tokens, encoder_out=encoder_out, **kwargs
        )
        x = self.output_layer(x)
        return x, extra

    def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
        """
        Returns:
            tuple:
                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
                - a dictionary with any model-specific outputs
        """
        raise NotImplementedError

    def output_layer(self, features, **kwargs):
        """
        Project features to the default output size, e.g., vocabulary size.

        Args:
            features (Tensor): features returned by *extract_features*.
        """
        raise NotImplementedError

    def get_normalized_probs(
        self,
        net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
        log_probs: bool,
        sample: Optional[Dict[str, Tensor]] = None,
    ):
        """Get normalized probabilities (or log probs) from a net's output."""
        return self.get_normalized_probs_scriptable(net_output, log_probs, sample)

    # TorchScript doesn't support super() method so that the scriptable Subclass
    # can't access the base class model in Torchscript.
    # Current workaround is to add a helper function with different name and
    # call the helper function from scriptable Subclass.
    def get_normalized_probs_scriptable(
        self,
        net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
        log_probs: bool,
        sample: Optional[Dict[str, Tensor]] = None,
    ):
        """Get normalized probabilities (or log probs) from a net's output."""
        if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None:
            if sample is not None:
                assert "target" in sample
                target = sample["target"]
            else:
                target = None
            out = self.adaptive_softmax.get_log_prob(net_output[0], target=target)
            return out.exp_() if not log_probs else out

        logits = net_output[0]
        if log_probs:
            return utils.log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
        else:
            return utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace)

    def max_positions(self):
        """Maximum input length supported by the decoder."""
        return 1e6  # an arbitrary large number

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade old state dicts to work with newer code."""
        return state_dict

    def prepare_for_onnx_export_(self):
        self.onnx_trace = True
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/fairseq_decoder.py
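A concrete subclass only needs extract_features() and output_layer(); forward() and get_normalized_probs() come from the base class above. The following is an illustrative toy decoder, not a class that ships with fairseq; it assumes a fairseq Dictionary-like object providing __len__() and pad().

import torch.nn as nn

from fairseq.models import FairseqDecoder


class TinyDecoder(FairseqDecoder):
    """Illustrative decoder: embed the previous tokens, project to the vocabulary."""

    def __init__(self, dictionary, embed_dim=32):
        super().__init__(dictionary)
        self.embed = nn.Embedding(len(dictionary), embed_dim, padding_idx=dictionary.pad())
        self.proj = nn.Linear(embed_dim, len(dictionary))

    def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
        # (batch, tgt_len) -> (batch, tgt_len, embed_dim), plus an extras dict
        return self.embed(prev_output_tokens), {}

    def output_layer(self, features, **kwargs):
        # (batch, tgt_len, embed_dim) -> (batch, tgt_len, vocab)
        return self.proj(features)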
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" import argparse import importlib import os from contextlib import ExitStack from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import merge_with_parent from hydra.core.config_store import ConfigStore from omegaconf import open_dict, OmegaConf from .composite_encoder import CompositeEncoder from .distributed_fairseq_model import DistributedFairseqModel from .fairseq_decoder import FairseqDecoder from .fairseq_encoder import FairseqEncoder from .fairseq_incremental_decoder import FairseqIncrementalDecoder from .fairseq_model import ( BaseFairseqModel, FairseqEncoderDecoderModel, FairseqEncoderModel, FairseqLanguageModel, FairseqModel, FairseqMultiModel, ) MODEL_REGISTRY = {} MODEL_DATACLASS_REGISTRY = {} ARCH_MODEL_REGISTRY = {} ARCH_MODEL_NAME_REGISTRY = {} ARCH_MODEL_INV_REGISTRY = {} ARCH_CONFIG_REGISTRY = {} __all__ = [ "BaseFairseqModel", "CompositeEncoder", "DistributedFairseqModel", "FairseqDecoder", "FairseqEncoder", "FairseqEncoderDecoderModel", "FairseqEncoderModel", "FairseqIncrementalDecoder", "FairseqLanguageModel", "FairseqModel", "FairseqMultiModel", ] def build_model(cfg: FairseqDataclass, task): model = None model_type = getattr(cfg, "_name", None) or getattr(cfg, "arch", None) if not model_type and len(cfg) == 1: # this is hit if config object is nested in directory that is named after model type model_type = next(iter(cfg)) if model_type in MODEL_DATACLASS_REGISTRY: cfg = cfg[model_type] else: raise Exception( "Could not infer model type from directory. Please add _name field to indicate model type. " "Available models: " + str(MODEL_DATACLASS_REGISTRY.keys()) + " Requested model type: " + model_type ) if model_type in ARCH_MODEL_REGISTRY: # case 1: legacy models model = ARCH_MODEL_REGISTRY[model_type] elif model_type in MODEL_DATACLASS_REGISTRY: # case 2: config-driven models model = MODEL_REGISTRY[model_type] if model_type in MODEL_DATACLASS_REGISTRY: # set defaults from dataclass. note that arch name and model name can be the same dc = MODEL_DATACLASS_REGISTRY[model_type] if isinstance(cfg, argparse.Namespace): cfg = dc.from_namespace(cfg) else: cfg = merge_with_parent(dc(), cfg) else: if model_type in ARCH_CONFIG_REGISTRY: with open_dict(cfg) if OmegaConf.is_config(cfg) else ExitStack(): # this calls the different "arch" functions (like base_architecture()) that you indicate # if you specify --arch on the command line. this is only applicable to the old argparse based models # hydra models should expose different architectures via different config files # it will modify the cfg object and default parameters according to the arch ARCH_CONFIG_REGISTRY[model_type](cfg) assert model is not None, ( f"Could not infer model type from {cfg}. " "Available models: {}".format( MODEL_DATACLASS_REGISTRY.keys() ) + f" Requested model type: {model_type}" ) return model.build_model(cfg, task) def register_model(name, dataclass=None): """ New model types can be added to fairseq with the :func:`register_model` function decorator. For example:: @register_model('lstm') class LSTM(FairseqEncoderDecoderModel): (...) .. note:: All models must implement the :class:`BaseFairseqModel` interface. Typically you will extend :class:`FairseqEncoderDecoderModel` for sequence-to-sequence tasks or :class:`FairseqLanguageModel` for language modeling tasks. 
Args: name (str): the name of the model """ def register_model_cls(cls): if name in MODEL_REGISTRY: raise ValueError("Cannot register duplicate model ({})".format(name)) if not issubclass(cls, BaseFairseqModel): raise ValueError( "Model ({}: {}) must extend BaseFairseqModel".format(name, cls.__name__) ) MODEL_REGISTRY[name] = cls if dataclass is not None and not issubclass(dataclass, FairseqDataclass): raise ValueError( "Dataclass {} must extend FairseqDataclass".format(dataclass) ) cls.__dataclass = dataclass if dataclass is not None: MODEL_DATACLASS_REGISTRY[name] = dataclass cs = ConfigStore.instance() node = dataclass() node._name = name cs.store(name=name, group="model", node=node, provider="fairseq") @register_model_architecture(name, name) def noop(_): pass return cls return register_model_cls def register_model_architecture(model_name, arch_name): """ New model architectures can be added to fairseq with the :func:`register_model_architecture` function decorator. After registration, model architectures can be selected with the ``--arch`` command-line argument. For example:: @register_model_architecture('lstm', 'lstm_luong_wmt_en_de') def lstm_luong_wmt_en_de(cfg): args.encoder_embed_dim = getattr(cfg.model, 'encoder_embed_dim', 1000) (...) The decorated function should take a single argument *cfg*, which is a :class:`omegaconf.DictConfig`. The decorated function should modify these arguments in-place to match the desired architecture. Args: model_name (str): the name of the Model (Model must already be registered) arch_name (str): the name of the model architecture (``--arch``) """ def register_model_arch_fn(fn): if model_name not in MODEL_REGISTRY: raise ValueError( "Cannot register model architecture for unknown model type ({})".format( model_name ) ) if arch_name in ARCH_MODEL_REGISTRY: raise ValueError( "Cannot register duplicate model architecture ({})".format(arch_name) ) if not callable(fn): raise ValueError( "Model architecture must be callable ({})".format(arch_name) ) ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name] ARCH_MODEL_NAME_REGISTRY[arch_name] = model_name ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name) ARCH_CONFIG_REGISTRY[arch_name] = fn return fn return register_model_arch_fn def import_models(models_dir, namespace): for file in os.listdir(models_dir): path = os.path.join(models_dir, file) if ( not file.startswith("_") and not file.startswith(".") and (file.endswith(".py") or os.path.isdir(path)) ): model_name = file[: file.find(".py")] if file.endswith(".py") else file importlib.import_module(namespace + "." + model_name) # extra `model_parser` for sphinx if model_name in MODEL_REGISTRY: parser = argparse.ArgumentParser(add_help=False) group_archs = parser.add_argument_group("Named architectures") group_archs.add_argument( "--arch", choices=ARCH_MODEL_INV_REGISTRY[model_name] ) group_args = parser.add_argument_group( "Additional command-line arguments" ) MODEL_REGISTRY[model_name].add_args(group_args) globals()[model_name + "_parser"] = parser # automatically import any Python files in the models/ directory models_dir = os.path.dirname(__file__) import_models(models_dir, "fairseq.models")
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/__init__.py
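The registration docstrings above can be exercised directly. The sketch below registers a hypothetical model and architecture using the real decorators; ToyLM and toy_lm_big are made-up names for illustration only.

from fairseq.models import (
    FairseqLanguageModel,
    register_model,
    register_model_architecture,
)


@register_model("toy_lm")
class ToyLM(FairseqLanguageModel):
    @classmethod
    def build_model(cls, args, task):
        raise NotImplementedError("registration illustration only")


@register_model_architecture("toy_lm", "toy_lm_big")
def toy_lm_big(args):
    # Architecture functions mutate the args/config in place, as documented above.
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)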
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from typing import Any, Dict from fairseq import checkpoint_utils from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( TransformerDecoder, TransformerEncoder, TransformerModel, base_architecture as transformer_base_architecture, ) @register_model("transformer_from_pretrained_xlm") class TransformerFromPretrainedXLMModel(TransformerModel): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" TransformerModel.add_args(parser) parser.add_argument( "--pretrained-xlm-checkpoint", type=str, metavar="STR", help="XLM model to use for initializing transformer encoder and/or decoder", ) parser.add_argument( "--init-encoder-only", action="store_true", help="if set, don't load the XLM weights and embeddings into decoder", ) parser.add_argument( "--init-decoder-only", action="store_true", help="if set, don't load the XLM weights and embeddings into encoder", ) @classmethod def build_model(self, args, task, cls_dictionary=MaskedLMDictionary): assert hasattr(args, "pretrained_xlm_checkpoint"), ( "You must specify a path for --pretrained-xlm-checkpoint to use " "--arch transformer_from_pretrained_xlm" ) assert isinstance(task.source_dictionary, cls_dictionary) and isinstance( task.target_dictionary, cls_dictionary ), ( "You should use a MaskedLMDictionary when using --arch " "transformer_from_pretrained_xlm because the pretrained XLM model " "was trained using data binarized with MaskedLMDictionary. " "For translation, you may want to use --task " "translation_from_pretrained_xlm" ) assert not ( getattr(args, "init_encoder_only", False) and getattr(args, "init_decoder_only", False) ), "Only one of --init-encoder-only and --init-decoder-only can be set." return super().build_model(args, task) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoderFromPretrainedXLM(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoderFromPretrainedXLM(args, tgt_dict, embed_tokens) def upgrade_state_dict_with_xlm_weights( state_dict: Dict[str, Any], pretrained_xlm_checkpoint: str ) -> Dict[str, Any]: """ Load XLM weights into a Transformer encoder or decoder model. Args: state_dict: state dict for either TransformerEncoder or TransformerDecoder pretrained_xlm_checkpoint: checkpoint to load XLM weights from Raises: AssertionError: If architecture (num layers, attention heads, etc.) does not match between the current Transformer encoder or decoder and the pretrained_xlm_checkpoint """ if not os.path.exists(pretrained_xlm_checkpoint): raise IOError("Model file not found: {}".format(pretrained_xlm_checkpoint)) state = checkpoint_utils.load_checkpoint_to_cpu(pretrained_xlm_checkpoint) xlm_state_dict = state["model"] for key in xlm_state_dict.keys(): for search_key in ["embed_tokens", "embed_positions", "layers"]: if search_key in key: subkey = key[key.find(search_key) :] assert subkey in state_dict, ( "{} Transformer encoder / decoder " "state_dict does not contain {}. 
Cannot " "load {} from pretrained XLM checkpoint " "{} into Transformer.".format( str(state_dict.keys()), subkey, key, pretrained_xlm_checkpoint ) ) state_dict[subkey] = xlm_state_dict[key] return state_dict class TransformerEncoderFromPretrainedXLM(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) if getattr(args, "init_decoder_only", False): # Don't load XLM weights for encoder if --init-decoder-only return assert hasattr(args, "pretrained_xlm_checkpoint"), ( "--pretrained-xlm-checkpoint must be specified to load Transformer " "encoder from pretrained XLM" ) xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights( state_dict=self.state_dict(), pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint, ) self.load_state_dict(xlm_loaded_state_dict, strict=True) class TransformerDecoderFromPretrainedXLM(TransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(args, dictionary, embed_tokens, no_encoder_attn) if getattr(args, "init_encoder_only", False): # Don't load XLM weights for decoder if --init-encoder-only return assert hasattr(args, "pretrained_xlm_checkpoint"), ( "--pretrained-xlm-checkpoint must be specified to load Transformer " "decoder from pretrained XLM" ) xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights( state_dict=self.state_dict(), pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint, ) self.load_state_dict(xlm_loaded_state_dict, strict=True) @register_model_architecture( "transformer_from_pretrained_xlm", "transformer_from_pretrained_xlm" ) def base_architecture(args): transformer_base_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/transformer_from_pretrained_xlm.py
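upgrade_state_dict_with_xlm_weights() copies any XLM parameter whose key contains embed_tokens, embed_positions, or layers into the matching suffix key of the target state dict, asserting that the suffix already exists there. A stdlib-only sketch of that matching rule, with hypothetical keys and dummy values in place of tensors:

def copy_matching_keys(target_state, xlm_state):
    # Mirror the suffix-matching loop in upgrade_state_dict_with_xlm_weights().
    for key, value in xlm_state.items():
        for search_key in ("embed_tokens", "embed_positions", "layers"):
            if search_key in key:
                subkey = key[key.find(search_key):]
                assert subkey in target_state, f"missing {subkey}"
                target_state[subkey] = value
    return target_state


if __name__ == "__main__":
    target = {"embed_tokens.weight": 0, "layers.0.fc1.weight": 0}
    xlm = {"decoder.embed_tokens.weight": 1, "decoder.layers.0.fc1.weight": 2}
    print(copy_matching_keys(target, xlm))
    # {'embed_tokens.weight': 1, 'layers.0.fc1.weight': 2}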
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import signal import threading import torch import torch.nn as nn from torch.nn.parallel import DistributedDataParallel from fairseq.distributed import ( DistributedTimeoutWrapper, LegacyDistributedDataParallel, ModuleProxyWrapper, TPUDistributedDataParallel, ) logger = logging.getLogger(__name__) _GOSSIP_DISABLED = False try: import gossip except ImportError: _GOSSIP_DISABLED = True def DistributedFairseqModel(args, model, process_group, device): """ Wrap a *model* to support distributed data parallel training. This is similar to the built-in DistributedDataParallel, but allows additional configuration of the DistributedDataParallel class to use, and also provides easier access to the wrapped model by forwarding requests for missing attributes to the wrapped model. Args: args (argparse.Namespace): fairseq args model (BaseFairseqModel): model to wrap process_group: the c10d process group to be used for distributed data parallel all-reduction. device: device to move model to """ assert isinstance(model, nn.Module) if args.tpu: wrapped_model = TPUDistributedDataParallel( module=model.to(device), process_group=process_group, ) # forward missing getattr and state_dict/load_state_dict to orig model wrapped_model = ModuleProxyWrapper(wrapped_model) elif args.ddp_backend in {"c10d", "pytorch_ddp"}: wrapped_model = DistributedDataParallel( module=model.to(device), device_ids=[args.device_id], output_device=args.device_id, broadcast_buffers=args.broadcast_buffers, bucket_cap_mb=args.bucket_cap_mb, process_group=process_group, find_unused_parameters=args.find_unused_parameters, gradient_as_bucket_view=args.gradient_as_bucket_view, ) if args.ddp_comm_hook == "fp16": logger.info("enable fp16 communication hook in DDP") try: from torch.distributed.algorithms.ddp_comm_hooks import ( register_ddp_comm_hook, DDPCommHookType, ) except: logger.error( "Could not import from torch.distributed.algorithms.ddp_comm_hooks; you may need to update your pytorch version" ) raise register_ddp_comm_hook(DDPCommHookType.FP16_COMPRESS, wrapped_model) # forward missing getattr and state_dict/load_state_dict to orig model wrapped_model = ModuleProxyWrapper(wrapped_model) elif args.ddp_backend in {"no_c10d", "legacy_ddp"}: wrapped_model = LegacyDistributedDataParallel( module=model.to(device), buffer_size=2 ** 28, process_group=process_group, ) # forward missing getattr and state_dict/load_state_dict to orig model wrapped_model = ModuleProxyWrapper(wrapped_model) elif args.ddp_backend == "slow_mo": if _GOSSIP_DISABLED: raise ImportError( "Cannot find gossip library. 
Please install from: " "github.com/facebookresearch/stochastic_gradient_push" ) # The values of slowmo_momentum below were obtained by tuning on the # En-De 16 dataset by training the transformer_wmt_en_de_large model if args.slowmo_momentum is None: if args.distributed_world_size <= 16: args.slowmo_momentum = 0.0 elif args.distributed_world_size <= 32: args.slowmo_momentum = 0.2 elif args.distributed_world_size <= 64: args.slowmo_momentum = 0.5 else: args.slowmo_momentum = 0.6 wrapped_model = gossip.GossipDataParallel( module=model.to(device), device_ids=[args.device_id], output_device=args.device_id, broadcast_buffers=args.broadcast_buffers, nprocs_per_node=args.nprocs_per_node, slowmo_momentum=args.slowmo_momentum, localsgd=(args.slowmo_algorithm == "LocalSGD"), localsgd_frequency=args.localsgd_frequency, ) # forward missing getattr and state_dict/load_state_dict to orig model wrapped_model = ModuleProxyWrapper(wrapped_model) elif args.ddp_backend == "fully_sharded": try: from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP except ImportError: raise ImportError( "Cannot find FullyShardedDataParallel. " "Please install fairscale with: pip install fairscale" ) assert isinstance(model, FSDP), "expected model to already be wrapped in FSDP" wrapped_model = model if args.memory_efficient_fp16: wrapped_model = wrapped_model.half() if not args.cpu_offload: wrapped_model = wrapped_model.to(device=device) else: raise ValueError("Unknown --ddp-backend: " + args.ddp_backend) # kill hung distributed jobs after a timeout if getattr(args, "heartbeat_timeout", -1) > 0: wrapped_model = DistributedTimeoutWrapper( wrapped_model, timeout=getattr(args, "heartbeat_timeout", -1) ) return wrapped_model
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/distributed_fairseq_model.py
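When --slowmo-momentum is left unset, DistributedFairseqModel() above picks a default from the distributed world size using thresholds tuned on WMT En-De. A trivial sketch of that ladder (the function name is illustrative):

def default_slowmo_momentum(world_size):
    # Same thresholds as the if/elif ladder in DistributedFairseqModel().
    if world_size <= 16:
        return 0.0
    elif world_size <= 32:
        return 0.2
    elif world_size <= 64:
        return 0.5
    return 0.6


if __name__ == "__main__":
    print([default_slowmo_momentum(n) for n in (8, 32, 64, 128)])  # [0.0, 0.2, 0.5, 0.6]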
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II DEFAULT_MAX_TARGET_POSITIONS = 1024 @dataclass class TransformerLanguageModelConfig(FairseqDataclass): activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( default="relu", metadata={"help": "activation function to use"} ) dropout: float = field(default=0.1, metadata={"help": "dropout probability"}) attention_dropout: float = field( default=0.0, metadata={"help": "dropout probability for attention weights"} ) activation_dropout: float = field( default=0.0, metadata={"help": "dropout probability after activation in FFN."} ) relu_dropout: float = field( default=0.0, metadata={"help": "dropout probability after activation in FFN."} ) decoder_embed_dim: int = field( default=512, metadata={"help": "decoder embedding dimension"} ) decoder_output_dim: int = field( default=512, metadata={"help": "decoder output dimension"} ) decoder_input_dim: int = field( default=512, metadata={"help": "decoder input dimension"} ) decoder_ffn_embed_dim: int = field( default=2048, metadata={"help": "decoder embedding dimension for FFN"} ) decoder_layers: int = field(default=6, metadata={"help": "num decoder layers"}) decoder_attention_heads: int = field( default=8, metadata={"help": "num decoder attention heads"} ) decoder_normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each decoder block"} ) no_decoder_final_norm: bool = field( default=False, metadata={"help": "don't add an extra layernorm after the last decoder block"}, ) adaptive_softmax_cutoff: Optional[str] = field( default=None, metadata={ "help": "comma separated list of adaptive softmax cutoff points. 
" "Must be used with adaptive_loss criterion" }, ) adaptive_softmax_dropout: float = field( default=0, metadata={"help": "sets adaptive softmax dropout for the tail projections"}, ) adaptive_softmax_factor: float = field( default=4, metadata={"help": "adaptive input factor"} ) no_token_positional_embeddings: bool = field( default=False, metadata={ "help": "if set, disables positional embeddings (outside self attention)" }, ) share_decoder_input_output_embed: bool = field( default=False, metadata={"help": "share decoder input and output embeddings"} ) character_embeddings: bool = field( default=False, metadata={ "help": "if set, uses character embedding convolutions to produce token embeddings" }, ) character_filters: str = field( default="[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]", metadata={"help": "size of character embeddings"}, ) character_embedding_dim: int = field( default=4, metadata={"help": "size of character embeddings"} ) char_embedder_highway_layers: int = field( default=2, metadata={"help": "number of highway layers for character token embeddder"}, ) adaptive_input: bool = field( default=False, metadata={"help": "if set, uses adaptive input"} ) adaptive_input_factor: float = field( default=4, metadata={"help": "adaptive input factor"} ) adaptive_input_cutoff: Optional[str] = field( default=None, metadata={"help": "comma separated list of adaptive input cutoff points."}, ) tie_adaptive_weights: bool = field( default=False, metadata={ "help": "if set, ties the weights of adaptive softmax and adaptive input" }, ) tie_adaptive_proj: bool = field( default=False, metadata={ "help": "if set, ties the projection weights of adaptive softmax and adaptive input" }, ) decoder_learned_pos: bool = field( default=False, metadata={"help": "use learned positional embeddings in the decoder"}, ) layernorm_embedding: bool = field( default=False, metadata={"help": "add layernorm to embedding"} ) no_scale_embedding: bool = field( default=False, metadata={"help": "if True, dont scale embeddings"} ) checkpoint_activations: bool = field( default=False, metadata={"help": "checkpoint activations at each layer"} ) offload_activations: bool = field( default=False, metadata={"help": "move checkpointed activations to CPU after they are used."}, ) # config for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) decoder_layerdrop: float = field( default=0.0, metadata={"help": "LayerDrop probability for decoder"} ) decoder_layers_to_keep: Optional[str] = field( default=None, metadata={ "help": "which layers to *keep* when pruning as a comma-separated list" }, ) # config for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020) quant_noise_pq: float = field( default=0.0, metadata={"help": "iterative PQ quantization noise at training time"}, ) quant_noise_pq_block_size: int = field( default=8, metadata={"help": "block size of quantization noise at training time"}, ) quant_noise_scalar: float = field( default=0.0, metadata={ "help": "scalar quantization noise and scalar quantization at training time" }, ) # config for Fully Sharded Data Parallel (FSDP) training min_params_to_wrap: int = field( default=DEFAULT_MIN_PARAMS_TO_WRAP, metadata={ "help": ( "minimum number of params for a layer to be wrapped with FSDP() when " "training with --ddp-backend=fully_sharded. Smaller values will " "improve memory efficiency, but may make torch.distributed " "communication less efficient due to smaller input sizes. 
This option " "is set to 0 (i.e., always wrap) when --checkpoint-activations or " "--offload-activations are passed." ) } ) # config for "BASE Layers: Simplifying Training of Large, Sparse Models" base_layers: Optional[int] = field( default=0, metadata={"help": "number of BASE layers in total"} ) base_sublayers: Optional[int] = field( default=1, metadata={"help": "number of sublayers in each BASE layer"} ) base_shuffle: Optional[int] = field( default=1, metadata={"help": "shuffle tokens between workers before computing assignment"} ) # NormFormer scale_fc: Optional[bool] = field(default=False, metadata={"help": 'Insert LayerNorm between fully connected layers'}) scale_attn: Optional[bool] = field(default=False, metadata={"help": 'Insert LayerNorm after attention'}) scale_heads: Optional[bool] = field(default=False, metadata={"help": 'Learn a scale coefficient for each attention head'}) scale_resids: Optional[bool] = field(default=False, metadata={"help": 'Learn a scale coefficient for each residual connection'}) # options from other parts of the config add_bos_token: bool = II("task.add_bos_token") tokens_per_sample: int = II("task.tokens_per_sample") max_target_positions: Optional[int] = II("task.max_target_positions") tpu: bool = II("common.tpu") @register_model("transformer_lm", dataclass=TransformerLanguageModelConfig) class TransformerLanguageModel(FairseqLanguageModel): @classmethod def hub_models(cls): def moses_fastbpe(path): return {"path": path, "tokenizer": "moses", "bpe": "fastbpe"} def spm(path): return {"path": path, "tokenizer": "space", "bpe": "sentencepiece"} return { "transformer_lm.gbw.adaptive_huge": "https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2", "transformer_lm.wiki103.adaptive": "https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.v2.tar.bz2", "transformer_lm.wmt19.en": moses_fastbpe( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.bz2" ), "transformer_lm.wmt19.de": moses_fastbpe( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.de.tar.bz2" ), "transformer_lm.wmt19.ru": moses_fastbpe( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.ru.tar.bz2" ), "transformer_lm.wmt20.en": spm( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.en.tar.gz" ), "transformer_lm.wmt20.ta": spm( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.ta.tar.gz" ), "transformer_lm.wmt20.iu.news": spm( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.iu.news.tar.gz" ), "transformer_lm.wmt20.iu.nh": spm( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.iu.nh.tar.gz" ), } def __init__(self, decoder): super().__init__(decoder) @classmethod def build_model(cls, args, task): """Build a new model instance.""" if args.decoder_layers_to_keep: args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) if safe_getattr(args, "max_target_positions", None) is None: args.max_target_positions = safe_getattr( args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS ) if args.character_embeddings: embed_tokens = CharacterTokenEmbedder( task.source_dictionary, eval(args.character_filters), args.character_embedding_dim, args.decoder_embed_dim, args.char_embedder_highway_layers, ) elif args.adaptive_input: embed_tokens = AdaptiveInput( len(task.source_dictionary), task.source_dictionary.pad(), args.decoder_input_dim, args.adaptive_input_factor, args.decoder_embed_dim, options.eval_str_list(args.adaptive_input_cutoff, type=int), args.quant_noise_pq, args.quant_noise_pq_block_size, ) else: embed_tokens = 
cls.build_embedding( args, task.source_dictionary, args.decoder_input_dim ) if args.tie_adaptive_weights: assert args.adaptive_input assert args.adaptive_input_factor == args.adaptive_softmax_factor assert ( args.adaptive_softmax_cutoff == args.adaptive_input_cutoff ), "{} != {}".format( args.adaptive_softmax_cutoff, args.adaptive_input_cutoff ) assert args.decoder_input_dim == args.decoder_output_dim decoder = TransformerDecoder( args, task.target_dictionary, embed_tokens, no_encoder_attn=True ) return cls(decoder) @classmethod def build_embedding(cls, args, dictionary, embed_dim, path=None): embed_tokens = Embedding(len(dictionary), embed_dim, dictionary.pad()) return embed_tokens def base_lm_architecture(args): # backward compatibility for older model checkpoints if safe_hasattr(args, "no_tie_adaptive_proj"): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if safe_hasattr(args, "decoder_final_norm"): args.no_decoder_final_norm = not args.decoder_final_norm args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = safe_getattr(args, "decoder_layers", 6) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) args.activation_fn = safe_getattr(args, "activation_fn", "relu") args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0) args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None) args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) args.base_layers = safe_getattr(args, "base_layers", 0) args.base_sublayers = safe_getattr(args, "base_sublayers", 1) args.base_shuffle = safe_getattr(args, "base_shuffle", False) args.add_bos_token = safe_getattr(args, "add_bos_token", False) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) args.share_decoder_input_output_embed = safe_getattr( args, "share_decoder_input_output_embed", False ) args.character_embeddings = safe_getattr(args, "character_embeddings", False) args.decoder_output_dim = safe_getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = safe_getattr(args, "decoder_input_dim", args.decoder_embed_dim) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False) args.adaptive_input = safe_getattr(args, "adaptive_input", False) args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False) 
args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False) args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", False) args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False) args.offload_activations = safe_getattr(args, "offload_activations", False) args.scale_fc = safe_getattr(args, 'scale_fc', False) args.scale_attn = safe_getattr(args, 'scale_attn', False) args.scale_heads = safe_getattr(args, 'scale_heads', False) args.scale_resids = safe_getattr(args, 'scale_resids', False) if args.offload_activations: args.checkpoint_activations = True @register_model_architecture("transformer_lm", "transformer_lm_big") def transformer_lm_big(args): args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_wiki103") @register_model_architecture("transformer_lm", "transformer_lm_baevski_wiki103") def transformer_lm_baevski_wiki103(args): args.decoder_layers = safe_getattr(args, "decoder_layers", 16) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.dropout = safe_getattr(args, "dropout", 0.3) args.adaptive_input = safe_getattr(args, "adaptive_input", True) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", True) args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", "20000,60000") args.adaptive_softmax_cutoff = safe_getattr( args, "adaptive_softmax_cutoff", "20000,60000" ) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0.2) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_dropout = safe_getattr(args, "activation_dropout", 0.1) args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", True) transformer_lm_big(args) @register_model_architecture("transformer_lm", "transformer_lm_gbw") @register_model_architecture("transformer_lm", "transformer_lm_baevski_gbw") def transformer_lm_baevski_gbw(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) transformer_lm_big(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt") def transformer_lm_gpt(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 3072) args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt2_small") def transformer_lm_gpt2_small(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 
4096) args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt2_tiny") def transformer_lm_gpt2_tiny(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 64) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 64) args.decoder_layers = safe_getattr(args, "decoder_layers", 2) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 1) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt2_medium") def transformer_lm_gpt2_medium(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1280) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 5120) args.decoder_layers = safe_getattr(args, "decoder_layers", 36) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 20) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt2_big") def transformer_lm_gpt2_big(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1600) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 6400) args.decoder_layers = safe_getattr(args, "decoder_layers", 48) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 25) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args) def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_small") def transformer_lm_gpt3_small(args): # 125M params args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_medium") def transformer_lm_gpt3_medium(args): # 350M params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) 
base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_large") def transformer_lm_gpt3_large(args): # 760M params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1536) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_xl") def transformer_lm_gpt3_xl(args): # 1.3B params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2048) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_2_7") def transformer_lm_gpt3_2_7(args): # 2.7B params args.decoder_layers = safe_getattr(args, "decoder_layers", 32) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2560) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_6_7") def transformer_lm_gpt3_6_7(args): # 6.7B params args.decoder_layers = safe_getattr(args, "decoder_layers", 32) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 4096) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_13") def transformer_lm_gpt3_13(args): # 13B params args.decoder_layers = safe_getattr(args, "decoder_layers", 40) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 5120) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 40) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_175") def transformer_lm_gpt3_175(args): # 175B params args.decoder_layers = safe_getattr(args, "decoder_layers", 96) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 12288) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 96) base_gpt3_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/transformer_lm.py
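The GPT-3 style architecture functions above only pin depth, width, and head count; base_gpt3_architecture() then derives the FFN dimension (4x the embedding), switches to learned positional embeddings, and ties input/output embeddings. A hypothetical, self-contained sketch of how transformer_lm_gpt3_small resolves, using plain getattr instead of fairseq's safe_getattr:

from argparse import Namespace


def gpt3_small(args):
    # Size-specific settings, mirroring transformer_lm_gpt3_small() above.
    args.decoder_layers = getattr(args, "decoder_layers", 12)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12)
    # Derived defaults in the style of base_gpt3_architecture().
    args.decoder_ffn_embed_dim = getattr(
        args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4
    )
    args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True)
    args.share_decoder_input_output_embed = True
    return args


if __name__ == "__main__":
    cfg = gpt3_small(Namespace())
    print(cfg.decoder_layers, cfg.decoder_embed_dim, cfg.decoder_ffn_embed_dim)  # 12 768 3072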
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, BeamableMM, FairseqDropout, GradMultiply, LearnedPositionalEmbedding, LinearizedConvolution, ) @register_model("fconv") class FConvModel(FairseqEncoderDecoderModel): """ A fully convolutional model, i.e. a convolutional encoder and a convolutional decoder, as described in `"Convolutional Sequence to Sequence Learning" (Gehring et al., 2017) <https://arxiv.org/abs/1705.03122>`_. Args: encoder (FConvEncoder): the encoder decoder (FConvDecoder): the decoder The Convolutional model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.fconv_parser :prog: """ @classmethod def hub_models(cls): def moses_subword(path): return { "path": path, "tokenizer": "moses", "bpe": "subword_nmt", } return { "conv.wmt14.en-fr": moses_subword( "https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2" ), "conv.wmt14.en-de": moses_subword( "https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-de.fconv-py.tar.bz2" ), "conv.wmt17.en-de": moses_subword( "https://dl.fbaipublicfiles.com/fairseq/models/wmt17.v2.en-de.fconv-py.tar.bz2" ), } def __init__(self, encoder, decoder): super().__init__(encoder, decoder) self.encoder.num_attention_layers = sum( layer is not None for layer in decoder.attention ) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-layers', type=str, metavar='EXPR', help='encoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]') parser.add_argument('--share-input-output-embed', action='store_true', help='share input and output embeddings (requires' ' --decoder-out-embed-dim and --decoder-embed-dim' ' to be equal)') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted (in case there are any new ones) base_architecture(args) encoder_embed_dict = None if args.encoder_embed_path: encoder_embed_dict = utils.parse_embedding(args.encoder_embed_path) utils.print_embed_overlap(encoder_embed_dict, task.source_dictionary) decoder_embed_dict = None if args.decoder_embed_path: decoder_embed_dict = utils.parse_embedding(args.decoder_embed_path) 
utils.print_embed_overlap(decoder_embed_dict, task.target_dictionary) encoder = FConvEncoder( dictionary=task.source_dictionary, embed_dim=args.encoder_embed_dim, embed_dict=encoder_embed_dict, convolutions=eval(args.encoder_layers), dropout=args.dropout, max_positions=args.max_source_positions, ) decoder = FConvDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, embed_dict=decoder_embed_dict, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_out_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.max_target_positions, share_embed=args.share_input_output_embed, ) return FConvModel(encoder, decoder) class FConvEncoder(FairseqEncoder): """ Convolutional encoder consisting of `len(convolutions)` layers. Args: dictionary (~fairseq.data.Dictionary): encoding dictionary embed_dim (int, optional): embedding dimension embed_dict (str, optional): filename from which to load pre-trained embeddings max_positions (int, optional): maximum supported input sequence length convolutions (list, optional): the convolutional layer structure. Each list item `i` corresponds to convolutional layer `i`. Layers are given as ``(out_channels, kernel_width, [residual])``. Residual connections are added between layers when ``residual=1`` (which is the default behavior). dropout (float, optional): dropout to be applied before each conv layer """ def __init__( self, dictionary, embed_dim=512, embed_dict=None, max_positions=1024, convolutions=((512, 3),) * 20, dropout=0.1, ): super().__init__(dictionary) self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.num_attention_layers = None num_embeddings = len(dictionary) self.padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) if embed_dict: self.embed_tokens = utils.load_embedding( embed_dict, self.dictionary, self.embed_tokens ) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, self.padding_idx, ) convolutions = extend_conv_spec(convolutions) in_channels = convolutions[0][0] self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.residuals = [] layer_in_channels = [in_channels] for _, (out_channels, kernel_size, residual) in enumerate(convolutions): if residual == 0: residual_dim = out_channels else: residual_dim = layer_in_channels[-residual] self.projections.append( Linear(residual_dim, out_channels) if residual_dim != out_channels else None ) if kernel_size % 2 == 1: padding = kernel_size // 2 else: padding = 0 self.convolutions.append( ConvTBC( in_channels, out_channels * 2, kernel_size, dropout=dropout, padding=padding, ) ) self.residuals.append(residual) in_channels = out_channels layer_in_channels.append(out_channels) self.fc2 = Linear(in_channels, embed_dim) def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (tuple): a tuple with two elements, where the first element is the last encoder layer's output and the second element is the same quantity summed with the input embedding (used for attention). The shape of both tensors is `(batch, src_len, embed_dim)`. 
- **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens) x = self.dropout_module(x) input_embedding = x # project to size of convolution x = self.fc1(x) # used to mask padding in input encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B if not encoder_padding_mask.any(): encoder_padding_mask = None # B x T x C -> T x B x C x = x.transpose(0, 1) residuals = [x] # temporal convolutions for proj, conv, res_layer in zip( self.projections, self.convolutions, self.residuals ): if res_layer > 0: residual = residuals[-res_layer] residual = residual if proj is None else proj(residual) else: residual = None if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) x = self.dropout_module(x) if conv.kernel_size[0] % 2 == 1: # padding is implicit in the conv x = conv(x) else: padding_l = (conv.kernel_size[0] - 1) // 2 padding_r = conv.kernel_size[0] // 2 x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r)) x = conv(x) x = F.glu(x, dim=2) if residual is not None: x = (x + residual) * math.sqrt(0.5) residuals.append(x) # T x B x C -> B x T x C x = x.transpose(1, 0) # project back to size of embedding x = self.fc2(x) if encoder_padding_mask is not None: encoder_padding_mask = encoder_padding_mask.t() # -> B x T x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) # scale gradients (this only affects backward, not forward) x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers)) # add output to input embedding for attention y = (x + input_embedding) * math.sqrt(0.5) return { "encoder_out": (x, y), "encoder_padding_mask": encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, new_order): if encoder_out["encoder_out"] is not None: encoder_out["encoder_out"] = ( encoder_out["encoder_out"][0].index_select(0, new_order), encoder_out["encoder_out"][1].index_select(0, new_order), ) if encoder_out["encoder_padding_mask"] is not None: encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return self.embed_positions.max_positions class AttentionLayer(nn.Module): def __init__(self, conv_channels, embed_dim, bmm=None): super().__init__() # projects from output of convolution to embedding dimension self.in_projection = Linear(conv_channels, embed_dim) # projects from embedding dimension to convolution size self.out_projection = Linear(embed_dim, conv_channels) self.bmm = bmm if bmm is not None else torch.bmm def forward(self, x, target_embedding, encoder_out, encoder_padding_mask): residual = x # attention x = (self.in_projection(x) + target_embedding) * math.sqrt(0.5) x = self.bmm(x, encoder_out[0]) # don't attend over padding if encoder_padding_mask is not None: x = ( x.float() .masked_fill(encoder_padding_mask.unsqueeze(1), float("-inf")) .type_as(x) ) # FP16 support: cast to float and back # softmax over last dim sz = x.size() x = F.softmax(x.view(sz[0] * sz[1], sz[2]), dim=1) x = x.view(sz) attn_scores = x x = self.bmm(x, encoder_out[1]) # scale attention output (respecting potentially different lengths) s = encoder_out[1].size(1) if encoder_padding_mask is None: x = x * (s * math.sqrt(1.0 / s)) else: s = s - encoder_padding_mask.type_as(x).sum( dim=1, keepdim=True ) # exclude padding s = s.unsqueeze(-1) x = x * (s * s.rsqrt()) # project back 
x = (self.out_projection(x) + residual) * math.sqrt(0.5) return x, attn_scores def make_generation_fast_(self, beamable_mm_beam_size=None, **kwargs): """Replace torch.bmm with BeamableMM.""" if beamable_mm_beam_size is not None: del self.bmm self.add_module("bmm", BeamableMM(beamable_mm_beam_size)) class FConvDecoder(FairseqIncrementalDecoder): """Convolutional decoder""" def __init__( self, dictionary, embed_dim=512, embed_dict=None, out_embed_dim=256, max_positions=1024, convolutions=((512, 3),) * 20, attention=True, dropout=0.1, share_embed=False, positional_embeddings=True, adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0.0, ): super().__init__(dictionary) self.register_buffer("version", torch.Tensor([2])) self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.need_attn = True convolutions = extend_conv_spec(convolutions) in_channels = convolutions[0][0] if isinstance(attention, bool): # expand True into [True, True, ...] and do the same with False attention = [attention] * len(convolutions) if not isinstance(attention, list) or len(attention) != len(convolutions): raise ValueError( "Attention is expected to be a list of booleans of " "length equal to the number of layers." ) num_embeddings = len(dictionary) padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) if embed_dict: self.embed_tokens = utils.load_embedding( embed_dict, self.dictionary, self.embed_tokens ) self.embed_positions = ( PositionalEmbedding( max_positions, embed_dim, padding_idx, ) if positional_embeddings else None ) self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.attention = nn.ModuleList() self.residuals = [] layer_in_channels = [in_channels] for i, (out_channels, kernel_size, residual) in enumerate(convolutions): if residual == 0: residual_dim = out_channels else: residual_dim = layer_in_channels[-residual] self.projections.append( Linear(residual_dim, out_channels) if residual_dim != out_channels else None ) self.convolutions.append( LinearizedConv1d( in_channels, out_channels * 2, kernel_size, padding=(kernel_size - 1), dropout=dropout, ) ) self.attention.append( AttentionLayer(out_channels, embed_dim) if attention[i] else None ) self.residuals.append(residual) in_channels = out_channels layer_in_channels.append(out_channels) self.adaptive_softmax = None self.fc2 = self.fc3 = None if adaptive_softmax_cutoff is not None: assert not share_embed self.adaptive_softmax = AdaptiveSoftmax( num_embeddings, in_channels, adaptive_softmax_cutoff, dropout=adaptive_softmax_dropout, ) else: self.fc2 = Linear(in_channels, out_embed_dim) if share_embed: assert out_embed_dim == embed_dim, ( "Shared embed weights implies same dimensions " " out_embed_dim={} vs embed_dim={}".format(out_embed_dim, embed_dim) ) self.fc3 = nn.Linear(out_embed_dim, num_embeddings) self.fc3.weight = self.embed_tokens.weight else: self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout) def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused ): if encoder_out is not None: encoder_padding_mask = encoder_out["encoder_padding_mask"] encoder_out = encoder_out["encoder_out"] # split and transpose encoder outputs encoder_a, encoder_b = self._split_encoder_out( encoder_out, incremental_state ) if self.embed_positions is not None: pos_embed = self.embed_positions(prev_output_tokens, incremental_state) else: pos_embed = 0 if incremental_state is 
not None: prev_output_tokens = prev_output_tokens[:, -1:] x = self._embed_tokens(prev_output_tokens, incremental_state) # embed tokens and combine with positional embeddings x += pos_embed x = self.dropout_module(x) target_embedding = x # project to size of convolution x = self.fc1(x) # B x T x C -> T x B x C x = self._transpose_if_training(x, incremental_state) # temporal convolutions avg_attn_scores = None num_attn_layers = len(self.attention) residuals = [x] for proj, conv, attention, res_layer in zip( self.projections, self.convolutions, self.attention, self.residuals ): if res_layer > 0: residual = residuals[-res_layer] residual = residual if proj is None else proj(residual) else: residual = None x = self.dropout_module(x) x = conv(x, incremental_state) x = F.glu(x, dim=2) # attention if attention is not None: x = self._transpose_if_training(x, incremental_state) x, attn_scores = attention( x, target_embedding, (encoder_a, encoder_b), encoder_padding_mask ) if not self.training and self.need_attn: attn_scores = attn_scores / num_attn_layers if avg_attn_scores is None: avg_attn_scores = attn_scores else: avg_attn_scores.add_(attn_scores) x = self._transpose_if_training(x, incremental_state) # residual if residual is not None: x = (x + residual) * math.sqrt(0.5) residuals.append(x) # T x B x C -> B x T x C x = self._transpose_if_training(x, incremental_state) # project back to size of vocabulary if not using adaptive softmax if self.fc2 is not None and self.fc3 is not None: x = self.fc2(x) x = self.dropout_module(x) x = self.fc3(x) return x, avg_attn_scores def reorder_incremental_state(self, incremental_state, new_order): super().reorder_incremental_state(incremental_state, new_order) encoder_out = utils.get_incremental_state( self, incremental_state, "encoder_out" ) if encoder_out is not None: encoder_out = tuple(eo.index_select(0, new_order) for eo in encoder_out) utils.set_incremental_state( self, incremental_state, "encoder_out", encoder_out ) def max_positions(self): """Maximum output length supported by the decoder.""" return ( self.embed_positions.max_positions if self.embed_positions is not None else float("inf") ) def upgrade_state_dict(self, state_dict): if utils.item(state_dict.get("decoder.version", torch.Tensor([1]))[0]) < 2: # old models use incorrect weight norm dimension for i, conv in enumerate(self.convolutions): # reconfigure weight norm nn.utils.remove_weight_norm(conv) self.convolutions[i] = nn.utils.weight_norm(conv, dim=0) state_dict["decoder.version"] = torch.Tensor([1]) return state_dict def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def _embed_tokens(self, tokens, incremental_state): if incremental_state is not None: # keep only the last token for incremental forward pass tokens = tokens[:, -1:] return self.embed_tokens(tokens) def _split_encoder_out(self, encoder_out, incremental_state): """Split and transpose encoder outputs. This is cached when doing incremental inference. 
""" cached_result = utils.get_incremental_state( self, incremental_state, "encoder_out" ) if cached_result is not None: return cached_result # transpose only once to speed up attention layers encoder_a, encoder_b = encoder_out encoder_a = encoder_a.transpose(1, 2).contiguous() result = (encoder_a, encoder_b) if incremental_state is not None: utils.set_incremental_state(self, incremental_state, "encoder_out", result) return result def _transpose_if_training(self, x, incremental_state): if incremental_state is None: x = x.transpose(0, 1) return x def extend_conv_spec(convolutions): """ Extends convolutional spec that is a list of tuples of 2 or 3 parameters (kernel size, dim size and optionally how many layers behind to look for residual) to default the residual propagation param if it is not specified """ extended = [] for spec in convolutions: if len(spec) == 3: extended.append(spec) elif len(spec) == 2: extended.append(spec + (1,)) else: raise Exception( "invalid number of parameters in convolution spec " + str(spec) + ". expected 2 or 3" ) return tuple(extended) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, 0, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx): m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) nn.init.normal_(m.weight, 0, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, dropout=0.0): """Weight-normalized Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features) nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / in_features)) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m) def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs): """Weight-normalized Conv1d layer optimized for decoding""" m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) nn.init.normal_(m.weight, mean=0, std=std) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m, dim=2) def ConvTBC(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs): """Weight-normalized Conv1d layer""" from fairseq.modules import ConvTBC m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) nn.init.normal_(m.weight, mean=0, std=std) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m, dim=2) @register_model_architecture("fconv", "fconv") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_layers = getattr(args, "encoder_layers", "[(512, 3)] * 20") args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_layers = getattr(args, "decoder_layers", "[(512, 3)] * 20") args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) args.decoder_attention = getattr(args, "decoder_attention", "True") args.share_input_output_embed = getattr(args, "share_input_output_embed", False) @register_model_architecture("fconv", "fconv_iwslt_de_en") def fconv_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_layers = 
getattr(args, "encoder_layers", "[(256, 3)] * 4") args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_layers = getattr(args, "decoder_layers", "[(256, 3)] * 3") args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) base_architecture(args) @register_model_architecture("fconv", "fconv_wmt_en_ro") def fconv_wmt_en_ro(args): args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) base_architecture(args) @register_model_architecture("fconv", "fconv_wmt_en_de") def fconv_wmt_en_de(args): convs = "[(512, 3)] * 9" # first 9 layers have 512 units convs += " + [(1024, 3)] * 4" # next 4 layers have 1024 units convs += " + [(2048, 1)] * 2" # final 2 layers use 1x1 convolutions args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.encoder_layers = getattr(args, "encoder_layers", convs) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768) args.decoder_layers = getattr(args, "decoder_layers", convs) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) base_architecture(args) @register_model_architecture("fconv", "fconv_wmt_en_fr") def fconv_wmt_en_fr(args): convs = "[(512, 3)] * 6" # first 6 layers have 512 units convs += " + [(768, 3)] * 4" # next 4 layers have 768 units convs += " + [(1024, 3)] * 3" # next 3 layers have 1024 units convs += " + [(2048, 1)] * 1" # next 1 layer uses 1x1 convolutions convs += " + [(4096, 1)] * 1" # final 1 layer uses 1x1 convolutions args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.encoder_layers = getattr(args, "encoder_layers", convs) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768) args.decoder_layers = getattr(args, "decoder_layers", convs) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) base_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/fconv.py
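The --encoder-layers and --decoder-layers options above are plain Python expressions: build_model runs them through eval() and extend_conv_spec() defaults the residual hop to 1 when a spec tuple omits it. Below is a minimal, self-contained sketch of that parsing step; parse_conv_spec is a local stand-in written only for illustration, not a function from the file.

# Sketch of how fconv layer specs are interpreted. parse_conv_spec mirrors the
# eval(...) + extend_conv_spec(...) steps in the file above.
def parse_conv_spec(spec_str):
    layers = eval(spec_str)  # e.g. "[(512, 3)] * 20" -> [(512, 3), (512, 3), ...]
    extended = []
    for spec in layers:
        if len(spec) == 3:
            extended.append(tuple(spec))
        elif len(spec) == 2:
            # default: residual connection to the previous layer (residual=1)
            extended.append(tuple(spec) + (1,))
        else:
            raise ValueError("expected (dim, kernel_size[, residual]), got %r" % (spec,))
    return tuple(extended)


if __name__ == "__main__":
    # shortened version of the default encoder spec from base_architecture()
    print(parse_conv_spec("[(512, 3)] * 2"))
    # -> ((512, 3, 1), (512, 3, 1))
    # shortened version of the mixed spec used by fconv_wmt_en_de
    print(parse_conv_spec("[(512, 3)] * 1 + [(2048, 1)] * 1"))
    # -> ((512, 3, 1), (2048, 1, 1))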
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from fairseq.modules import AdaptiveSoftmax, FairseqDropout from torch import Tensor DEFAULT_MAX_SOURCE_POSITIONS = 1e5 DEFAULT_MAX_TARGET_POSITIONS = 1e5 @register_model("lstm") class LSTMModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-freeze-embed', action='store_true', help='freeze encoder embeddings') parser.add_argument('--encoder-hidden-size', type=int, metavar='N', help='encoder hidden size') parser.add_argument('--encoder-layers', type=int, metavar='N', help='number of encoder layers') parser.add_argument('--encoder-bidirectional', action='store_true', help='make all layers of encoder bidirectional') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-freeze-embed', action='store_true', help='freeze decoder embeddings') parser.add_argument('--decoder-hidden-size', type=int, metavar='N', help='decoder hidden size') parser.add_argument('--decoder-layers', type=int, metavar='N', help='number of decoder layers') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='BOOL', help='decoder attention') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion') parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', default=False, action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') # Granular dropout settings (if not specified these default to --dropout) parser.add_argument('--encoder-dropout-in', type=float, metavar='D', help='dropout probability for encoder input embedding') parser.add_argument('--encoder-dropout-out', type=float, metavar='D', help='dropout probability for encoder output') parser.add_argument('--decoder-dropout-in', type=float, metavar='D', help='dropout probability for decoder input embedding') parser.add_argument('--decoder-dropout-out', type=float, metavar='D', help='dropout probability for decoder output') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted (in case there are any new ones) base_architecture(args) if args.encoder_layers != args.decoder_layers: raise ValueError("--encoder-layers must match --decoder-layers") max_source_positions = getattr( args, "max_source_positions", DEFAULT_MAX_SOURCE_POSITIONS ) max_target_positions = getattr( args, "max_target_positions", DEFAULT_MAX_TARGET_POSITIONS ) def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) embed_dict = utils.parse_embedding(embed_path) utils.print_embed_overlap(embed_dict, dictionary) return utils.load_embedding(embed_dict, dictionary, embed_tokens) if args.encoder_embed_path: pretrained_encoder_embed = load_pretrained_embedding_from_file( args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim ) else: num_embeddings = len(task.source_dictionary) pretrained_encoder_embed = Embedding( num_embeddings, args.encoder_embed_dim, task.source_dictionary.pad() ) if args.share_all_embeddings: # double check all parameters combinations are valid if task.source_dictionary != task.target_dictionary: raise ValueError("--share-all-embeddings requires a joint dictionary") if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise ValueError( "--share-all-embed not compatible with --decoder-embed-path" ) if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to " "match --decoder-embed-dim" ) pretrained_decoder_embed = pretrained_encoder_embed args.share_decoder_input_output_embed = True else: # separate decoder input embeddings pretrained_decoder_embed = None if args.decoder_embed_path: pretrained_decoder_embed = load_pretrained_embedding_from_file( args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim, ) # one last double check of parameter combinations if args.share_decoder_input_output_embed and ( args.decoder_embed_dim != args.decoder_out_embed_dim ): raise ValueError( "--share-decoder-input-output-embeddings requires " "--decoder-embed-dim to match --decoder-out-embed-dim" ) if args.encoder_freeze_embed: pretrained_encoder_embed.weight.requires_grad = False if args.decoder_freeze_embed: pretrained_decoder_embed.weight.requires_grad = False encoder = LSTMEncoder( dictionary=task.source_dictionary, embed_dim=args.encoder_embed_dim, 
hidden_size=args.encoder_hidden_size, num_layers=args.encoder_layers, dropout_in=args.encoder_dropout_in, dropout_out=args.encoder_dropout_out, bidirectional=args.encoder_bidirectional, pretrained_embed=pretrained_encoder_embed, max_source_positions=max_source_positions, ) decoder = LSTMDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, hidden_size=args.decoder_hidden_size, out_embed_dim=args.decoder_out_embed_dim, num_layers=args.decoder_layers, dropout_in=args.decoder_dropout_in, dropout_out=args.decoder_dropout_out, attention=utils.eval_bool(args.decoder_attention), encoder_output_units=encoder.output_units, pretrained_embed=pretrained_decoder_embed, share_input_output_embed=args.share_decoder_input_output_embed, adaptive_softmax_cutoff=( utils.eval_str_list(args.adaptive_softmax_cutoff, type=int) if args.criterion == "adaptive_loss" else None ), max_target_positions=max_target_positions, residuals=False, ) return cls(encoder, decoder) def forward( self, src_tokens, src_lengths, prev_output_tokens, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): encoder_out = self.encoder(src_tokens, src_lengths=src_lengths) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, ) return decoder_out class LSTMEncoder(FairseqEncoder): """LSTM encoder.""" def __init__( self, dictionary, embed_dim=512, hidden_size=512, num_layers=1, dropout_in=0.1, dropout_out=0.1, bidirectional=False, left_pad=True, pretrained_embed=None, padding_idx=None, max_source_positions=DEFAULT_MAX_SOURCE_POSITIONS, ): super().__init__(dictionary) self.num_layers = num_layers self.dropout_in_module = FairseqDropout( dropout_in*1.0, module_name=self.__class__.__name__ ) self.dropout_out_module = FairseqDropout( dropout_out*1.0, module_name=self.__class__.__name__ ) self.bidirectional = bidirectional self.hidden_size = hidden_size self.max_source_positions = max_source_positions num_embeddings = len(dictionary) self.padding_idx = padding_idx if padding_idx is not None else dictionary.pad() if pretrained_embed is None: self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) else: self.embed_tokens = pretrained_embed self.lstm = LSTM( input_size=embed_dim, hidden_size=hidden_size, num_layers=num_layers, dropout=self.dropout_out_module.p if num_layers > 1 else 0.0, bidirectional=bidirectional, ) self.left_pad = left_pad self.output_units = hidden_size if bidirectional: self.output_units *= 2 def forward( self, src_tokens: Tensor, src_lengths: Tensor, enforce_sorted: bool = True, ): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` enforce_sorted (bool, optional): if True, `src_tokens` is expected to contain sequences sorted by length in a decreasing order. If False, this condition is not required. Default: True. 
""" if self.left_pad: # nn.utils.rnn.pack_padded_sequence requires right-padding; # convert left-padding to right-padding src_tokens = utils.convert_padding_direction( src_tokens, torch.zeros_like(src_tokens).fill_(self.padding_idx), left_to_right=True, ) bsz, seqlen = src_tokens.size() # embed tokens x = self.embed_tokens(src_tokens) x = self.dropout_in_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # pack embedded source tokens into a PackedSequence packed_x = nn.utils.rnn.pack_padded_sequence( x, src_lengths.cpu(), enforce_sorted=enforce_sorted ) # apply LSTM if self.bidirectional: state_size = 2 * self.num_layers, bsz, self.hidden_size else: state_size = self.num_layers, bsz, self.hidden_size h0 = x.new_zeros(*state_size) c0 = x.new_zeros(*state_size) packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0)) # unpack outputs and apply dropout x, _ = nn.utils.rnn.pad_packed_sequence( packed_outs, padding_value=self.padding_idx * 1.0 ) x = self.dropout_out_module(x) assert list(x.size()) == [seqlen, bsz, self.output_units] if self.bidirectional: final_hiddens = self.combine_bidir(final_hiddens, bsz) final_cells = self.combine_bidir(final_cells, bsz) encoder_padding_mask = src_tokens.eq(self.padding_idx).t() return tuple( ( x, # seq_len x batch x hidden final_hiddens, # num_layers x batch x num_directions*hidden final_cells, # num_layers x batch x num_directions*hidden encoder_padding_mask, # seq_len x batch ) ) def combine_bidir(self, outs, bsz: int): out = outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous() return out.view(self.num_layers, bsz, -1) def reorder_encoder_out(self, encoder_out: Tuple[Tensor, Tensor, Tensor, Tensor], new_order): return tuple( ( encoder_out[0].index_select(1, new_order), encoder_out[1].index_select(1, new_order), encoder_out[2].index_select(1, new_order), encoder_out[3].index_select(1, new_order), ) ) def max_positions(self): """Maximum input length supported by the encoder.""" return self.max_source_positions class AttentionLayer(nn.Module): def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim, bias=False): super().__init__() self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias) self.output_proj = Linear( input_embed_dim + source_embed_dim, output_embed_dim, bias=bias ) def forward(self, input, source_hids, encoder_padding_mask): # input: bsz x input_embed_dim # source_hids: srclen x bsz x source_embed_dim # x: bsz x source_embed_dim x = self.input_proj(input) # compute attention attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2) # don't attend over padding if encoder_padding_mask is not None: attn_scores = ( attn_scores.float() .masked_fill_(encoder_padding_mask, float("-inf")) .type_as(attn_scores) ) # FP16 support: cast to float and back attn_scores = F.softmax(attn_scores, dim=0) # srclen x bsz # sum weighted sources x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0) x = torch.tanh(self.output_proj(torch.cat((x, input), dim=1))) return x, attn_scores class LSTMDecoder(FairseqIncrementalDecoder): """LSTM decoder.""" def __init__( self, dictionary, embed_dim=512, hidden_size=512, out_embed_dim=512, num_layers=1, dropout_in=0.1, dropout_out=0.1, attention=True, encoder_output_units=512, pretrained_embed=None, share_input_output_embed=False, adaptive_softmax_cutoff=None, max_target_positions=DEFAULT_MAX_TARGET_POSITIONS, residuals=False, ): super().__init__(dictionary) self.dropout_in_module = FairseqDropout( dropout_in*1.0, module_name=self.__class__.__name__ ) 
self.dropout_out_module = FairseqDropout( dropout_out*1.0, module_name=self.__class__.__name__ ) self.hidden_size = hidden_size self.share_input_output_embed = share_input_output_embed self.need_attn = True self.max_target_positions = max_target_positions self.residuals = residuals self.num_layers = num_layers self.adaptive_softmax = None num_embeddings = len(dictionary) padding_idx = dictionary.pad() if pretrained_embed is None: self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) else: self.embed_tokens = pretrained_embed self.encoder_output_units = encoder_output_units if encoder_output_units != hidden_size and encoder_output_units != 0: self.encoder_hidden_proj = Linear(encoder_output_units, hidden_size) self.encoder_cell_proj = Linear(encoder_output_units, hidden_size) else: self.encoder_hidden_proj = self.encoder_cell_proj = None # disable input feeding if there is no encoder # input feeding is described in arxiv.org/abs/1508.04025 input_feed_size = 0 if encoder_output_units == 0 else hidden_size self.layers = nn.ModuleList( [ LSTMCell( input_size=input_feed_size + embed_dim if layer == 0 else hidden_size, hidden_size=hidden_size, ) for layer in range(num_layers) ] ) if attention: # TODO make bias configurable self.attention = AttentionLayer( hidden_size, encoder_output_units, hidden_size, bias=False ) else: self.attention = None if hidden_size != out_embed_dim: self.additional_fc = Linear(hidden_size, out_embed_dim) if adaptive_softmax_cutoff is not None: # setting adaptive_softmax dropout to dropout_out for now but can be redefined self.adaptive_softmax = AdaptiveSoftmax( num_embeddings, hidden_size, adaptive_softmax_cutoff, dropout=dropout_out, ) elif not self.share_input_output_embed: self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out) def forward( self, prev_output_tokens, encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, src_lengths: Optional[Tensor] = None, ): x, attn_scores = self.extract_features( prev_output_tokens, encoder_out, incremental_state ) return self.output_layer(x), attn_scores def extract_features( self, prev_output_tokens, encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): """ Similar to *forward* but only return features. 
""" # get outputs from encoder if encoder_out is not None: encoder_outs = encoder_out[0] encoder_hiddens = encoder_out[1] encoder_cells = encoder_out[2] encoder_padding_mask = encoder_out[3] else: encoder_outs = torch.empty(0) encoder_hiddens = torch.empty(0) encoder_cells = torch.empty(0) encoder_padding_mask = torch.empty(0) srclen = encoder_outs.size(0) if incremental_state is not None and len(incremental_state) > 0: prev_output_tokens = prev_output_tokens[:, -1:] bsz, seqlen = prev_output_tokens.size() # embed tokens x = self.embed_tokens(prev_output_tokens) x = self.dropout_in_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # initialize previous states (or get from cache during incremental generation) if incremental_state is not None and len(incremental_state) > 0: prev_hiddens, prev_cells, input_feed = self.get_cached_state( incremental_state ) elif encoder_out is not None: # setup recurrent cells prev_hiddens = [encoder_hiddens[i] for i in range(self.num_layers)] prev_cells = [encoder_cells[i] for i in range(self.num_layers)] if self.encoder_hidden_proj is not None: prev_hiddens = [self.encoder_hidden_proj(y) for y in prev_hiddens] prev_cells = [self.encoder_cell_proj(y) for y in prev_cells] input_feed = x.new_zeros(bsz, self.hidden_size) else: # setup zero cells, since there is no encoder zero_state = x.new_zeros(bsz, self.hidden_size) prev_hiddens = [zero_state for i in range(self.num_layers)] prev_cells = [zero_state for i in range(self.num_layers)] input_feed = None assert ( srclen > 0 or self.attention is None ), "attention is not supported if there are no encoder outputs" attn_scores: Optional[Tensor] = ( x.new_zeros(srclen, seqlen, bsz) if self.attention is not None else None ) outs = [] for j in range(seqlen): # input feeding: concatenate context vector from previous time step if input_feed is not None: input = torch.cat((x[j, :, :], input_feed), dim=1) else: input = x[j] for i, rnn in enumerate(self.layers): # recurrent cell hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i])) # hidden state becomes the input to the next layer input = self.dropout_out_module(hidden) if self.residuals: input = input + prev_hiddens[i] # save state for next time step prev_hiddens[i] = hidden prev_cells[i] = cell # apply attention using the last layer's hidden state if self.attention is not None: assert attn_scores is not None out, attn_scores[:, j, :] = self.attention( hidden, encoder_outs, encoder_padding_mask ) else: out = hidden out = self.dropout_out_module(out) # input feeding if input_feed is not None: input_feed = out # save final output outs.append(out) # Stack all the necessary tensors together and store prev_hiddens_tensor = torch.stack(prev_hiddens) prev_cells_tensor = torch.stack(prev_cells) cache_state = torch.jit.annotate( Dict[str, Optional[Tensor]], { "prev_hiddens": prev_hiddens_tensor, "prev_cells": prev_cells_tensor, "input_feed": input_feed, }, ) self.set_incremental_state(incremental_state, "cached_state", cache_state) # collect outputs across time steps x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size) # T x B x C -> B x T x C x = x.transpose(1, 0) if hasattr(self, "additional_fc") and self.adaptive_softmax is None: x = self.additional_fc(x) x = self.dropout_out_module(x) # srclen x tgtlen x bsz -> bsz x tgtlen x srclen if not self.training and self.need_attn and self.attention is not None: assert attn_scores is not None attn_scores = attn_scores.transpose(0, 2) else: attn_scores = None return x, attn_scores def output_layer(self, x): 
"""Project features to the vocabulary size.""" if self.adaptive_softmax is None: if self.share_input_output_embed: x = F.linear(x, self.embed_tokens.weight) else: x = self.fc_out(x) return x def get_cached_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], ) -> Tuple[List[Tensor], List[Tensor], Optional[Tensor]]: cached_state = self.get_incremental_state(incremental_state, "cached_state") assert cached_state is not None prev_hiddens_ = cached_state["prev_hiddens"] assert prev_hiddens_ is not None prev_cells_ = cached_state["prev_cells"] assert prev_cells_ is not None prev_hiddens = [prev_hiddens_[i] for i in range(self.num_layers)] prev_cells = [prev_cells_[j] for j in range(self.num_layers)] input_feed = cached_state[ "input_feed" ] # can be None for decoder-only language models return prev_hiddens, prev_cells, input_feed def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): if incremental_state is None or len(incremental_state) == 0: return prev_hiddens, prev_cells, input_feed = self.get_cached_state(incremental_state) prev_hiddens = [p.index_select(0, new_order) for p in prev_hiddens] prev_cells = [p.index_select(0, new_order) for p in prev_cells] if input_feed is not None: input_feed = input_feed.index_select(0, new_order) cached_state_new = torch.jit.annotate( Dict[str, Optional[Tensor]], { "prev_hiddens": torch.stack(prev_hiddens), "prev_cells": torch.stack(prev_cells), "input_feed": input_feed, }, ) self.set_incremental_state(incremental_state, "cached_state", cached_state_new), return def max_positions(self): """Maximum output length supported by the decoder.""" return self.max_target_positions def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.uniform_(m.weight, -0.1, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def LSTM(input_size, hidden_size, **kwargs): m = nn.LSTM(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if "weight" in name or "bias" in name: param.data.uniform_(-0.1, 0.1) return m def LSTMCell(input_size, hidden_size, **kwargs): m = nn.LSTMCell(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if "weight" in name or "bias" in name: param.data.uniform_(-0.1, 0.1) return m def Linear(in_features, out_features, bias=True, dropout=0.0): """Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features, bias=bias) m.weight.data.uniform_(-0.1, 0.1) if bias: m.bias.data.uniform_(-0.1, 0.1) return m @register_model_architecture("lstm", "lstm") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_freeze_embed = getattr(args, "encoder_freeze_embed", False) args.encoder_hidden_size = getattr( args, "encoder_hidden_size", args.encoder_embed_dim ) args.encoder_layers = getattr(args, "encoder_layers", 1) args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False) args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout) args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_freeze_embed 
= getattr(args, "decoder_freeze_embed", False) args.decoder_hidden_size = getattr( args, "decoder_hidden_size", args.decoder_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 1) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) args.decoder_attention = getattr(args, "decoder_attention", "1") args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout) args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.adaptive_softmax_cutoff = getattr( args, "adaptive_softmax_cutoff", "10000,50000,200000" ) @register_model_architecture("lstm", "lstm_wiseman_iwslt_de_en") def lstm_wiseman_iwslt_de_en(args): args.dropout = getattr(args, "dropout", 0.1) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_dropout_in = getattr(args, "encoder_dropout_in", 0) args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) args.decoder_dropout_in = getattr(args, "decoder_dropout_in", 0) args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) base_architecture(args) @register_model_architecture("lstm", "lstm_luong_wmt_en_de") def lstm_luong_wmt_en_de(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1000) args.encoder_layers = getattr(args, "encoder_layers", 4) args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1000) args.decoder_layers = getattr(args, "decoder_layers", 4) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 1000) args.decoder_dropout_out = getattr(args, "decoder_dropout_out", 0) base_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/lstm.py
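The AttentionLayer in lstm.py scores each source position by a dot product between the projected decoder state and the encoder outputs, masks padding with -inf, and softmax-normalizes over the source length. The following shape sketch reproduces that pattern on random tensors; the sizes are arbitrary and the snippet is illustrative only.

# Shape sketch of the dot-product attention used by LSTMDecoder's AttentionLayer.
import torch
import torch.nn.functional as F

srclen, bsz, dim = 7, 2, 16
source_hids = torch.randn(srclen, bsz, dim)        # encoder outputs (T x B x C)
query = torch.randn(bsz, dim)                      # projected decoder state (B x C)
padding_mask = torch.zeros(srclen, bsz, dtype=torch.bool)
padding_mask[5:, 0] = True                         # pretend sentence 0 ends two steps early

scores = (source_hids * query.unsqueeze(0)).sum(dim=2)       # T x B
scores = scores.masked_fill(padding_mask, float("-inf"))     # don't attend over padding
attn = F.softmax(scores, dim=0)                              # normalize over source length
context = (attn.unsqueeze(2) * source_hids).sum(dim=0)       # B x C weighted sum
print(attn.shape, context.shape)  # torch.Size([7, 2]) torch.Size([2, 16])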
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( TransformerModel, base_architecture, transformer_wmt_en_de_big, ) @register_model("transformer_align") class TransformerAlignModel(TransformerModel): """ See "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). """ def __init__(self, encoder, decoder, args): super().__init__(args, encoder, decoder) self.alignment_heads = args.alignment_heads self.alignment_layer = args.alignment_layer self.full_context_alignment = args.full_context_alignment @staticmethod def add_args(parser): # fmt: off super(TransformerAlignModel, TransformerAlignModel).add_args(parser) parser.add_argument('--alignment-heads', type=int, metavar='D', help='Number of cross attention heads per layer to supervised with alignments') parser.add_argument('--alignment-layer', type=int, metavar='D', help='Layer number which has to be supervised. 0 corresponding to the bottommost layer.') parser.add_argument('--full-context-alignment', action='store_true', help='Whether or not alignment is supervised conditioned on the full target context.') # fmt: on @classmethod def build_model(cls, args, task): # set any default arguments transformer_align(args) transformer_model = TransformerModel.build_model(args, task) return TransformerAlignModel( transformer_model.encoder, transformer_model.decoder, args ) def forward(self, src_tokens, src_lengths, prev_output_tokens): encoder_out = self.encoder(src_tokens, src_lengths) return self.forward_decoder(prev_output_tokens, encoder_out) def forward_decoder( self, prev_output_tokens, encoder_out=None, incremental_state=None, features_only=False, **extra_args, ): attn_args = { "alignment_layer": self.alignment_layer, "alignment_heads": self.alignment_heads, } decoder_out = self.decoder(prev_output_tokens, encoder_out, **attn_args) if self.full_context_alignment: attn_args["full_context_alignment"] = self.full_context_alignment _, alignment_out = self.decoder( prev_output_tokens, encoder_out, features_only=True, **attn_args, **extra_args, ) decoder_out[1]["attn"] = alignment_out["attn"] return decoder_out @register_model_architecture("transformer_align", "transformer_align") def transformer_align(args): args.alignment_heads = getattr(args, "alignment_heads", 1) args.alignment_layer = getattr(args, "alignment_layer", 4) args.full_context_alignment = getattr(args, "full_context_alignment", False) base_architecture(args) @register_model_architecture("transformer_align", "transformer_wmt_en_de_big_align") def transformer_wmt_en_de_big_align(args): args.alignment_heads = getattr(args, "alignment_heads", 1) args.alignment_layer = getattr(args, "alignment_layer", 4) transformer_wmt_en_de_big(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/transformer_align.py
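TransformerAlignModel copies the supervised layer's cross-attention into decoder_out[1]["attn"]; a common way to turn those probabilities into a hard word alignment is an argmax over source positions. The sketch below shows only that post-processing step on a made-up attention tensor with the usual batch x tgt_len x src_len layout.

# Turning soft cross-attention into a hard alignment. The tensor here is dummy
# data; only the argmax-over-source step is the point.
import torch

bsz, tgt_len, src_len = 1, 4, 5
attn = torch.rand(bsz, tgt_len, src_len)
attn = attn / attn.sum(dim=-1, keepdim=True)   # rows sum to 1, like softmax output

hard_alignment = attn.argmax(dim=-1)           # for each target token, best source index
for t, s in enumerate(hard_alignment[0].tolist()):
    print(f"target token {t} -> source token {s}")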
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .fairseq_encoder import FairseqEncoder


class CompositeEncoder(FairseqEncoder):
    """
    A wrapper around a dictionary of :class:`FairseqEncoder` objects.

    We run forward on each encoder and return a dictionary of outputs. The first
    encoder's dictionary is used for initialization.

    Args:
        encoders (dict): a dictionary of :class:`FairseqEncoder` objects.
    """

    def __init__(self, encoders):
        super().__init__(next(iter(encoders.values())).dictionary)
        self.encoders = encoders
        for key in self.encoders:
            self.add_module(key, self.encoders[key])

    def forward(self, src_tokens, src_lengths):
        """
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (LongTensor): lengths of each source sentence of shape
                `(batch)`

        Returns:
            dict: the outputs from each Encoder
        """
        encoder_out = {}
        for key in self.encoders:
            encoder_out[key] = self.encoders[key](src_tokens, src_lengths)
        return encoder_out

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder encoder output according to new_order."""
        for key in self.encoders:
            encoder_out[key] = self.encoders[key].reorder_encoder_out(
                encoder_out[key], new_order
            )
        return encoder_out

    def max_positions(self):
        return min(self.encoders[key].max_positions() for key in self.encoders)

    def upgrade_state_dict(self, state_dict):
        for key in self.encoders:
            self.encoders[key].upgrade_state_dict(state_dict)
        return state_dict
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/composite_encoder.py
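CompositeEncoder stores the wrapped encoders in a plain dict and then registers each one with add_module, which is what makes their parameters visible to the parent module. A toy illustration of that registration pattern with plain nn.Module follows; the Wrapper class is invented for the example and is not part of fairseq.

# Why CompositeEncoder calls add_module(): submodules held only in a plain dict
# are invisible to the parent module's parameter bookkeeping.
import torch.nn as nn

class Wrapper(nn.Module):
    def __init__(self, modules_dict, register):
        super().__init__()
        self.mods = modules_dict
        if register:
            for key, m in modules_dict.items():
                self.add_module(key, m)

unregistered = Wrapper({"a": nn.Linear(4, 4)}, register=False)
registered = Wrapper({"a": nn.Linear(4, 4)}, register=True)
print(len(list(unregistered.parameters())))  # 0 -- the Linear is not tracked
print(len(list(registered.parameters())))    # 2 -- weight and bias are tracked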
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( LayerNorm, SinusoidalPositionalEmbedding, TransformerSentenceEncoder, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import safe_hasattr logger = logging.getLogger(__name__) @register_model("masked_lm") class MaskedLMModel(FairseqEncoderModel): """ Class for training a Masked Language Model. It also supports an additional sentence level prediction if the sent-loss argument is set. """ def __init__(self, args, encoder): super().__init__(encoder) self.args = args # if specified then apply bert initialization on the model. We need # to explictly call this to make sure that the output embeddings # and projection layers are also correctly initialized if getattr(args, "apply_bert_init", False): self.apply(init_bert_params) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # Arguments related to dropout parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for" " attention weights", ) parser.add_argument( "--act-dropout", type=float, metavar="D", help="dropout probability after" " activation in FFN", ) # Arguments related to hidden states and self-attention parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="num encoder layers" ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads", ) # Arguments related to input and output embeddings parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--share-encoder-input-output-embed", action="store_true", help="share encoder input" " and output embeddings", ) parser.add_argument( "--encoder-learned-pos", action="store_true", help="use learned positional embeddings in the encoder", ) parser.add_argument( "--no-token-positional-embeddings", action="store_true", help="if set, disables positional embeddings" " (outside self attention)", ) parser.add_argument( "--num-segment", type=int, metavar="N", help="num segment in the input" ) parser.add_argument( "--max-positions", type=int, help="number of positional embeddings to learn" ) # Arguments related to sentence level prediction parser.add_argument( "--sentence-class-num", type=int, metavar="N", help="number of classes for sentence task", ) parser.add_argument( "--sent-loss", action="store_true", help="if set," " calculate sentence level predictions", ) # Arguments related to parameter initialization parser.add_argument( "--apply-bert-init", action="store_true", help="use custom param initialization for BERT", ) # misc params parser.add_argument( "--activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--pooler-activation-fn", choices=utils.get_available_activation_fns(), help="Which activation function to use for pooler layer.", ) parser.add_argument( 
"--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) def forward(self, src_tokens, segment_labels=None, **kwargs): return self.encoder(src_tokens, segment_labels=segment_labels, **kwargs) def max_positions(self): return self.encoder.max_positions @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not safe_hasattr(args, "max_positions"): args.max_positions = args.tokens_per_sample logger.info(args) encoder = MaskedLMEncoder(args, task.dictionary) return cls(args, encoder) class MaskedLMEncoder(FairseqEncoder): """ Encoder for Masked Language Modelling. """ def __init__(self, args, dictionary): super().__init__(dictionary) self.padding_idx = dictionary.pad() self.vocab_size = dictionary.__len__() self.max_positions = args.max_positions self.sentence_encoder = TransformerSentenceEncoder( padding_idx=self.padding_idx, vocab_size=self.vocab_size, num_encoder_layers=args.encoder_layers, embedding_dim=args.encoder_embed_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.act_dropout, max_seq_len=self.max_positions, num_segments=args.num_segment, use_position_embeddings=not args.no_token_positional_embeddings, encoder_normalize_before=args.encoder_normalize_before, apply_bert_init=args.apply_bert_init, activation_fn=args.activation_fn, learned_pos_embedding=args.encoder_learned_pos, ) self.share_input_output_embed = args.share_encoder_input_output_embed self.embed_out = None self.sentence_projection_layer = None self.sentence_out_dim = args.sentence_class_num self.lm_output_learned_bias = None # Remove head is set to true during fine-tuning self.load_softmax = not getattr(args, "remove_head", False) self.masked_lm_pooler = nn.Linear( args.encoder_embed_dim, args.encoder_embed_dim ) self.pooler_activation = utils.get_activation_fn(args.pooler_activation_fn) self.lm_head_transform_weight = nn.Linear( args.encoder_embed_dim, args.encoder_embed_dim ) self.activation_fn = utils.get_activation_fn(args.activation_fn) self.layer_norm = LayerNorm(args.encoder_embed_dim) self.lm_output_learned_bias = None if self.load_softmax: self.lm_output_learned_bias = nn.Parameter(torch.zeros(self.vocab_size)) if not self.share_input_output_embed: self.embed_out = nn.Linear( args.encoder_embed_dim, self.vocab_size, bias=False ) if args.sent_loss: self.sentence_projection_layer = nn.Linear( args.encoder_embed_dim, self.sentence_out_dim, bias=False ) def forward(self, src_tokens, segment_labels=None, masked_tokens=None, **unused): """ Forward pass for Masked LM encoder. This first computes the token embedding using the token embedding matrix, position embeddings (if specified) and segment embeddings (if specified). Here we assume that the sentence representation corresponds to the output of the classification_token (see bert_task or cross_lingual_lm task for more details). Args: - src_tokens: B x T matrix representing sentences - segment_labels: B x T matrix representing segment label for tokens Returns: - a tuple of the following: - logits for predictions in format B x T x C to be used in softmax afterwards - a dictionary of additional data, where 'pooled_output' contains the representation for classification_token and 'inner_states' is a list of internal model states used to compute the predictions (similar in ELMO). 
'sentence_logits' is the prediction logit for NSP task and is only computed if this is specified in the input arguments. """ inner_states, sentence_rep = self.sentence_encoder( src_tokens, segment_labels=segment_labels, ) x = inner_states[-1].transpose(0, 1) # project masked tokens only if masked_tokens is not None: x = x[masked_tokens, :] x = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(x))) pooled_output = self.pooler_activation(self.masked_lm_pooler(sentence_rep)) # project back to size of vocabulary if self.share_input_output_embed and hasattr( self.sentence_encoder.embed_tokens, "weight" ): x = F.linear(x, self.sentence_encoder.embed_tokens.weight) elif self.embed_out is not None: x = self.embed_out(x) if self.lm_output_learned_bias is not None: x = x + self.lm_output_learned_bias sentence_logits = None if self.sentence_projection_layer: sentence_logits = self.sentence_projection_layer(pooled_output) return x, { "inner_states": inner_states, "pooled_output": pooled_output, "sentence_logits": sentence_logits, } def max_positions(self): """Maximum output length supported by the encoder.""" return self.max_positions def upgrade_state_dict_named(self, state_dict, name): if isinstance( self.sentence_encoder.embed_positions, SinusoidalPositionalEmbedding ): state_dict[ name + ".sentence_encoder.embed_positions._float_tensor" ] = torch.FloatTensor(1) if not self.load_softmax: for k in list(state_dict.keys()): if ( "embed_out.weight" in k or "sentence_projection_layer.weight" in k or "lm_output_learned_bias" in k ): del state_dict[k] return state_dict @register_model_architecture("masked_lm", "masked_lm") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.act_dropout = getattr(args, "act_dropout", 0.0) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.share_encoder_input_output_embed = getattr( args, "share_encoder_input_output_embed", False ) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.num_segment = getattr(args, "num_segment", 2) args.sentence_class_num = getattr(args, "sentence_class_num", 2) args.sent_loss = getattr(args, "sent_loss", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.activation_fn = getattr(args, "activation_fn", "relu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) @register_model_architecture("masked_lm", "bert_base") def bert_base_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.share_encoder_input_output_embed = getattr( args, "share_encoder_input_output_embed", True ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.num_segment = getattr(args, "num_segment", 2) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072) args.sentence_class_num = getattr(args, 
"sentence_class_num", 2) args.sent_loss = getattr(args, "sent_loss", True) args.apply_bert_init = getattr(args, "apply_bert_init", True) args.activation_fn = getattr(args, "activation_fn", "gelu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) base_architecture(args) @register_model_architecture("masked_lm", "bert_large") def bert_large_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_layers = getattr(args, "encoder_layers", 24) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) bert_base_architecture(args) @register_model_architecture("masked_lm", "xlm_base") def xlm_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.share_encoder_input_output_embed = getattr( args, "share_encoder_input_output_embed", True ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.num_segment = getattr(args, "num_segment", 1) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.sent_loss = getattr(args, "sent_loss", False) args.activation_fn = getattr(args, "activation_fn", "gelu") args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.apply_bert_init = getattr(args, "apply_bert_init", True) base_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/masked_lm.py
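In MaskedLMEncoder.forward, passing masked_tokens lets the LM head project only the masked positions: indexing a B x T x C tensor with a B x T boolean mask yields a (num_masked, C) matrix. A small sketch of that indexing step on dummy tensors:

# Sketch of the "project masked tokens only" step: boolean-mask indexing keeps
# just the masked positions. Dummy tensors, arbitrary sizes.
import torch

bsz, seqlen, dim = 2, 6, 8
x = torch.randn(bsz, seqlen, dim)
masked_tokens = torch.zeros(bsz, seqlen, dtype=torch.bool)
masked_tokens[0, 1] = True
masked_tokens[1, 4] = True
masked_tokens[1, 5] = True

selected = x[masked_tokens, :]   # shape: (num_masked, dim)
print(selected.shape)            # torch.Size([3, 8])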
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
from typing import Dict, Optional

from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.models import FairseqDecoder
from torch import Tensor


logger = logging.getLogger(__name__)


@with_incremental_state
class FairseqIncrementalDecoder(FairseqDecoder):
    """Base class for incremental decoders.

    Incremental decoding is a special mode at inference time where the Model
    only receives a single timestep of input corresponding to the previous
    output token (for teacher forcing) and must produce the next output
    *incrementally*. Thus the model must cache any long-term state that is
    needed about the sequence, e.g., hidden states, convolutional states, etc.

    Compared to the standard :class:`FairseqDecoder` interface, the incremental
    decoder interface allows :func:`forward` functions to take an extra keyword
    argument (*incremental_state*) that can be used to cache state across
    time-steps.

    The :class:`FairseqIncrementalDecoder` interface also defines the
    :func:`reorder_incremental_state` method, which is used during beam search
    to select and reorder the incremental state based on the selection of beams.

    To learn more about how incremental decoding works, refer to `this blog
    <http://www.telesens.co/2019/04/21/understanding-incremental-decoding-in-fairseq/>`_.
    """

    def __init__(self, dictionary):
        super().__init__(dictionary)

    def forward(
        self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs
    ):
        """
        Args:
            prev_output_tokens (LongTensor): shifted output tokens of shape
                `(batch, tgt_len)`, for teacher forcing
            encoder_out (dict, optional): output from the encoder, used for
                encoder-side attention
            incremental_state (dict, optional): dictionary used for storing
                state during :ref:`Incremental decoding`

        Returns:
            tuple:
                - the decoder's output of shape `(batch, tgt_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        raise NotImplementedError

    def extract_features(
        self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs
    ):
        """
        Returns:
            tuple:
                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
                - a dictionary with any model-specific outputs
        """
        raise NotImplementedError

    def reorder_incremental_state(
        self,
        incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
        new_order: Tensor,
    ):
        """Reorder incremental state.

        This will be called when the order of the input has changed from the
        previous time step. A typical use case is beam search, where the input
        order changes between time steps based on the selection of beams.
        """
        pass

    def reorder_incremental_state_scripting(
        self,
        incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
        new_order: Tensor,
    ):
        """Main entry point for reordering the incremental state.

        Due to limitations in TorchScript, we call this function in
        :class:`fairseq.sequence_generator.SequenceGenerator` instead of
        calling :func:`reorder_incremental_state` directly.
        """
        for module in self.modules():
            if hasattr(module, "reorder_incremental_state"):
                result = module.reorder_incremental_state(incremental_state, new_order)
                if result is not None:
                    incremental_state = result

    def set_beam_size(self, beam_size):
        """Sets the beam size in the decoder and all children."""
        if getattr(self, "_beam_size", -1) != beam_size:
            seen = set()

            def apply_set_beam_size(module):
                if (
                    module != self
                    and hasattr(module, "set_beam_size")
                    and module not in seen
                ):
                    seen.add(module)
                    module.set_beam_size(beam_size)

            self.apply(apply_set_beam_size)
            self._beam_size = beam_size
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/fairseq_incremental_decoder.py
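To make the reordering contract above concrete: the tensors cached in incremental_state are batched along the (sentence x beam) dimension, and reorder_incremental_state must permute them with new_order whenever beam search reselects hypotheses. A torch-only toy of that reordering; the cache layout and key names here are hypothetical, not fairseq's actual internal buffers:

# Illustrative only: a toy cache of shape (batch*beam, heads, time, head_dim)
# reordered with index_select(0, new_order), as a decoder's
# reorder_incremental_state() would do during beam search.
import torch

incremental_state = {
    "self_attn": {"prev_key": torch.randn(6, 4, 5, 16)}  # 2 sentences x 3 beams
}

# beam search kept beams (2, 2, 0) for sentence 0 and (3, 5, 3) for sentence 1
new_order = torch.tensor([2, 2, 0, 3, 5, 3])

for module_state in incremental_state.values():
    for key, buffer in module_state.items():
        if buffer is not None:
            module_state[key] = buffer.index_select(0, new_order)

print(incremental_state["self_attn"]["prev_key"].shape)  # torch.Size([6, 4, 5, 16])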
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, DynamicConv, FairseqDropout, LayerNorm, LightweightConv, MultiheadAttention, PositionalEmbedding, ) from fairseq.utils import safe_hasattr @register_model("lightconv") class LightConvModel(FairseqEncoderDecoderModel): """ LightConv and DynamicConv model from `"Pay Less Attention with Lightweight and Dynamic Convolutions" (Wu, et al, 2019) <https://openreview.net/pdf?id=SkVhlh09tX>`_. To use LightConv please set ``--encoder-conv-type lightweight --decoder-conv-type lightweight`` To use DynamicConv please set ``--encoder-conv-type dynamic --decoder-conv-type dynamic`` Args: encoder (LightConvEncoder): the encoder decoder (LightConvDecoder): the decoder The LightConv model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.lightconv_parser :prog: """ @classmethod def hub_models(cls): # fmt: off def moses_subword(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'subword_nmt', } return { 'lightconv.no_glu.iwslt14.de-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.lightconv.tar.gz'), 'dynamicconv.no_glu.iwslt14.de-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.dynamicconv.tar.gz'), 'lightconv.no_glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv.tar.gz'), 'dynamicconv.no_glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv.tar.gz'), 'lightconv.glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv-glu.tar.gz'), 'lightconv.glu.wmt17.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt17.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv-glu.tar.gz'), 'lightconv.glu.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.dynamicconv-glu.tar.gz'), 'lightconv.glu.wmt17.zh-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt17.zh-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.dynamicconv-glu.tar.gz'), } # fmt: on def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", 
help="dropout probability for attention weights", ) parser.add_argument( "--relu-dropout", type=float, metavar="D", help="dropout probability after ReLU in FFN", ) parser.add_argument( "--input-dropout", type=float, metavar="D", help="dropout probability of the inputs", ) parser.add_argument( "--encoder-embed-path", type=str, metavar="STR", help="path to pre-trained encoder embedding", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-conv-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="num encoder layers" ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads or LightConv/DynamicConv heads", ) parser.add_argument( "--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) parser.add_argument( "--encoder-learned-pos", action="store_true", help="use learned positional embeddings in the encoder", ) parser.add_argument( "--decoder-embed-path", type=str, metavar="STR", help="path to pre-trained decoder embedding", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-conv-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads or LightConv/DynamicConv heads", ) parser.add_argument( "--decoder-learned-pos", action="store_true", help="use learned positional embeddings in the decoder", ) parser.add_argument( "--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--share-decoder-input-output-embed", action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--share-all-embeddings", action="store_true", help="share encoder, decoder and output embeddings" " (requires shared dictionary and embed dim)", ) parser.add_argument( "--adaptive-softmax-cutoff", metavar="EXPR", help="comma separated list of adaptive softmax cutoff points. 
" "Must be used with adaptive_loss criterion", ), parser.add_argument( "--adaptive-softmax-dropout", type=float, metavar="D", help="sets adaptive softmax dropout for the tail projections", ) """LightConv and DynamicConv arguments""" parser.add_argument( "--encoder-kernel-size-list", type=lambda x: utils.eval_str_list(x, int), help='list of kernel size (default: "[3,7,15,31,31,31,31]")', ) parser.add_argument( "--decoder-kernel-size-list", type=lambda x: utils.eval_str_list(x, int), help='list of kernel size (default: "[3,7,15,31,31,31]")', ) parser.add_argument( "--encoder-glu", type=utils.eval_bool, help="glu after in proj" ) parser.add_argument( "--decoder-glu", type=utils.eval_bool, help="glu after in proj" ) parser.add_argument( "--encoder-conv-type", default="dynamic", type=str, choices=["dynamic", "lightweight"], help="type of convolution", ) parser.add_argument( "--decoder-conv-type", default="dynamic", type=str, choices=["dynamic", "lightweight"], help="type of convolution", ) parser.add_argument("--weight-softmax", default=True, type=utils.eval_bool) parser.add_argument( "--weight-dropout", type=float, metavar="D", help="dropout probability for conv weights", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not safe_hasattr(args, "max_source_positions"): args.max_source_positions = 1024 if not safe_hasattr(args, "max_target_positions"): args.max_target_positions = 1024 src_dict, tgt_dict = task.source_dictionary, task.target_dictionary def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise RuntimeError( "--share-all-embeddings requires a joined dictionary" ) if args.encoder_embed_dim != args.decoder_embed_dim: raise RuntimeError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise RuntimeError( "--share-all-embeddings not compatible with --decoder-embed-path" ) encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) encoder = LightConvEncoder(args, src_dict, encoder_embed_tokens) decoder = LightConvDecoder(args, tgt_dict, decoder_embed_tokens) return LightConvModel(encoder, decoder) class LightConvEncoder(FairseqEncoder): """ LightConv encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`LightConvEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = ( PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None ) self.layers = nn.ModuleList([]) self.layers.extend( [ LightConvEncoderLayer( args, kernel_size=args.encoder_kernel_size_list[i] ) for i in range(args.encoder_layers) ] ) self.register_buffer("version", torch.Tensor([2])) self.normalize = args.encoder_normalize_before if self.normalize: self.layer_norm = LayerNorm(embed_dim) def forward(self, src_tokens, **unused): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask) if self.normalize: x = self.layer_norm(x) return { "encoder_out": x, # T x B x C "encoder_padding_mask": encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, new_order): """ Reorder encoder output according to *new_order*. Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out["encoder_out"] is not None: encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( 1, new_order ) if encoder_out["encoder_padding_mask"] is not None: encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions) class LightConvDecoder(FairseqIncrementalDecoder): """ LightConv decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`LightConvDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs. 
Default: ``False`` """ def __init__( self, args, dictionary, embed_tokens, no_encoder_attn=False, final_norm=True ): super().__init__(dictionary) self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = ( Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None ) self.embed_positions = ( PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None ) self.layers = nn.ModuleList([]) self.layers.extend( [ LightConvDecoderLayer( args, no_encoder_attn, kernel_size=args.decoder_kernel_size_list[i] ) for i in range(args.decoder_layers) ] ) self.adaptive_softmax = None self.project_out_dim = ( Linear(embed_dim, output_embed_dim, bias=False) if embed_dim != output_embed_dim and not args.tie_adaptive_weights else None ) if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), output_embed_dim, utils.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter( torch.Tensor(len(dictionary), output_embed_dim) ) nn.init.normal_(self.embed_out, mean=0, std=output_embed_dim ** -0.5) self.register_buffer("version", torch.Tensor([2])) self.normalize = args.decoder_normalize_before and final_norm if self.normalize: self.layer_norm = LayerNorm(embed_dim) def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the last decoder layer's output of shape `(batch, tgt_len, vocab)` - the last decoder layer's attention weights of shape `(batch, tgt_len, src_len)` """ # embed positions positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out["encoder_out"] if encoder_out is not None else None, encoder_out["encoder_padding_mask"] if encoder_out is not None else None, incremental_state, ) inner_states.append(x) if self.normalize: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not 
None: x = self.project_out_dim(x) if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: x = F.linear(x, self.embed_tokens.weight) else: x = F.linear(x, self.embed_out) return x, {"attn": attn, "inner_states": inner_states} def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions) def buffered_future_mask(self, tensor): dim = tensor.size(0) if ( not hasattr(self, "_future_mask") or self._future_mask is None or self._future_mask.device != tensor.device ): self._future_mask = torch.triu( utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 ) if self._future_mask.size(0) < dim: self._future_mask = torch.triu( utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1 ) return self._future_mask[:dim, :dim] class LightConvEncoderLayer(nn.Module): """Encoder layer block. Args: args (argparse.Namespace): parsed command-line arguments kernel_size: kernel size of the convolution """ def __init__(self, args, kernel_size=0): super().__init__() self.embed_dim = args.encoder_embed_dim self.conv_dim = args.encoder_conv_dim padding_l = ( kernel_size // 2 if kernel_size % 2 == 1 else ((kernel_size - 1) // 2, kernel_size // 2) ) if args.encoder_glu: self.linear1 = Linear(self.embed_dim, 2 * self.conv_dim) self.act = nn.GLU() else: self.linear1 = Linear(self.embed_dim, self.conv_dim) self.act = None if args.encoder_conv_type == "lightweight": self.conv = LightweightConv( self.conv_dim, kernel_size, padding_l=padding_l, weight_softmax=args.weight_softmax, num_heads=args.encoder_attention_heads, weight_dropout=args.weight_dropout, ) elif args.encoder_conv_type == "dynamic": self.conv = DynamicConv( self.conv_dim, kernel_size, padding_l=padding_l, weight_softmax=args.weight_softmax, num_heads=args.encoder_attention_heads, weight_dropout=args.weight_dropout, ) else: raise NotImplementedError self.linear2 = Linear(self.conv_dim, self.embed_dim) self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) self.relu_dropout_module = FairseqDropout( args.relu_dropout, module_name=self.__class__.__name__ ) self.input_dropout_module = FairseqDropout( args.input_dropout, module_name=self.__class__.__name__ ) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.layer_norms = nn.ModuleList([LayerNorm(self.embed_dim) for _ in range(2)]) def forward(self, x, encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(0, x, before=True) x = self.input_dropout_module(x) x = self.linear1(x) if self.act is not None: x = self.act(x) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.transpose(0, 1).unsqueeze(2), 0) x = self.conv(x) x = self.linear2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(0, x, after=True) residual = x x = self.maybe_layer_norm(1, x, before=True) x = F.relu(self.fc1(x)) x = self.relu_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(1, x, after=True) return x def maybe_layer_norm(self, i, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return self.layer_norms[i](x) else: return x def extra_repr(self): return ( "dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}".format( self.dropout_module.p, self.relu_dropout_module.p, self.input_dropout_module.p, self.normalize_before, ) ) class LightConvDecoderLayer(nn.Module): """Decoder layer block. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs. Default: ``False`` kernel_size: kernel size of the convolution """ def __init__(self, args, no_encoder_attn=False, kernel_size=0): super().__init__() self.embed_dim = args.decoder_embed_dim self.conv_dim = args.decoder_conv_dim if args.decoder_glu: self.linear1 = Linear(self.embed_dim, 2 * self.conv_dim) self.act = nn.GLU() else: self.linear1 = Linear(self.embed_dim, self.conv_dim) self.act = None if args.decoder_conv_type == "lightweight": self.conv = LightweightConv( self.conv_dim, kernel_size, padding_l=kernel_size - 1, weight_softmax=args.weight_softmax, num_heads=args.decoder_attention_heads, weight_dropout=args.weight_dropout, ) elif args.decoder_conv_type == "dynamic": self.conv = DynamicConv( self.conv_dim, kernel_size, padding_l=kernel_size - 1, weight_softmax=args.weight_softmax, num_heads=args.decoder_attention_heads, weight_dropout=args.weight_dropout, ) else: raise NotImplementedError self.linear2 = Linear(self.conv_dim, self.embed_dim) self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) self.relu_dropout_module = FairseqDropout( args.relu_dropout, module_name=self.__class__.__name__ ) self.input_dropout_module = FairseqDropout( args.input_dropout, module_name=self.__class__.__name__ ) self.normalize_before = args.decoder_normalize_before self.conv_layer_norm = LayerNorm(self.embed_dim) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True, ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.need_attn = True def forward( self, x, encoder_out, encoder_padding_mask, incremental_state, prev_conv_state=None, prev_attn_state=None, conv_mask=None, conv_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.conv_layer_norm, x, before=True) if prev_conv_state is not None: if incremental_state is None: incremental_state = {} self.conv._set_input_buffer(incremental_state, prev_conv_state) x = self.input_dropout_module(x) x = self.linear1(x) if self.act is not None: x = self.act(x) x = self.conv(x, incremental_state=incremental_state) x = self.linear2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(self.conv_layer_norm, x, after=True) attn = None if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = F.relu(self.fc1(x)) x = self.relu_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def extra_repr(self): return ( "dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}".format( self.dropout_module.p, self.relu_dropout_module.p, self.input_dropout_module.p, self.normalize_before, ) ) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m @register_model_architecture("lightconv", "lightconv") def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 7) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", 
False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.relu_dropout = getattr(args, "relu_dropout", 0.0) args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.encoder_conv_dim = getattr(args, "encoder_conv_dim", args.encoder_embed_dim) args.decoder_conv_dim = getattr(args, "decoder_conv_dim", args.decoder_embed_dim) args.encoder_kernel_size_list = getattr( args, "encoder_kernel_size_list", [3, 7, 15, 31, 31, 31, 31] ) args.decoder_kernel_size_list = getattr( args, "decoder_kernel_size_list", [3, 7, 15, 31, 31, 31] ) if len(args.encoder_kernel_size_list) == 1: args.encoder_kernel_size_list = ( args.encoder_kernel_size_list * args.encoder_layers ) if len(args.decoder_kernel_size_list) == 1: args.decoder_kernel_size_list = ( args.decoder_kernel_size_list * args.decoder_layers ) assert ( len(args.encoder_kernel_size_list) == args.encoder_layers ), "encoder_kernel_size_list doesn't match encoder_layers" assert ( len(args.decoder_kernel_size_list) == args.decoder_layers ), "decoder_kernel_size_list doesn't match decoder_layers" args.encoder_glu = getattr(args, "encoder_glu", True) args.decoder_glu = getattr(args, "decoder_glu", True) args.input_dropout = getattr(args, "input_dropout", 0.1) args.weight_dropout = getattr(args, "weight_dropout", args.attention_dropout) @register_model_architecture("lightconv", "lightconv_iwslt_de_en") def lightconv_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.encoder_layers = getattr(args, "encoder_layers", 7) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.decoder_layers = getattr(args, "decoder_layers", 6) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.weight_dropout = getattr(args, "weight_dropout", 0.1) args.encoder_glu = getattr(args, "encoder_glu", False) args.decoder_glu = getattr(args, "decoder_glu", False) args.input_dropout = getattr(args, "input_dropout", 0.0) base_architecture(args) @register_model_architecture("lightconv", "lightconv_wmt_en_de") def lightconv_wmt_en_de(args): base_architecture(args) @register_model_architecture("lightconv", "lightconv_wmt_en_de_big") def lightconv_wmt_en_de_big(args): args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, 
"decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) base_architecture(args) @register_model_architecture("lightconv", "lightconv_wmt_en_fr_big") def lightconv_wmt_en_fr_big(args): args.dropout = getattr(args, "dropout", 0.1) lightconv_wmt_en_de_big(args) @register_model_architecture("lightconv", "lightconv_wmt_zh_en_big") def lightconv_wmt_zh_en_big(args): args.dropout = getattr(args, "dropout", 0.2) args.attention_dropout = getattr(args, "attention_dropout", 0.2) args.weight_dropout = getattr(args, "weight_dropout", 0.2) lightconv_wmt_en_de_big(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/lightconv.py
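One detail in the encoder/decoder layers above that is easy to miss: when --encoder-glu or --decoder-glu is set, linear1 projects to 2 * conv_dim and nn.GLU() halves that last dimension again, so the LightweightConv/DynamicConv module still receives conv_dim channels. A standalone shape check of that gating step, using torch only (the dimensions are just the lightconv defaults):

# Shape sketch of the GLU input projection used by LightConvEncoderLayer when
# args.encoder_glu is True: Linear(embed_dim, 2*conv_dim) followed by nn.GLU().
import torch
import torch.nn as nn

embed_dim, conv_dim = 512, 512
linear1 = nn.Linear(embed_dim, 2 * conv_dim)
act = nn.GLU()  # splits the last dimension in half and gates one half with the other

x = torch.randn(10, 2, embed_dim)  # (seq_len, batch, embed_dim)
y = act(linear1(x))
print(y.shape)  # torch.Size([10, 2, 512]) -> conv_dim channels for the convolution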
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq import utils from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.lightconv import Embedding, LightConvDecoder from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder @register_model("lightconv_lm") class LightConvLanguageModel(FairseqLanguageModel): def __init__(self, decoder): super().__init__(decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--dropout", default=0.1, type=float, metavar="D", help="dropout probability", ) parser.add_argument( "--attention-dropout", default=0.0, type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--relu-dropout", default=0.0, type=float, metavar="D", help="dropout probability after ReLU in FFN", ) parser.add_argument( "--input-dropout", type=float, metavar="D", help="dropout probability of the inputs", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-output-dim", type=int, metavar="N", help="decoder output dimension", ) parser.add_argument( "--decoder-input-dim", type=int, metavar="N", help="decoder input dimension" ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads or LightConv/DynamicConv heads", ) parser.add_argument( "--decoder-normalize-before", default=False, action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--adaptive-softmax-cutoff", metavar="EXPR", help="comma separated list of adaptive softmax cutoff points. 
" "Must be used with adaptive_loss criterion", ) parser.add_argument( "--adaptive-softmax-dropout", type=float, metavar="D", help="sets adaptive softmax dropout for the tail projections", ) parser.add_argument( "--adaptive-softmax-factor", type=float, metavar="N", help="adaptive input factor", ) parser.add_argument( "--no-token-positional-embeddings", default=False, action="store_true", help="if set, disables positional embeddings (outside self attention)", ) parser.add_argument( "--share-decoder-input-output-embed", default=False, action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--character-embeddings", default=False, action="store_true", help="if set, uses character embedding convolutions to produce token embeddings", ) parser.add_argument( "--character-filters", type=str, metavar="LIST", default="[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]", help="size of character embeddings", ) parser.add_argument( "--character-embedding-dim", type=int, metavar="N", default=4, help="size of character embeddings", ) parser.add_argument( "--char-embedder-highway-layers", type=int, metavar="N", default=2, help="number of highway layers for character token embeddder", ) parser.add_argument( "--adaptive-input", default=False, action="store_true", help="if set, uses adaptive input", ) parser.add_argument( "--adaptive-input-factor", type=float, metavar="N", help="adaptive input factor", ) parser.add_argument( "--adaptive-input-cutoff", metavar="EXPR", help="comma separated list of adaptive input cutoff points.", ) parser.add_argument( "--tie-adaptive-weights", action="store_true", help="if set, ties the weights of adaptive softmax and adaptive input", ) parser.add_argument( "--tie-adaptive-proj", action="store_true", help="if set, ties the projection weights of adaptive softmax and adaptive input", ) parser.add_argument( "--decoder-learned-pos", action="store_true", help="use learned positional embeddings in the decoder", ) """LightConv and DynamicConv arguments""" parser.add_argument( "--decoder-kernel-size-list", type=lambda x: utils.eval_str_list(x, int), help='list of kernel size (default: "[3,7,15,31,31,31]")', ) parser.add_argument( "--decoder-glu", type=utils.eval_bool, help="glu after in proj" ) parser.add_argument( "--decoder-conv-type", default="dynamic", type=str, choices=["dynamic", "lightweight"], help="type of convolution", ) parser.add_argument("--weight-softmax", default=True, type=utils.eval_bool) parser.add_argument( "--weight-dropout", type=float, metavar="D", help="dropout probability for conv weights", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_lm_architecture(args) if getattr(args, "max_source_positions", None) is None: args.max_source_positions = args.tokens_per_sample if getattr(args, "max_target_positions", None) is None: args.max_target_positions = args.tokens_per_sample if args.character_embeddings: embed_tokens = CharacterTokenEmbedder( task.dictionary, eval(args.character_filters), args.character_embedding_dim, args.decoder_embed_dim, args.char_embedder_highway_layers, ) elif args.adaptive_input: embed_tokens = AdaptiveInput( len(task.dictionary), task.dictionary.pad(), args.decoder_input_dim, args.adaptive_input_factor, args.decoder_embed_dim, utils.eval_str_list(args.adaptive_input_cutoff, type=int), ) else: embed_tokens = Embedding( len(task.dictionary), args.decoder_input_dim, task.dictionary.pad() ) if 
args.tie_adaptive_weights: assert args.adaptive_input assert args.adaptive_input_factor == args.adaptive_softmax_factor assert ( args.adaptive_softmax_cutoff == args.adaptive_input_cutoff ), "{} != {}".format( args.adaptive_softmax_cutoff, args.adaptive_input_cutoff ) assert args.decoder_input_dim == args.decoder_output_dim decoder = LightConvDecoder( args, task.output_dictionary, embed_tokens, no_encoder_attn=True, final_norm=False, ) return LightConvLanguageModel(decoder) @register_model_architecture("lightconv_lm", "lightconv_lm") def base_lm_architecture(args): args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.character_embeddings = getattr(args, "character_embeddings", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.decoder_conv_dim = getattr(args, "decoder_conv_dim", args.decoder_embed_dim) # The model training is not stable without this args.decoder_normalize_before = True args.adaptive_input = getattr(args, "adaptive_input", False) args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False) args.decoder_kernel_size_list = getattr( args, "decoder_kernel_size_list", [3, 7, 15, 31, 31, 31] ) if len(args.decoder_kernel_size_list) == 1: args.decoder_kernel_size_list = ( args.decoder_kernel_size_list * args.decoder_layers ) assert ( len(args.decoder_kernel_size_list) == args.decoder_layers ), "decoder_kernel_size_list doesn't match decoder_layers" args.decoder_glu = getattr(args, "decoder_glu", True) args.input_dropout = getattr(args, "input_dropout", 0.1) args.weight_dropout = getattr(args, "weight_dropout", args.attention_dropout) @register_model_architecture("lightconv_lm", "lightconv_lm_gbw") def lightconv_lm_gbw(args): args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) base_lm_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/lightconv_lm.py
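base_lm_architecture above accepts a single-element --decoder-kernel-size-list and broadcasts it to one kernel size per decoder layer before asserting the lengths match. A tiny sketch of that expansion logic in isolation (values chosen arbitrarily for illustration):

# Mirrors the kernel-size-list handling in base_lm_architecture (sketch only).
decoder_layers = 6
decoder_kernel_size_list = [31]  # e.g. --decoder-kernel-size-list "[31]"

if len(decoder_kernel_size_list) == 1:
    # one kernel size given: reuse it for every decoder layer
    decoder_kernel_size_list = decoder_kernel_size_list * decoder_layers

assert len(decoder_kernel_size_list) == decoder_layers
print(decoder_kernel_size_list)  # [31, 31, 31, 31, 31, 31]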
#!/usr/bin/env python3 """ This module has the EMA class used to store a copy of the exponentially decayed model params. Typical usage of EMA class involves initializing an object using an existing model (random or from a seed model) and setting the config like ema_decay, ema_start_update which determine how the EMA model is updated. After every update of the model i.e. at the end of the train_step, the EMA should be updated by passing the new model to the EMA.step function. The EMA model state dict can be stored in the extra state under the key of "ema" and dumped into a checkpoint and loaded. The EMA object can be passed to tasks by setting task.uses_ema property. EMA is a smoothed/ensemble model which might have better performance when used for inference or further fine-tuning. EMA class has a reverse function to load the EMA params into a model and use it like a regular model. """ import copy import logging import torch from fairseq import checkpoint_utils class EMA(object): """Exponential Moving Average of Fairseq Models EMA keeps a copy of the exponentially decayed model params. The set of params should include both gradient-descent and non-gradient descent params, such as batch mean/var and buffers. This is a modified implementation of the open source code in https://github.com/zhawe01/fairseq-gec.git, and internal source code in fbcode/mobile-vision/projects/classification_pytorch/lib/utils/model_ema.py. Similar to TF EMA. https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage. EMA provides a averaged and smoothed set of model weights, and has been shown to improve vision models. EMA class does all necessary functions to update, reload, or init EMA methods. EMA object is initialized from an arbitrary model. By default, it is stored in the same device (unless device specified at initialization) and with the same precision as the model (unless ema_fp32 is True). ema_fp32 is recommended. This stores the EMA parameters in fp32 only for the EMA update step, and is used at the default precision otherwise. EMA is usually enabled using EMAConfig with store_ema=True. Some important parameters to configure EMA are 1) ema_decay - The decay of EMA 2) ema_update_freq - EMA is updated every this many model updates. 3) ema_start_update - Start EMA update after this many model updates [default 0] Key methods: 1) step - One update of EMA using new model 2) restore - Update EMA from a state dict 3) reverse - Load EMA into a model 4) get_decay, _set_decay - Used to get or set the decay. Note _set_decay is called from step. 5) build_fp32_params - Used to initialize or update the fp32 copy of EMA params. Note this is enabled only when ema_fp32=True """ def __init__(self, model, config, device=None): """ @param model model to initialize the EMA with @param config EMAConfig object with configuration like ema_decay, ema_update_freq, ema_fp32 @param device If provided, copy EMA to this device (e.g. gpu). Otherwise EMA is in the same device as the model. 
""" self.decay = config.ema_decay self.model = copy.deepcopy(model) self.model.requires_grad_(False) self.config = config self.fp32_params = {} if self.config.ema_seed_model is not None: state = checkpoint_utils.load_ema_from_checkpoint(self.config.ema_seed_model) self.model.load_state_dict(state["model"], strict=True) if device is not None: logging.info(f"Copying EMA model to device {device}") self.model = self.model.to(device=device) if self.config.ema_fp32: self.build_fp32_params() self.update_freq_counter = 0 def get_model(self): return self.model def build_fp32_params(self, state_dict=None): """ Store a copy of the EMA params in fp32. If state dict is passed, the EMA params is copied from the provided state dict. Otherwise, it is copied from the current EMA model parameters. """ if not self.config.ema_fp32: raise RuntimeError( "build_fp32_params should not be called if ema_fp32=False. " "Use ema_fp32=True if this is really intended." ) if state_dict is None: state_dict = self.model.state_dict() def _to_float(t): return t.float() if torch.is_floating_point(t) else t for param_key in state_dict: if param_key in self.fp32_params: self.fp32_params[param_key].copy_(state_dict[param_key]) else: self.fp32_params[param_key] = _to_float(state_dict[param_key]) def restore(self, state_dict, build_fp32_params=False): """ Load data from a model spec into EMA model """ self.model.load_state_dict(state_dict, strict=False) if build_fp32_params: self.build_fp32_params(state_dict) def _set_decay(self, decay): self.decay = decay def get_decay(self): return self.decay def _step_internal(self, new_model, updates=None): """ One update of the EMA model based on new model weights """ decay = self.decay ema_state_dict = {} ema_params = self.fp32_params if self.config.ema_fp32 else self.model.state_dict() for key, param in new_model.state_dict().items(): try: ema_param = ema_params[key] except KeyError: ema_param = param.float().clone() if param.ndim == 1 else copy.deepcopy(param) if param.shape != ema_param.shape: raise ValueError( "incompatible tensor shapes between model param and ema param" + "{} vs. {}".format(param.shape, ema_param.shape) ) if "version" in key: # Do not decay a model.version pytorch param continue ema_param.mul_(decay) ema_param.add_(param.to(dtype=ema_param.dtype), alpha=1-decay) ema_state_dict[key] = ema_param self.restore(ema_state_dict, build_fp32_params=False) def step(self, new_model, updates=None): """ One update of EMA which is done every self.config.ema_update_freq updates of the model. @param updates The current number of model updates done. Decay is set of 0 if model updates < ema_start_update, which means the model will be simply copied over to the EMA. When model updates >= ema_start_updates, then EMA is updated with a decay of self.config.ema_decay. """ self._set_decay( 0 if updates is not None and updates < self.config.ema_start_update else self.config.ema_decay ) if updates is not None and self.config.ema_update_freq > 1: self.update_freq_counter += 1 if self.update_freq_counter >= self.config.ema_update_freq: self._step_internal(new_model, updates) self.update_freq_counter = 0 else: self._step_internal(new_model, updates) def reverse(self, model): """ Load the model parameters from EMA model. Useful for inference or fine-tuning from the EMA model. """ model.load_state_dict(self.model.state_dict(), strict=False) return model
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/ema/ema.py
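The core of _step_internal above is the standard exponential-moving-average update, ema = decay * ema + (1 - decay) * param, applied in place per tensor (with decay forced to 0 before ema_start_update so the model is simply copied over). A minimal torch sketch of that single-tensor update, with made-up values:

# Single-parameter sketch of the update performed in EMA._step_internal:
# ema_param.mul_(decay); ema_param.add_(param, alpha=1 - decay)
import torch

decay = 0.999
param = torch.tensor([1.0, 2.0, 3.0])      # current model weights
ema_param = torch.tensor([0.0, 0.0, 0.0])  # running EMA copy

ema_param.mul_(decay).add_(param, alpha=1 - decay)
print(ema_param)  # tensor([0.0010, 0.0020, 0.0030])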
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

from .ema import EMA


def build_ema(model, cfg, device):
    return EMA(model, cfg, device)


# automatically import any Python files in the models/ema/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
    if file.endswith(".py") and not file.startswith("_"):
        file_name = file[: file.find(".py")]
        importlib.import_module("fairseq.models.ema." + file_name)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/ema/__init__.py
#!/usr/bin/env python3 import logging import math from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerEncoderLayer from torch import Tensor logger = logging.getLogger(__name__) @register_model("convtransformer") class ConvTransformerModel(FairseqEncoderDecoderModel): """ Transformer-based Speech translation model from ESPNet-ST https://arxiv.org/abs/2004.10234 """ def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--input-feat-per-channel", type=int, metavar="N", help="encoder input dimension per input channel", ) parser.add_argument( "--activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--activation-dropout", "--relu-dropout", type=float, metavar="D", help="dropout probability after activation in FFN.", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="num encoder layers" ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads", ) parser.add_argument( "--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads", ) parser.add_argument( "--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--decoder-output-dim", type=int, metavar="N", help="decoder output dimension (extra linear layer if different from decoder embed dim)", ) parser.add_argument( "--share-decoder-input-output-embed", action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--layernorm-embedding", action="store_true", help="add layernorm to embedding", ) parser.add_argument( "--no-scale-embedding", action="store_true", help="if True, dont scale embeddings", ) parser.add_argument( "--load-pretrained-encoder-from", type=str, metavar="STR", help="model to take encoder weights from (for initialization)", ) parser.add_argument( "--load-pretrained-decoder-from", type=str, metavar="STR", help="model to take decoder weights from (for initialization)", ) parser.add_argument( "--conv-out-channels", type=int, metavar="INT", help="the number of output channels of 
conv layer", ) @classmethod def build_encoder(cls, args): encoder = ConvTransformerEncoder(args) if getattr(args, "load_pretrained_encoder_from", None): encoder = checkpoint_utils.load_pretrained_component_from_model( component=encoder, checkpoint=args.load_pretrained_encoder_from ) return encoder @classmethod def build_decoder(cls, args, task, embed_tokens): decoder = TransformerDecoderNoExtra(args, task.target_dictionary, embed_tokens) if getattr(args, "load_pretrained_decoder_from", None): decoder = checkpoint_utils.load_pretrained_component_from_model( component=decoder, checkpoint=args.load_pretrained_decoder_from ) return decoder @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) def build_embedding(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() return Embedding(num_embeddings, embed_dim, padding_idx) decoder_embed_tokens = build_embedding( task.target_dictionary, args.decoder_embed_dim ) encoder = cls.build_encoder(args) decoder = cls.build_decoder(args, task, decoder_embed_tokens) return cls(encoder, decoder) @staticmethod @torch.jit.unused def set_batch_first(lprobs): lprobs.batch_first = True def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): # net_output['encoder_out'] is a (B, T, D) tensor lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample) if self.training: self.set_batch_first(lprobs) return lprobs def output_layout(self): return "BTD" """ The forward method inherited from the base class has a **kwargs argument in its input, which is not supported in torchscript. This method overrites the forward method definition without **kwargs. 
""" def forward(self, src_tokens, src_lengths, prev_output_tokens): encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths) decoder_out = self.decoder( prev_output_tokens=prev_output_tokens, encoder_out=encoder_out ) return decoder_out class ConvTransformerEncoder(FairseqEncoder): """Conv + Transformer encoder""" def __init__(self, args): """Construct an Encoder object.""" super().__init__(None) self.dropout = args.dropout self.embed_scale = ( 1.0 if args.no_scale_embedding else math.sqrt(args.encoder_embed_dim) ) self.padding_idx = 1 self.in_channels = 1 self.input_dim = args.input_feat_per_channel self.conv = torch.nn.Sequential( torch.nn.Conv2d(1, args.conv_out_channels, 3, stride=2, padding=3 // 2), torch.nn.ReLU(), torch.nn.Conv2d( args.conv_out_channels, args.conv_out_channels, 3, stride=2, padding=3 // 2, ), torch.nn.ReLU(), ) transformer_input_dim = self.infer_conv_output_dim( self.in_channels, self.input_dim, args.conv_out_channels ) self.out = torch.nn.Linear(transformer_input_dim, args.encoder_embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, args.encoder_embed_dim, self.padding_idx, learned=False, ) self.transformer_layers = nn.ModuleList([]) self.transformer_layers.extend( [TransformerEncoderLayer(args) for i in range(args.encoder_layers)] ) if args.encoder_normalize_before: self.layer_norm = LayerNorm(args.encoder_embed_dim) else: self.layer_norm = None def pooling_ratio(self): return 4 def infer_conv_output_dim(self, in_channels, input_dim, out_channels): sample_seq_len = 200 sample_bsz = 10 x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim) x = torch.nn.Conv2d(1, out_channels, 3, stride=2, padding=3 // 2)(x) x = torch.nn.Conv2d(out_channels, out_channels, 3, stride=2, padding=3 // 2)(x) x = x.transpose(1, 2) mb, seq = x.size()[:2] return x.contiguous().view(mb, seq, -1).size(-1) def forward(self, src_tokens, src_lengths): """Encode input sequence. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :return: position embedded tensor and mask :rtype Tuple[torch.Tensor, torch.Tensor]: """ bsz, max_seq_len, _ = src_tokens.size() x = ( src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim) .transpose(1, 2) .contiguous() ) x = self.conv(x) bsz, _, output_seq_len, _ = x.size() x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1) x = self.out(x) x = self.embed_scale * x subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5) input_len_0 = (src_lengths.float() / subsampling_factor).ceil().long() input_len_1 = x.size(0) * torch.ones([src_lengths.size(0)]).long().to( input_len_0.device ) input_lengths = torch.min(input_len_0, input_len_1) encoder_padding_mask = lengths_to_padding_mask(input_lengths) positions = self.embed_positions(encoder_padding_mask).transpose(0, 1) x += positions x = F.dropout(x, p=self.dropout, training=self.training) for layer in self.transformer_layers: x = layer(x, encoder_padding_mask) if not encoder_padding_mask.any(): maybe_encoder_padding_mask = None else: maybe_encoder_padding_mask = encoder_padding_mask return { "encoder_out": [x], "encoder_padding_mask": [maybe_encoder_padding_mask] if maybe_encoder_padding_mask is not None else [], "encoder_embedding": [], "encoder_states": [], "src_tokens": [], "src_lengths": [], } @torch.jit.export def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)] if len(encoder_out["encoder_padding_mask"]) == 0: new_encoder_padding_mask = [] else: new_encoder_padding_mask = [ (encoder_out["encoder_padding_mask"][0]).index_select(0, new_order) ] if len(encoder_out["encoder_embedding"]) == 0: new_encoder_embedding = [] else: new_encoder_embedding = [ (encoder_out["encoder_embedding"][0]).index_select(0, new_order) ] encoder_states = encoder_out["encoder_states"] if len(encoder_states) > 0: for idx, state in enumerate(encoder_states): encoder_states[idx] = state.index_select(1, new_order) return { "encoder_out": new_encoder_out, "encoder_padding_mask": new_encoder_padding_mask, "encoder_embedding": new_encoder_embedding, "encoder_states": encoder_states, "src_tokens": [], "src_lengths": [], } class TransformerDecoderNoExtra(TransformerDecoder): def extract_features( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): # call scriptable method from parent class x, _ = self.extract_features_scriptable( prev_output_tokens, encoder_out, incremental_state, full_context_alignment, alignment_layer, alignment_heads, ) return x, None @register_model_architecture(model_name="convtransformer", arch_name="convtransformer") def base_architecture(args): args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) 
    args.max_source_positions = getattr(args, "max_source_positions", 3000)
    args.max_target_positions = getattr(args, "max_target_positions", 1024)
    args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
    args.conv_out_channels = getattr(args, "conv_out_channels", args.encoder_embed_dim)


@register_model_architecture("convtransformer", "convtransformer_espnet")
def convtransformer_espnet(args):
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
    args.encoder_layers = getattr(args, "encoder_layers", 12)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/speech_to_text/convtransformer.py
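The two stride-2 convolutions in ConvTransformerEncoder above cut the time axis roughly by four (hence pooling_ratio() returning 4), and infer_conv_output_dim discovers the flattened feature width by pushing a dummy batch through the same convolutions. A minimal standalone sketch of that probing trick, assuming 80 input features and 256 conv output channels (illustrative values only, not taken from any config):

import torch


def probe_conv_output_dim(in_channels: int, input_dim: int, out_channels: int) -> int:
    # Mirror of the dummy-forward trick in infer_conv_output_dim: run a random
    # (B, C, T, feat) tensor through two stride-2, kernel-3, padding-1 convs
    # and read off the flattened per-timestep feature size.
    x = torch.randn(10, in_channels, 200, input_dim)
    x = torch.nn.Conv2d(in_channels, out_channels, 3, stride=2, padding=1)(x)
    x = torch.nn.Conv2d(out_channels, out_channels, 3, stride=2, padding=1)(x)
    x = x.transpose(1, 2)  # (B, T', C, feat')
    bsz, seq = x.size()[:2]
    return x.contiguous().view(bsz, seq, -1).size(-1)


if __name__ == "__main__":
    # Illustrative: 80-dim filterbank features, one input channel, 256 conv channels.
    print(probe_conv_output_dim(1, 80, 256))  # 256 channels * 20 remaining bins = 5120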
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import copy from typing import Dict, List, Optional, Tuple from fairseq import utils, checkpoint_utils from fairseq.models import (FairseqEncoderDecoderModel, FairseqEncoder, register_model, register_model_architecture) from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.models.wav2vec import Wav2VecEncoder from fairseq.modules.layer_norm import LayerNorm from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.utils import safe_hasattr from torch import Tensor import torch.nn as nn logger = logging.getLogger(__name__) class Conv1dAdaptor(nn.Module): def __init__(self, in_dim, out_dim, n_layers=3, kernel_size=3, stride=2, add_layernorm=False): super().__init__() self.layers = nn.ModuleList( nn.Conv1d(in_dim if i == 0 else out_dim, out_dim * 2, kernel_size, stride=stride, padding=kernel_size // 2) for i in range(n_layers) ) self.layernorms = None if add_layernorm: self.layernorms = nn.ModuleList(LayerNorm(out_dim) for _ in range(n_layers)) self.stride = stride @classmethod def add_args(cls, parser): parser.add_argument("--adaptor-n-layers", type=int) parser.add_argument("--adaptor-kernel-size", type=int) parser.add_argument("--adaptor-stride", type=int) parser.add_argument("--adaptor-layernorm", action='store_true') def get_out_seq_lens_tensor(self, in_seq_lens_tensor): out = in_seq_lens_tensor.clone() for _ in self.layers: out = ((out.float() - 1) / self.stride + 1).floor().long() return out def forward(self, x, padding_mask): # T x B x C -> B x C x T x = x.transpose(0, 1).transpose(1, 2) for i, layer in enumerate(self.layers): x = nn.functional.glu(layer(x), dim=1) if self.layernorms is not None: x = self.layernorms[i](x.transpose(1, 2)).transpose(1, 2) # B x C x T -> T x B x C x = x.transpose(1, 2).transpose(0, 1) if padding_mask is None: out_padding_mask = None else: out_lengths = self.get_out_seq_lens_tensor((~padding_mask).sum(1)) out_padding_mask = lengths_to_padding_mask(out_lengths) return x, out_padding_mask def add_wav2vec_asr_args(parser): parser.add_argument("--w2v-path", help="path to wav2vec 2.0 model") parser.add_argument( "--no-pretrained-weights", action="store_true", help="if true, does not load pretrained weights", ) parser.add_argument( "--dropout-input", type=float, metavar="D", help="dropout to apply to the input (after feat extr)", ) parser.add_argument( "--final-dropout", type=float, metavar="D", help="dropout after transformer and before final projection", ) parser.add_argument( "--apply-mask", action="store_true", help="apply masking during fine-tuning" ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability inside wav2vec 2.0 model", ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights inside wav2vec 2.0 model", ) parser.add_argument( "--activation-dropout", "--relu-dropout", type=float, metavar="D", help="dropout probability after activation in FFN inside wav2vec 2.0 model", ) parser.add_argument( "--mask-length", type=int, help="repeat the mask indices multiple times" ) parser.add_argument( "--mask-prob", type=float, help="probability of replacing a token with mask" ) parser.add_argument( "--mask-selection", type=str, choices=["static", "uniform", "normal", "poisson"], help="how to choose masks", ) 
parser.add_argument( "--mask-other", type=float, help="stdev of the mask length in case of 'normal' selection strategy", ) parser.add_argument( "--no-mask-overlap", action="store_true", help="whether to allow masks to overlap", ) parser.add_argument( "--mask-channel-length", type=int, help="repeat the mask indices multiple times" ) parser.add_argument( "--mask-channel-prob", type=float, help="probability of replacing a token with mask", ) parser.add_argument( "--mask-channel-selection", type=str, choices=["static", "uniform", "normal", "poisson"], help="how to choose masks", ) parser.add_argument( "--mask-channel-other", type=float, help="stdev of the mask length in case of 'normal' selection strategy", ) parser.add_argument( "--no-mask-channel-overlap", action="store_true", help="whether to allow masks to overlap", ) parser.add_argument( "--freeze-finetune-updates", default=0, type=int, help="dont finetune wav2vec for this many updates", ) parser.add_argument( "--feature-grad-mult", default=None, type=float, help="reset feature grad mult in wav2vec 2.0 to this", ) parser.add_argument( "--layerdrop", default=0.0, type=float, help="probability of dropping a layer in wav2vec 2.0", ) parser.add_argument("--w2v-args", default=None) class Wav2VecEncoderWithAdaptor(FairseqEncoder): def __init__(self, args): super().__init__(None) self.w2v_encoder = Wav2VecEncoder(args) encoder_out_dim = self.w2v_encoder.w2v_model.encoder.embedding_dim # Projection + 8x shrinking self.adaptor = Conv1dAdaptor( encoder_out_dim, args.decoder_embed_dim, n_layers=args.adaptor_n_layers, kernel_size=args.adaptor_kernel_size, stride=args.adaptor_stride, add_layernorm=args.adaptor_layernorm ) for k, p in self.w2v_encoder.w2v_model.named_parameters(): # Freeze pretrained models by default if safe_hasattr(args, 'finetune_w2v_params') and XMTransformerModel.finetune_params( args.finetune_w2v_params, k): p.requires_grad = True else: p.requires_grad = False @classmethod def add_args(cls, parser): add_wav2vec_asr_args(parser) parser.add_argument( "--normalize", action="store_true", help="if set, normalizes input to have 0 mean and unit variance", ) parser.add_argument("--finetune-w2v-params", type=str, metavar="STR", help="comma-separated param strings to finetune.") Conv1dAdaptor.add_args(parser) def forward(self, src_tokens, src_lengths=None, **kwargs): padding_mask = lengths_to_padding_mask(src_lengths) out = self.w2v_encoder.forward(src_tokens, padding_mask, tbc=True) x = out["encoder_out"] enc_padding_mask = None if out["encoder_padding_mask"] is not None: enc_padding_mask = out["encoder_padding_mask"].transpose(0, 1) # T X B --> B X T x, enc_padding_mask = self.adaptor(x, enc_padding_mask) return { "encoder_out": [x], # T x B x C "encoder_padding_mask": [enc_padding_mask] if enc_padding_mask.any() else [], # B x T "encoder_embedding": [], # B x T x C "encoder_states": [], # List[T x B x C] "src_tokens": [], "src_lengths": [], } def reorder_encoder_out(self, encoder_out, new_order): new_encoder_out = ( [] if len(encoder_out["encoder_out"]) == 0 else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]] ) new_encoder_padding_mask = ( [] if len(encoder_out["encoder_padding_mask"]) == 0 else [x.index_select(0, new_order) for x in encoder_out["encoder_padding_mask"]] ) new_encoder_embedding = ( [] if len(encoder_out["encoder_embedding"]) == 0 else [x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]] ) encoder_states = encoder_out["encoder_states"] if len(encoder_states) > 0: for idx, state in 
enumerate(encoder_states): encoder_states[idx] = state.index_select(1, new_order) return { "encoder_out": new_encoder_out, # T x B x C "encoder_padding_mask": new_encoder_padding_mask, # B x T "encoder_embedding": new_encoder_embedding, # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], # B x T "src_lengths": [], # B x 1 } def add_decoder_args(parser): parser.add_argument("--activation-fn", type=str, default='relu', choices=utils.get_available_activation_fns(), help="activation function to use") parser.add_argument("--decoder-dropout", type=float, metavar="D", help="dropout probability") parser.add_argument("--decoder-attention-dropout", type=float, metavar="D", help="dropout probability for attention weights") parser.add_argument("--decoder-activation-dropout", type=float, metavar="D", help="dropout probability after activation in FFN.") parser.add_argument("--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension") parser.add_argument("--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN") parser.add_argument("--decoder-layers", type=int, metavar="N", help="num decoder layers") parser.add_argument("--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads") parser.add_argument("--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block") parser.add_argument("--layernorm-embedding", action="store_true", help="add layernorm to embedding") parser.add_argument("--no-scale-embedding", action="store_true", help="if True, dont scale embeddings") parser.add_argument( "--load-pretrained-decoder-from", type=str, metavar="STR", help="model to take decoder weights from (for initialization)" ) parser.add_argument("--finetune-decoder-params", type=str, metavar="STR", help="comma-separated param strings to finetune.") parser.add_argument("--checkpoint-activations", action="store_true") @register_model("xm_transformer") class XMTransformerModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def add_args(cls, parser): """Add model-specific arguments to the parser.""" Wav2VecEncoderWithAdaptor.add_args(parser) add_decoder_args(parser) @classmethod def build_encoder(cls, args): _args = copy.deepcopy(args) state = checkpoint_utils.load_checkpoint_to_cpu(args.w2v_path) if state.get("cfg") is not None: encoder_embed_dim = state["cfg"]._content["model"]["encoder_embed_dim"] elif state.get("args") is not None: encoder_embed_dim = state["args"].encoder_embed_dim else: raise ValueError(f"Invalid config in {args.w2v_path}") _args.decoder_embed_dim = encoder_embed_dim encoder = Wav2VecEncoderWithAdaptor(_args) return encoder @classmethod def build_decoder(cls, args, task, embed_tokens): _args = copy.deepcopy(args) _args.dropout = args.decoder_dropout _args.attention_dropout = args.decoder_attention_dropout _args.activation_dropout = args.decoder_activation_dropout _args.max_target_positions = 1024 decoder = TransformerDecoder(_args, task.target_dictionary, embed_tokens) if getattr(args, "load_pretrained_decoder_from", None): decoder = checkpoint_utils.load_pretrained_component_from_model( component=decoder, checkpoint=args.load_pretrained_decoder_from ) for k, p in decoder.named_parameters(): # Freeze pretrained models by default if safe_hasattr(args, 'finetune_decoder_params') and XMTransformerModel.finetune_params( args.finetune_decoder_params, k): p.requires_grad = True else: p.requires_grad 
= False return decoder @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) def build_embedding(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() return Embedding(num_embeddings, embed_dim, padding_idx) decoder_embed_tokens = build_embedding(task.target_dictionary, args.decoder_embed_dim) encoder = cls.build_encoder(args) decoder = cls.build_decoder(args, task, decoder_embed_tokens) return cls(encoder, decoder) def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): # net_output['encoder_out'] is a (B, T, D) tensor lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample) lprobs.batch_first = True return lprobs def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): """ The forward method inherited from the base class has a **kwargs argument in its input, which is not supported in torchscript. This method overrites the forward method definition without **kwargs. """ encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths, **kwargs) decoder_out = self.decoder(prev_output_tokens=prev_output_tokens, encoder_out=encoder_out) return decoder_out def upgrade_state_dict(self, state_dict): for k, _ in state_dict.items(): if 'adaptor.layers' in state_dict: print(k) new = k.replace('adaptor.layers', 'adaptor_layers') state_dict[new] = state_dict[k] del state_dict[k] @staticmethod def finetune_params(finetune_params, param_name): if finetune_params == "all": return True finetune_params_list = finetune_params.split(",") for finetune_param in finetune_params_list: if finetune_param in param_name: return True return False def set_default_w2v_encoder_args(args): args.no_pretrained_weights = getattr(args, "no_pretrained_weights", False) args.dropout_input = getattr(args, "dropout_input", 0) args.final_dropout = getattr(args, "final_dropout", 0) args.apply_mask = getattr(args, "apply_mask", False) args.dropout = getattr(args, "dropout", 0) args.attention_dropout = getattr(args, "attention_dropout", 0) args.activation_dropout = getattr(args, "activation_dropout", 0) args.mask_length = getattr(args, "mask_length", 10) args.mask_prob = getattr(args, "mask_prob", 0.5) args.mask_selection = getattr(args, "mask_selection", "static") args.mask_other = getattr(args, "mask_other", 0) args.no_mask_overlap = getattr(args, "no_mask_overlap", False) args.mask_channel_length = getattr(args, "mask_channel_length", 10) args.mask_channel_prob = getattr(args, "mask_channel_prob", 0.5) args.mask_channel_before = getattr(args, "mask_channel_before", False) args.mask_channel_selection = getattr(args, "mask_channel_selection", "static") args.mask_channel_other = getattr(args, "mask_channel_other", 0) args.no_mask_channel_overlap = getattr(args, "no_mask_channel_overlap", False) args.freeze_finetune_updates = getattr(args, "freeze_finetune_updates", 0) args.feature_grad_mult = 0.1 args.layerdrop = getattr(args, "layerdrop", 0.0) args.normalize = getattr(args, "normalize", False) def set_default_adaptor_args(args): args.adaptor_n_layers = getattr(args, "adaptor_n_layers", 3) args.adaptor_kernel_size = getattr(args, "adaptor_kernel_size", 3) args.adaptor_stride = getattr(args, "adaptor_stride", 2) args.adaptor_layernorm = getattr(args, "adaptor_layernorm", False) def set_default_mbart_decoder_args(args): 
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4 * 1024) args.decoder_layers = getattr(args, 'decoder_layers', 12) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', True) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.adaptive_input = getattr(args, "adaptive_input", False) args.decoder_attention_dropout = getattr(args, 'decoder_attention_dropout', 0.) args.decoder_activation_dropout = getattr(args, 'decoder_activation_dropout', 0.) args.decoder_dropout = getattr(args, 'decoder_dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr( args, 'share_decoder_input_output_embed', True ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) args.no_scale_embedding = getattr(args, 'no_scale_embedding', False) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) args.layernorm_embedding = getattr(args, 'layernorm_embedding', True) args.activation_fn = getattr(args, 'activation_fn', 'gelu') args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh') args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0) args.checkpoint_activations = getattr(args, "checkpoint_activations", False) @register_model_architecture(model_name="xm_transformer", arch_name="xm_transformer") def base_architecture(args): set_default_w2v_encoder_args(args) set_default_adaptor_args(args) set_default_mbart_decoder_args(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/speech_to_text/xm_transformer.py
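One detail worth flagging in XMTransformerModel.upgrade_state_dict above: the condition tests 'adaptor.layers' in state_dict (membership among the dict's keys) rather than in k, and it deletes entries from the dict while iterating over it. If the intent is to rename adaptor.layers.* parameters to adaptor_layers.*, a defensive sketch of that rename (a hypothetical helper, not fairseq's own fix) could look like:

from typing import Dict

from torch import Tensor


def rename_adaptor_keys(state_dict: Dict[str, Tensor]) -> Dict[str, Tensor]:
    # Hypothetical helper: build a fresh dict instead of mutating the one we
    # iterate over, and only touch keys that actually contain the substring.
    renamed = {}
    for k, v in state_dict.items():
        if "adaptor.layers" in k:
            renamed[k.replace("adaptor.layers", "adaptor_layers")] = v
        else:
            renamed[k] = v
    return renamed

Building a fresh dict sidesteps the mutation-during-iteration issue while leaving all other parameters untouched.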
#!/usr/bin/env python3 from ast import literal_eval from typing import List, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) @register_model("s2t_berard") class BerardModel(FairseqEncoderDecoderModel): """Implementation of a model similar to https://arxiv.org/abs/1802.04200 Paper title: End-to-End Automatic Speech Translation of Audiobooks An implementation is available in tensorflow at https://github.com/eske/seq2seq Relevant files in this implementation are the config (https://github.com/eske/seq2seq/blob/master/config/LibriSpeech/AST.yaml) and the model code (https://github.com/eske/seq2seq/blob/master/translate/models.py). The encoder and decoder try to be close to the original implementation. The attention is an MLP as in Bahdanau et al. (https://arxiv.org/abs/1409.0473). There is no state initialization by averaging the encoder outputs. """ def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): parser.add_argument( "--input-layers", type=str, metavar="EXPR", help="List of linear layer dimensions. These " "layers are applied to the input features and " "are followed by tanh and possibly dropout.", ) parser.add_argument( "--dropout", type=float, metavar="D", help="Dropout probability to use in the encoder/decoder. " "Note that this parameters control dropout in various places, " "there is no fine-grained control for dropout for embeddings " "vs LSTM layers for example.", ) parser.add_argument( "--in-channels", type=int, metavar="N", help="Number of encoder input channels. " "Typically value is 1.", ) parser.add_argument( "--conv-layers", type=str, metavar="EXPR", help="List of conv layers " "(format: (channels, kernel, stride)).", ) parser.add_argument( "--num-blstm-layers", type=int, metavar="N", help="Number of encoder bi-LSTM layers.", ) parser.add_argument( "--lstm-size", type=int, metavar="N", help="LSTM hidden size." 
) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="Embedding dimension of the decoder target tokens.", ) parser.add_argument( "--decoder-hidden-dim", type=int, metavar="N", help="Decoder LSTM hidden dimension.", ) parser.add_argument( "--decoder-num-layers", type=int, metavar="N", help="Number of decoder LSTM layers.", ) parser.add_argument( "--attention-dim", type=int, metavar="N", help="Hidden layer dimension in MLP attention.", ) parser.add_argument( "--output-layer-dim", type=int, metavar="N", help="Hidden layer dim for linear layer prior to output projection.", ) parser.add_argument( "--load-pretrained-encoder-from", type=str, metavar="STR", help="model to take encoder weights from (for initialization)", ) parser.add_argument( "--load-pretrained-decoder-from", type=str, metavar="STR", help="model to take decoder weights from (for initialization)", ) @classmethod def build_encoder(cls, args, task): encoder = BerardEncoder( input_layers=literal_eval(args.input_layers), conv_layers=literal_eval(args.conv_layers), in_channels=args.input_channels, input_feat_per_channel=args.input_feat_per_channel, num_blstm_layers=args.num_blstm_layers, lstm_size=args.lstm_size, dropout=args.dropout, ) if getattr(args, "load_pretrained_encoder_from", None): encoder = checkpoint_utils.load_pretrained_component_from_model( component=encoder, checkpoint=args.load_pretrained_encoder_from ) return encoder @classmethod def build_decoder(cls, args, task): decoder = LSTMDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, num_layers=args.decoder_num_layers, hidden_size=args.decoder_hidden_dim, dropout=args.dropout, encoder_output_dim=2 * args.lstm_size, # bidirectional attention_dim=args.attention_dim, output_layer_dim=args.output_layer_dim, ) if getattr(args, "load_pretrained_decoder_from", None): decoder = checkpoint_utils.load_pretrained_component_from_model( component=decoder, checkpoint=args.load_pretrained_decoder_from ) return decoder @classmethod def build_model(cls, args, task): """Build a new model instance.""" encoder = cls.build_encoder(args, task) decoder = cls.build_decoder(args, task) return cls(encoder, decoder) def get_normalized_probs(self, net_output, log_probs, sample=None): # net_output['encoder_out'] is a (B, T, D) tensor lprobs = super().get_normalized_probs(net_output, log_probs, sample) # lprobs is a (B, T, D) tensor lprobs.batch_first = True return lprobs class BerardEncoder(FairseqEncoder): def __init__( self, input_layers: List[int], conv_layers: List[Tuple[int]], in_channels: int, input_feat_per_channel: int, num_blstm_layers: int, lstm_size: int, dropout: float, ): """ Args: input_layers: list of linear layer dimensions. These layers are applied to the input features and are followed by tanh and possibly dropout. conv_layers: list of conv2d layer configurations. A configuration is a tuple (out_channels, conv_kernel_size, stride). in_channels: number of input channels. input_feat_per_channel: number of input features per channel. These are speech features, typically 40 or 80. num_blstm_layers: number of bidirectional LSTM layers. lstm_size: size of the LSTM hidden (and cell) size. dropout: dropout probability. Dropout can be applied after the linear layers and LSTM layers but not to the convolutional layers. 
""" super().__init__(None) self.input_layers = nn.ModuleList() in_features = input_feat_per_channel for out_features in input_layers: if dropout > 0: self.input_layers.append( nn.Sequential( nn.Linear(in_features, out_features), nn.Dropout(p=dropout) ) ) else: self.input_layers.append(nn.Linear(in_features, out_features)) in_features = out_features self.in_channels = in_channels self.input_dim = input_feat_per_channel self.conv_kernel_sizes_and_strides = [] self.conv_layers = nn.ModuleList() lstm_input_dim = input_layers[-1] for conv_layer in conv_layers: out_channels, conv_kernel_size, conv_stride = conv_layer self.conv_layers.append( nn.Conv2d( in_channels, out_channels, conv_kernel_size, stride=conv_stride, padding=conv_kernel_size // 2, ) ) self.conv_kernel_sizes_and_strides.append((conv_kernel_size, conv_stride)) in_channels = out_channels lstm_input_dim //= conv_stride lstm_input_dim *= conv_layers[-1][0] self.lstm_size = lstm_size self.num_blstm_layers = num_blstm_layers self.lstm = nn.LSTM( input_size=lstm_input_dim, hidden_size=lstm_size, num_layers=num_blstm_layers, dropout=dropout, bidirectional=True, ) self.output_dim = 2 * lstm_size # bidirectional if dropout > 0: self.dropout = nn.Dropout(p=dropout) else: self.dropout = None def forward(self, src_tokens, src_lengths=None, **kwargs): """ Args src_tokens: padded tensor (B, T, C * feat) src_lengths: tensor of original lengths of input utterances (B,) """ bsz, max_seq_len, _ = src_tokens.size() # (B, C, T, feat) x = ( src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim) .transpose(1, 2) .contiguous() ) for input_layer in self.input_layers: x = input_layer(x) x = torch.tanh(x) for conv_layer in self.conv_layers: x = conv_layer(x) bsz, _, output_seq_len, _ = x.size() # (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> # (T, B, C * feat) x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1) input_lengths = src_lengths.clone() for k, s in self.conv_kernel_sizes_and_strides: p = k // 2 input_lengths = (input_lengths.float() + 2 * p - k) / s + 1 input_lengths = input_lengths.floor().long() packed_x = nn.utils.rnn.pack_padded_sequence(x, input_lengths) h0 = x.new(2 * self.num_blstm_layers, bsz, self.lstm_size).zero_() c0 = x.new(2 * self.num_blstm_layers, bsz, self.lstm_size).zero_() packed_outs, _ = self.lstm(packed_x, (h0, c0)) # unpack outputs and apply dropout x, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_outs) if self.dropout is not None: x = self.dropout(x) encoder_padding_mask = ( lengths_to_padding_mask(output_lengths).to(src_tokens.device).t() ) return { "encoder_out": x, # (T, B, C) "encoder_padding_mask": encoder_padding_mask, # (T, B) } def reorder_encoder_out(self, encoder_out, new_order): encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( 1, new_order ) encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(1, new_order) return encoder_out class MLPAttention(nn.Module): """The original attention from Badhanau et al. (2014) https://arxiv.org/abs/1409.0473, based on a Multi-Layer Perceptron. 
The attention score between position i in the encoder and position j in the decoder is: alpha_ij = V_a * tanh(W_ae * enc_i + W_ad * dec_j + b_a) """ def __init__(self, decoder_hidden_state_dim, context_dim, attention_dim): super().__init__() self.context_dim = context_dim self.attention_dim = attention_dim # W_ae and b_a self.encoder_proj = nn.Linear(context_dim, self.attention_dim, bias=True) # W_ad self.decoder_proj = nn.Linear( decoder_hidden_state_dim, self.attention_dim, bias=False ) # V_a self.to_scores = nn.Linear(self.attention_dim, 1, bias=False) def forward(self, decoder_state, source_hids, encoder_padding_mask): """The expected input dimensions are: decoder_state: bsz x decoder_hidden_state_dim source_hids: src_len x bsz x context_dim encoder_padding_mask: src_len x bsz """ src_len, bsz, _ = source_hids.size() # (src_len*bsz) x context_dim (to feed through linear) flat_source_hids = source_hids.view(-1, self.context_dim) # (src_len*bsz) x attention_dim encoder_component = self.encoder_proj(flat_source_hids) # src_len x bsz x attention_dim encoder_component = encoder_component.view(src_len, bsz, self.attention_dim) # 1 x bsz x attention_dim decoder_component = self.decoder_proj(decoder_state).unsqueeze(0) # Sum with broadcasting and apply the non linearity # src_len x bsz x attention_dim hidden_att = torch.tanh( (decoder_component + encoder_component).view(-1, self.attention_dim) ) # Project onto the reals to get attentions scores (src_len x bsz) attn_scores = self.to_scores(hidden_att).view(src_len, bsz) # Mask + softmax (src_len x bsz) if encoder_padding_mask is not None: attn_scores = ( attn_scores.float() .masked_fill_(encoder_padding_mask, float("-inf")) .type_as(attn_scores) ) # FP16 support: cast to float and back # srclen x bsz normalized_masked_attn_scores = F.softmax(attn_scores, dim=0) # Sum weighted sources (bsz x context_dim) attn_weighted_context = ( source_hids * normalized_masked_attn_scores.unsqueeze(2) ).sum(dim=0) return attn_weighted_context, normalized_masked_attn_scores class LSTMDecoder(FairseqIncrementalDecoder): def __init__( self, dictionary, embed_dim, num_layers, hidden_size, dropout, encoder_output_dim, attention_dim, output_layer_dim, ): """ Args: dictionary: target text dictionary. embed_dim: embedding dimension for target tokens. num_layers: number of LSTM layers. hidden_size: hidden size for LSTM layers. dropout: dropout probability. Dropout can be applied to the embeddings, the LSTM layers, and the context vector. encoder_output_dim: encoder output dimension (hidden size of encoder LSTM). attention_dim: attention dimension for MLP attention. output_layer_dim: size of the linear layer prior to output projection. 
""" super().__init__(dictionary) self.num_layers = num_layers self.hidden_size = hidden_size num_embeddings = len(dictionary) padding_idx = dictionary.pad() self.embed_tokens = nn.Embedding(num_embeddings, embed_dim, padding_idx) if dropout > 0: self.dropout = nn.Dropout(p=dropout) else: self.dropout = None self.layers = nn.ModuleList() for layer_id in range(num_layers): input_size = embed_dim if layer_id == 0 else encoder_output_dim self.layers.append( nn.LSTMCell(input_size=input_size, hidden_size=hidden_size) ) self.context_dim = encoder_output_dim self.attention = MLPAttention( decoder_hidden_state_dim=hidden_size, context_dim=encoder_output_dim, attention_dim=attention_dim, ) self.deep_output_layer = nn.Linear( hidden_size + encoder_output_dim + embed_dim, output_layer_dim ) self.output_projection = nn.Linear(output_layer_dim, num_embeddings) def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs ): encoder_padding_mask = encoder_out["encoder_padding_mask"] encoder_outs = encoder_out["encoder_out"] if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] bsz, seqlen = prev_output_tokens.size() srclen = encoder_outs.size(0) # embed tokens embeddings = self.embed_tokens(prev_output_tokens) x = embeddings if self.dropout is not None: x = self.dropout(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # initialize previous states (or get from cache during incremental # generation) cached_state = utils.get_incremental_state( self, incremental_state, "cached_state" ) if cached_state is not None: prev_hiddens, prev_cells = cached_state else: prev_hiddens = [encoder_out["encoder_out"].mean(dim=0)] * self.num_layers prev_cells = [x.new_zeros(bsz, self.hidden_size)] * self.num_layers attn_scores = x.new_zeros(bsz, srclen) attention_outs = [] outs = [] for j in range(seqlen): input = x[j, :, :] attention_out = None for i, layer in enumerate(self.layers): # the previous state is one layer below except for the bottom # layer where the previous state is the state emitted by the # top layer hidden, cell = layer( input, ( prev_hiddens[(i - 1) % self.num_layers], prev_cells[(i - 1) % self.num_layers], ), ) if self.dropout is not None: hidden = self.dropout(hidden) prev_hiddens[i] = hidden prev_cells[i] = cell if attention_out is None: attention_out, attn_scores = self.attention( hidden, encoder_outs, encoder_padding_mask ) if self.dropout is not None: attention_out = self.dropout(attention_out) attention_outs.append(attention_out) input = attention_out # collect the output of the top layer outs.append(hidden) # cache previous states (no-op except during incremental generation) utils.set_incremental_state( self, incremental_state, "cached_state", (prev_hiddens, prev_cells) ) # collect outputs across time steps x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size) attention_outs_concat = torch.cat(attention_outs, dim=0).view( seqlen, bsz, self.context_dim ) # T x B x C -> B x T x C x = x.transpose(0, 1) attention_outs_concat = attention_outs_concat.transpose(0, 1) # concat LSTM output, attention output and embedding # before output projection x = torch.cat((x, attention_outs_concat, embeddings), dim=2) x = self.deep_output_layer(x) x = torch.tanh(x) if self.dropout is not None: x = self.dropout(x) # project back to size of vocabulary x = self.output_projection(x) # to return the full attn_scores tensor, we need to fix the decoder # to account for subsampling input frames # return x, attn_scores return x, None def 
reorder_incremental_state(self, incremental_state, new_order): super().reorder_incremental_state(incremental_state, new_order) cached_state = utils.get_incremental_state( self, incremental_state, "cached_state" ) if cached_state is None: return def reorder_state(state): if isinstance(state, list): return [reorder_state(state_i) for state_i in state] return state.index_select(0, new_order) new_state = tuple(map(reorder_state, cached_state)) utils.set_incremental_state(self, incremental_state, "cached_state", new_state) @register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard") def berard(args): """The original version: "End-to-End Automatic Speech Translation of Audiobooks" (https://arxiv.org/abs/1802.04200) """ args.input_layers = getattr(args, "input_layers", "[256, 128]") args.conv_layers = getattr(args, "conv_layers", "[(16, 3, 2), (16, 3, 2)]") args.num_blstm_layers = getattr(args, "num_blstm_layers", 3) args.lstm_size = getattr(args, "lstm_size", 256) args.dropout = getattr(args, "dropout", 0.2) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128) args.decoder_num_layers = getattr(args, "decoder_num_layers", 2) args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 512) args.attention_dim = getattr(args, "attention_dim", 512) args.output_layer_dim = getattr(args, "output_layer_dim", 128) args.load_pretrained_encoder_from = getattr( args, "load_pretrained_encoder_from", None ) args.load_pretrained_decoder_from = getattr( args, "load_pretrained_decoder_from", None ) @register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_256_3_3") def berard_256_3_3(args): """Used in * "Harnessing Indirect Training Data for End-to-End Automatic Speech Translation: Tricks of the Trade" (https://arxiv.org/abs/1909.06515) * "CoVoST: A Diverse Multilingual Speech-To-Text Translation Corpus" (https://arxiv.org/pdf/2002.01320.pdf) * "Self-Supervised Representations Improve End-to-End Speech Translation" (https://arxiv.org/abs/2006.12124) """ args.decoder_num_layers = getattr(args, "decoder_num_layers", 3) berard(args) @register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_512_3_2") def berard_512_3_2(args): args.num_blstm_layers = getattr(args, "num_blstm_layers", 3) args.lstm_size = getattr(args, "lstm_size", 512) args.dropout = getattr(args, "dropout", 0.3) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_num_layers = getattr(args, "decoder_num_layers", 2) args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 1024) args.attention_dim = getattr(args, "attention_dim", 512) args.output_layer_dim = getattr(args, "output_layer_dim", 256) berard(args) @register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_512_5_3") def berard_512_5_3(args): args.num_blstm_layers = getattr(args, "num_blstm_layers", 5) args.lstm_size = getattr(args, "lstm_size", 512) args.dropout = getattr(args, "dropout", 0.3) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_num_layers = getattr(args, "decoder_num_layers", 3) args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 1024) args.attention_dim = getattr(args, "attention_dim", 512) args.output_layer_dim = getattr(args, "output_layer_dim", 256) berard(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/speech_to_text/berard.py
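MLPAttention above scores each encoder position against the current decoder state through one hidden layer, alpha_ij = V_a * tanh(W_ae * enc_i + W_ad * dec_j + b_a), then softmaxes over the source axis and takes a weighted sum of the encoder states. A self-contained shape check of that computation with plain tensors (dimensions are illustrative, and the padding mask is omitted for brevity):

import torch
import torch.nn.functional as F

src_len, bsz, ctx_dim, dec_dim, attn_dim = 7, 2, 16, 12, 8

W_ae = torch.nn.Linear(ctx_dim, attn_dim, bias=True)   # encoder projection (+ b_a)
W_ad = torch.nn.Linear(dec_dim, attn_dim, bias=False)  # decoder projection
V_a = torch.nn.Linear(attn_dim, 1, bias=False)         # score vector

enc = torch.randn(src_len, bsz, ctx_dim)  # source_hids: T x B x C
dec = torch.randn(bsz, dec_dim)           # decoder_state: B x H

# T x B scores, softmax over the source dimension, then a weighted context.
scores = V_a(torch.tanh(W_ae(enc) + W_ad(dec).unsqueeze(0))).squeeze(-1)
attn = F.softmax(scores, dim=0)
context = (enc * attn.unsqueeze(2)).sum(dim=0)
print(attn.shape, context.shape)  # torch.Size([7, 2]) torch.Size([2, 16])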
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .berard import *  # noqa
from .convtransformer import *  # noqa
from .s2t_transformer import *  # noqa
from .xm_transformer import *  # noqa
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/speech_to_text/__init__.py
#!/usr/bin/env python3 import logging import math from typing import Dict, List, Optional, Tuple from pathlib import Path import torch import torch.nn as nn from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.modules import ( FairseqDropout, LayerNorm, PositionalEmbedding, TransformerEncoderLayer, ) from torch import Tensor logger = logging.getLogger(__name__) class Conv1dSubsampler(nn.Module): """Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation via gated linear units (https://arxiv.org/abs/1911.08460) Args: in_channels (int): the number of input channels mid_channels (int): the number of intermediate channels out_channels (int): the number of output channels kernel_sizes (List[int]): the kernel size for each convolutional layer """ def __init__( self, in_channels: int, mid_channels: int, out_channels: int, kernel_sizes: List[int] = (3, 3), ): super(Conv1dSubsampler, self).__init__() self.n_layers = len(kernel_sizes) self.conv_layers = nn.ModuleList( nn.Conv1d( in_channels if i == 0 else mid_channels // 2, mid_channels if i < self.n_layers - 1 else out_channels * 2, k, stride=2, padding=k // 2, ) for i, k in enumerate(kernel_sizes) ) def get_out_seq_lens_tensor(self, in_seq_lens_tensor): out = in_seq_lens_tensor.clone() for _ in range(self.n_layers): out = ((out.float() - 1) / 2 + 1).floor().long() return out def forward(self, src_tokens, src_lengths): bsz, in_seq_len, _ = src_tokens.size() # B x T x (C x D) x = src_tokens.transpose(1, 2).contiguous() # -> B x (C x D) x T for conv in self.conv_layers: x = conv(x) x = nn.functional.glu(x, dim=1) _, _, out_seq_len = x.size() x = x.transpose(1, 2).transpose(0, 1).contiguous() # -> T x B x (C x D) return x, self.get_out_seq_lens_tensor(src_lengths) @register_model("s2t_transformer") class S2TTransformerModel(FairseqEncoderDecoderModel): """Adapted Transformer model (https://arxiv.org/abs/1706.03762) for speech-to-text tasks. The Transformer encoder/decoder remains the same. 
A trainable input subsampler is prepended to the Transformer encoder to project inputs into the encoder dimension as well as downsample input sequence for computational efficiency.""" def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # input parser.add_argument( "--conv-kernel-sizes", type=str, metavar="N", help="kernel sizes of Conv1d subsampling layers", ) parser.add_argument( "--conv-channels", type=int, metavar="N", help="# of channels in Conv1d subsampling layers", ) # Transformer parser.add_argument( "--activation-fn", type=str, default="relu", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--activation-dropout", "--relu-dropout", type=float, metavar="D", help="dropout probability after activation in FFN.", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="num encoder layers" ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads", ) parser.add_argument( "--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads", ) parser.add_argument( "--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--share-decoder-input-output-embed", action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--layernorm-embedding", action="store_true", help="add layernorm to embedding", ) parser.add_argument( "--no-scale-embedding", action="store_true", help="if True, dont scale embeddings", ) parser.add_argument( "--load-pretrained-encoder-from", type=str, metavar="STR", help="model to take encoder weights from (for initialization)", ) parser.add_argument( '--encoder-freezing-updates', type=int, metavar='N', help='freeze encoder for first N updates' ) @classmethod def build_encoder(cls, args): encoder = S2TTransformerEncoder(args) pretraining_path = getattr(args, "load_pretrained_encoder_from", None) if pretraining_path is not None: if not Path(pretraining_path).exists(): logger.warning( f"skipped pretraining because {pretraining_path} does not exist" ) else: encoder = checkpoint_utils.load_pretrained_component_from_model( component=encoder, checkpoint=pretraining_path ) logger.info(f"loaded pretrained encoder from: {pretraining_path}") return encoder @classmethod def build_decoder(cls, args, task, embed_tokens): return TransformerDecoderScriptable(args, task.target_dictionary, embed_tokens) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure 
all arguments are present in older models base_architecture(args) def build_embedding(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() return Embedding(num_embeddings, embed_dim, padding_idx) decoder_embed_tokens = build_embedding( task.target_dictionary, args.decoder_embed_dim ) encoder = cls.build_encoder(args) decoder = cls.build_decoder(args, task, decoder_embed_tokens) return cls(encoder, decoder) def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): # net_output['encoder_out'] is a (B, T, D) tensor lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample) lprobs.batch_first = True return lprobs def forward(self, src_tokens, src_lengths, prev_output_tokens): """ The forward method inherited from the base class has a **kwargs argument in its input, which is not supported in torchscript. This method overwrites the forward method definition without **kwargs. """ encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths) decoder_out = self.decoder( prev_output_tokens=prev_output_tokens, encoder_out=encoder_out ) return decoder_out class S2TTransformerEncoder(FairseqEncoder): """Speech-to-text Transformer encoder that consists of input subsampler and Transformer encoder.""" def __init__(self, args): super().__init__(None) self.encoder_freezing_updates = args.encoder_freezing_updates self.num_updates = 0 self.dropout_module = FairseqDropout( p=args.dropout, module_name=self.__class__.__name__ ) self.embed_scale = math.sqrt(args.encoder_embed_dim) if args.no_scale_embedding: self.embed_scale = 1.0 self.padding_idx = 1 self.subsample = Conv1dSubsampler( args.input_feat_per_channel * args.input_channels, args.conv_channels, args.encoder_embed_dim, [int(k) for k in args.conv_kernel_sizes.split(",")], ) self.embed_positions = PositionalEmbedding( args.max_source_positions, args.encoder_embed_dim, self.padding_idx ) self.transformer_layers = nn.ModuleList( [TransformerEncoderLayer(args) for _ in range(args.encoder_layers)] ) if args.encoder_normalize_before: self.layer_norm = LayerNorm(args.encoder_embed_dim) else: self.layer_norm = None def _forward(self, src_tokens, src_lengths, return_all_hiddens=False): x, input_lengths = self.subsample(src_tokens, src_lengths) x = self.embed_scale * x encoder_padding_mask = lengths_to_padding_mask(input_lengths) positions = self.embed_positions(encoder_padding_mask).transpose(0, 1) x += positions x = self.dropout_module(x) encoder_states = [] for layer in self.transformer_layers: x = layer(x, encoder_padding_mask) if return_all_hiddens: encoder_states.append(x) if self.layer_norm is not None: x = self.layer_norm(x) return { "encoder_out": [x], # T x B x C "encoder_padding_mask": [encoder_padding_mask] if encoder_padding_mask.any() else [], # B x T "encoder_embedding": [], # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], "src_lengths": [], } def forward(self, src_tokens, src_lengths, return_all_hiddens=False): if self.num_updates < self.encoder_freezing_updates: with torch.no_grad(): x = self._forward(src_tokens, src_lengths, return_all_hiddens=return_all_hiddens) else: x = self._forward(src_tokens, src_lengths, return_all_hiddens=return_all_hiddens) return x def reorder_encoder_out(self, encoder_out, new_order): new_encoder_out = ( [] if len(encoder_out["encoder_out"]) == 0 else [x.index_select(1, new_order) for x in 
encoder_out["encoder_out"]] ) new_encoder_padding_mask = ( [] if len(encoder_out["encoder_padding_mask"]) == 0 else [x.index_select(0, new_order) for x in encoder_out["encoder_padding_mask"]] ) new_encoder_embedding = ( [] if len(encoder_out["encoder_embedding"]) == 0 else [x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]] ) encoder_states = encoder_out["encoder_states"] if len(encoder_states) > 0: for idx, state in enumerate(encoder_states): encoder_states[idx] = state.index_select(1, new_order) return { "encoder_out": new_encoder_out, # T x B x C "encoder_padding_mask": new_encoder_padding_mask, # B x T "encoder_embedding": new_encoder_embedding, # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], # B x T "src_lengths": [], # B x 1 } def set_num_updates(self, num_updates): super().set_num_updates(num_updates) self.num_updates = num_updates class TransformerDecoderScriptable(TransformerDecoder): def extract_features( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): # call scriptable method from parent class x, _ = self.extract_features_scriptable( prev_output_tokens, encoder_out, incremental_state, full_context_alignment, alignment_layer, alignment_heads, ) return x, None @register_model_architecture(model_name="s2t_transformer", arch_name="s2t_transformer") def base_architecture(args): args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0) # Convolutional subsampler args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5") args.conv_channels = getattr(args, "conv_channels", 1024) # Transformer args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", args.dropout) args.activation_dropout = getattr(args, "activation_dropout", args.dropout) args.activation_fn = getattr(args, "activation_fn", "relu") args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) 
args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) @register_model_architecture("s2t_transformer", "s2t_transformer_s") def s2t_transformer_s(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.dropout = getattr(args, "dropout", 0.1) base_architecture(args) @register_model_architecture("s2t_transformer", "s2t_transformer_xs") def s2t_transformer_xs(args): args.encoder_layers = getattr(args, "encoder_layers", 6) args.decoder_layers = getattr(args, "decoder_layers", 3) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4) args.dropout = getattr(args, "dropout", 0.3) s2t_transformer_s(args) @register_model_architecture("s2t_transformer", "s2t_transformer_sp") def s2t_transformer_sp(args): args.encoder_layers = getattr(args, "encoder_layers", 16) s2t_transformer_s(args) @register_model_architecture("s2t_transformer", "s2t_transformer_m") def s2t_transformer_m(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.dropout = getattr(args, "dropout", 0.15) base_architecture(args) @register_model_architecture("s2t_transformer", "s2t_transformer_mp") def s2t_transformer_mp(args): args.encoder_layers = getattr(args, "encoder_layers", 16) s2t_transformer_m(args) @register_model_architecture("s2t_transformer", "s2t_transformer_l") def s2t_transformer_l(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.2) base_architecture(args) @register_model_architecture("s2t_transformer", "s2t_transformer_lp") def s2t_transformer_lp(args): args.encoder_layers = getattr(args, "encoder_layers", 16) s2t_transformer_l(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/speech_to_text/s2t_transformer.py
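Conv1dSubsampler above halves the time axis once per stride-2 layer, and get_out_seq_lens_tensor tracks the resulting sequence lengths with floor((L - 1) / 2 + 1) per layer (with the default two kernel-5 layers this is a 4x reduction). A quick standalone check of that length bookkeeping, with made-up frame counts:

import torch


def subsampled_lengths(lengths: torch.Tensor, n_layers: int = 2) -> torch.Tensor:
    # Same recurrence as Conv1dSubsampler.get_out_seq_lens_tensor: each
    # stride-2 convolution with padding k//2 (k odd) maps L to floor((L - 1) / 2 + 1).
    out = lengths.clone()
    for _ in range(n_layers):
        out = ((out.float() - 1) / 2 + 1).floor().long()
    return out


if __name__ == "__main__":
    lengths = torch.tensor([100, 37, 8])  # hypothetical input frame counts
    print(subsampled_lengths(lengths))    # tensor([25, 10, 2])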
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import logging from collections.abc import Iterable from itertools import repeat from typing import List, Optional, Tuple import torch from torch import Tensor # ------------------------------------------------------------------------------ # assert_equal() # ------------------------------------------------------------------------------ def assert_equal(value1, value2, name1=None, name2=None): """Asserts two values are equal otherwise raise an error.""" str_name1 = "" if name1 is None else "{} ".format(name1) str_name2 = "" if name2 is None else "{} ".format(name2) if value1 != value2: str_value1 = "{}" if name1 is None else "({})" str_value1 = str_value1.format(value1) str_value2 = "{}" if name2 is None else "({})" str_value2 = str_value2.format(value2) raise ValueError( "Expected {}{} == {}{}".format(str_name1, str_value1, str_name2, str_value2) ) def fill_config(config, key, value): if value is not None: if key not in config or config[key] is None: config[key] = value assert_equal(value, config[key], "value", f'config["{key}"]') # ------------------------------------------------------------------------------ # check_and_return_expected() # ------------------------------------------------------------------------------ def check_and_return_expected(value, undefined_value, expected_value, name=None): """ Return the expected value while checking if the given value is undefined or equal to the expected value. """ if (undefined_value is None and value is None) or (undefined_value == value): return expected_value if value != expected_value: str_name = "" if name is None else "{} ".format(name) str_value = "{}" if name is None else "({})" str_value = str_value.format(value) raise ValueError( "Expected {}{} == {}".format(str_name, str_value, expected_value) ) return expected_value # ------------------------------------------------------------------------------ # get_time_axis() # ------------------------------------------------------------------------------ def get_time_axis(layout): """ Extract the time axis from the layout, for example for breaking sequence into segments. """ if layout in ["TB", "TBD"]: return 0 if layout in ["BT", "BTD"]: return 1 if layout in ["BCTD"]: return 2 raise ValueError("Unsupported layout = {}".format(layout)) # ------------------------------------------------------------------------------ # get_batch_axis() # ------------------------------------------------------------------------------ def get_batch_axis(layout): """ Extract the batch axis from the layout """ if layout in ["TB", "TBD"]: return 1 if layout in ["BT", "BTD", "BCTD"]: return 0 raise ValueError("Unsupported layout = {}".format(layout)) # ------------------------------------------------------------------------------ # monotonically_increasing_and_bounded() # ------------------------------------------------------------------------------ def monotonically_increasing_and_bounded(iterable, min=None, max=None): """ Check if the elements in the given iterable are monotonically increasing and bounded by upper/lower bounds. 
""" if not isinstance(iterable, Iterable): raise TypeError( "Expected iterable to be of type Iterable, got ({})".format( iterable.__class__.__name__ ) ) for i in range(len(iterable)): if min is not None and iterable[i] < min: return False if max is not None and iterable[i] > max: return False if i > 0 and iterable[i] <= iterable[i - 1]: return False return True # ------------------------------------------------------------------------------ # to_pair() # ------------------------------------------------------------------------------ def to_pair(value, name): """Make a pair (of type tuple) of given value.""" if isinstance(value, Iterable): if len(value) != 2: raise ValueError( "Expected `{}` to have exactly 2 elements, got: ({})".format( name, value ) ) return value return tuple(repeat(value, 2)) # ------------------------------------------------------------------------------ # infer_conv_output_attrs() # ------------------------------------------------------------------------------ # TODO(cfyeh): figure out if we can get `output_dim` without calling the module. def infer_conv_output_attrs( module, input_channels, input_dim, batch_size=1, max_length=8 ): """Get output attributes of a module with input.""" input = torch.randn(batch_size, input_channels, max_length, input_dim) output = module(input) output_channels = output.shape[1] output_dim = output.shape[-1] return output_channels, output_dim # ------------------------------------------------------------------------------ # NoOp # ------------------------------------------------------------------------------ class NoOp(torch.nn.Module): """ NoOp simply passes the input as the output. """ def __init__(self): super().__init__() def forward(self, input: Tensor) -> Tensor: return input # ------------------------------------------------------------------------------ # Permute: a torch.nn.Module applies permutation on the input tensor. # ------------------------------------------------------------------------------ class Permute(torch.nn.Module): def __init__(self, dims): super().__init__() self.dims = dims def forward(self, input: Tensor) -> Tensor: return input.permute(self.dims).contiguous() # ------------------------------------------------------------------------------ # lengths_to_padding_mask() # ------------------------------------------------------------------------------ def lengths_to_padding_mask(lengths: Tensor) -> Tensor: """Convert lengths of shape (B, ) to padding mask.""" batch_size = lengths.shape[0] max_length = int(torch.max(lengths).item()) padding_mask = torch.arange( # [0, ..., T-1] max_length, device=lengths.device, dtype=lengths.dtype ).expand(batch_size, max_length) >= lengths.unsqueeze(1) return padding_mask # ------------------------------------------------------------------------------ # lengths_to_attention_mask() # ------------------------------------------------------------------------------ def lengths_to_attention_mask( lengths: Tensor, left_context: Optional[int] = None, right_context: Optional[int] = None, ) -> Optional[Tensor]: """ Generate attention mask based on (lengths, left_context, right_context). left_context is None means unlimited left context. right_context is None means unlimited right context. 
""" if left_context is None and right_context is None: return None max_length = int(torch.max(lengths).item()) # For example, with `max_length` == 5, # indices = tensor([ # [ 0, 1, 2, 3, 4, 5], # [-1, 0, 1, 2, 3, 4], # [-2, -1, 0, 1, 2, 3], # [-3, -2, -1, 0, 1, 2], # [-4, -3, -2, -1, 0, 1], # [-5, -4, -3, -2, -1, 0], # ]) # In some cases the second torch.arange is created on cpu which causes a # failure. Adding the device option to guard against it. indices = torch.arange( max_length, device=lengths.device, dtype=lengths.dtype ).expand(max_length, max_length) - torch.arange( max_length, device=lengths.device ).view( max_length, -1 ) # For example, with `max_length` == 5, # bool_mask = tensor([ # [True, True, True, True, True], # [True, True, True, True, True], # [True, True, True, True, True], # [True, True, True, True, True], # [True, True, True, True, True], # ]) bool_mask = ( torch.tensor([True]).to(device=lengths.device).expand(max_length, max_length) ) # For example, with `max_length` == 5, left_context == 2 # left_mask = tensor([ # [ True, True, True, True, True], # [ True, True, True, True, True], # [ True, True, True, True, True], # [False, True, True, True, True], # [False, False, True, True, True], # ]) if left_context is not None: left_mask = indices >= -left_context bool_mask = bool_mask & left_mask # For example, with `max_length` == 5, right_context == 1 # right_mask = tensor([ # [True, True, False, False, False], # [True, True, True, False, False], # [True, True, True, True, False], # [True, True, True, True, True], # [True, True, True, True, True], # ]) if right_context is not None: right_mask = indices <= right_context bool_mask = bool_mask & right_mask bool_mask = (~bool_mask).to(device=lengths.device) return bool_mask # ------------------------------------------------------------------------------ # infer_output_norm() # ------------------------------------------------------------------------------ def infer_output_norm(module, output_norm=None): """ Infer the output norm (string and module) needed on the module gvien desired output normalization. """ if output_norm == module.output_norm(): # output_norm already matches module.output_norm(). 
return (None, NoOp()) if output_norm is None and module.output_norm() is not None: logger = logging.getLogger("infer_output_norm()") logger.warning( "trying to set output_norm ({}) ".format(output_norm) + "but got module.output_norm() ({}), ".format(module.output_norm()) + "the combined output_norm() will be ({})".format(module.output_norm()) ) return (None, NoOp()) if output_norm == "log_softmax": if module.output_norm() is not None: raise ValueError( "incompatible output_norm ({}) ".format(output_norm) + "and module.output_norm() ({})".format(module.output_norm()) ) else: return ("log_softmax", torch.nn.LogSoftmax(dim=-1)) if output_norm == "softmax": if module.output_norm() is not None: raise ValueError( "incompatible output_norm ({}) ".format(output_norm) + "and module.output_norm() ({})".format(module.output_norm()) ) else: return ("softmax", torch.nn.Softmax(dim=-1)) raise ValueError( "output_norm ({}) not in ".format(output_norm) + "supported list = [None, softmax, log_softmax]" ) # ------------------------------------------------------------------------------ # infer_channels_from_layout() # ------------------------------------------------------------------------------ def infer_channels_from_layout(layout, channels): """Extract the number of channels from the layout.""" if layout in ("TBD", "BTD"): if channels is not None and channels != 1: raise ValueError( "Expected channels ({}) to be 1 for layout = {}".format( channels, layout ) ) if channels is None: return 1 return channels # ------------------------------------------------------------------------------ # pad_sequence() # ------------------------------------------------------------------------------ @torch.jit.export def pad_sequence( sequence: Tensor, time_axis: int, extra_left_context: int = 0, extra_right_context: int = 0, ) -> Tensor: """Pad extra left/right contexts to the sequence.""" if extra_left_context == 0 and extra_right_context == 0: return sequence tensors_to_concat = [] if extra_left_context: size = (extra_left_context,) fill_value = 0 indices = torch.full( size=size, fill_value=fill_value, dtype=torch.long, device=sequence.device, ) left_padding = torch.index_select(sequence, time_axis, indices) tensors_to_concat.append(left_padding) tensors_to_concat.append(sequence) # NOTE(cfyeh): for efficiency reason we pad 0 instead of the last frame for # extra right contexts. 
if extra_right_context: size = list(sequence.shape) size[time_axis] = extra_right_context right_padding = torch.zeros(size, dtype=sequence.dtype, device=sequence.device) tensors_to_concat.append(right_padding) padded_sequence = torch.cat(tensors_to_concat, dim=time_axis) return padded_sequence # ------------------------------------------------------------------------------ # sequence_to_segments() # ------------------------------------------------------------------------------ @torch.jit.export def sequence_to_segments( sequence: Tensor, time_axis: int, lengths: Tensor, segment_size: Optional[int] = None, extra_left_context: int = 0, extra_right_context: int = 0, ) -> List[Tuple[Tensor, Tensor]]: """Breaks sequence into segments.""" sequence = pad_sequence( sequence=sequence, time_axis=time_axis, extra_left_context=extra_left_context, extra_right_context=extra_right_context, ) lengths = lengths + extra_left_context + extra_right_context segments: List[Tuple[Tensor, Tensor]] = [] if segment_size is None: segments.append((sequence, lengths)) return segments offset = 0 end = sequence.shape[time_axis] step = segment_size size = extra_left_context + segment_size + extra_right_context while offset + extra_left_context + extra_right_context < end: clamped_size = min(size, end - offset) segment_lengths = torch.clamp(lengths - offset, min=0, max=clamped_size) indices = torch.arange( start=offset, end=(offset + clamped_size), step=1, dtype=torch.long, device=sequence.device, ) segment_tensor = torch.index_select(sequence, time_axis, indices) segments.append((segment_tensor, segment_lengths)) offset = offset + step return segments # ------------------------------------------------------------------------------ # segments_to_sequence() # ------------------------------------------------------------------------------ @torch.jit.export def segments_to_sequence( segments: List[Tuple[Tensor, Tensor]], time_axis: int ) -> Tuple[Tensor, Tensor]: """Concatenate segments into a full sequence.""" if len(segments) == 1: return segments[0] tensors_to_concat: List[Tensor] = [] lengths_to_stack: List[Tensor] = [] for tensor, lengths in segments: tensors_to_concat.append(tensor) lengths_to_stack.append(lengths) sequence = torch.cat(tensors_to_concat, dim=time_axis) lengths = torch.stack(lengths_to_stack, dim=0) lengths = torch.sum(lengths, dim=0) return sequence, lengths def lengths_to_encoder_padding_mask(lengths, batch_first: bool = False): """ convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor Args: lengths: a (B, )-shaped tensor batch_first: whether to return a (B, T) tensor Return: max_length: maximum length of B sequences encoder_padding_mask: a (max_length, B) binary mask, where [t, b] = False for t < lengths[b] and True otherwise TODO: kernelize this function if benchmarking shows this function is slow """ max_lengths = torch.max(lengths).item() bsz = lengths.size(0) encoder_padding_mask = torch.arange( max_lengths ).to( # a (T, ) tensor with [0, ..., T-1] lengths.device ).view( # move to the right device 1, max_lengths ).expand( # reshape to (1, T)-shaped tensor bsz, -1 ) > lengths.view( # expand to (B, T)-shaped tensor bsz, 1 ).expand( -1, max_lengths ) if not batch_first: return encoder_padding_mask.t(), max_lengths else: return encoder_padding_mask, max_lengths # ------------------------------------------------------------------------------ # attention suppression # ------------------------------------------------------------------------------ def attention_suppression(attention_weights: 
Tensor, scale: float):
    # B, H, qlen, klen -> B, H, qlen, 1
    attention_prob = torch.nn.functional.softmax(attention_weights.float(), dim=-1)
    attention_nozeros = attention_prob.to(torch.bool)
    nozeros_sum = torch.sum(attention_nozeros.to(torch.float), dim=-1, keepdim=True)

    # For very sparse attention distributions we need to guard against
    # dividing by zero below.
    key_sum = torch.sum(attention_prob, dim=-1, keepdim=True)

    # nozeros_sum should be > 1
    key_mean = key_sum / (nozeros_sum + 1e-8)

    # std calculation
    dis = (attention_prob - key_mean) * (attention_prob - key_mean)

    # if attention_prob[i] is zero, then dis_masked[i] = 0; for all i
    dis_masked = torch.where(
        attention_nozeros, dis, attention_prob.new_zeros(attention_prob.size())
    )

    key_var = torch.sum(dis_masked, dim=-1, keepdim=True)
    key_var = key_var / (nozeros_sum - 1.0 + 1e-8)
    key_std = torch.sqrt(key_var)
    key_thread = key_mean - scale * key_std

    # if attention_prob[i] >= key_thread, keep attention_weights[i];
    # otherwise suppress it with "-inf"
    inf_tensor = attention_prob.new_zeros(attention_prob.size()).detach()
    inf_tensor[:] = float("-inf")
    attention_weights_float = torch.where(
        attention_prob < key_thread,
        inf_tensor,
        attention_weights.float(),
    )

    return attention_weights_float.type_as(attention_weights)


def layer_norm_backward_hook(module, grad_input, grad_output, clamp_value):
    """Clamp gradients flowing into a LayerNorm to [-clamp_value, clamp_value]."""
    return tuple(torch.clamp(v, min=-clamp_value, max=clamp_value) for v in grad_input)
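

# ------------------------------------------------------------------------------
# Usage sketch (editor's addition, illustrative only)
# ------------------------------------------------------------------------------
# A minimal, self-contained sketch of how the length/segment helpers above fit
# together. All tensor sizes are invented for illustration.
if __name__ == "__main__":
    lengths = torch.tensor([3, 5])                      # per-utterance lengths, (B,)
    padding_mask = lengths_to_padding_mask(lengths)     # (B, T), True = padding
    attn_mask = lengths_to_attention_mask(
        lengths, left_context=2, right_context=1
    )                                                   # (T, T), True = masked out

    sequence = torch.randn(5, 2, 8)                     # (T, B, D)
    segments = sequence_to_segments(
        sequence=sequence, time_axis=0, lengths=lengths, segment_size=2
    )
    rebuilt, rebuilt_lengths = segments_to_sequence(segments, time_axis=0)
    assert rebuilt.shape == sequence.shape
    assert torch.equal(rebuilt_lengths, lengths)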
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/speech_to_text/utils.py
#!/usr/bin/env python3 # Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import re from functools import partial from typing import List, Optional, Tuple import torch import torch.nn as nn from fairseq.models import ( FairseqEncoder, ) from fairseq.models.speech_to_text.utils import ( NoOp, lengths_to_padding_mask, segments_to_sequence, ) from fairseq.models.speech_to_text.utils import ( attention_suppression, layer_norm_backward_hook, ) from torch import Tensor, device as Device from torch.ao.quantization.qconfig import ( default_dynamic_qconfig, per_channel_dynamic_qconfig, ) class RelativePositionEmbedding(nn.Module): """ Implementation according to https://arxiv.org/abs/1803.02155 """ def __init__(self, head_dim, max_position, norm_init=True): super().__init__() self.head_dim = head_dim self.max_position = max_position self.embeddings = nn.Parameter(torch.Tensor(max_position * 2 + 1, head_dim)) if norm_init: nn.init.xavier_normal_(self.embeddings) else: nn.init.xavier_uniform_(self.embeddings) def forward(self, input: Tensor): output = nn.functional.embedding(input.long(), self.embeddings) return output class Fp32LayerNorm(nn.Module): def __init__( self, input_dim, clamp_grad=True, max_grad_value=256, eps=1e-5, elementwise_affine=True, ): super().__init__() self.torch_module = torch.nn.LayerNorm( input_dim, eps=eps, elementwise_affine=elementwise_affine ) if clamp_grad: hook = partial(layer_norm_backward_hook, clamp_value=max_grad_value) self.torch_module.register_backward_hook(hook) def forward(self, input): output = torch.nn.functional.layer_norm( input.float(), self.torch_module.normalized_shape, self.torch_module.weight.float() if self.torch_module.weight is not None else None, self.torch_module.bias.float() if self.torch_module.bias is not None else None, self.torch_module.eps, ).type_as(input) return output # ------------------------------------------------------------------------------ # PositionwiseFF # ------------------------------------------------------------------------------ class PositionwiseFF(nn.Module): """ FFN layer in transformer. Args: input_dim: input embedding dimension ffn_dim: FFN layer inner dimension dropout_on_fc1: dropout for first linear layer dropout_on_fc2: dropout fr second linear layer activation_fn: activation function used after first linear layer. \ Only relu or gelu is supported. 
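
    Illustrative example (editor's addition; the sizes are made up):

        ff = PositionwiseFF(
            input_dim=512, ffn_dim=2048,
            dropout_on_fc1=0.1, dropout_on_fc2=0.1, activation_fn="relu",
        )
        out = ff(torch.randn(10, 4, 512))  # residual output, same shape as the input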
""" def __init__( self, input_dim, ffn_dim, dropout_on_fc1, dropout_on_fc2, activation_fn ): super(PositionwiseFF, self).__init__() self.input_dim = input_dim self.ffn_dim = ffn_dim if activation_fn == "relu": ac = nn.ReLU() elif activation_fn == "gelu": ac = nn.GELU() else: raise ValueError("Unsupported activation_fn = ({})".format(activation_fn)) # fc1 -> ac -> dropout -> fc2 -> dropout self.module = nn.Sequential( nn.Linear(input_dim, ffn_dim), ac, nn.Dropout(dropout_on_fc1), nn.Linear(ffn_dim, input_dim), nn.Dropout(dropout_on_fc2), ) self.layer_norm = Fp32LayerNorm(input_dim) def forward(self, input): module_out = self.module(self.layer_norm(input)) output = module_out + input return output def quantize_(self, params=None): if params and "per_channel" in params and params["per_channel"]: qconfig = per_channel_dynamic_qconfig else: qconfig = default_dynamic_qconfig torch.ao.quantization.quantize_dynamic( self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True ) return self # ------------------------------------------------------------------------------ # SummarizationLayer # ------------------------------------------------------------------------------ class SummarizationLayer(nn.Module): def __init__(self, method, segment_size, embedding_dim): super(SummarizationLayer, self).__init__() self.segment_size = segment_size self.embedding_dim = embedding_dim nonlin_match = re.match(r"nonlinear\((?P<act>[a-z]+),(?P<dim>[0-9]+)\)", method) self.method = method if method == "mean": self.module = nn.AvgPool1d( kernel_size=segment_size, stride=segment_size, ceil_mode=True, ) elif method == "max": self.module = nn.MaxPool1d( kernel_size=segment_size, stride=segment_size, ceil_mode=True, ) elif method == "linear": self.module = nn.Linear(segment_size, 1) elif nonlin_match: nonlin_args = nonlin_match.groupdict() act_type = nonlin_args["act"] hid_dim = int(nonlin_args["dim"]) if act_type == "relu": act = nn.ReLU() elif act_type == "gelu": act = nn.GELU() else: raise ValueError("Unsupported activation_fn = ({})".format(act_type)) self.module = nn.Sequential( nn.Linear(segment_size, hid_dim), act, nn.Linear(hid_dim, 1), ) else: raise ValueError("Unsupported summarization method = ({})".format(method)) def forward(self, input): # T, B, D -> B, D, T input = input.permute(1, 2, 0) if self.method == "mean" or self.method == "max": output = self.module(input) output = output.permute(2, 0, 1) return output full_seg_length = input.size(2) // self.segment_size * self.segment_size if full_seg_length > 0: # at least one seg is full B = input.size(0) D = input.size(1) input_todo = ( input[:, :, :full_seg_length] .contiguous() .view(B, -1, self.segment_size) ) output = self.module(input_todo) output = output.view(B, D, -1) else: output = input.new_zeros(input.size(0), input.size(1), 0) left = input.size(2) - full_seg_length if left > 0: # when last seg is not full, use zeros as last memory placeholder zeros = input.new_zeros(input.size(0), input.size(1), 1) output = torch.cat([output, zeros], dim=2) output = output.permute(2, 0, 1) return output # ------------------------------------------------------------------------------ # NoSegAugmentedMemoryMultiheadAttentionBmm # ------------------------------------------------------------------------------ class NoSegAugmentedMemoryMultiheadAttentionBmm(nn.Module): """ Whole utterance augmented memory multihead attention using BMM. Different with previous augmented memory multihead attention where the utterance is chunked into segments. 
Here we use attention mask achieve so. The input embedding [right_context, utterance, summary] is a concatenation of right context, utterance and summary. Right context block is the concatenation of all the right context for each segments. [right_context_0, right_context_1, ..., right_context_n] For example, if we have utterance = [v0, v1, v2, ...., v20]. segment size 8, right_context size 4. Then the right context blocks = [v8, v9, v10, v11, v16, v17, v18, v19, 0, 0, 0, 0], where v8, v9, v10, and v11 are the right context for first segment. v16, v17, v18 and v19 are the right context for second segment. 0, 0, 0 and 0 are right context for the last segment. utterance is corresponding to input embedding sequence summary is concatenation of average of each segments. [summary_0, summary_1, ..., ]. In augmented memory multihead attention, the query is [right_context, utterance, summary], key is [memory, right_context, utterance]. Different with AugmentedMemoryMultiheadAttentionBmm, memory here is passed from previous attention layer. For the first attention layer, memory is average of each segment. Memory is a concatenation of memory from each segments in previous attention layer. For example, current layer is i, then memory is [m_0, m_1, ..., m_n]. Each m_k is the output from seg_k in layer i-1. args: input_dim: input embedding dimension num_heads: number of heads in multihead self-attention dropout: attention dropout std_scale: if std_scale is not None. The weak attention suppression is turned on. For std_scale = 0.5, all the attention smaller than mean + 0.5 * std will be suppressed. scaled_init: whether to use scaled init for linear weight tanh_on_mem: whether to use tanh on memory output use_mem: whether to use memory or not. When max_memory_size is 0, then we don't have memory anymore. layer_index: current self-attention layer index that is used in depth initialization max_relative_position: max relative position used in relative position embedding rpe_old_option: To be compatible with previous model. The previous model was trained with attention += attention + rpe. The correct equation should be attention = attention + rpe """ def __init__( self, input_dim, num_heads, dropout=0.0, std_scale=None, scaled_init=False, tanh_on_mem=False, use_mem=True, mini_batches=False, negative_inf="-inf", layer_index=-1, max_relative_position=0, rpe_old_option=True, ): if input_dim % num_heads: raise ValueError( "input_dim ({}) must be divisible by num_heads ({})".format( input_dim, num_heads ) ) super().__init__() embed_dim = input_dim self.e2h_kv = torch.nn.Linear(input_dim, 2 * input_dim, bias=True) self.e2h_q = torch.nn.Linear(input_dim, input_dim, bias=True) self.rpe_old_option = rpe_old_option if max_relative_position > 0: self.use_rpe = True self.rpe_k = RelativePositionEmbedding( head_dim=input_dim // num_heads, max_position=max_relative_position, ) self.rpe_v = RelativePositionEmbedding( head_dim=input_dim // num_heads, max_position=max_relative_position, ) else: self.use_rpe = False self.rpe_k = None self.rpe_v = None if scaled_init: if layer_index == -1: gain = 1.0 / math.sqrt(2) else: # https://arxiv.org/abs/2005.09684 depthwise initialization # stablize the training greatly. Use depthwise initialization to # replace incremental loss. 
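                # i.e. depth-scaled init: deeper layers (larger layer_index) get
                # a smaller gain of 1 / sqrt(layer_index + 1)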
gain = 1.0 / math.sqrt(layer_index + 1) torch.nn.init.xavier_uniform_(self.e2h_kv.weight, gain=gain) torch.nn.init.xavier_uniform_(self.e2h_q.weight, gain=gain) self.out_proj = torch.nn.Linear(embed_dim, embed_dim, bias=True) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.scaling = self.head_dim ** -0.5 self.std_scale = std_scale self.use_mem = use_mem self.mini_batches = mini_batches self.negative_inf = negative_inf if tanh_on_mem: self.squash_mem = torch.tanh self.nonlinear_squash_mem = True else: self.squash_mem = NoOp() self.nonlinear_squash_mem = False def prepare_qkv( self, input: Tensor, mems: Tensor, lengths: Tensor, summary_length: int, lc_length: int, ): # T: right_context length + utterance_length + summary_length T, B, D = input.shape mem_length = mems.size(0) utterance_length = torch.max(lengths) right_context_blocks_length = T - utterance_length - summary_length rc_block = input[:right_context_blocks_length, :, :] utterance_block = input[right_context_blocks_length : T - summary_length, :, :] if B == 1: padding_mask = None else: klengths = lengths + mem_length + right_context_blocks_length + lc_length padding_mask = lengths_to_padding_mask(lengths=klengths) mem_rc_input = torch.cat([mems, rc_block, utterance_block], dim=0) # In training lc_length = 0 key_length = mem_rc_input.size(0) + lc_length rc_input_sum = input q = self.e2h_q(rc_input_sum) kv = self.e2h_kv(mem_rc_input) k, v = kv.chunk(chunks=2, dim=2) result_qkv = (q, k, v) input_shape = (T, B, D) result_lengths_info = ( mem_length, utterance_length, right_context_blocks_length, key_length, ) if padding_mask is not None: assert padding_mask.size(0) == B assert padding_mask.size(1) == key_length return result_qkv, input_shape, result_lengths_info, padding_mask def prepare_attention_weights( self, q: Tensor, new_k: Tensor, new_v: Tensor, input_shape: Tuple[int, int, int], rpe: Optional[Tensor], ) -> Tuple[Tensor, Tensor, Tensor]: T, B, D = input_shape q = ( q.contiguous().view(-1, B * self.num_heads, self.head_dim).transpose(0, 1) * self.scaling ) k = ( new_k.contiguous() .view(-1, B * self.num_heads, self.head_dim) .transpose(0, 1) ) v = ( new_v.contiguous() .view(-1, B * self.num_heads, self.head_dim) .transpose(0, 1) ) attention_weights = torch.bmm(q, k.transpose(1, 2)) if self.use_rpe and rpe is not None and self.rpe_v is not None: r_k = self.rpe_k(rpe) # [q, B*h, d] * [q, k, d] -> [B*h, q, k] attention_weights_rpe = torch.matmul( q.transpose(0, 1), r_k.transpose(1, 2) ).transpose(0, 1) attention_weights = attention_weights + attention_weights_rpe attention_weights_float = attention_weights.float() return attention_weights, attention_weights_float, v def prepare_attention_output( self, attention_weights: Tensor, attention_weights_float: Tensor, v: Tensor, input_shape: Tuple[int, int, int], key_length: int, padding_mask: Optional[Tensor], rpe: Optional[Tensor], ) -> Tensor: T, B, D = input_shape if padding_mask is not None: attention_weights_float = attention_weights_float.view( B, self.num_heads, T, key_length ) attention_weights_float = attention_weights_float.masked_fill( padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf") ) attention_weights_float = attention_weights_float.view( B * self.num_heads, T, key_length ) if self.std_scale is not None: attention_weights_float = attention_suppression( attention_weights_float, self.std_scale ) attention_weights_float = torch.nn.functional.softmax( attention_weights_float, dim=-1 ) 
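        # cast back from fp32 (used for a numerically stable softmax) to the input dtype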
attention_weights = attention_weights_float.type_as(attention_weights) attention_probs = torch.nn.functional.dropout( attention_weights, p=self.dropout, training=self.training ) # [T, key_length, B, n_head]+ [key_length, B, n_head, d_head] # -> [T, B, n_head, d_head] attention = torch.bmm(attention_probs, v) if self.use_rpe and rpe is not None and self.rpe_v is not None: r_v = self.rpe_v(rpe) attention_rpe = torch.matmul( attention_probs.transpose(0, 1), r_v ).transpose(0, 1) if self.rpe_old_option: attention += attention + attention_rpe else: attention = attention + attention_rpe assert list(attention.shape) == [B * self.num_heads, T, self.head_dim] attention = attention.transpose(0, 1).contiguous().view(T, B, self.embed_dim) rc_output_memory = self.out_proj(attention) return rc_output_memory @torch.jit.unused def forward( self, input: Tensor, lengths: Tensor, mems: Tensor, attention_mask: Tensor, pre_mems: Optional[Tensor] = None, left_context_key: Optional[Tensor] = None, left_context_val: Optional[Tensor] = None, rpe: Optional[Tensor] = None, ) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """ forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in training. args: input: formed in the following way [right_context_0, right_contex_1, ..., seg_0, seg_1, ..., summary_0, summary_1,..] lengths: the length of query which is [seg_0, seg_1, ....] mems: [mem_0, mem_1, ...]. attention_mask: attention mask for query = [right_context, query, summary] key = [mem, right_context, query]. This is only used for traing. """ if self.use_mem: mem_length = mems.size(0) summary_length = mem_length + 1 if pre_mems is not None: mems = torch.cat([pre_mems, mems], dim=0) else: mem_length = 0 summary_length = 0 # In training, lc_length = 0 if left_context_key is not None: lc_length = left_context_key.size(0) else: lc_length = 0 results = self.prepare_qkv( input=input, mems=mems, lengths=lengths, summary_length=summary_length, lc_length=lc_length, ) result_qkv, input_shape, result_lengths_info, padding_mask = results q, k, v = result_qkv ( mem_length, utterance_length, right_context_blocks_length, key_length, ) = result_lengths_info if left_context_key is not None: # add the cache key and value new_k = torch.cat( [ k[: mem_length + right_context_blocks_length, :, :], left_context_key, k[-utterance_length:, :, :], ], dim=0, ) new_v = torch.cat( [ v[: mem_length + right_context_blocks_length, :, :], left_context_val, v[-utterance_length:, :, :], ], dim=0, ) next_k = new_k[mem_length + right_context_blocks_length :, :, :] next_v = new_v[mem_length + right_context_blocks_length :, :, :] else: new_k = k new_v = v next_k = None next_v = None attention_weights, attention_weights_float, v = self.prepare_attention_weights( q=q, new_k=new_k, new_v=new_v, input_shape=input_shape, rpe=rpe, ) # mask attention attention_mask = attention_mask.unsqueeze(0) attention_weights_float = attention_weights_float.masked_fill( attention_mask, float(self.negative_inf) ) rc_output_memory = self.prepare_attention_output( attention_weights=attention_weights, attention_weights_float=attention_weights_float, v=v, input_shape=input_shape, key_length=key_length, padding_mask=padding_mask, rpe=rpe, ) if self.use_mem: # next_m length equals to summary length - 1 # last memory is ignored if self.mini_batches: next_m = rc_output_memory[-summary_length:] else: next_m = rc_output_memory[-summary_length:-1] next_m = self.squash_mem(next_m) # rc and output rc_output = rc_output_memory[:-summary_length] if not self.nonlinear_squash_mem: next_m = 
torch.clamp(next_m, min=-10, max=10) else: next_m = mems rc_output = rc_output_memory return rc_output, next_m, next_k, next_v @torch.jit.export def forward_jit( self, input: Tensor, lengths: Tensor, mems: Tensor, left_context_key: Tensor, left_context_val: Tensor, rpe: Optional[Tensor], ) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """ forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in decoding. args: input: formed in the following way [right_context_0, right_contex_1, ..., seg_0, seg_1, ..., summary_0, summary_1,..] lengths: the length of query which is [seg_0, seg_1, ....] mems: [mem_0, mem_1, ...]. left_context_key: left_context for key part. This is only used for online decoding. In training, this is empty tensor left_context_val: left_context for value part. This is only used for online decoding. In training, this is empty tensor """ lc_length = left_context_key.size(0) # In decoding, summary_length = 1 or 0 if self.use_mem: summary_length = 1 else: summary_length = 0 results = self.prepare_qkv( input=input, mems=mems, lengths=lengths, summary_length=summary_length, lc_length=lc_length, ) result_qkv, input_shape, result_lengths_info, padding_mask = results q, k, v = result_qkv ( mem_length, utterance_length, right_context_blocks_length, key_length, ) = result_lengths_info # add the cache key and value new_k = torch.cat( [ k[: mem_length + right_context_blocks_length, :, :], left_context_key, k[-utterance_length:, :, :], ], dim=0, ) new_v = torch.cat( [ v[: mem_length + right_context_blocks_length, :, :], left_context_val, v[-utterance_length:, :, :], ], dim=0, ) next_k = new_k[mem_length + right_context_blocks_length :, :, :] next_v = new_v[mem_length + right_context_blocks_length :, :, :] attention_weights, attention_weights_float, v = self.prepare_attention_weights( q=q, new_k=new_k, new_v=new_v, input_shape=input_shape, rpe=rpe, ) # In online decoding, we don't have attention mask. But we still need # to disable the attention from summary query to memory attention_weights_float[:, -1, :mem_length] = float(self.negative_inf) rc_output_memory = self.prepare_attention_output( attention_weights=attention_weights, attention_weights_float=attention_weights_float, v=v, input_shape=input_shape, key_length=key_length, padding_mask=padding_mask, rpe=rpe, ) # In decoding, summary length is 1 if self.use_mem: next_m = rc_output_memory[-1:] next_m = self.squash_mem(next_m) # rc and output rc_output = rc_output_memory[:-1] if not self.nonlinear_squash_mem: next_m = torch.clamp(next_m, min=-10, max=10) else: rc_output = rc_output_memory # empty tensor as input mems next_m = mems return rc_output, next_m, next_k, next_v def quantize_(self, params=None): if params and "per_channel" in params and params["per_channel"]: qconfig = per_channel_dynamic_qconfig else: qconfig = default_dynamic_qconfig torch.ao.quantization.quantize_dynamic( self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True ) return self class NoSegAugmentedMemoryTransformer(nn.Module): """ Whole utterance augmented memory transformer. This is not pyspeech nn layer. It is used as a module in a master layer where multiple transformers is used. 
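
    Illustrative example (editor's addition; the sizes are made up):

        layer = NoSegAugmentedMemoryTransformer(
            input_dim=512, num_heads=8, ffn_dim=2048,
            dropout_in_attn=0.0, dropout_on_attn=0.1,
            dropout_on_fc1=0.1, dropout_on_fc2=0.1,
            segment_size=128,
        )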
""" def __init__( self, input_dim, num_heads, ffn_dim, dropout_in_attn=0.0, dropout_on_attn=None, dropout_on_fc1=None, dropout_on_fc2=None, activation_fn="relu", tanh_on_mem=False, std_scale=None, scaled_init=False, segment_size=128, use_mem=True, mini_batches=False, negative_inf="-inf", layer_index=-1, summarization_method="mean", max_relative_position=0, rpe_old_option=True, ): super(NoSegAugmentedMemoryTransformer, self).__init__() self.attention = NoSegAugmentedMemoryMultiheadAttentionBmm( input_dim=input_dim, num_heads=num_heads, dropout=dropout_in_attn, scaled_init=scaled_init, tanh_on_mem=tanh_on_mem, std_scale=std_scale, use_mem=use_mem, mini_batches=mini_batches, negative_inf=negative_inf, layer_index=layer_index, max_relative_position=max_relative_position, ) self.dropout = nn.Dropout(dropout_on_attn) self.pos_ff = PositionwiseFF( input_dim=input_dim, ffn_dim=ffn_dim, dropout_on_fc1=dropout_on_fc1, dropout_on_fc2=dropout_on_fc2, activation_fn=activation_fn, ) self.layer_norm_pre = Fp32LayerNorm(input_dim) self.layer_norm = Fp32LayerNorm(input_dim) self.segment_size = segment_size self.use_mem = use_mem self.memory_op = SummarizationLayer( summarization_method, segment_size, input_dim ) def set_mini_batches(self, mini_batches): self.attention.mini_batches = mini_batches def gen_summary_queries(self, input): sum_input = self.memory_op(input) return sum_input def pre_attention_ops(self, input, right_context_blocks): rc_length = right_context_blocks.size(0) input_length = input.size(0) rc_and_input = torch.cat([right_context_blocks, input], dim=0) residual_input = rc_and_input rc_and_input = self.layer_norm_pre(rc_and_input) query_input = rc_and_input[-input_length:, :, :] return rc_length, input_length, residual_input, query_input, rc_and_input def after_attention_ops(self, attention_output, residual_input): output = self.dropout(attention_output) output = output + residual_input output = self.pos_ff(output) output = self.layer_norm(output) return output @torch.jit.export def forward_jit( self, input: Tensor, lengths: Tensor, mems: Tensor, left_context_key: Tensor, left_context_val: Tensor, right_context_blocks: Tensor, rpe: Optional[Tensor], ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: results = self.pre_attention_ops(input, right_context_blocks) rc_length, input_length, residual_input, query_input, rc_and_input = results # In online decoding, the summary query size is always 1 or 0 if self.use_mem: summary_query = self.gen_summary_queries(query_input) summary_query = summary_query[0:1, :, :] rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0) else: rc_qu_su = rc_and_input rc_output, next_m, next_k, next_v = self.attention.forward_jit( input=rc_qu_su, lengths=lengths, mems=mems, left_context_key=left_context_key, left_context_val=left_context_val, rpe=rpe, ) rc_output = self.after_attention_ops(rc_output, residual_input) results = ( rc_output[-input_length:, :, :], next_m, rc_output[0:rc_length, :, :], next_k, next_v, ) return results @torch.jit.unused def forward( self, input, lengths, mems, right_context_blocks, attention_mask, pre_mems, left_context_key, left_context_val, rpe, ): results = self.pre_attention_ops(input, right_context_blocks) rc_length, input_length, residual_input, query_input, rc_and_input = results if self.use_mem: summary_query = self.gen_summary_queries(query_input) rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0) else: rc_qu_su = rc_and_input rc_output, next_m, next_k, next_v = self.attention( input=rc_qu_su, lengths=lengths, 
mems=mems, attention_mask=attention_mask, pre_mems=pre_mems, left_context_key=left_context_key, left_context_val=left_context_val, rpe=rpe, ) # [TODO] Note memory did not go through pos_ff. What happen if we pass # memory through the pos_ff as well? rc_output = self.after_attention_ops(rc_output, residual_input) results = ( rc_output[-input_length:, :, :], next_m, rc_output[0:rc_length, :, :], next_k, next_v, ) return results class NoSegAugmentedMemoryTransformerEncoderLayer(FairseqEncoder): """ Whole utterance augmented memory transformer encoder layer. This is a master layer where we can define multiple augmented memory transformers. There are two reasons to setup the master layer. 1. We only need to define once about the attention mask. All the layers in the master layer share the same mask. 2. pyspeech nn layer has special input and output format. Defining one master layer is easier to passing memory between different layes inside the master layer args: input_dim: input embedding dimension num_heads: number of heads in multihead self-attention ffn_dim: ffn dimension in FFN layer num_layers: number of augmented memory transformer layers dropout_in_attn: dropout used in multi-head self-attention dropout_on_attn: dropout used for output from te multihead self-attention dropout_on_fc1: dropout used in FFN layer for the first linear layer dropout_on_fc2: dropout used in FFN layer for the second linear layer segment_size: segment size for each segment context_config: (left_context_size, right_context_size) defines the surround context size for each segment max_memory_size: maximum memory size used for each segment scaled_init: whether use scaled init for weight initialization in attention layer std_scale: if std_scale is not None. The weak attention suppression is turned on. For std_scale = 0.5, all the attention smaller than mean + 0.5 * std will be suppressed. activation_fn: activation function used in FFN layer. [ReLU, GELU] supported tanh_on_mem: whether use tanh on memory mini_batches: use mini-btach training negative_inf: the negative infinity value used in attention masking. default is "-inf". For some situation, e.g. LM. it is better to use "-1e8" to avoid nan issue. summarization_method: method to generate segment summrization embedding max_relative_position: max relatie position for relative position embedding rpe_old_option: To be compatible with previous model. The previous model was trained with attention += attention + rpe. The correct equation should be attention = attention + rpe [TODO]: remove the rpe_old_option by the end of 2021 Q1. """ def __init__( self, input_dim, num_heads, ffn_dim, num_layers=1, dropout_in_attn=0.0, dropout_on_attn=0.0, dropout_on_fc1=0.0, dropout_on_fc2=0.0, segment_size=128, context_config=(0, 0), max_memory_size=0, scaled_init=True, std_scale=None, activation_fn="relu", tanh_on_mem=False, mini_batches=False, negative_inf="-inf", deep_init=True, summarization_method="mean", max_relative_position=0, rpe_old_option=True, ): super().__init__(None) if input_dim % num_heads: raise ValueError( "input_dim ({}) must be divisible by num_heads ({})".format( input_dim, num_heads ) ) # we used to support growing memory size. However, it will cause # cross stream batching failure. Now we need to have exact max memory size if max_memory_size < 0: raise ValueError("max_memory_size must be >= 0") # Only assign right_context. In decoding, left context will be cached. 
# No need to let the online decoder to re-assign the left context self.left_context, self.right_context = context_config self.segment_size = segment_size self.memory_dim = input_dim self.max_memory_size = max_memory_size self.mini_batches = mini_batches if self.max_memory_size != 0: self.use_mem = True else: self.use_mem = False self.memory_op = SummarizationLayer( summarization_method, segment_size, input_dim ) self.layers = torch.nn.ModuleList() self.num_layers = num_layers self.max_relative_position = max_relative_position if self.max_relative_position > 0: self.use_rpe = True else: self.use_rpe = False for i in range(self.num_layers): if deep_init: layer_index = i else: layer_index = -1 self.layers.append( NoSegAugmentedMemoryTransformer( num_heads=num_heads, input_dim=input_dim, ffn_dim=ffn_dim, dropout_in_attn=dropout_in_attn, dropout_on_attn=dropout_on_attn, dropout_on_fc1=dropout_on_fc1, dropout_on_fc2=dropout_on_fc2, segment_size=segment_size, std_scale=std_scale, activation_fn=activation_fn, tanh_on_mem=tanh_on_mem, scaled_init=scaled_init, use_mem=self.use_mem, mini_batches=mini_batches, negative_inf=negative_inf, layer_index=layer_index, summarization_method=summarization_method, max_relative_position=max_relative_position, rpe_old_option=rpe_old_option, ) ) def set_mini_batches(self, mini_batches): # handy function only used for unit test self.mini_batches = mini_batches for layer in self.layers: layer.set_mini_batches(mini_batches) def _get_relative_position( self, input: Tensor, max_relative_position: int, left_context_length: int, past_length: int, is_decoding: bool, ): # For training, we copy the right context to the start of the utterance # First dimension in distance is corresponding to query. # [right context, utterance, summary vector] # Second dimension in distance is corresponding to key. # [Memory bank, right context, utterance] # For summary vector in query part, the distance with # all other position is 2*max_position. For memory bank in key, # the distance with all other positions is 0. T, B, D = input.shape num_segs = math.ceil((T - self.right_context) / self.segment_size) # utterance u_st = past_length * self.segment_size u_ed = u_st + T utterance_ranges = torch.arange(u_st, u_ed - self.right_context) # left context. Only in minibatch or decoding left_context_ranges = torch.arange(u_st - left_context_length, u_st) # Right context block # right context + utterance right_context_blocks = [] for i in range(0, num_segs - 1): st = (i + 1) * self.segment_size + u_st ed = st + self.right_context assert ed < u_ed temp = torch.arange(st, ed) right_context_blocks.append(temp) right_context_blocks.append(torch.arange(u_ed - self.right_context, u_ed)) right_context_ranges = torch.cat(right_context_blocks) if self.use_mem: # Memory bank # The position for memory -n, .., -1 if is_decoding: memory_size = min(past_length, self.max_memory_size) else: memory_size = num_segs + past_length - 1 memory_bank_ranges = torch.arange( -max_relative_position - 1, -max_relative_position - 1 - memory_size, -1 ) # summary vector # The position for summary vector as the T+max_relative_position+1. 
# After the clamping, the relative position is max_relative_position summary_pos_st = u_ed + max_relative_position + 1 summary_vector_ranges = torch.arange( summary_pos_st, summary_pos_st + num_segs ) key_ranges = torch.cat( [ memory_bank_ranges, right_context_ranges, left_context_ranges, utterance_ranges, ] ) query_ranges = torch.cat( [right_context_ranges, utterance_ranges, summary_vector_ranges] ) else: key_ranges = torch.cat( [right_context_ranges, left_context_ranges, utterance_ranges] ) query_ranges = torch.cat([right_context_ranges, utterance_ranges]) distance = key_ranges[None, :] - query_ranges[:, None] distance_clamp = ( torch.clamp(distance, -max_relative_position, max_relative_position) + max_relative_position ) distance_clamp = distance_clamp.to(input.device).long().detach() return distance_clamp def _get_attention_mask(self, input, past_length=0, left_context_cache=0): # attention mask for each query contains three parts: # 1. memory part # 2. left_context + segment # 3. right_context_block # so for each segment and its correspoinding right context block, # the attention matrix is formed by 9 parts: # [0, m, 0, 0, right_context, 0, 0, seg, 0] # [before memory, memory, after memory, before right context, right_context, # after right context, before seg, seg, after seg] # # Query is formed in the way as [right_context_blocks, utterance, summary] # # Note: put m and right_context before segment is convenient # for padding_mask operation. # Key lengths = m_length + right_context_block_length + lengths utterance_length, batch_size, _ = input.shape summary_length = math.ceil(utterance_length / self.segment_size) num_segs = summary_length rc_length = self.right_context * num_segs rc = self.right_context lc = self.left_context # using mini-batches, there is left context cache available for current # sequence. 
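        # lcc: number of left-context frames already cached from the previous chunk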
lcc = left_context_cache # max_memory_size is 0 then we don't have memory and summary # past_length is the memory carry from previous sequence if self.use_mem: mem_length = num_segs - 1 + past_length else: mem_length = 0 rc_mask = [] query_mask = [] summary_mask = [] for j in range(0, num_segs): ssize = min(self.segment_size, utterance_length - j * self.segment_size) rc_size = rc rc_mat = [] q_mat = [] s_mat = [] m_start = max(j + past_length - self.max_memory_size, 0) # max_memory_size is 0, then we don't use memory if self.use_mem: # part 0: before memory rc_mat.append(input.new_zeros(rc_size, m_start)) q_mat.append(input.new_zeros(ssize, m_start)) s_mat.append(input.new_zeros(1, m_start)) # part 1: memory col_1 = j + past_length - m_start rc_mat.append(torch.ones(rc_size, col_1, device=input.device)) q_mat.append(torch.ones(ssize, col_1, device=input.device)) # based on D22875746, disable summary query attention # on memeory is better for long form utterance s_mat.append(input.new_zeros(1, col_1)) # part 2: after memory col_2 = mem_length - (j + past_length) rc_mat.append(input.new_zeros(rc_size, col_2)) q_mat.append(input.new_zeros(ssize, col_2)) s_mat.append(input.new_zeros(1, col_2)) # part 3: before right context rc_start = j * rc rc_mat.append(input.new_zeros(rc_size, rc_start)) q_mat.append(input.new_zeros(ssize, rc_start)) s_mat.append(input.new_zeros(1, rc_start)) # part 4: right context rc_end = rc_start + rc col_4 = rc rc_mat.append(torch.ones(rc_size, col_4, device=input.device)) q_mat.append(torch.ones(ssize, col_4, device=input.device)) s_mat.append(torch.ones(1, col_4, device=input.device)) # part 5: after right context col_5 = rc_length - rc_end rc_mat.append(input.new_zeros(rc_size, col_5)) q_mat.append(input.new_zeros(ssize, col_5)) s_mat.append(input.new_zeros(1, col_5)) # part 6: before query segment seg_start = max(j * self.segment_size + lcc - lc, 0) rc_mat.append(input.new_zeros(rc_size, seg_start)) q_mat.append(input.new_zeros(ssize, seg_start)) s_mat.append(input.new_zeros(1, seg_start)) # part 7: query segment # note: right context is put in right context block # here we only need to consider about left context seg_end = min((j + 1) * self.segment_size + lcc, utterance_length + lcc) col_7 = seg_end - seg_start rc_mat.append(torch.ones(rc_size, col_7, device=input.device)) q_mat.append(torch.ones(ssize, col_7, device=input.device)) s_mat.append(torch.ones(1, col_7, device=input.device)) # part 8: after query segment col_8 = utterance_length + lcc - seg_end rc_mat.append(input.new_zeros(rc_size, col_8)) q_mat.append(input.new_zeros(ssize, col_8)) s_mat.append(input.new_zeros(1, col_8)) rc_mask.append(torch.cat(rc_mat, dim=1)) query_mask.append(torch.cat(q_mat, dim=1)) summary_mask.append(torch.cat(s_mat, dim=1)) # no memory, then we don't need summary either if self.use_mem: attention_mask = ( 1 - torch.cat( [ torch.cat(rc_mask, dim=0), torch.cat(query_mask, dim=0), torch.cat(summary_mask, dim=0), ], dim=0, ) ).to(torch.bool) else: attention_mask = ( 1 - torch.cat( [torch.cat(rc_mask, dim=0), torch.cat(query_mask, dim=0)], dim=0 ) ).to(torch.bool) return attention_mask @torch.jit.export def init_state( self, batch_size: int, device: Optional[Device] = None ) -> List[Tensor]: empty_memory = torch.zeros( self.num_layers, self.max_memory_size, batch_size, self.memory_dim, device=device, ) left_context_key = torch.zeros( self.num_layers, self.left_context, batch_size, self.memory_dim, device=device, ) left_context_val = torch.zeros( self.num_layers, self.left_context, 
batch_size, self.memory_dim, device=device, ) past_length = torch.zeros(1, batch_size, dtype=torch.int32, device=device) return [empty_memory, left_context_key, left_context_val, past_length] @torch.jit.export def batch_state(self, states: List[List[Tensor]]) -> List[Tensor]: if len(states) == 0: return [] batched_m = [] batched_lc_key = [] batched_lc_val = [] batched_past_length = [] for state in states: if len(state) == 0: continue m, lc_key, lc_val, past_length = state batched_m.append(m) batched_lc_key.append(lc_key) batched_lc_val.append(lc_val) batched_past_length.append(past_length) if ( (len(batched_m) == 0) or (len(batched_lc_key) == 0) or (len(batched_lc_val) == 0) or (len(batched_past_length) == 0) ): return [ torch.tensor([]), torch.tensor([]), torch.tensor([]), torch.tensor([]), ] batched_m = torch.cat(batched_m, dim=2) batched_lc_key = torch.cat(batched_lc_key, dim=2) batched_lc_val = torch.cat(batched_lc_val, dim=2) batched_past_length = torch.cat(batched_past_length, dim=1) return [batched_m, batched_lc_key, batched_lc_val, batched_past_length] @torch.jit.export def reorder_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]: if len(state) == 0: return [] m, lc_key, lc_val, past_length = state indices = indices.to(device=m.device) reord_m = torch.index_select(m, 2, indices) reord_lc_key = torch.index_select(lc_key, 2, indices) reord_lc_val = torch.index_select(lc_val, 2, indices) reord_past_length = torch.index_select(past_length, 1, indices) return [reord_m, reord_lc_key, reord_lc_val, reord_past_length] @torch.jit.export def reset_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]: m, lc_key, lc_val, past_length = state m = m.index_fill(dim=2, index=indices, value=0.0) lc_key = lc_key.index_fill(dim=2, index=indices, value=0.0) lc_val = lc_val.index_fill(dim=2, index=indices, value=0.0) past_length = past_length.index_fill(dim=1, index=indices, value=0) return [m, lc_key, lc_val, past_length] @torch.jit.export def state_size(self) -> int: return 4 @torch.jit.export def batch_size_in_state( self, state: Optional[List[Tensor]], sloppy: bool = True ) -> Optional[int]: if state is None: return None return state[0].size(2) def gen_summary_queries(self, input): sum_input = self.memory_op(input) return sum_input def _gen_right_context_padded_input(self, input): # This function deals with input that is already # padded with right context (e.g. minibatch training) right_context_blocks = [] T, B, D = input.shape num_segs = math.ceil((T - self.right_context) / self.segment_size) for i in range(0, num_segs - 1): st = (i + 1) * self.segment_size ed = st + self.right_context assert ed < T temp = input[st:ed, :, :] right_context_blocks.append(temp) # last segment right context is already available right_context_blocks.append(input[T - self.right_context :, :, :]) return torch.cat(right_context_blocks, dim=0) def _gen_segs_right_context(self, input, lengths): segments = [] T, B, D = input.size() nT = T - self.right_context # assume input is right context padded num_segs = math.ceil(nT / self.segment_size) # pad zeros to the utterance to make sure each # segment has the same right context. 
For the for i in range(0, num_segs - 1): st = i * self.segment_size ed = min(T, st + self.segment_size + self.right_context) temp = input[st:ed, :, :] rest_lengths = torch.clamp( lengths - self.segment_size, min=0, max=nT - (i + 1) * self.segment_size ) segments.append((temp, lengths - rest_lengths + self.right_context)) lengths = rest_lengths last_seg = input[st + self.segment_size :, :, :] segments.append((last_seg, rest_lengths + self.right_context)) return segments @torch.jit.unused def forward( self, input: Tensor, padding_masks: Tensor, state: Optional[List[Tensor]] = None ) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]: # Xutai: originally the second argument is lengths. lengths = (~padding_masks).sum(dim=1).long() # mini batch training. if self.mini_batches: return self.forward_mini_batches(input, lengths, state) # regular full sequence training. Note, assume the right context in provided # in the input. T, B, D = input.size() right_context_blocks = self._gen_right_context_padded_input(input) # generate the relative positional embedding if self.use_rpe: rpe = self._get_relative_position( input=input, max_relative_position=self.max_relative_position, left_context_length=0, past_length=0, is_decoding=False, ) else: rpe = None input = input[: T - self.right_context, :, :] attention_mask = self._get_attention_mask(input) # firt layer use each segment mean as memory # ignore the last one seg average if self.use_mem: mems = self.gen_summary_queries(input)[:-1, :, :] else: mems = torch.zeros(0, input.size(1), input.size(2), device=input.device) mems = mems.type_as(input) output = input all_outputs = [] for layer in self.layers: output, mems, right_context_blocks, _, _ = layer( input=output, lengths=lengths, attention_mask=attention_mask, mems=mems, right_context_blocks=right_context_blocks, pre_mems=None, left_context_key=None, left_context_val=None, rpe=rpe, ) all_outputs.append(output) return output, padding_masks, [], all_outputs def forward_jit_mini_batch_init( self, seg: Tensor, state: Optional[List[Tensor]] = None, is_decoding: bool = False, ): # Prepare state. In whole sequence training, state is ignored. # For minibatch training, we need to prepare state if state is None: state = self.init_state(batch_size=seg.size(1), device=seg.device) if seg.dtype == torch.half: state = [state[0].half(), state[1].half(), state[2].half(), state[3]] if self.use_mem: # note input average only on seg, not on right context # first layer use each segmetn mean as memory. 
the last # one segment average is used in state full_mems = self.gen_summary_queries(seg) if is_decoding: mems = full_mems[0:1, :, :] state_mems = torch.cat([state[0][0], mems], dim=0) else: mems = full_mems[:-1, :, :] state_mems = torch.cat([state[0][0], full_mems], dim=0) else: mems = state[0][0] state_mems = mems # track processed segment number or memory number # the same batch as the same bumber of past length past_length = state[3][0][0].item() past_left_context = min(past_length * self.segment_size, self.left_context) past_length = min(self.max_memory_size, past_length) return state, mems, state_mems, past_length, past_left_context def state_update_before( self, layer: int, state: List[Tensor], past_length: int, past_left_context: int ): pre_mems = state[0][layer][self.max_memory_size - past_length :, :, :] lc_key = state[1][layer][self.left_context - past_left_context :, :, :] lc_val = state[2][layer][self.left_context - past_left_context :, :, :] return pre_mems, lc_key, lc_val def state_update_after( self, layer: int, state: List[Tensor], mems: Tensor, next_key: Tensor, next_val: Tensor, mems_list: List[Tensor], lc_key_list: List[Tensor], lc_val_list: List[Tensor], ): # mems is used for next layer if layer < self.num_layers - 1: state_mems = torch.cat([state[0][layer + 1], mems], dim=0) mems_list.append(state_mems[-self.max_memory_size :, :, :]) # when mems pass to next sequence, we need the last memory. when mems # use for the next layer, we can ignore the last memory mems = mems[:-1, :, :] # note state[1][i] and state[2][i] original length equals to self.left_context new_k = torch.cat([state[1][layer], next_key], dim=0) new_v = torch.cat([state[2][layer], next_val], dim=0) lc_key_list.append(new_k[-self.left_context :, :, :]) lc_val_list.append(new_v[-self.left_context :, :, :]) return mems_list, lc_key_list, lc_val_list, mems def state_update_after_loop( self, state: List[Tensor], mems_list: List[Tensor], lc_key_list: List[Tensor], lc_val_list: List[Tensor], update_length: int, ): state[0] = torch.stack(mems_list, dim=0) state[1] = torch.stack(lc_key_list, dim=0) state[2] = torch.stack(lc_val_list, dim=0) state[3] = state[3] + update_length return state @torch.jit.unused def forward_mini_batches( self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None ) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]: T, B, D = input.size() # input without right context seg = input[: T - self.right_context, :, :] # get right context blocks right_context_blocks = self._gen_right_context_padded_input(input) mems_list = [] lc_key_list = [] lc_val_list = [] results = self.forward_jit_mini_batch_init(seg, state, False) state, mems, state_mems, past_length, past_left_context = results # relative position embedding if self.use_rpe: rpe = self._get_relative_position( input=input, max_relative_position=self.max_relative_position, left_context_length=past_left_context, past_length=past_length, is_decoding=False, ) else: rpe = None # get attention mask based on seg (not include right context) and available # left context attention_mask = self._get_attention_mask(seg, past_length, past_left_context) mems_list.append(state_mems[-self.max_memory_size :, :, :]) output = seg i = 0 all_outputs = [] for layer in self.layers: # In order to make cross stream batching work, mem, left context key # and left context value in the state should always be the same shape. # We use the past length to track the processed segment number. 
In this # way, we take out the essential memory, left context key and left # context val from the state. After finish the forward for current segment # we add the new memory, left context key and left context value into the # staate and trim out the oldest part to keep the shape consistent. pre_mems, lc_key, lc_val = self.state_update_before( i, state, past_length, past_left_context ) output, mems, right_context_blocks, next_key, next_val = layer.forward( input=output, lengths=lengths, attention_mask=attention_mask, mems=mems, right_context_blocks=right_context_blocks, pre_mems=pre_mems, left_context_key=lc_key, left_context_val=lc_val, rpe=rpe, ) all_outputs.append(output) mems_list, lc_key_list, lc_val_list, mems = self.state_update_after( layer=i, state=state, mems=mems, next_key=next_key, next_val=next_val, mems_list=mems_list, lc_key_list=lc_key_list, lc_val_list=lc_val_list, ) i += 1 # update state update_length = math.ceil((T - self.right_context) / self.segment_size) state = self.state_update_after_loop( state=state, mems_list=mems_list, lc_key_list=lc_key_list, lc_val_list=lc_val_list, update_length=update_length, ) return output, lengths, state, all_outputs def forward_jit_test( self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None ) -> Tuple[Tensor, Tensor, List[Tensor]]: """ This one simulate sequence encoder forward jit. This is for unit test purpose. It is not used in training or decoding. Note, extra_right_context is set in the model. In unit test, input = [utterance, right_context], lengths = [utterance_length]. args: input: input utterance lengths: utterance input length state: None here. input is whole utterance """ # [TODO] sequence_to_segment has bug in lengths. seg_src_tokens_lengths = self._gen_segs_right_context(input, lengths) seg_enc_tokens_lengths: List[Tuple[Tensor, Tensor]] = [] state: Optional[List[Tensor]] = None for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths: seg_enc_tokens, seg_enc_lengths, state = self.forward_jit( input=seg_src_tokens, lengths=seg_src_lengths, state=state ) seg_enc_tokens_lengths.append((seg_enc_tokens, seg_enc_lengths)) enc_tokens, enc_lengths = segments_to_sequence( segments=seg_enc_tokens_lengths, time_axis=0 ) state = [] # returns trivial state return enc_tokens, enc_lengths, state @torch.jit.export def forward_jit( self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None ) -> Tuple[Tensor, Tensor, List[Tensor]]: """ Forward helper for online decoding. args: input: [seg, right_context]. We assume in online we always padding the right context to the preset right context size. For the last segment, we may have short segment size, but right context size is the same as other segments lengths: utterance input length is the utterance segment length and right context size state: [memory, left_context_key, left_context_val]. 
To improve throughput, in addition to memory, we also cache key and value for left_context in multihead self-attention """ # In online decoding, input = [segment, right_context] # Lengths = [segment_length, right_context_length] # so we need strip right context in output T, B, D = input.size() rc_str = T - self.right_context rc_end = T right_context_blocks = input[rc_str:rc_end, :, :] seg = input[:rc_str, :, :] lengths = torch.clamp(lengths - self.right_context, min=0) mems_list = [] lc_key_list = [] lc_val_list = [] results = self.forward_jit_mini_batch_init(seg, state, True) state, mems, state_mems, past_length, past_left_context = results # relative position embedding if self.use_rpe: rpe = self._get_relative_position( input=input, max_relative_position=self.max_relative_position, left_context_length=past_left_context, past_length=past_length, is_decoding=True, ) else: rpe = None # memory for first layer. mems_list.append(state_mems[-self.max_memory_size :, :, :]) output = seg i = 0 for layer in self.layers: # In order to make cross stream batching work, mem, left context key # and left context value in the state should always be the same shape. # We use the past length to track the processed segment number. In this # way, we take out the essential memory, left context key and left # context val from the state. After finish the forward for current segment # we add the new memory, left context key and left context value into the # staate and trim out the oldest part to keep the shape consistent. true_mems, lc_key, lc_val = self.state_update_before( layer=i, state=state, past_length=past_length, past_left_context=past_left_context, ) output, mems, right_context_blocks, next_key, next_val = layer.forward_jit( input=output, lengths=lengths, mems=true_mems, right_context_blocks=right_context_blocks, left_context_key=lc_key, left_context_val=lc_val, rpe=rpe, ) # mems is used for next layer mems_list, lc_key_list, lc_val_list, _ = self.state_update_after( layer=i, state=state, mems_list=mems_list, mems=mems, next_key=next_key, next_val=next_val, lc_key_list=lc_key_list, lc_val_list=lc_val_list, ) i += 1 # update state state = self.state_update_after_loop( state=state, mems_list=mems_list, lc_key_list=lc_key_list, lc_val_list=lc_val_list, update_length=1, ) return output, lengths, state def quantize_(self, params=None): if params and "per_channel" in params and params["per_channel"]: qconfig = per_channel_dynamic_qconfig else: qconfig = default_dynamic_qconfig torch.ao.quantization.quantize_dynamic( self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True ) return self # ------------------------------------------------------------------------------ # Emformer encoder for seq2seq model # This is a wrapper over the original emformer # ------------------------------------------------------------------------------ def emformer_encoder(klass): class SpeechEncoder(klass): def __init__(self, args): super().__init__(args) stride = SpeechEncoder.conv_layer_stride(args) trf_left_context = args.segment_left_context // stride trf_right_context = args.segment_right_context // stride context_config = [trf_left_context, trf_right_context] self.transformer_layers = nn.ModuleList( [ NoSegAugmentedMemoryTransformerEncoderLayer( input_dim=args.encoder_embed_dim, num_heads=args.encoder_attention_heads, ffn_dim=args.encoder_ffn_embed_dim, num_layers=args.encoder_layers, dropout_in_attn=args.dropout, dropout_on_attn=args.dropout, dropout_on_fc1=args.dropout, dropout_on_fc2=args.dropout, 
activation_fn=args.activation_fn, context_config=context_config, segment_size=args.segment_length, max_memory_size=args.max_memory_size, scaled_init=True, # TODO: use constant for now. tanh_on_mem=args.amtrf_tanh_on_mem, ) ] ) def forward(self, src_tokens, src_lengths): encoder_out = super().forward(src_tokens, src_lengths) output = encoder_out["encoder_out"][0] encoder_padding_masks = encoder_out["encoder_padding_mask"][0] # This is because that in the original implementation # the output didn't consider the last segment as right context. encoder_padding_masks = encoder_padding_masks[:, : output.size(0)] return { "encoder_out": [output], "encoder_padding_mask": [encoder_padding_masks], "encoder_embedding": [], "encoder_states": [], "src_tokens": [], "src_lengths": [], } @staticmethod def conv_layer_stride(args): # TODO: make it configurable from the args return 4 SpeechEncoder.__name__ = klass.__name__ return SpeechEncoder
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/speech_to_text/modules/emformer.py
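A minimal, self-contained sketch (assuming illustrative names such as StreamingCache, segments_seen and frames_per_segment, none of which exist in fairseq) of the fixed-shape state handling that state_update_before / state_update_after implement above: buffers stay at their maximum size so different streams can be batched together, and a past-length counter marks how much of each buffer is actually valid.

import torch


class StreamingCache:
    """Fixed-shape memory / left-context cache, in the spirit of the Emformer
    state handling above. All sizes and names are illustrative assumptions."""

    def __init__(self, max_memory_size: int, left_context: int, dim: int, batch: int):
        self.max_memory_size = max_memory_size
        self.left_context = left_context
        # Buffers are pre-allocated at full length and padded with zeros on the
        # left, so their shape never changes across segments or streams.
        self.mems = torch.zeros(max_memory_size, batch, dim)
        self.lc_key = torch.zeros(left_context, batch, dim)
        self.segments_seen = 0  # plays the role of past_length in the state

    def before_segment(self, frames_per_segment: int):
        # Analogue of state_update_before: slice out only the valid suffix.
        valid_mem = min(self.segments_seen, self.max_memory_size)
        valid_lc = min(self.segments_seen * frames_per_segment, self.left_context)
        return (
            self.mems[self.max_memory_size - valid_mem:],
            self.lc_key[self.left_context - valid_lc:],
        )

    def after_segment(self, new_mem: torch.Tensor, new_keys: torch.Tensor):
        # Analogue of state_update_after(_loop): append, then trim from the
        # left so the buffers keep their maximum shape.
        self.mems = torch.cat([self.mems, new_mem], dim=0)[-self.max_memory_size:]
        self.lc_key = torch.cat([self.lc_key, new_keys], dim=0)[-self.left_context:]
        self.segments_seen += 1


# Toy usage: three 2-frame segments of a (batch=2, dim=4) stream.
cache = StreamingCache(max_memory_size=3, left_context=4, dim=4, batch=2)
for _ in range(3):
    mems, keys = cache.before_segment(frames_per_segment=2)
    cache.after_segment(new_mem=torch.randn(1, 2, 4), new_keys=torch.randn(2, 2, 4))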
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Tuple, List import torch import torch.nn.functional as F from fairseq.models import FairseqEncoder from fairseq.models.speech_to_text import ( ConvTransformerEncoder, ) from fairseq.models.speech_to_text.utils import attention_suppression from fairseq.models.speech_to_text.utils import ( lengths_to_encoder_padding_mask, segments_to_sequence, sequence_to_segments, ) from fairseq.modules import MultiheadAttention, TransformerEncoderLayer from torch import nn, Tensor # ------------------------------------------------------------------------------ # AugmentedMemoryConvTransformerEncoder # ------------------------------------------------------------------------------ class AugmentedMemoryConvTransformerEncoder(ConvTransformerEncoder): def __init__(self, args): super().__init__(args) args.encoder_stride = self.stride() self.left_context = args.left_context // args.encoder_stride self.right_context = args.right_context // args.encoder_stride self.left_context_after_stride = args.left_context // args.encoder_stride self.right_context_after_stride = args.right_context // args.encoder_stride self.transformer_layers = nn.ModuleList([]) self.transformer_layers.extend( [ AugmentedMemoryTransformerEncoderLayer(args) for i in range(args.encoder_layers) ] ) def stride(self): # Hard coded here. Should infer from convs in future stride = 4 return stride def forward(self, src_tokens, src_lengths, states=None): """Encode input sequence. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :return: position embedded tensor and mask :rtype Tuple[torch.Tensor, torch.Tensor]: """ bsz, max_seq_len, _ = src_tokens.size() x = ( src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim) .transpose(1, 2) .contiguous() ) x = self.conv(x) bsz, _, output_seq_len, _ = x.size() x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1) x = self.out(x) x = self.embed_scale * x subsampling_factor = 1.0 * max_seq_len / output_seq_len input_lengths = torch.max( (src_lengths.float() / subsampling_factor).ceil().long(), x.size(0) * src_lengths.new_ones([src_lengths.size(0)]).long(), ) encoder_padding_mask, _ = lengths_to_encoder_padding_mask( input_lengths, batch_first=True ) # TODO: fix positional embedding positions = self.embed_positions(encoder_padding_mask).transpose(0, 1) x += positions x = F.dropout(x, p=self.dropout, training=self.training) # State to store memory banks etc. 
if states is None: states = [ {"memory_banks": None, "encoder_states": None} for i in range(len(self.transformer_layers)) ] for i, layer in enumerate(self.transformer_layers): # x size: # (self.left_size + self.segment_size + self.right_size) # / self.stride, num_heads, dim # TODO: Consider mask here x = layer(x, states[i]) states[i]["encoder_states"] = x[ self.left_context_after_stride : -self.right_context_after_stride ] lengths = ( ( ~encoder_padding_mask[ :, self.left_context_after_stride : -self.right_context_after_stride ] ) .sum(dim=1, keepdim=True) .long() ) return states[-1]["encoder_states"], lengths, states # ------------------------------------------------------------------------------ # AugmentedMemoryTransformerEncoderLayer # ------------------------------------------------------------------------------ class AugmentedMemoryTransformerEncoderLayer(TransformerEncoderLayer): def __init__(self, args): super().__init__(args) self.left_context = args.left_context // args.encoder_stride self.right_context = args.right_context // args.encoder_stride def forward(self, x, state): length, batch_size, x_dim = x.size() residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) # init_state if state.get("memory_banks", None) is None: state["memory_banks"] = [] # TODO reseach new sum_query method seg_start = self.left_context seg_end = length - self.right_context if seg_start < seg_end: summarization_query = torch.mean(x[seg_start:seg_end], keepdim=True, dim=0) else: summarization_query = x.new_zeros(1, batch_size, x_dim) x = torch.cat([x, summarization_query], dim=0) x = self.self_attn(input_and_summary=x, state=state) x = self.dropout_module(x) x = residual + x if not self.normalize_before: x = self.self_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x if not self.normalize_before: x = self.final_layer_norm(x) return x def build_self_attention(self, embed_dim, args): return AugmentedMemoryMultiheadAttention( embed_dim=embed_dim, num_heads=args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, tanh_on_mem=True, max_memory_size=args.max_memory_size, ) # ------------------------------------------------------------------------------ # AugmentedMemoryMultiheadAttention # ------------------------------------------------------------------------------ class AugmentedMemoryMultiheadAttention(MultiheadAttention): """ Augmented Memory Attention from Streaming Transformer-based Acoustic Models Using Self-attention with Augmented Memory https://arxiv.org/abs/2005.08042 """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, tanh_on_mem=False, memory_dim=None, std_scale=0.5, # 0.5 based on https://arxiv.org/abs/2005.09137 max_memory_size=-1, disable_mem_on_mem_attn=True, ): super().__init__( embed_dim, num_heads, kdim, vdim, dropout, bias, add_bias_kv, add_zero_attn, self_attention, encoder_decoder_attention, q_noise, qn_block_size, ) self.memory_dim = memory_dim if memory_dim is not None else embed_dim self.std_scale = std_scale self.disable_mem_on_mem_attn = disable_mem_on_mem_attn # This Operator was used for factorization in PySpeech self.v2e = lambda x: x if 
tanh_on_mem: self.squash_mem = torch.tanh self.nonlinear_squash_mem = True else: self.squash_mem = lambda x: x self.nonlinear_squash_mem = False self.max_memory_size = max_memory_size def forward(self, input_and_summary, state): """ input: Encoder states of current segment with left or right context, plus one summarization query """ length, batch_size, _ = input_and_summary.shape length = length - 1 # not include sum_query, last index memory = state["memory_banks"] # TODO: positional embedding on memory if self.max_memory_size > -1 and len(memory) > self.max_memory_size: # TODO: need to fix here if self.max_memory_size == 0: memory = memory.new_zeros(1, memory.size(1), self.memory_dim) else: memory = memory[-self.max_memory_size :] memory_and_input = torch.cat(memory + [input_and_summary[:-1]], dim=0) input_and_sum_query = input_and_summary q = self.q_proj(self.v2e(input_and_sum_query)) k = self.k_proj(self.v2e(memory_and_input)) v = self.v_proj(self.v2e(memory_and_input)) q = ( q.contiguous() .view(-1, batch_size * self.num_heads, self.head_dim) .transpose(0, 1) * self.scaling ) k = ( k.contiguous() .view(-1, batch_size * self.num_heads, self.head_dim) .transpose(0, 1) ) v = ( v.contiguous() .view(-1, batch_size * self.num_heads, self.head_dim) .transpose(0, 1) ) attention_weights = torch.bmm(q, k.transpose(1, 2)) if self.disable_mem_on_mem_attn: attention_weights = self.suppress_mem_on_mem_attention( batch_size, self.num_heads, len(memory), attention_weights ) if self.std_scale is not None: attention_weights = attention_suppression(attention_weights, self.std_scale) assert list(attention_weights.shape) == [ batch_size * self.num_heads, length + 1, length + len(memory), ] attention_weights = torch.nn.functional.softmax( attention_weights.float(), dim=-1 ).type_as(attention_weights) attention_probs = self.dropout_module(attention_weights) # [T, T, B, n_head] + [T, B, n_head, d_head] -> [T, B, n_head, d_head] attention = torch.bmm(attention_probs, v) assert list(attention.shape) == [ batch_size * self.num_heads, length + 1, self.head_dim, ] attention = ( attention.transpose(0, 1) .contiguous() .view(length + 1, batch_size, self.embed_dim) ) output_and_memory = self.out_proj(attention) next_m = output_and_memory[-1:] next_m = self.squash_mem(next_m) output = output_and_memory[:-1] state["memory_banks"].append(next_m) return output def suppress_mem_on_mem_attention( self, B: int, num_heads: int, mem_size: int, attention_weight: Tensor ): """ Arguments: - B: batch size - num_heads: number of attention heads - mem_size: size of memory bank - attention_weight: a [B*num_heads, T + 1, T + mem_size] vector Return: modified attention_weight with [B*num_heads, -1, :mem_size] = -inf """ attention_weight[:, -1, :mem_size] = float("-inf") return attention_weight # ------------------------------------------------------------------------------ # SequenceEncoder # ------------------------------------------------------------------------------ class SequenceEncoder(FairseqEncoder): """ SequenceEncoder encodes sequences. More specifically, `src_tokens` and `src_lengths` in `forward()` should describe a batch of "complete" sequences rather than segments. Segment-by-segment inference can be triggered by `segment_size`: 1) `segment_size` is None: SequenceEncoder treats the input sequence as one single segment. 2) `segment_size` is not None (some int instead): SequenceEncoder does the following: 1. breaks the input sequence into several segments 2. inference on each segment and collect the outputs 3. 
concatanete segment outputs into the output sequence. Note that `segment_size` here shouldn't include additional left/right contexts needed, for example if we wish to infer with LC-BLSTM where the middle chunk size is 100 and right context is 20, `segment_size` should be 100. """ def __init__(self, args, module): super().__init__(None) self.module = module self.input_time_axis = 1 self.output_time_axis = 0 self.segment_size = args.segment_size self.left_context = args.left_context self.right_context = args.right_context def forward( self, src_tokens: Tensor, src_lengths: Tensor, states=None, ): seg_src_tokens_lengths = sequence_to_segments( sequence=src_tokens, time_axis=self.input_time_axis, lengths=src_lengths, segment_size=self.segment_size, extra_left_context=self.left_context, extra_right_context=self.right_context, ) seg_encoder_states_lengths: List[Tuple[Tensor, Tensor]] = [] for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths: (seg_encoder_states, seg_enc_lengths, states) = self.module( seg_src_tokens, seg_src_lengths, states=states, ) seg_encoder_states_lengths.append((seg_encoder_states, seg_enc_lengths)) encoder_out, enc_lengths = segments_to_sequence( segments=seg_encoder_states_lengths, time_axis=self.output_time_axis ) encoder_padding_mask, _ = lengths_to_encoder_padding_mask( enc_lengths, batch_first=True ) if not encoder_padding_mask.any(): encoder_padding_mask = None return { "encoder_out": [encoder_out], "encoder_padding_mask": [encoder_padding_mask], "encoder_embedding": [], "encoder_states": [states], "src_tokens": [], "src_lengths": [], } def incremental_encode( self, seg_src_tokens: Tensor, seg_src_lengths: Tensor, states=None, ): """ Different from forward function, this function takes segmented speech as input, and append encoder states to previous states """ (seg_encoder_states, seg_enc_lengths, states) = self.module( seg_src_tokens, seg_src_lengths, states=states, ) return seg_encoder_states, seg_enc_lengths, states # ------------------------------------------------------------------------------ # Augmented memory model decorator # ------------------------------------------------------------------------------ def augmented_memory(klass): class StreamSeq2SeqModel(klass): @staticmethod def add_args(parser): super(StreamSeq2SeqModel, StreamSeq2SeqModel).add_args(parser) parser.add_argument( "--segment-size", type=int, required=True, help="Length of the segment." ) parser.add_argument( "--left-context", type=int, default=0, help="Left context for the segment.", ) parser.add_argument( "--right-context", type=int, default=0, help="Right context for the segment.", ) parser.add_argument( "--max-memory-size", type=int, default=-1, help="Right context for the segment.", ) StreamSeq2SeqModel.__name__ = klass.__name__ return StreamSeq2SeqModel
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/speech_to_text/modules/augmented_memory_attention.py
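The SequenceEncoder above splits a full utterance into segments, adds extra left/right context to each, encodes them one by one and stitches the centre frames back together. The following stand-alone sketch shows that flow on plain tensors; segment_wise_encode and its placeholder "encoder" are hypothetical and do not call fairseq's sequence_to_segments / segments_to_sequence helpers.

import torch


def segment_wise_encode(x, segment_size=4, left=2, right=2):
    # x: (T, B, D) full utterance. Each segment is encoded together with
    # `left`/`right` extra frames of context, but only its own frames are
    # emitted, so the concatenated output has the same length as the input.
    T = x.size(0)
    outputs = []
    for start in range(0, T, segment_size):
        end = min(start + segment_size, T)
        ctx_start = max(0, start - left)
        ctx = x[ctx_start: min(T, end + right)]   # segment plus surrounding context
        encoded = ctx * 2.0                       # placeholder for the real encoder
        offset = start - ctx_start                # where the segment begins inside ctx
        outputs.append(encoded[offset: offset + (end - start)])
    return torch.cat(outputs, dim=0)


out = segment_wise_encode(torch.randn(10, 2, 8))
assert out.shape == (10, 2, 8)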
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .hubert import *  # noqa
from .hubert_asr import *  # noqa
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/hubert/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib from argparse import Namespace from typing import Any import torch import torch.nn as nn from dataclasses import dataclass, field from fairseq import checkpoint_utils, tasks, utils from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.models import BaseFairseqModel, FairseqEncoder, register_model from fairseq.models.hubert.hubert import MASKING_DISTRIBUTION_CHOICES from fairseq.tasks import FairseqTask from omegaconf import II, MISSING @dataclass class HubertAsrConfig(FairseqDataclass): w2v_path: str = field( default=MISSING, metadata={"help": "path to hubert model"} ) no_pretrained_weights: bool = field( default=False, metadata={"help": "if true, does not load pretrained weights"}, ) dropout_input: float = field( default=0.0, metadata={"help": "dropout to apply to the input (after feat extr)"}, ) final_dropout: float = field( default=0.0, metadata={ "help": "dropout after transformer and before final projection" }, ) dropout: float = field( default=0.0, metadata={"help": "dropout probability inside hubert model"}, ) attention_dropout: float = field( default=0.0, metadata={ "help": "dropout probability for attention weights " "inside hubert model" }, ) activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN " "inside hubert model" }, ) # masking apply_mask: bool = field( default=False, metadata={"help": "apply masking during fine-tuning"} ) mask_length: int = field( default=10, metadata={"help": "repeat the mask indices multiple times"} ) mask_prob: float = field( default=0.5, metadata={ "help": "probability of replacing a token with mask " "(normalized by length)" }, ) mask_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose masks"} ) mask_other: float = field( default=0, metadata={ "help": "secondary mask argument " "(used for more complex distributions), " "see help in compute_mask_indices" }, ) no_mask_overlap: bool = field( default=False, metadata={"help": "whether to allow masks to overlap"} ) # channel masking mask_channel_length: int = field( default=10, metadata={"help": "length of the mask for features (channels)"}, ) mask_channel_prob: float = field( default=0.0, metadata={"help": "probability of replacing a feature with 0"}, ) mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length for channel masking"}, ) mask_channel_other: float = field( default=0, metadata={ "help": "secondary mask argument " "(used for more complex distributions), " "see help in compute_mask_indices" }, ) no_mask_channel_overlap: bool = field( default=False, metadata={"help": "whether to allow channel masks to overlap"}, ) freeze_finetune_updates: int = field( default=0, metadata={"help": "dont finetune hubert for this many updates"}, ) feature_grad_mult: float = field( default=0.0, metadata={"help": "reset feature grad mult in hubert to this"}, ) layerdrop: float = field( default=0.0, metadata={"help": "probability of dropping a layer in hubert"}, ) normalize: bool = II("task.normalize") data: str = II("task.data") # this holds the loaded hubert args w2v_args: Any = None @dataclass class HubertCtcConfig(HubertAsrConfig): pass @register_model("hubert_ctc", 
dataclass=HubertCtcConfig) class HubertCtc(BaseFairseqModel): def __init__(self, cfg: HubertCtcConfig, w2v_encoder: BaseFairseqModel): super().__init__() self.cfg = cfg self.w2v_encoder = w2v_encoder def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) return state_dict @classmethod def build_model(cls, cfg: HubertCtcConfig, task: FairseqTask): """Build a new model instance.""" w2v_encoder = HubertEncoder(cfg, task.target_dictionary) return cls(cfg, w2v_encoder) def get_normalized_probs(self, net_output, log_probs): """Get normalized probabilities (or log probs) from a net's output.""" logits = net_output["encoder_out"] if log_probs: return utils.log_softmax(logits.float(), dim=-1) else: return utils.softmax(logits.float(), dim=-1) def get_logits(self, net_output): logits = net_output["encoder_out"] padding = net_output["encoder_padding_mask"] if padding is not None and padding.any(): padding = padding.T logits[padding][..., 0] = 0 logits[padding][..., 1:] = float("-inf") return logits def forward(self, **kwargs): x = self.w2v_encoder(**kwargs) return x @dataclass class HubertSeq2SeqConfig(HubertAsrConfig): decoder_embed_dim: int = field( default=768, metadata={"help": "decoder embedding dimension"} ) decoder_ffn_embed_dim: int = field( default=3072, metadata={"help": "decoder embedding dimension for FFN"} ) decoder_layers: int = field( default=6, metadata={"help": "num of decoder layers"} ) decoder_layerdrop: float = field( default=0.0, metadata={"help": "decoder layerdrop chance"} ) decoder_attention_heads: int = field( default=4, metadata={"help": "num decoder attention heads"} ) decoder_learned_pos: bool = field( default=False, metadata={"help": "use learned positional embeddings in the decoder"}, ) decoder_normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each decoder block"}, ) no_token_positional_embeddings: bool = field( default=False, metadata={ "help": "if set, disables positional embeddings " "(outside self attention)" }, ) decoder_dropout: float = field( default=0.0, metadata={"help": "dropout probability in the decoder"} ) decoder_attention_dropout: float = field( default=0.0, metadata={ "help": "dropout probability for attention weights " "inside the decoder" }, ) decoder_activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN " "inside the decoder" }, ) max_target_positions: int = field( default=2048, metadata={"help": "max target positions"} ) share_decoder_input_output_embed: bool = field( default=False, metadata={"help": "share decoder input and output embeddings"}, ) class HubertEncoder(FairseqEncoder): def __init__(self, cfg: HubertAsrConfig, tgt_dict=None): self.apply_mask = cfg.apply_mask arg_overrides = { "dropout": cfg.dropout, "activation_dropout": cfg.activation_dropout, "dropout_input": cfg.dropout_input, "attention_dropout": cfg.attention_dropout, "mask_length": cfg.mask_length, "mask_prob": cfg.mask_prob, "mask_selection": cfg.mask_selection, "mask_other": cfg.mask_other, "no_mask_overlap": cfg.no_mask_overlap, "mask_channel_length": cfg.mask_channel_length, "mask_channel_prob": cfg.mask_channel_prob, "mask_channel_selection": cfg.mask_channel_selection, "mask_channel_other": cfg.mask_channel_other, "no_mask_channel_overlap": cfg.no_mask_channel_overlap, "encoder_layerdrop": cfg.layerdrop, "feature_grad_mult": cfg.feature_grad_mult, } if cfg.w2v_args is None: state = checkpoint_utils.load_checkpoint_to_cpu( 
cfg.w2v_path, arg_overrides ) w2v_args = state.get("cfg", None) if w2v_args is None: w2v_args = convert_namespace_to_omegaconf(state["args"]) cfg.w2v_args = w2v_args else: state = None w2v_args = cfg.w2v_args if isinstance(w2v_args, Namespace): cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf( w2v_args ) assert cfg.normalize == w2v_args.task.normalize, ( "Fine-tuning works best when data normalization is the same. " "Please check that --normalize is set or unset for " "both pre-training and here" ) w2v_args.task.data = cfg.data task = tasks.setup_task(w2v_args.task) if state is not None and "task_state" in state: # This will load the stored "dictionaries" object task.load_state_dict(state["task_state"]) model = task.build_model(w2v_args.model) if state is not None and not cfg.no_pretrained_weights: # set strict=False because we omit some modules model.load_state_dict(state["model"], strict=False) model.remove_pretraining_modules() super().__init__(task.source_dictionary) d = w2v_args.model.encoder_embed_dim self.w2v_model = model self.final_dropout = nn.Dropout(cfg.final_dropout) self.freeze_finetune_updates = cfg.freeze_finetune_updates self.num_updates = 0 if tgt_dict is not None: self.proj = Linear(d, len(tgt_dict)) elif getattr(cfg, "decoder_embed_dim", d) != d: self.proj = Linear(d, cfg.decoder_embed_dim) else: self.proj = None def set_num_updates(self, num_updates): """Set the number of parameters updates.""" super().set_num_updates(num_updates) self.num_updates = num_updates def forward(self, source, padding_mask, tbc=True, **kwargs): w2v_args = { "source": source, "padding_mask": padding_mask, "mask": self.apply_mask and self.training, } ft = self.freeze_finetune_updates <= self.num_updates with torch.no_grad() if not ft else contextlib.ExitStack(): x, padding_mask = self.w2v_model.extract_features(**w2v_args) if tbc: # B x T x C -> T x B x C x = x.transpose(0, 1) x = self.final_dropout(x) if self.proj: x = self.proj(x) return { "encoder_out": x, # T x B x C "encoder_padding_mask": padding_mask, # B x T "padding_mask": padding_mask, } def reorder_encoder_out(self, encoder_out, new_order): if encoder_out["encoder_out"] is not None: encoder_out["encoder_out"] = encoder_out[ "encoder_out" ].index_select(1, new_order) if encoder_out["encoder_padding_mask"] is not None: encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return None def upgrade_state_dict_named(self, state_dict, name): return state_dict def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/hubert/hubert_asr.py
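HubertEncoder.forward above keeps the pretrained trunk frozen for the first freeze_finetune_updates updates by switching between torch.no_grad() and a no-op contextlib.ExitStack(). A tiny self-contained illustration of that pattern follows; the FrozenThenFinetuned module and its sizes are made up for the example.

import contextlib
import torch
import torch.nn as nn


class FrozenThenFinetuned(nn.Module):
    def __init__(self, freeze_updates: int = 2):
        super().__init__()
        self.trunk = nn.Linear(8, 8)   # stand-in for the pretrained model
        self.head = nn.Linear(8, 4)    # stand-in for the task head / projection
        self.freeze_updates = freeze_updates
        self.num_updates = 0

    def forward(self, x):
        ft = self.freeze_updates <= self.num_updates
        # ExitStack() is a no-op context manager, so this single line either
        # disables autograd for the trunk or leaves it enabled.
        with torch.no_grad() if not ft else contextlib.ExitStack():
            feats = self.trunk(x)
        return self.head(feats)


model = FrozenThenFinetuned()
model(torch.randn(3, 8)).sum().backward()
print(model.trunk.weight.grad is None)  # True while the trunk is still frozen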
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import Dict, List, Optional, Tuple import numpy as np import torch import torch.nn as nn from dataclasses import dataclass, field from fairseq import utils from fairseq.data.data_utils import compute_mask_indices from fairseq.data.dictionary import Dictionary from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import BaseFairseqModel, register_model from fairseq.models.wav2vec.wav2vec2 import ( ConvFeatureExtractionModel, TransformerEncoder, ) from fairseq.modules import GradMultiply, LayerNorm from fairseq.tasks.hubert_pretraining import ( HubertPretrainingConfig, HubertPretrainingTask, ) from omegaconf import II logger = logging.getLogger(__name__) EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"]) MASKING_DISTRIBUTION_CHOICES = ChoiceEnum( ["static", "uniform", "normal", "poisson"] ) @dataclass class HubertConfig(FairseqDataclass): label_rate: int = II("task.label_rate") extractor_mode: EXTRACTOR_MODE_CHOICES = field( default="default", metadata={ "help": "mode for feature extractor. default has a single group " "norm with d groups in the first conv block, whereas layer_norm " "has layer norms in every block (meant to use with normalize=True)" }, ) encoder_layers: int = field( default=12, metadata={"help": "num encoder layers in the transformer"} ) encoder_embed_dim: int = field( default=768, metadata={"help": "encoder embedding dimension"} ) encoder_ffn_embed_dim: int = field( default=3072, metadata={"help": "encoder embedding dimension for FFN"} ) encoder_attention_heads: int = field( default=12, metadata={"help": "num encoder attention heads"} ) activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( default="gelu", metadata={"help": "activation function to use"} ) # dropouts dropout: float = field( default=0.1, metadata={"help": "dropout probability for the transformer"}, ) attention_dropout: float = field( default=0.1, metadata={"help": "dropout probability for attention weights"}, ) activation_dropout: float = field( default=0.0, metadata={"help": "dropout probability after activation in FFN"}, ) encoder_layerdrop: float = field( default=0.0, metadata={"help": "probability of dropping a tarnsformer layer"}, ) dropout_input: float = field( default=0.0, metadata={"help": "dropout to apply to the input (after feat extr)"}, ) dropout_features: float = field( default=0.0, metadata={ "help": "dropout to apply to the features (after feat extr)" }, ) final_dim: int = field( default=0, metadata={ "help": "project final representations and targets to this many " "dimensions. 
set to encoder_embed_dim is <= 0" }, ) untie_final_proj: bool = field( default=False, metadata={"help": "use separate projection for each target"}, ) layer_norm_first: bool = field( default=False, metadata={"help": "apply layernorm first in the transformer"}, ) conv_feature_layers: str = field( default="[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2", metadata={ "help": "string describing convolutional feature extraction " "layers in form of a python list that contains " "[(dim, kernel_size, stride), ...]" }, ) conv_bias: bool = field( default=False, metadata={"help": "include bias in conv encoder"} ) logit_temp: float = field( default=0.1, metadata={"help": "temperature to divide logits by"} ) target_glu: bool = field( default=False, metadata={"help": "adds projection + glu to targets"} ) feature_grad_mult: float = field( default=1.0, metadata={"help": "multiply feature extractor var grads by this"}, ) # masking mask_length: int = field(default=10, metadata={"help": "mask length"}) mask_prob: float = field( default=0.65, metadata={"help": "probability of replacing a token with mask"}, ) mask_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length"} ) mask_other: float = field( default=0, metadata={ "help": "secondary mask argument " "(used for more complex distributions), " "see help in compute_mask_indicesh" }, ) no_mask_overlap: bool = field( default=False, metadata={"help": "whether to allow masks to overlap"} ) mask_min_space: int = field( default=1, metadata={ "help": "min space between spans (if no overlap is enabled)" }, ) # channel masking mask_channel_length: int = field( default=10, metadata={"help": "length of the mask for features (channels)"}, ) mask_channel_prob: float = field( default=0.0, metadata={"help": "probability of replacing a feature with 0"}, ) mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length for channel masking"}, ) mask_channel_other: float = field( default=0, metadata={ "help": "secondary mask argument " "(used for more complex distributions), " "see help in compute_mask_indicesh" }, ) no_mask_channel_overlap: bool = field( default=False, metadata={"help": "whether to allow channel masks to overlap"}, ) mask_channel_min_space: int = field( default=1, metadata={ "help": "min space between spans (if no overlap is enabled)" }, ) # positional embeddings conv_pos: int = field( default=128, metadata={ "help": "number of filters for convolutional positional embeddings" }, ) conv_pos_groups: int = field( default=16, metadata={ "help": "number of groups for convolutional positional embedding" }, ) latent_temp: Tuple[float, float, float] = field( default=(2, 0.5, 0.999995), metadata={"help": "legacy (to be removed)"}, ) # loss computation skip_masked: bool = field( default=False, metadata={"help": "skip computing losses over masked frames"}, ) skip_nomask: bool = field( default=False, metadata={"help": "skip computing losses over unmasked frames"}, ) @register_model("hubert", dataclass=HubertConfig) class HubertModel(BaseFairseqModel): def __init__( self, cfg: HubertConfig, task_cfg: HubertPretrainingConfig, dictionaries: List[Dictionary], ) -> None: super().__init__() logger.info(f"HubertModel Config: {cfg}") feature_enc_layers = eval(cfg.conv_feature_layers) # noqa self.embed = feature_enc_layers[-1][0] self.feature_extractor = ConvFeatureExtractionModel( conv_layers=feature_enc_layers, dropout=0.0, mode=cfg.extractor_mode, 
conv_bias=cfg.conv_bias, ) feature_ds_rate = np.prod([s for _, _, s in feature_enc_layers]) self.feat2tar_ratio = ( cfg.label_rate * feature_ds_rate / task_cfg.sample_rate ) self.post_extract_proj = ( nn.Linear(self.embed, cfg.encoder_embed_dim) if self.embed != cfg.encoder_embed_dim else None ) self.mask_prob = cfg.mask_prob self.mask_selection = cfg.mask_selection self.mask_other = cfg.mask_other self.mask_length = cfg.mask_length self.no_mask_overlap = cfg.no_mask_overlap self.mask_min_space = cfg.mask_min_space self.mask_channel_prob = cfg.mask_channel_prob self.mask_channel_selection = cfg.mask_channel_selection self.mask_channel_other = cfg.mask_channel_other self.mask_channel_length = cfg.mask_channel_length self.no_mask_channel_overlap = cfg.no_mask_channel_overlap self.mask_channel_min_space = cfg.mask_channel_min_space self.dropout_input = nn.Dropout(cfg.dropout_input) self.dropout_features = nn.Dropout(cfg.dropout_features) self.feature_grad_mult = cfg.feature_grad_mult self.logit_temp = cfg.logit_temp self.skip_masked = cfg.skip_masked self.skip_nomask = cfg.skip_nomask final_dim = ( cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim ) self.mask_emb = nn.Parameter( torch.FloatTensor(cfg.encoder_embed_dim).uniform_() ) self.encoder = TransformerEncoder(cfg) self.layer_norm = LayerNorm(self.embed) self.target_glu = None if cfg.target_glu: self.target_glu = nn.Sequential( nn.Linear(final_dim, final_dim * 2), nn.GLU() ) self.untie_final_proj = cfg.untie_final_proj if self.untie_final_proj: self.final_proj = nn.Linear( cfg.encoder_embed_dim, final_dim * len(dictionaries) ) else: self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim) # modules below are not needed during fine-tuning if any([d is None for d in dictionaries]): logger.info( "cannot find dictionary. 
assume will be used for fine-tuning" ) else: self.num_classes = [len(d) for d in dictionaries] self.label_embs_concat = nn.Parameter( torch.FloatTensor(sum(self.num_classes), final_dim) ) nn.init.uniform_(self.label_embs_concat) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" super().upgrade_state_dict_named(state_dict, name) return state_dict @classmethod def build_model(cls, cfg: HubertConfig, task: HubertPretrainingTask): """Build a new model instance.""" model = HubertModel(cfg, task.cfg, task.dictionaries) return model def apply_mask(self, x, padding_mask, target_list): B, T, C = x.shape if self.mask_prob > 0: mask_indices = compute_mask_indices( (B, T), padding_mask, self.mask_prob, self.mask_length, self.mask_selection, self.mask_other, min_masks=2, no_overlap=self.no_mask_overlap, min_space=self.mask_min_space, ) mask_indices = torch.from_numpy(mask_indices).to(x.device) x[mask_indices] = self.mask_emb else: mask_indices = None if self.mask_channel_prob > 0: mask_channel_indices = compute_mask_indices( (B, C), None, self.mask_channel_prob, self.mask_channel_length, self.mask_channel_selection, self.mask_channel_other, no_overlap=self.no_mask_channel_overlap, min_space=self.mask_channel_min_space, ) mask_channel_indices = ( torch.from_numpy(mask_channel_indices) .to(x.device) .unsqueeze(1) .expand(-1, T, -1) ) x[mask_channel_indices] = 0 return x, mask_indices def compute_nce(self, x, pos, negs): neg_is_pos = (pos == negs).all(-1) pos = pos.unsqueeze(0) targets = torch.cat([pos, negs], dim=0) logits = torch.cosine_similarity( x.float(), targets.float(), dim=-1 ).type_as(x) logits /= self.logit_temp if neg_is_pos.any(): logits[1:][neg_is_pos] = float("-inf") logits = logits.transpose(0, 1) # (num_x, num_cls+1) return logits def forward_features(self, source: torch.Tensor) -> torch.Tensor: if self.feature_grad_mult > 0: features = self.feature_extractor(source) if self.feature_grad_mult != 1.0: features = GradMultiply.apply(features, self.feature_grad_mult) else: with torch.no_grad(): features = self.feature_extractor(source) return features def forward_targets( self, features: torch.Tensor, target_list: List[torch.Tensor], ) -> Tuple[torch.Tensor, torch.Tensor]: # Trim features to ensure labels exist and then get aligned labels feat_tsz = features.size(2) targ_tsz = min([t.size(1) for t in target_list]) if self.feat2tar_ratio * feat_tsz > targ_tsz: feat_tsz = int(targ_tsz / self.feat2tar_ratio) features = features[..., :feat_tsz] target_inds = torch.arange(feat_tsz).float() * self.feat2tar_ratio target_list = [t[:, target_inds.long()] for t in target_list] return features, target_list def forward_padding_mask( self, features: torch.Tensor, padding_mask: torch.Tensor, ) -> torch.Tensor: extra = padding_mask.size(1) % features.size(1) if extra > 0: padding_mask = padding_mask[:, :-extra] padding_mask = padding_mask.view( padding_mask.size(0), features.size(1), -1 ) padding_mask = padding_mask.all(-1) return padding_mask def forward( self, source: torch.Tensor, target_list: Optional[List[torch.Tensor]] = None, padding_mask: Optional[torch.Tensor] = None, mask: bool = True, features_only: bool = False, output_layer: Optional[int] = None, ) -> Dict[str, torch.Tensor]: """output layer is 1-based""" features = self.forward_features(source) if target_list is not None: features, target_list = self.forward_targets(features, target_list) features_pen = features.float().pow(2).mean() features = features.transpose(1, 2) 
features = self.layer_norm(features) unmasked_features = features.clone() if padding_mask is not None: padding_mask = self.forward_padding_mask(features, padding_mask) if self.post_extract_proj is not None: features = self.post_extract_proj(features) features = self.dropout_input(features) unmasked_features = self.dropout_features(unmasked_features) if mask: x, mask_indices = self.apply_mask( features, padding_mask, target_list ) else: x = features mask_indices = None # feature: (B, T, D), float # target: (B, T), long # x: (B, T, D), float # padding_mask: (B, T), bool # mask_indices: (B, T), bool x, _ = self.encoder( x, padding_mask=padding_mask, layer=None if output_layer is None else output_layer - 1 ) if features_only: return {"x": x, "padding_mask": padding_mask, "features": features} def compute_pred(proj_x, target, label_embs): # compute logits for the i-th label set y = torch.index_select(label_embs, 0, target.long()) negs = label_embs.unsqueeze(1).expand(-1, proj_x.size(0), -1) if self.target_glu: y = self.target_glu(y) negs = self.target_glu(negs) # proj_x: (S, D) # y: (S, D) # negs: (Neg, S, D) return self.compute_nce(proj_x, y, negs) label_embs_list = self.label_embs_concat.split(self.num_classes, 0) if not self.skip_masked: masked_indices = torch.logical_and(~padding_mask, mask_indices) proj_x_m = self.final_proj(x[masked_indices]) if self.untie_final_proj: proj_x_m_list = proj_x_m.chunk(len(target_list), dim=-1) else: proj_x_m_list = [proj_x_m for _ in range(len(target_list))] logit_m_list = [ compute_pred(proj_x_m, t[masked_indices], label_embs_list[i]) for i, (proj_x_m, t) in enumerate( zip(proj_x_m_list, target_list) ) ] else: logit_m_list = [None for _ in target_list] if not self.skip_nomask: nomask_indices = torch.logical_and(~padding_mask, ~mask_indices) proj_x_u = self.final_proj(x[nomask_indices]) if self.untie_final_proj: proj_x_u_list = proj_x_u.chunk(len(target_list), dim=-1) else: proj_x_u_list = [proj_x_u for _ in range(len(target_list))] logit_u_list = [ compute_pred(proj_x_u, t[nomask_indices], label_embs_list[i]) for i, (proj_x_u, t) in enumerate( zip(proj_x_u_list, target_list) ) ] else: logit_u_list = [None for _ in target_list] result = { "logit_m_list": logit_m_list, "logit_u_list": logit_u_list, "padding_mask": padding_mask, "features_pen": features_pen, } return result def extract_features( self, source: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, mask: bool = False, ret_conv: bool = False, output_layer: Optional[int] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: res = self.forward( source, padding_mask=padding_mask, mask=mask, features_only=True, output_layer=output_layer, ) feature = res["features"] if ret_conv else res["x"] return feature, res["padding_mask"] def get_logits(self, net_output, is_masked=True): if is_masked: logits_list = net_output["logit_m_list"] else: logits_list = net_output["logit_u_list"] logits_list = [x.float() for x in logits_list if x is not None] return logits_list def get_targets(self, net_output, is_masked=True): logits_list = self.get_logits(net_output, is_masked) targets_list = [ x.new_zeros(x.size(0), dtype=torch.long) for x in logits_list ] return targets_list def get_extra_losses(self, net_output): extra_losses = [] names = [] if "features_pen" in net_output: extra_losses.append(net_output["features_pen"]) names.append("features_pen") return extra_losses, names def remove_pretraining_modules(self): self.target_glu = None self.final_proj = None
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/hubert/hubert.py
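compute_nce above scores each frame against its positive label embedding plus a set of negatives with temperature-scaled cosine similarity, so class 0 is the correct answer for every frame. A stand-alone sketch of that computation follows; nce_logits and the shapes are assumptions for illustration, not the fairseq function.

import torch
import torch.nn.functional as F


def nce_logits(x, pos, negs, temperature=0.1):
    # x:    (S, D) projected frame features
    # pos:  (S, D) embedding of the correct label for each frame
    # negs: (N, S, D) negative candidate label embeddings
    targets = torch.cat([pos.unsqueeze(0), negs], dim=0)                  # (N+1, S, D)
    logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1)  # (N+1, S)
    logits = logits / temperature
    return logits.transpose(0, 1)  # (S, N+1); index 0 is the positive


S, D, N = 5, 16, 3
logits = nce_logits(torch.randn(S, D), torch.randn(S, D), torch.randn(N, S, D))
# Training would then use cross entropy with target class 0 for every frame:
loss = F.cross_entropy(logits, torch.zeros(S, dtype=torch.long))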
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import re from dataclasses import dataclass, field, fields from typing import List, Optional from omegaconf import II from fairseq import utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.utils import safe_getattr, safe_hasattr DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8) _NAME_PARSER = r"(decoder|encoder|quant_noise)_(.*)" @dataclass class EncDecBaseConfig(FairseqDataclass): embed_path: Optional[str] = field( default=None, metadata={"help": "path to pre-trained embedding"} ) embed_dim: Optional[int] = field( default=512, metadata={"help": "embedding dimension"} ) ffn_embed_dim: int = field( default=2048, metadata={"help": "embedding dimension for FFN"} ) layers: int = field(default=6, metadata={"help": "number of layers"}) attention_heads: int = field( default=8, metadata={"help": "number of attention heads"} ) normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each block"} ) learned_pos: bool = field( default=False, metadata={"help": "use learned positional embeddings"} ) # args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) layerdrop: float = field(default=0, metadata={"help": "LayerDrop probability"}) layers_to_keep: Optional[List[int]] = field( default=None, metadata={"help": "which layers to *keep* when pruning"} ) @dataclass class DecoderConfig(EncDecBaseConfig): input_dim: int = II("model.decoder.embed_dim") output_dim: int = field( default=II("model.decoder.embed_dim"), metadata={ "help": "decoder output dimension (extra linear layer if different from decoder embed dim)" }, ) def __post_init__(self): # II doesn't work if we are just creating the object outside of hydra so fix that if self.input_dim == II("model.decoder.embed_dim"): self.input_dim = self.embed_dim if self.output_dim == II("model.decoder.embed_dim"): self.output_dim = self.embed_dim @dataclass class QuantNoiseConfig(FairseqDataclass): pq: float = field( default=0.0, metadata={"help": "iterative PQ quantization noise at training time"}, ) pq_block_size: int = field( default=8, metadata={"help": "block size of quantization noise at training time"}, ) scalar: float = field( default=0.0, metadata={ "help": "scalar quantization noise and scalar quantization at training time" }, ) @dataclass class TransformerConfig(FairseqDataclass): activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( default="relu", metadata={"help": "activation function to use"}, ) dropout: float = field(default=0.1, metadata={"help": "dropout probability"}) attention_dropout: float = field( default=0.0, metadata={"help": "dropout probability for attention weights"} ) activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN.", "alias": "--relu-dropout", }, ) adaptive_input: bool = False adaptive_input_factor: float = field( default=4, metadata={"help": "adaptive input factor"} ) adaptive_input_cutoff: Optional[str] = field( default=None, metadata={"help": "comma separated list of adaptive input cutoff points."}, ) encoder: EncDecBaseConfig = EncDecBaseConfig() # TODO should really be in the encoder config max_source_positions: int = field( default=DEFAULT_MAX_SOURCE_POSITIONS, metadata={"help": "Maximum input length supported by the 
encoder"}, ) decoder: DecoderConfig = DecoderConfig() # TODO should really be in the decoder config max_target_positions: int = field( default=DEFAULT_MAX_TARGET_POSITIONS, metadata={"help": "Maximum output length supported by the decoder"}, ) share_decoder_input_output_embed: bool = field( default=False, metadata={"help": "share decoder input and output embeddings"} ) model_param_type: str = field( default="", metadata={"help": "specify parameterization: edgeformer, ut; default is ''"} ) share_all_embeddings: bool = field( default=False, metadata={ "help": "share encoder, decoder and output embeddings (requires shared dictionary and embed dim)" }, ) no_token_positional_embeddings: bool = field( default=False, metadata={ "help": "if True, disables positional embeddings (outside self attention)" }, ) adaptive_softmax_cutoff: Optional[List[int]] = field( default=None, metadata={ "help": "list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion" }, ) adaptive_softmax_dropout: float = field( default=0.0, metadata={"help": "sets adaptive softmax dropout for the tail projections"}, ) adaptive_softmax_factor: float = field( default=4, metadata={"help": "adaptive input factor"} ) layernorm_embedding: bool = field( default=False, metadata={"help": "add layernorm to embedding"} ) tie_adaptive_weights: bool = field( default=False, metadata={ "help": "if set, ties the weights of adaptive softmax and adaptive input" }, ) tie_adaptive_proj: bool = field( default=False, metadata={ "help": "if set, ties the projection weights of adaptive softmax and adaptive input" }, ) no_scale_embedding: bool = field( default=False, metadata={"help": "if True, dont scale embeddings"} ) checkpoint_activations: bool = field( default=False, metadata={ "help": "checkpoint activations at each layer, which saves GPU memory usage at the cost of some additional compute" }, ) offload_activations: bool = field( default=False, metadata={ "help": "checkpoint activations at each layer, then save to gpu. Sets --checkpoint-activations." }, ) # args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019) no_cross_attention: bool = field( default=False, metadata={"help": "do not perform cross-attention"} ) cross_self_attention: bool = field( default=False, metadata={"help": "perform cross+self-attention"} ) # args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020) quant_noise: QuantNoiseConfig = field(default=QuantNoiseConfig()) min_params_to_wrap: int = field( default=DEFAULT_MIN_PARAMS_TO_WRAP, metadata={ "help": "minimum number of params for a layer to be wrapped with FSDP() when " "training with --ddp-backend=fully_sharded. Smaller values will " "improve memory efficiency, but may make torch.distributed " "communication less efficient due to smaller input sizes. This option " "is set to 0 (i.e., always wrap) when --checkpoint-activations or " "--offload-activations are passed." 
}, ) # DEPRECATED field, but some old checkpoints might have it char_inputs: bool = field( default=False, metadata={"help": "if set, model takes character ids as input"} ) relu_dropout: float = 0.0 # config for "BASE Layers: Simplifying Training of Large, Sparse Models" base_layers: Optional[int] = field( default=0, metadata={"help": "number of BASE layers in total"} ) base_sublayers: Optional[int] = field( default=1, metadata={"help": "number of sublayers in each BASE layer"} ) base_shuffle: Optional[int] = field( default=1, metadata={"help": "shuffle tokens between workers before computing assignment"}, ) export: bool = field( default=False, metadata={"help": "make the layernorm exportable with torchscript."}, ) # copied from transformer_lm but expected in transformer_decoder: no_decoder_final_norm: bool = field( default=False, metadata={"help": "don't add an extra layernorm after the last decoder block"}, ) lora_r: int = field( default=0, metadata={"help": "Rank r for LoRA. 0 means disabled."} ) lora_r_shape: int = field( default=0, metadata={"help": "LoRA r for different layers. 0 means uniform, 1 means decrasing (2, 1.5, 1, 0.5 ...), 2 means increasing"} ) # We need to make this hierarchical dataclass like the flat namespace # __getattr__ and __setattr__ here allow backward compatibility # for subclasses of Transformer(Legacy) that depend on read/write on # the flat namespace. def __getattr__(self, name): match = re.match(_NAME_PARSER, name) if match: sub = safe_getattr(self, match[1]) return safe_getattr(sub, match[2]) raise AttributeError(f"invalid argument {name}.") def __setattr__(self, name, value): match = re.match(_NAME_PARSER, name) if match: sub = safe_getattr(self, match[1]) setattr(sub, match[2], value) else: super().__setattr__(name, value) @staticmethod def _copy_keys(args, cls, prefix, seen): """ copy the prefixed keys (decoder_embed_dim) to the DC fields: decoder.embed_dim """ cfg = cls() for fld in fields(cls): # for all the fields in the DC, find the fields (e.g. embed_dim) # in the namespace with the prefix (e.g. decoder) # and set it on the dc. args_key = f"{prefix}_{fld.name}" if safe_hasattr(args, args_key): seen.add(args_key) setattr(cfg, fld.name, safe_getattr(args, args_key)) if safe_hasattr(args, fld.name): seen.add(fld.name) setattr(cfg, fld.name, safe_getattr(args, fld.name)) return cfg @classmethod def from_namespace(cls, args): if args is None: return None if not isinstance(args, cls): seen = set() config = cls() # currently, we can go generically from DC fields to args hierarchically # but we can't easily deconstruct a flat namespace to a hierarchical # DC. Mostly because we could have a sub-dc called `decoder-foo` that should not # go to the sub struct called `decoder`. There are ways to go around this, but let's keep it simple # for now. 
for fld in fields(cls): # concretelly, the transformer_config know what sub-dc it has, so we go through all the dc fields # and if it's one that has a sub-dc, we build that sub-dc with `copy_keys()` if fld.name == "decoder": if safe_hasattr(args, "decoder"): # in some cases, the args we receive is already structured (as DictConfigs), so let's just build the correct DC seen.add("decoder") config.decoder = DecoderConfig(**args.decoder) else: config.decoder = cls._copy_keys( args, DecoderConfig, "decoder", seen ) elif fld.name == "encoder": # same but for encoder if safe_hasattr(args, "encoder"): seen.add("encoder") config.encoder = EncDecBaseConfig(**args.encoder) else: config.encoder = cls._copy_keys( args, EncDecBaseConfig, "encoder", seen ) elif fld.name == "quant_noise": # same but for quant_noise if safe_hasattr(args, "quant_noise"): seen.add("quant_noise") config.quant_noise = QuantNoiseConfig(**args.quant_noise) else: config.quant_noise = cls._copy_keys( args, QuantNoiseConfig, "quant_noise", seen ) elif safe_hasattr(args, fld.name): # if it's not a structure field, it's just a normal field, copy it over seen.add(fld.name) setattr(config, fld.name, safe_getattr(args, fld.name)) # we got all the fields defined in the dataclass, but # the argparse namespace might have extra args for two reasons: # - we are in a legacy class so all the args are not declared in the dataclass. Ideally once everyone has defined a dataclass for their model, we won't need this # - some places expect args to be there but never define them args_dict = ( args._asdict() if safe_hasattr(args, "_asdict") else vars(args) if safe_hasattr(args, "__dict__") else {} ) # namedtupled doesn't have __dict__ :-/ for key, value in args_dict.items(): if key not in seen: setattr(config, key, value) return config else: return args
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/transformer/transformer_config.py
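TransformerConfig above keeps a hierarchical dataclass while staying backward compatible with the flat argparse names through __getattr__ / __setattr__ and _NAME_PARSER, so cfg.decoder_embed_dim resolves to cfg.decoder.embed_dim. A minimal stand-alone sketch of that mapping follows; Config and SubConfig are toy classes (only the read path is shown, whereas the original also overrides __setattr__).

import re
from dataclasses import dataclass, field

_NAME_PARSER = r"(decoder|encoder)_(.*)"


@dataclass
class SubConfig:
    embed_dim: int = 512
    layers: int = 6


@dataclass
class Config:
    encoder: SubConfig = field(default_factory=SubConfig)
    decoder: SubConfig = field(default_factory=SubConfig)

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails, i.e. for the flat
        # legacy names such as "decoder_embed_dim".
        match = re.match(_NAME_PARSER, name)
        if match:
            return getattr(getattr(self, match[1]), match[2])
        raise AttributeError(f"invalid argument {name}.")


cfg = Config()
print(cfg.decoder_embed_dim)  # 512, resolved to cfg.decoder.embed_dim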
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.dataclass.utils import gen_parser_from_dataclass from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.transformer.transformer_config import ( TransformerConfig, DEFAULT_MAX_SOURCE_POSITIONS, DEFAULT_MAX_TARGET_POSITIONS, DEFAULT_MIN_PARAMS_TO_WRAP, ) from fairseq.models.transformer.transformer_base import ( TransformerModelBase, ) @register_model("transformer") class TransformerModel(TransformerModelBase): """ This is the legacy implementation of the transformer model that uses argparse for configuration. """ @classmethod def hub_models(cls): # fmt: off def moses_subword(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'subword_nmt', } def moses_fastbpe(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'fastbpe', } def spm(path): return { 'path': path, 'bpe': 'sentencepiece', 'tokenizer': 'space', } return { 'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'), 'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2', 'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'), 'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'), 'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'), 'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'), 'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'), 'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'), 'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'), 'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'), 'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'), 'transformer.wmt20.en-ta': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-ta.single.tar.gz'), 'transformer.wmt20.en-iu.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.news.single.tar.gz'), 'transformer.wmt20.en-iu.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.nh.single.tar.gz'), 'transformer.wmt20.ta-en': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta-en.single.tar.gz'), 'transformer.wmt20.iu-en.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.news.single.tar.gz'), 'transformer.wmt20.iu-en.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.nh.single.tar.gz'), 'transformer.flores101.mm100.615M': spm('https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_615M.tar.gz'), 'transformer.flores101.mm100.175M': spm('https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_175M.tar.gz'), } # fmt: on def __init__(self, args, encoder, decoder): cfg = TransformerConfig.from_namespace(args) 
super().__init__(cfg, encoder, decoder) self.args = args @classmethod def add_args(cls, parser): """Add model-specific arguments to the parser.""" # we want to build the args recursively in this case. # do not set defaults so that settings defaults from various architectures still works gen_parser_from_dataclass( parser, TransformerConfig(), delete_default=True, with_prefix="" ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if args.encoder_layers_to_keep: args.encoder_layers = len(args.encoder_layers_to_keep.split(",")) if args.decoder_layers_to_keep: args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) if getattr(args, "max_source_positions", None) is None: args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if getattr(args, "max_target_positions", None) is None: args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError("--share-all-embeddings requires a joined dictionary") if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise ValueError( "--share-all-embeddings not compatible with --decoder-embed-path" ) args.share_decoder_input_output_embed = True if getattr(args, "offload_activations", False): args.checkpoint_activations = True # offloading implies checkpointing if not args.share_all_embeddings: args.min_params_to_wrap = getattr( args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP ) cfg = TransformerConfig.from_namespace(args) return super().build_model(cfg, task) @classmethod def build_embedding(cls, args, dictionary, embed_dim, path=None): return super().build_embedding( TransformerConfig.from_namespace(args), dictionary, embed_dim, path ) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return super().build_encoder( TransformerConfig.from_namespace(args), src_dict, embed_tokens ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens, encoder_layers=None): return super().build_decoder( TransformerConfig.from_namespace(args), tgt_dict, embed_tokens, encoder_layers=encoder_layers ) # architectures @register_model_architecture("transformer", "transformer_tiny") def tiny_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 64) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 64) args.encoder_layers = getattr(args, "encoder_layers", 2) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2) args.decoder_layers = getattr(args, "decoder_layers", 2) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2) return base_architecture(args) @register_model_architecture("transformer", "transformer") def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) 
args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.no_cross_attention = getattr(args, "no_cross_attention", False) args.cross_self_attention = getattr(args, "cross_self_attention", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.checkpoint_activations = getattr(args, "checkpoint_activations", False) args.offload_activations = getattr(args, "offload_activations", False) if args.offload_activations: args.checkpoint_activations = True args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None) args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None) args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0) @register_model_architecture("transformer", "transformer_edge") def edge_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", 128 ) args.decoder_layers = getattr(args, "decoder_layers", 2) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, 
"decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.no_cross_attention = getattr(args, "no_cross_attention", False) args.cross_self_attention = getattr(args, "cross_self_attention", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.model_param_type = getattr( args, "model_param_type", "edgeformer" ) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.checkpoint_activations = getattr(args, "checkpoint_activations", False) args.offload_activations = getattr(args, "offload_activations", False) if args.offload_activations: args.checkpoint_activations = True args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None) args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None) args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0) @register_model_architecture("transformer", "transformer_iwslt_de_en") def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.encoder_layers = getattr(args, "encoder_layers", 6) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.decoder_layers = getattr(args, "decoder_layers", 6) base_architecture(args) @register_model_architecture("transformer", "transformer_wmt_en_de") def transformer_wmt_en_de(args): base_architecture(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture("transformer", "transformer_vaswani_wmt_en_de_big") def transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, 
"decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) base_architecture(args) @register_model_architecture("transformer", "transformer_vaswani_wmt_en_fr_big") def transformer_vaswani_wmt_en_fr_big(args): args.dropout = getattr(args, "dropout", 0.1) transformer_vaswani_wmt_en_de_big(args) @register_model_architecture("transformer", "transformer_wmt_en_de_big") def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, "attention_dropout", 0.1) transformer_vaswani_wmt_en_de_big(args) # default parameters used in tensor2tensor implementation @register_model_architecture("transformer", "transformer_wmt_en_de_big_t2t") def transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_dropout = getattr(args, "activation_dropout", 0.1) transformer_vaswani_wmt_en_de_big(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/transformer/transformer_legacy.py
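Each architecture function registered for the legacy "transformer" model above fills in missing options with getattr(args, name, default), so configs written before an option existed still build. A minimal, self-contained sketch of that back-filling idiom (plain argparse, no fairseq; the three option names are illustrative only):

from argparse import Namespace

def toy_architecture(args):
    # Back-fill defaults only for attributes the caller did not set,
    # mirroring base_architecture()/tiny_architecture() above.
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 64)
    args.encoder_layers = getattr(args, "encoder_layers", 2)
    args.dropout = getattr(args, "dropout", 0.1)
    return args

# Only one field is overridden; the rest fall back to the declared defaults.
args = toy_architecture(Namespace(encoder_layers=4))
print(args.encoder_embed_dim, args.encoder_layers, args.dropout)  # 64 4 0.1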
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, List, Optional import torch import torch.nn as nn from fairseq import utils from fairseq.distributed import fsdp_wrap from fairseq.models import FairseqEncoder from fairseq.modules import ( FairseqDropout, LayerDropModuleList, LayerNorm, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from fairseq.modules import transformer_layer from fairseq.modules.checkpoint_activations import checkpoint_wrapper from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from torch import Tensor from fairseq.models.transformer import ( TransformerConfig, ) # rewrite name for backward compatibility in `make_generation_fast_` def module_name_fordropout(module_name: str) -> str: if module_name == 'TransformerEncoderBase': return 'TransformerEncoder' else: return module_name class TransformerEncoderBase(FairseqEncoder): """ Transformer encoder consisting of *cfg.encoder.layers* layers. Each layer is a :class:`TransformerEncoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, cfg, dictionary, embed_tokens): self.cfg = cfg super().__init__(dictionary) self.register_buffer("version", torch.Tensor([3])) self.dropout_module = FairseqDropout( cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__) ) self.encoder_layerdrop = cfg.encoder.layerdrop embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = cfg.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim) self.embed_positions = ( PositionalEmbedding( cfg.max_source_positions, embed_dim, self.padding_idx, learned=cfg.encoder.learned_pos, ) if not cfg.no_token_positional_embeddings else None ) if cfg.layernorm_embedding: self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export) else: self.layernorm_embedding = None if not cfg.adaptive_input and cfg.quant_noise.pq > 0: self.quant_noise = apply_quant_noise_( nn.Linear(embed_dim, embed_dim, bias=False), cfg.quant_noise.pq, cfg.quant_noise.pq_block_size, ) else: self.quant_noise = None if self.encoder_layerdrop > 0.0: self.layers = LayerDropModuleList(p=self.encoder_layerdrop) else: self.layers = nn.ModuleList([]) if cfg.model_param_type.lower() == "edgeformer": edge_layers = [self.build_encoder_layer(cfg, layerid=i) for i in range(2)] # 2 groups for i in range(2, 4): edge_layers.append(self.build_encoder_layer(cfg,shared_layer=edge_layers[i%2], layerid=i)) edge_layers[-1].self_attn = edge_layers[-1].build_self_attention(embed_dim, cfg) # 3rd and 4th layers use free self-attn for i in range(4, 12): edge_layers.append(self.build_encoder_layer(cfg,shared_layer=edge_layers[i%4], layerid=i)) self.layers.extend(edge_layers) elif cfg.model_param_type.lower() == "ut" or cfg.model_param_type.lower() == 'universal': first_layer = self.build_encoder_layer(cfg) self.layers.append(first_layer) self.layers.extend( [self.build_encoder_layer(cfg, shared_layer=first_layer) for i in range(1, cfg.encoder.layers)] ) else: self.layers.extend( [self.build_encoder_layer(cfg) for i in range(cfg.encoder.layers)] ) self.num_layers = len(self.layers) if cfg.encoder.normalize_before: self.layer_norm = 
LayerNorm(embed_dim, export=cfg.export) else: self.layer_norm = None def build_encoder_layer(self, cfg, shared_layer=None, layerid=None): layer = transformer_layer.TransformerEncoderLayerBase(cfg, shared_layer=shared_layer, layerid=layerid) checkpoint = cfg.checkpoint_activations if checkpoint: offload_to_cpu = cfg.offload_activations layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu) # if we are checkpointing, enforce that FSDP always wraps the # checkpointed layer, regardless of layer size min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0 layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap) return layer def forward_embedding( self, src_tokens, token_embedding: Optional[torch.Tensor] = None ): # embed tokens and positions if token_embedding is None: token_embedding = self.embed_tokens(src_tokens) x = embed = self.embed_scale * token_embedding if self.embed_positions is not None: x = embed + self.embed_positions(src_tokens) if self.layernorm_embedding is not None: x = self.layernorm_embedding(x) x = self.dropout_module(x) if self.quant_noise is not None: x = self.quant_noise(x) return x, embed def forward( self, src_tokens, src_lengths: Optional[torch.Tensor] = None, return_all_hiddens: bool = False, token_embeddings: Optional[torch.Tensor] = None, ): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False). token_embeddings (torch.Tensor, optional): precomputed embeddings default `None` will recompute embeddings Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` - **encoder_embedding** (Tensor): the (scaled) embedding lookup of shape `(batch, src_len, embed_dim)` - **encoder_states** (List[Tensor]): all intermediate hidden states of shape `(src_len, batch, embed_dim)`. Only populated if *return_all_hiddens* is True. """ return self.forward_scriptable( src_tokens, src_lengths, return_all_hiddens, token_embeddings ) # TorchScript doesn't support super() method so that the scriptable Subclass # can't access the base class model in Torchscript. # Current workaround is to add a helper function with different name and # call the helper function from scriptable Subclass. def forward_scriptable( self, src_tokens, src_lengths: Optional[torch.Tensor] = None, return_all_hiddens: bool = False, token_embeddings: Optional[torch.Tensor] = None, ): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False). token_embeddings (torch.Tensor, optional): precomputed embeddings default `None` will recompute embeddings Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` - **encoder_embedding** (Tensor): the (scaled) embedding lookup of shape `(batch, src_len, embed_dim)` - **encoder_states** (List[Tensor]): all intermediate hidden states of shape `(src_len, batch, embed_dim)`. 
Only populated if *return_all_hiddens* is True. """ # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) has_pads = src_tokens.device.type == "xla" or encoder_padding_mask.any() x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings) # account for padding while computing the representation if has_pads: x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x)) # B x T x C -> T x B x C x = x.transpose(0, 1) encoder_states = [] if return_all_hiddens: encoder_states.append(x) # encoder layers for layer in self.layers: x = layer( x, encoder_padding_mask=encoder_padding_mask if has_pads else None ) if return_all_hiddens: assert encoder_states is not None encoder_states.append(x) if self.layer_norm is not None: x = self.layer_norm(x) # The Pytorch Mobile lite interpreter does not supports returning NamedTuple in # `forward` so we use a dictionary instead. # TorchScript does not support mixed values so the values are all lists. # The empty list is equivalent to None. src_lengths = src_tokens.ne(self.padding_idx).sum(dim=1, dtype=torch.int32).reshape(-1, 1).contiguous() return { "encoder_out": [x], # T x B x C "encoder_padding_mask": [encoder_padding_mask], # B x T "encoder_embedding": [encoder_embedding], # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], "src_lengths": [src_lengths], } @torch.jit.export def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order): """ Reorder encoder output according to *new_order*. Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if len(encoder_out["encoder_out"]) == 0: new_encoder_out = [] else: new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)] if len(encoder_out["encoder_padding_mask"]) == 0: new_encoder_padding_mask = [] else: new_encoder_padding_mask = [ encoder_out["encoder_padding_mask"][0].index_select(0, new_order) ] if len(encoder_out["encoder_embedding"]) == 0: new_encoder_embedding = [] else: new_encoder_embedding = [ encoder_out["encoder_embedding"][0].index_select(0, new_order) ] if len(encoder_out["src_tokens"]) == 0: src_tokens = [] else: src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)] if len(encoder_out["src_lengths"]) == 0: src_lengths = [] else: src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)] encoder_states = encoder_out["encoder_states"] if len(encoder_states) > 0: for idx, state in enumerate(encoder_states): encoder_states[idx] = state.index_select(1, new_order) return { "encoder_out": new_encoder_out, # T x B x C "encoder_padding_mask": new_encoder_padding_mask, # B x T "encoder_embedding": new_encoder_embedding, # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": src_tokens, # B x T "src_lengths": src_lengths, # B x 1 } def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = "{}.embed_positions.weights".format(name) if weights_key in state_dict: print("deleting {0}".format(weights_key)) del state_dict[weights_key] state_dict[ "{}.embed_positions._float_tensor".format(name) ] 
= torch.FloatTensor(1) for i in range(self.num_layers): # update layer norms self.layers[i].upgrade_state_dict_named( state_dict, "{}.layers.{}".format(name, i) ) version_key = "{}.version".format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerEncoder(TransformerEncoderBase): def __init__(self, args, dictionary, embed_tokens): self.args = args super().__init__( TransformerConfig.from_namespace(args), dictionary, embed_tokens, ) def build_encoder_layer(self, args, shared_layer=None): return super().build_encoder_layer( TransformerConfig.from_namespace(args), )
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/transformer/transformer_encoder.py
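The encoder above implements the "ut"/"universal" and "edgeformer" settings by constructing later layers with shared_layer= so that several depths reuse one set of weights. The sketch below shows the underlying idea in plain PyTorch, independent of fairseq's layer class and of the exact edgeformer grouping: inserting the same module instance at every depth ties the parameters automatically.

import torch
import torch.nn as nn

d_model, depth = 64, 6

# One concrete layer reused at every position, universal-transformer style.
shared_layer = nn.TransformerEncoderLayer(
    d_model=d_model, nhead=4, dim_feedforward=128, batch_first=True
)
stack = nn.ModuleList([shared_layer for _ in range(depth)])

# PyTorch deduplicates the shared module, so the stack is no larger than one layer.
assert sum(p.numel() for p in stack.parameters()) == sum(
    p.numel() for p in shared_layer.parameters()
)

x = torch.randn(2, 10, d_model)
for layer in stack:   # the same weights are applied at every depth
    x = layer(x)
print(x.shape)        # torch.Size([2, 10, 64])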
# Copyright (c) Facebook Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" from .transformer_config import ( TransformerConfig, DEFAULT_MAX_SOURCE_POSITIONS, DEFAULT_MAX_TARGET_POSITIONS, DEFAULT_MIN_PARAMS_TO_WRAP, ) from .transformer_decoder import TransformerDecoder, TransformerDecoderBase, Linear from .transformer_encoder import TransformerEncoder, TransformerEncoderBase from .transformer_legacy import ( TransformerModel, base_architecture, tiny_architecture, transformer_iwslt_de_en, transformer_wmt_en_de, transformer_vaswani_wmt_en_de_big, transformer_vaswani_wmt_en_fr_big, transformer_wmt_en_de_big, transformer_wmt_en_de_big_t2t, ) from .transformer_base import TransformerModelBase, Embedding __all__ = [ "TransformerModelBase", "TransformerConfig", "TransformerDecoder", "TransformerDecoderBase", "TransformerEncoder", "TransformerEncoderBase", "TransformerModel", "Embedding", "Linear", "base_architecture", "tiny_architecture", "transformer_iwslt_de_en", "transformer_wmt_en_de", "transformer_vaswani_wmt_en_de_big", "transformer_vaswani_wmt_en_fr_big", "transformer_wmt_en_de_big", "transformer_wmt_en_de_big_t2t", "DEFAULT_MAX_SOURCE_POSITIONS", "DEFAULT_MAX_TARGET_POSITIONS", "DEFAULT_MIN_PARAMS_TO_WRAP", ]
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/transformer/__init__.py
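The package __init__ above defines the public surface through __all__, so both the refactored base classes and the legacy helpers can be imported from one place. A short usage sketch, assuming this edgelm checkout is installed as the fairseq package (building an actual model additionally needs a task and dictionaries, omitted here):

# Every name listed in __all__ above is importable from the package root.
from fairseq.models.transformer import (
    TransformerModel,        # legacy argparse-based model, registered as "transformer"
    TransformerModelBase,    # dataclass-configured base class
    TransformerConfig,
    base_architecture,
    DEFAULT_MAX_SOURCE_POSITIONS,
)

print(DEFAULT_MAX_SOURCE_POSITIONS)  # default source-length cap used by build_model()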
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Any, Dict, List, Optional import torch import torch.nn as nn from fairseq import utils from fairseq.distributed import fsdp_wrap from fairseq.models import FairseqIncrementalDecoder from fairseq.models.transformer import TransformerConfig from fairseq.modules import ( AdaptiveSoftmax, BaseLayer, FairseqDropout, LayerDropModuleList, LayerNorm, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from fairseq.modules import transformer_layer from fairseq.modules.checkpoint_activations import checkpoint_wrapper from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from torch import Tensor # rewrite name for backward compatibility in `make_generation_fast_` def module_name_fordropout(module_name: str) -> str: if module_name == 'TransformerDecoderBase': return 'TransformerDecoder' else: return module_name class TransformerDecoderBase(FairseqIncrementalDecoder): """ Transformer decoder consisting of *cfg.decoder.layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__( self, cfg, dictionary, embed_tokens, no_encoder_attn=False, output_projection=None, encoder_layers=None ): self.cfg = cfg super().__init__(dictionary) self.register_buffer("version", torch.Tensor([3])) self._future_mask = torch.empty(0) self.dropout_module = FairseqDropout( cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__) ) self.decoder_layerdrop = cfg.decoder.layerdrop self.share_input_output_embed = cfg.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = cfg.decoder.embed_dim self.embed_dim = embed_dim self.output_embed_dim = cfg.decoder.output_dim self.padding_idx = embed_tokens.padding_idx self.max_target_positions = cfg.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim) if not cfg.adaptive_input and cfg.quant_noise.pq > 0: self.quant_noise = apply_quant_noise_( nn.Linear(embed_dim, embed_dim, bias=False), cfg.quant_noise.pq, cfg.quant_noise.pq_block_size, ) else: self.quant_noise = None self.project_in_dim = ( Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None ) self.embed_positions = ( PositionalEmbedding( self.max_target_positions, embed_dim, self.padding_idx, learned=cfg.decoder.learned_pos, ) if not cfg.no_token_positional_embeddings else None ) if cfg.layernorm_embedding: self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export) else: self.layernorm_embedding = None self.cross_self_attention = cfg.cross_self_attention if self.decoder_layerdrop > 0.0: self.layers = LayerDropModuleList(p=self.decoder_layerdrop) else: self.layers = nn.ModuleList([]) if cfg.model_param_type == "edgeformer": self.layers.append(self.build_interleaved_decoder_layer(cfg, no_encoder_attn, encoder_layers[0:2])) self.layers.append(self.build_interleaved_decoder_layer(cfg, no_encoder_attn, encoder_layers[2:4])) elif cfg.model_param_type.lower() == "ut" or cfg.model_param_type.lower() == 'universal': first_layer = self.build_decoder_layer(cfg, 
no_encoder_attn) self.layers.append(first_layer) self.layers.extend( [ self.build_decoder_layer(cfg, no_encoder_attn, shared_layer=first_layer) for _ in range(1, cfg.decoder.layers) ] ) else: self.layers.extend( [ self.build_decoder_layer(cfg, no_encoder_attn) for _ in range(cfg.decoder.layers) ] ) self.num_layers = len(self.layers) if cfg.decoder.normalize_before and not cfg.no_decoder_final_norm: self.layer_norm = LayerNorm(embed_dim, export=cfg.export) else: self.layer_norm = None self.project_out_dim = ( Linear(embed_dim, self.output_embed_dim, bias=False) if embed_dim != self.output_embed_dim and not cfg.tie_adaptive_weights else None ) self.adaptive_softmax = None self.output_projection = output_projection if self.output_projection is None: self.build_output_projection(cfg, dictionary, embed_tokens) def build_output_projection(self, cfg, dictionary, embed_tokens): if cfg.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, utils.eval_str_list(cfg.adaptive_softmax_cutoff, type=int), dropout=cfg.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if cfg.tie_adaptive_weights else None, factor=cfg.adaptive_softmax_factor, tie_proj=cfg.tie_adaptive_proj, ) elif self.share_input_output_embed: self.output_projection = nn.Linear( self.embed_tokens.weight.shape[1], self.embed_tokens.weight.shape[0], bias=False, ) self.output_projection.weight = self.embed_tokens.weight else: self.output_projection = nn.Linear( self.output_embed_dim, len(dictionary), bias=False ) nn.init.normal_( self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5 ) num_base_layers = cfg.base_layers for i in range(num_base_layers): self.layers.insert( ((i + 1) * cfg.decoder.layers) // (num_base_layers + 1), BaseLayer(cfg), ) def build_decoder_layer(self, cfg, no_encoder_attn=False, shared_layer=None): layer = transformer_layer.TransformerDecoderLayerBase(cfg, no_encoder_attn, shared_layer=shared_layer) checkpoint = cfg.checkpoint_activations if checkpoint: offload_to_cpu = cfg.offload_activations layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu) # if we are checkpointing, enforce that FSDP always wraps the # checkpointed layer, regardless of layer size min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0 layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap) return layer def build_interleaved_decoder_layer(self, cfg, no_encoder_attn=False, encoder_layers=None): layer = transformer_layer.TransformerInterleavedDecoderLayerBase(cfg, no_encoder_attn, encoder_layers=encoder_layers) checkpoint = cfg.checkpoint_activations if checkpoint: offload_to_cpu = cfg.offload_activations layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu) # if we are checkpointing, enforce that FSDP always wraps the # checkpointed layer, regardless of layer size min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0 layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap) return layer def forward( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, features_only: bool = False, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, src_lengths: Optional[Any] = None, return_all_hiddens: bool = False, ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (optional): 
output from the encoder, used for encoder-side attention, should be of size T x B x C incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` features_only (bool, optional): only return features without applying output layer (default: False). full_context_alignment (bool, optional): don't apply auto-regressive mask to self-attention (default: False). Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, full_context_alignment=full_context_alignment, alignment_layer=alignment_layer, alignment_heads=alignment_heads, ) if not features_only: x = self.output_layer(x) return x, extra def extract_features( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): return self.extract_features_scriptable( prev_output_tokens, encoder_out, incremental_state, full_context_alignment, alignment_layer, alignment_heads, ) """ A scriptable subclass of this class has an extract_features method and calls super().extract_features, but super() is not supported in torchscript. A copy of this function is made to be used in the subclass instead. """ def extract_features_scriptable( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): """ Similar to *forward* but only return features. Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). Args: full_context_alignment (bool, optional): don't apply auto-regressive mask to self-attention (default: False). alignment_layer (int, optional): return mean alignment over heads at this layer (default: last layer). alignment_heads (int, optional): only average alignment over this many heads (default: all heads). 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ bs, slen = prev_output_tokens.size() if alignment_layer is None: alignment_layer = self.num_layers - 1 enc: Optional[Tensor] = None padding_mask: Optional[Tensor] = None if encoder_out is not None and len(encoder_out["encoder_out"]) > 0: enc = encoder_out["encoder_out"][0] assert ( enc.size()[1] == bs ), f"Expected enc.shape == (t, {bs}, c) got {enc.shape}" if encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0: padding_mask = encoder_out["encoder_padding_mask"][0] # embed positions positions = None if self.embed_positions is not None: positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.quant_noise is not None: x = self.quant_noise(x) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions if self.layernorm_embedding is not None: x = self.layernorm_embedding(x) x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) self_attn_padding_mask: Optional[Tensor] = None if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any(): self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx) # decoder layers attn: Optional[Tensor] = None inner_states: List[Optional[Tensor]] = [x] for idx, layer in enumerate(self.layers): if incremental_state is None and not full_context_alignment: self_attn_mask = self.buffered_future_mask(x) else: self_attn_mask = None x, layer_attn, _ = layer( x, enc, padding_mask, incremental_state, self_attn_mask=self_attn_mask, self_attn_padding_mask=self_attn_padding_mask, need_attn=bool((idx == alignment_layer)), need_head_weights=bool((idx == alignment_layer)), ) inner_states.append(x) if layer_attn is not None and idx == alignment_layer: attn = layer_attn.float().to(x) if attn is not None: if alignment_heads is not None: attn = attn[:alignment_heads] # average probabilities over heads attn = attn.mean(dim=0) if self.layer_norm is not None: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": [attn], "inner_states": inner_states} def output_layer(self, features): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary return self.output_projection(features) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions) def buffered_future_mask(self, tensor): dim = tensor.size(0) # self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround. 
if ( self._future_mask.size(0) == 0 or (not self._future_mask.device == tensor.device) or self._future_mask.size(0) < dim ): self._future_mask = torch.triu( utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1 ) self._future_mask = self._future_mask.to(tensor) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = "{}.embed_positions.weights".format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict[ "{}.embed_positions._float_tensor".format(name) ] = torch.FloatTensor(1) if f"{name}.output_projection.weight" not in state_dict: if self.share_input_output_embed: embed_out_key = f"{name}.embed_tokens.weight" else: embed_out_key = f"{name}.embed_out" if embed_out_key in state_dict: state_dict[f"{name}.output_projection.weight"] = state_dict[ embed_out_key ] if not self.share_input_output_embed: del state_dict[embed_out_key] for i in range(self.num_layers): # update layer norms layer_norm_map = { "0": "self_attn_layer_norm", "1": "encoder_attn_layer_norm", "2": "final_layer_norm", } for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m) if k in state_dict: state_dict[ "{}.layers.{}.{}.{}".format(name, i, new, m) ] = state_dict[k] del state_dict[k] version_key = "{}.version".format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m class TransformerDecoder(TransformerDecoderBase): def __init__( self, args, dictionary, embed_tokens, no_encoder_attn=False, output_projection=None, ): self.args = args super().__init__( TransformerConfig.from_namespace(args), dictionary, embed_tokens, no_encoder_attn=no_encoder_attn, output_projection=output_projection, ) def build_output_projection(self, args, dictionary, embed_tokens): super().build_output_projection( TransformerConfig.from_namespace(args), dictionary, embed_tokens ) def build_decoder_layer(self, args, no_encoder_attn=False): return super().build_decoder_layer( TransformerConfig.from_namespace(args), no_encoder_attn=no_encoder_attn )
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/transformer/transformer_decoder.py
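buffered_future_mask() in the decoder above caches an upper-triangular matrix of -inf (strictly above the diagonal), rebuilding it only when the cache is empty, too small, or on the wrong device, and then slicing it to the current length. A standalone sketch of the same logic in plain PyTorch, with fairseq's fill_with_neg_inf helper inlined:

import torch

class CausalMaskCache:
    """Lazily rebuilt upper-triangular -inf mask, mirroring buffered_future_mask()."""

    def __init__(self):
        self._mask = torch.empty(0)

    def __call__(self, x: torch.Tensor) -> torch.Tensor:
        dim = x.size(0)  # x is T x B x C, as in the decoder above
        if (
            self._mask.size(0) == 0
            or self._mask.device != x.device
            or self._mask.size(0) < dim
        ):
            self._mask = torch.triu(
                torch.full((dim, dim), float("-inf")), diagonal=1
            ).to(x)
        return self._mask[:dim, :dim]

mask = CausalMaskCache()(torch.randn(4, 2, 8))
print(mask)  # zeros on and below the diagonal; -inf above, removed by the softmax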
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn from fairseq import utils, options from fairseq.dataclass.utils import gen_parser_from_dataclass from fairseq.distributed import fsdp_wrap from fairseq.models import FairseqEncoderDecoderModel from fairseq.models.transformer import ( TransformerEncoderBase, TransformerDecoderBase, TransformerConfig, ) from torch import Tensor from fairseq.modules import AdaptiveInput class TransformerModelBase(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, cfg, encoder, decoder): super().__init__(encoder, decoder) self.cfg = cfg self.supports_align_args = True @classmethod def add_args(cls, parser): """Add model-specific arguments to the parser.""" # we want to build the args recursively in this case. gen_parser_from_dataclass( parser, TransformerConfig(), delete_default=False, with_prefix="" ) @classmethod def build_model(cls, cfg, task): """Build a new model instance.""" # -- TODO T96535332 # bug caused by interaction between OmegaConf II and argparsing cfg.decoder.input_dim = int(cfg.decoder.input_dim) cfg.decoder.output_dim = int(cfg.decoder.output_dim) # -- if cfg.encoder.layers_to_keep: cfg.encoder.layers = len(cfg.encoder.layers_to_keep.split(",")) if cfg.decoder.layers_to_keep: cfg.decoder.layers = len(cfg.decoder.layers_to_keep.split(",")) src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if cfg.adaptive_input is False: if cfg.share_all_embeddings: if src_dict != tgt_dict: raise ValueError("--share-all-embeddings requires a joined dictionary") if cfg.encoder.embed_dim != cfg.decoder.embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if cfg.decoder.embed_path and ( cfg.decoder.embed_path != cfg.encoder.embed_path ): raise ValueError( "--share-all-embeddings not compatible with --decoder-embed-path" ) encoder_embed_tokens = cls.build_embedding( cfg, src_dict, cfg.encoder.embed_dim, cfg.encoder.embed_path ) decoder_embed_tokens = encoder_embed_tokens cfg.share_decoder_input_output_embed = True else: encoder_embed_tokens = cls.build_embedding( cfg, src_dict, cfg.encoder.embed_dim, cfg.encoder.embed_path ) decoder_embed_tokens = cls.build_embedding( cfg, tgt_dict, cfg.decoder.embed_dim, cfg.decoder.embed_path ) else: encoder_embed_tokens = AdaptiveInput( len(task.source_dictionary), task.source_dictionary.pad(), cfg.encoder.embed_dim, cfg.adaptive_input_factor, cfg.encoder.embed_dim, options.eval_str_list(cfg.adaptive_input_cutoff, type=int), cfg.quant_noise_pq, cfg.quant_noise_pq_block_size, ) decoder_embed_tokens = encoder_embed_tokens if cfg.tie_adaptive_weights: assert cfg.adaptive_input assert cfg.adaptive_input_factor == cfg.adaptive_softmax_factor assert ( cfg.adaptive_softmax_cutoff == cfg.adaptive_input_cutoff ), "{} != {}".format( cfg.adaptive_softmax_cutoff, cfg.adaptive_input_cutoff ) assert cfg.decoder_input_dim == cfg.decoder_output_dim if cfg.offload_activations: cfg.checkpoint_activations = 
True # offloading implies checkpointing encoder = cls.build_encoder(cfg, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens, encoder_layers=encoder.layers) if not cfg.share_all_embeddings: # fsdp_wrap is a no-op when --ddp-backend != fully_sharded encoder = fsdp_wrap(encoder, min_num_params=cfg.min_params_to_wrap) decoder = fsdp_wrap(decoder, min_num_params=cfg.min_params_to_wrap) return cls(cfg, encoder, decoder) @classmethod def build_embedding(cls, cfg, dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb @classmethod def build_encoder(cls, cfg, src_dict, embed_tokens): return TransformerEncoderBase(cfg, src_dict, embed_tokens) @classmethod def build_decoder(cls, cfg, tgt_dict, embed_tokens, encoder_layers=None): return TransformerDecoderBase( cfg, tgt_dict, embed_tokens, no_encoder_attn=cfg.no_cross_attention, encoder_layers=encoder_layers ) # TorchScript doesn't support optional arguments with variable length (**kwargs). # Current workaround is to add union of all arguments in child classes. def forward( self, src_tokens, src_lengths, prev_output_tokens, return_all_hiddens: bool = True, features_only: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): """ Run the forward pass for an encoder-decoder model. Copied from the base class, but without ``**kwargs``, which are not supported by TorchScript. """ encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens ) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, features_only=features_only, alignment_layer=alignment_layer, alignment_heads=alignment_heads, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens, ) return decoder_out # Since get_normalized_probs is in the Fairseq Model which is not scriptable, # I rewrite the get_normalized_probs from Base Class to call the # helper function in the Base Class. @torch.jit.export def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """Get normalized probabilities (or log probs) from a net's output.""" return self.get_normalized_probs_scriptable(net_output, log_probs, sample) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/transformer/transformer_base.py
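build_model() above checks the preconditions for --share-all-embeddings (a joined dictionary and matching embed dims) and then hands the encoder's embedding to the decoder; with share_decoder_input_output_embed the output projection is tied to the same matrix in build_output_projection(). A compact sketch of that tying in plain PyTorch (vocabulary size and dimensions are made up):

import torch
import torch.nn as nn

vocab, dim, pad = 1000, 64, 1

shared_embed = nn.Embedding(vocab, dim, padding_idx=pad)
encoder_embed = shared_embed                      # encoder input embedding
decoder_embed = shared_embed                      # decoder input embedding (shared dictionary)

output_projection = nn.Linear(dim, vocab, bias=False)
output_projection.weight = shared_embed.weight    # tie the softmax weights to the embedding

tokens = torch.randint(2, vocab, (2, 5))
logits = output_projection(decoder_embed(tokens))
print(logits.shape)                               # torch.Size([2, 5, 1000])

# A single parameter tensor serves all three roles.
assert output_projection.weight.data_ptr() == encoder_embed.weight.data_ptr()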
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq.utils import new_arange # -------------- Helper Functions --------------------------------------------------- # def load_libnat(): try: from fairseq import libnat_cuda return libnat_cuda, True except ImportError as e: print(str(e) + "... fall back to CPU version") try: from fairseq import libnat return libnat, False except ImportError as e: import sys sys.stderr.write( "ERROR: missing libnat_cuda. run `python setup.py build_ext --inplace`\n" ) raise e def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx): libnat, use_cuda = load_libnat() def _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx): in_masks = in_tokens.ne(padding_idx) out_masks = out_tokens.ne(padding_idx) mask_ins_targets, masked_tgt_masks = libnat.generate_insertion_labels( out_tokens.int(), libnat.levenshtein_distance( in_tokens.int(), out_tokens.int(), in_masks.sum(1).int(), out_masks.sum(1).int(), ), ) masked_tgt_masks = masked_tgt_masks.bool() & out_masks mask_ins_targets = mask_ins_targets.type_as(in_tokens)[ :, 1 : in_masks.size(1) ].masked_fill_(~in_masks[:, 1:], 0) masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx) return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets def _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx): in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1) in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) mask_inputs = [ [len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels ] # generate labels masked_tgt_masks = [] for mask_input in mask_inputs: mask_label = [] for beam_size in mask_input[1:-1]: # HACK 1:-1 mask_label += [0] + [1 for _ in range(beam_size)] masked_tgt_masks.append( mask_label + [0 for _ in range(out_seq_len - len(mask_label))] ) mask_ins_targets = [ mask_input[1:-1] + [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))] for mask_input in mask_inputs ] # transform to tensor masked_tgt_masks = torch.tensor( masked_tgt_masks, device=out_tokens.device ).bool() mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device) masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx) return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets if use_cuda: return _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx) return _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx) def _get_del_targets(in_tokens, out_tokens, padding_idx): libnat, use_cuda = load_libnat() def _get_del_targets_cuda(in_tokens, out_tokens, padding_idx): in_masks = in_tokens.ne(padding_idx) out_masks = out_tokens.ne(padding_idx) word_del_targets = libnat.generate_deletion_labels( in_tokens.int(), libnat.levenshtein_distance( in_tokens.int(), out_tokens.int(), in_masks.sum(1).int(), out_masks.sum(1).int(), ), ) word_del_targets = word_del_targets.type_as(in_tokens).masked_fill_( ~in_masks, 0 ) return word_del_targets def _get_del_targets_cpu(in_tokens, out_tokens, padding_idx): out_seq_len = out_tokens.size(1) with torch.cuda.device_of(in_tokens): in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in 
enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) word_del_targets = [b[-1] for b in full_labels] word_del_targets = [ labels + [0 for _ in range(out_seq_len - len(labels))] for labels in word_del_targets ] # transform to tensor word_del_targets = torch.tensor(word_del_targets, device=out_tokens.device) return word_del_targets if use_cuda: return _get_del_targets_cuda(in_tokens, out_tokens, padding_idx) return _get_del_targets_cpu(in_tokens, out_tokens, padding_idx) def _apply_ins_masks( in_tokens, in_scores, mask_ins_pred, padding_idx, unk_idx, eos_idx ): in_masks = in_tokens.ne(padding_idx) in_lengths = in_masks.sum(1) # HACK: hacky way to shift all the paddings to eos first. in_tokens.masked_fill_(~in_masks, eos_idx) mask_ins_pred.masked_fill_(~in_masks[:, 1:], 0) out_lengths = in_lengths + mask_ins_pred.sum(1) out_max_len = out_lengths.max() out_masks = new_arange(out_lengths, out_max_len)[None, :] < out_lengths[:, None] reordering = (mask_ins_pred + in_masks[:, 1:].long()).cumsum(1) out_tokens = ( in_tokens.new_zeros(in_tokens.size(0), out_max_len) .fill_(padding_idx) .masked_fill_(out_masks, unk_idx) ) out_tokens[:, 0] = in_tokens[:, 0] out_tokens.scatter_(1, reordering, in_tokens[:, 1:]) out_scores = None if in_scores is not None: in_scores.masked_fill_(~in_masks, 0) out_scores = in_scores.new_zeros(*out_tokens.size()) out_scores[:, 0] = in_scores[:, 0] out_scores.scatter_(1, reordering, in_scores[:, 1:]) return out_tokens, out_scores def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, unk_idx): word_ins_masks = in_tokens.eq(unk_idx) out_tokens = in_tokens.masked_scatter(word_ins_masks, word_ins_pred[word_ins_masks]) if in_scores is not None: out_scores = in_scores.masked_scatter( word_ins_masks, word_ins_scores[word_ins_masks] ) else: out_scores = None return out_tokens, out_scores def _apply_del_words( in_tokens, in_scores, in_attn, word_del_pred, padding_idx, bos_idx, eos_idx ): # apply deletion to a tensor in_masks = in_tokens.ne(padding_idx) bos_eos_masks = in_tokens.eq(bos_idx) | in_tokens.eq(eos_idx) max_len = in_tokens.size(1) word_del_pred.masked_fill_(~in_masks, 1) word_del_pred.masked_fill_(bos_eos_masks, 0) reordering = new_arange(in_tokens).masked_fill_(word_del_pred, max_len).sort(1)[1] out_tokens = in_tokens.masked_fill(word_del_pred, padding_idx).gather(1, reordering) out_scores = None if in_scores is not None: out_scores = in_scores.masked_fill(word_del_pred, 0).gather(1, reordering) out_attn = None if in_attn is not None: _mask = word_del_pred[:, :, None].expand_as(in_attn) _reordering = reordering[:, :, None].expand_as(in_attn) out_attn = in_attn.masked_fill(_mask, 0.0).gather(1, _reordering) return out_tokens, out_scores, out_attn def _skip(x, mask): """ Getting sliced (dim=0) tensor by mask. Supporting tensor and list/dict of tensors. 
""" if isinstance(x, int): return x if x is None: return None if isinstance(x, torch.Tensor): if x.size(0) == mask.size(0): return x[mask] elif x.size(1) == mask.size(0): return x[:, mask] if isinstance(x, list): return [_skip(x_i, mask) for x_i in x] if isinstance(x, dict): return {k: _skip(v, mask) for k, v in x.items()} raise NotImplementedError def _skip_encoder_out(encoder, encoder_out, mask): if not mask.any(): return encoder_out else: return encoder.reorder_encoder_out( encoder_out, mask.nonzero(as_tuple=False).squeeze() ) def _fill(x, mask, y, padding_idx): """ Filling tensor x with y at masked positions (dim=0). """ if x is None: return y assert x.dim() == y.dim() and mask.size(0) == x.size(0) assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2)) n_selected = mask.sum() assert n_selected == y.size(0) if n_selected == x.size(0): return y if x.size(1) < y.size(1): dims = [x.size(0), y.size(1) - x.size(1)] if x.dim() == 3: dims.append(x.size(2)) x = torch.cat([x, x.new_zeros(*dims).fill_(padding_idx)], 1) x[mask] = y elif x.size(1) > y.size(1): x[mask] = padding_idx if x.dim() == 2: x[mask, : y.size(1)] = y else: x[mask, : y.size(1), :] = y else: x[mask] = y return x
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/nat/levenshtein_utils.py
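_apply_del_words above removes tokens without any Python loop: delete-marked positions get the largest possible sort key, an argsort slides the survivors to the left, and a gather plus padding fill produces the shortened sequences. A small standalone reproduction of that reordering trick (toy ids, with 0 standing in for the padding index):

import torch

def apply_deletions(tokens: torch.Tensor, delete: torch.Tensor, pad: int) -> torch.Tensor:
    """Drop masked tokens and left-align the survivors, as _apply_del_words does."""
    max_len = tokens.size(1)
    # Deleted positions receive the largest sort key, so they move to the right end.
    keys = torch.arange(max_len, device=tokens.device).expand_as(tokens).clone()
    keys = keys.masked_fill(delete, max_len)
    order = keys.sort(dim=1)[1]
    return tokens.masked_fill(delete, pad).gather(1, order)

tokens = torch.tensor([[5, 6, 7, 8, 9]])
delete = torch.tensor([[False, True, False, True, False]])
print(apply_deletions(tokens, delete, pad=0))  # tensor([[5, 7, 9, 0, 0]])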
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import NATransformerModel def _sequential_poisoning(s, V, beta=0.33, bos=2, eos=3, pad=1): # s: input batch # V: vocabulary size rand_words = torch.randint(low=4, high=V, size=s.size(), device=s.device) choices = torch.rand(size=s.size(), device=s.device) choices.masked_fill_((s == pad) | (s == bos) | (s == eos), 1) replace = choices < beta / 3 repeat = (choices >= beta / 3) & (choices < beta * 2 / 3) swap = (choices >= beta * 2 / 3) & (choices < beta) safe = choices >= beta for i in range(s.size(1) - 1): rand_word = rand_words[:, i] next_word = s[:, i + 1] self_word = s[:, i] replace_i = replace[:, i] swap_i = swap[:, i] & (next_word != 3) repeat_i = repeat[:, i] & (next_word != 3) safe_i = safe[:, i] | ((next_word == 3) & (~replace_i)) s[:, i] = ( self_word * (safe_i | repeat_i).long() + next_word * swap_i.long() + rand_word * replace_i.long() ) s[:, i + 1] = ( next_word * (safe_i | replace_i).long() + self_word * (swap_i | repeat_i).long() ) return s def gumbel_noise(input, TINY=1e-8): return ( input.new_zeros(*input.size()) .uniform_() .add_(TINY) .log_() .neg_() .add_(TINY) .log_() .neg_() ) @register_model("iterative_nonautoregressive_transformer") class IterNATransformerModel(NATransformerModel): @staticmethod def add_args(parser): NATransformerModel.add_args(parser) parser.add_argument( "--train-step", type=int, help="number of refinement iterations during training", ) parser.add_argument( "--dae-ratio", type=float, help="the probability of switching to the denoising auto-encoder loss", ) parser.add_argument( "--stochastic-approx", action="store_true", help="sampling from the decoder as the inputs for next iteration", ) @classmethod def build_model(cls, args, task): model = super().build_model(args, task) model.train_step = getattr(args, "train_step", 4) model.dae_ratio = getattr(args, "dae_ratio", 0.5) model.stochastic_approx = getattr(args, "stochastic_approx", False) return model def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): B, T = prev_output_tokens.size() # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # length prediction length_out = self.decoder.forward_length( normalize=False, encoder_out=encoder_out ) length_tgt = self.decoder.forward_length_prediction( length_out, encoder_out, tgt_tokens ) # decoding word_ins_outs, word_ins_tgts, word_ins_masks = [], [], [] for t in range(self.train_step): word_ins_out = self.decoder( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, step=t, ) word_ins_tgt = tgt_tokens word_ins_mask = word_ins_tgt.ne(self.pad) word_ins_outs.append(word_ins_out) word_ins_tgts.append(word_ins_tgt) word_ins_masks.append(word_ins_mask) if t < (self.train_step - 1): # prediction for next iteration if self.stochastic_approx: word_ins_prediction = ( word_ins_out + gumbel_noise(word_ins_out) ).max(-1)[1] else: word_ins_prediction = word_ins_out.max(-1)[1] prev_output_tokens = prev_output_tokens.masked_scatter( word_ins_mask, word_ins_prediction[word_ins_mask] ) if self.dae_ratio > 0: # we do not perform denoising for the first iteration corrputed = ( torch.rand(size=(B,), device=prev_output_tokens.device) < self.dae_ratio ) corrputed_tokens = _sequential_poisoning( 
tgt_tokens[corrputed], len(self.tgt_dict), 0.33, self.bos, self.eos, self.pad, ) prev_output_tokens[corrputed] = corrputed_tokens # concat everything word_ins_out = torch.cat(word_ins_outs, 0) word_ins_tgt = torch.cat(word_ins_tgts, 0) word_ins_mask = torch.cat(word_ins_masks, 0) return { "word_ins": { "out": word_ins_out, "tgt": word_ins_tgt, "mask": word_ins_mask, "ls": self.args.label_smoothing, "nll_loss": True, }, "length": { "out": length_out, "tgt": length_tgt, "factor": self.decoder.length_loss_factor, }, } @register_model_architecture( "iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer" ) def inat_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.sg_length_pred = getattr(args, "sg_length_pred", False) args.pred_length_offset = getattr(args, "pred_length_offset", False) args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) args.ngram_predictor = getattr(args, "ngram_predictor", 1) args.src_embedding_copy = getattr(args, "src_embedding_copy", False) args.train_step = getattr(args, "train_step", 4) args.dae_ratio = getattr(args, "dae_ratio", 0.5) args.stochastic_approx = getattr(args, "stochastic_approx", False) @register_model_architecture( "iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer_wmt_en_de", ) def iter_nat_wmt_en_de(args): inat_base_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/nat/iterative_nonautoregressive_transformer.py
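The `gumbel_noise` helper above returns `-log(-log(U))` noise so that `argmax(logits + noise)` is a sample from the softmax over `logits` (the Gumbel-max trick); this is what `--stochastic-approx` relies on to feed sampled rather than argmax tokens into the next refinement iteration. A minimal standalone check of that equivalence, assuming nothing beyond PyTorch (the toy logits and draw count are illustrative only):

import torch

torch.manual_seed(0)

def gumbel_noise(x, tiny=1e-8):
    # -log(-log(U)) with U ~ Uniform(0, 1): essentially the same noise as the helper above
    u = torch.rand_like(x).clamp_min(tiny)
    return -torch.log(-torch.log(u))

logits = torch.tensor([2.0, 0.5, 0.0, -1.0])        # toy logits
n_draws = 50000
samples = (logits + gumbel_noise(logits.expand(n_draws, -1))).argmax(-1)
empirical = torch.bincount(samples, minlength=4).float() / n_draws
print(empirical)                    # close to the softmax probabilities below
print(torch.softmax(logits, -1))    # argmax(logits + Gumbel noise) ~ Categorical(softmax(logits))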
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch from fairseq.models.transformer import ( TransformerDecoder, TransformerEncoder, TransformerModel, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params def ensemble_encoder(func): def wrapper(self, *args, **kwargs): if self.ensemble_models is None or len(self.ensemble_models) == 1: return func(self, *args, **kwargs) encoder_outs = [func(model, *args, **kwargs, return_all_hiddens=True) for model in self.ensemble_models] _encoder_out = encoder_outs[0].copy() def stack(key): outs = [e[key][0] for e in encoder_outs] return [torch.stack(outs, -1) if outs[0] is not None else None] _encoder_out["encoder_out"] = stack("encoder_out") _encoder_out["encoder_embedding"] = stack("encoder_embedding") num_layers = len(_encoder_out["encoder_states"]) if num_layers > 0: _encoder_out["encoder_states"] = [ torch.stack([e["encoder_states"][i] for e in encoder_outs], -1) for i in range(num_layers) ] return _encoder_out return wrapper def ensemble_decoder(func): def wrapper(self, normalize=False, encoder_out=None, *args, **kwargs): if self.ensemble_models is None or len(self.ensemble_models) == 1: return func( self, normalize=normalize, encoder_out=encoder_out, *args, **kwargs ) def _replace(encoder_out, new_val): new_encoder_out = encoder_out.copy() new_encoder_out["encoder_out"] = [new_val] return new_encoder_out action_outs = [ func( model, normalize=normalize, encoder_out=_replace( encoder_out, encoder_out["encoder_out"][0][:, :, :, i] ), *args, **kwargs ) for i, model in enumerate(self.ensemble_models) ] if not isinstance(action_outs[0], tuple): # return multiple values action_outs = [[a] for a in action_outs] else: action_outs = [list(a) for a in action_outs] ensembled_outs = [] for i in range(len(action_outs[0])): if i == 0 and normalize: ensembled_outs += [ torch.logsumexp( torch.stack([a[i] for a in action_outs], -1), dim=-1 ) - math.log(len(self.ensemble_models)) ] elif action_outs[0][i] is not None: ensembled_outs += [torch.stack([a[i] for a in action_outs], -1)] else: ensembled_outs += [None] if len(ensembled_outs) == 1: return ensembled_outs[0] return tuple(ensembled_outs) return wrapper class FairseqNATModel(TransformerModel): """ Abstract class for all nonautoregressive-based models """ def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) self.tgt_dict = decoder.dictionary self.bos = decoder.dictionary.bos() self.eos = decoder.dictionary.eos() self.pad = decoder.dictionary.pad() self.unk = decoder.dictionary.unk() self.ensemble_models = None @property def allow_length_beam(self): return False @property def allow_ensemble(self): return True def enable_ensemble(self, models): self.encoder.ensemble_models = [m.encoder for m in models] self.decoder.ensemble_models = [m.decoder for m in models] @staticmethod def add_args(parser): TransformerModel.add_args(parser) parser.add_argument( "--apply-bert-init", action="store_true", help="use custom param initialization for BERT", ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = FairseqNATDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder @classmethod def build_encoder(cls, args, src_dict, embed_tokens): encoder = FairseqNATEncoder(args, src_dict, embed_tokens) if getattr(args, "apply_bert_init", 
False): encoder.apply(init_bert_params) return encoder def forward_encoder(self, encoder_inputs): return self.encoder(*encoder_inputs) def forward_decoder(self, *args, **kwargs): return NotImplementedError def initialize_output_tokens(self, *args, **kwargs): return NotImplementedError def forward(self, *args, **kwargs): return NotImplementedError class FairseqNATEncoder(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) self.ensemble_models = None @ensemble_encoder def forward(self, *args, **kwargs): return super().forward(*args, **kwargs) class FairseqNATDecoder(TransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(args, dictionary, embed_tokens, no_encoder_attn) self.ensemble_models = None
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/nat/fairseq_nat_model.py
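In `ensemble_decoder` above, the members' normalized scores are combined with `torch.logsumexp(torch.stack(...), -1) - log(N)`, i.e. the log of the average probability computed stably in log space. A small sanity check of that identity on toy numbers (stacking along dim 0 here for brevity):

import math
import torch

torch.manual_seed(0)
log_p = torch.log_softmax(torch.randn(3, 5), dim=-1)     # 3 "models", 5 classes

# ensemble-style combination: log of the mean probability, done in log space
combined = torch.logsumexp(log_p, dim=0) - math.log(log_p.size(0))

reference = torch.log(log_p.exp().mean(dim=0))            # naive (less stable) version
print(torch.allclose(combined, reference, atol=1e-6))      # True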
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""

from .fairseq_nat_model import *
from .nonautoregressive_transformer import *
from .nat_crf_transformer import *
from .iterative_nonautoregressive_transformer import *
from .cmlm_transformer import *
from .levenshtein_transformer import *
from .insertion_transformer import *
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/nat/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder from fairseq.models.transformer import Embedding from fairseq.modules import TransformerDecoderLayer from fairseq.modules.transformer_sentence_encoder import init_bert_params from .levenshtein_utils import ( _apply_del_words, _apply_ins_masks, _apply_ins_words, _fill, _get_del_targets, _get_ins_targets, _skip, _skip_encoder_out, ) @register_model("levenshtein_transformer") class LevenshteinTransformerModel(FairseqNATModel): @property def allow_length_beam(self): return False @staticmethod def add_args(parser): FairseqNATModel.add_args(parser) parser.add_argument( "--early-exit", default="6,6,6", type=str, help="number of decoder layers before word_del, mask_ins, word_ins", ) parser.add_argument( "--no-share-discriminator", action="store_true", help="separate parameters for discriminator", ) parser.add_argument( "--no-share-maskpredictor", action="store_true", help="separate parameters for mask-predictor", ) parser.add_argument( "--share-discriminator-maskpredictor", action="store_true", help="share the parameters for both mask-predictor and discriminator", ) parser.add_argument( "--sampling-for-deletion", action="store_true", help="instead of argmax, use sampling to predict the tokens", ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = LevenshteinTransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): assert tgt_tokens is not None, "forward function only supports training." 
# encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # generate training labels for insertion masked_tgt_masks, masked_tgt_tokens, mask_ins_targets = _get_ins_targets( prev_output_tokens, tgt_tokens, self.pad, self.unk ) mask_ins_targets = mask_ins_targets.clamp(min=0, max=255) # for safe prediction mask_ins_masks = prev_output_tokens[:, 1:].ne(self.pad) mask_ins_out, _ = self.decoder.forward_mask_ins( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, ) word_ins_out, _ = self.decoder.forward_word_ins( normalize=False, prev_output_tokens=masked_tgt_tokens, encoder_out=encoder_out, ) # make online prediction if self.decoder.sampling_for_deletion: word_predictions = torch.multinomial( F.softmax(word_ins_out, -1).view(-1, word_ins_out.size(-1)), 1 ).view(word_ins_out.size(0), -1) else: word_predictions = F.log_softmax(word_ins_out, dim=-1).max(2)[1] word_predictions.masked_scatter_( ~masked_tgt_masks, tgt_tokens[~masked_tgt_masks] ) # generate training labels for deletion word_del_targets = _get_del_targets(word_predictions, tgt_tokens, self.pad) word_del_out, _ = self.decoder.forward_word_del( normalize=False, prev_output_tokens=word_predictions, encoder_out=encoder_out, ) word_del_masks = word_predictions.ne(self.pad) return { "mask_ins": { "out": mask_ins_out, "tgt": mask_ins_targets, "mask": mask_ins_masks, "ls": 0.01, }, "word_ins": { "out": word_ins_out, "tgt": tgt_tokens, "mask": masked_tgt_masks, "ls": self.args.label_smoothing, "nll_loss": True, }, "word_del": { "out": word_del_out, "tgt": word_del_targets, "mask": word_del_masks, }, } def forward_decoder( self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs ): output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores attn = decoder_out.attn history = decoder_out.history bsz = output_tokens.size(0) if max_ratio is None: max_lens = torch.zeros_like(output_tokens).fill_(255) else: if not encoder_out["encoder_padding_mask"]: max_src_len = encoder_out["encoder_out"].size(0) src_lens = encoder_out["encoder_out"].new(bsz).fill_(max_src_len) else: src_lens = (~encoder_out["encoder_padding_mask"][0]).sum(1) max_lens = (src_lens * max_ratio).clamp(min=10).long() # delete words # do not delete tokens if it is <s> </s> can_del_word = output_tokens.ne(self.pad).sum(1) > 2 if can_del_word.sum() != 0: # we cannot delete, skip word_del_score, word_del_attn = self.decoder.forward_word_del( normalize=True, prev_output_tokens=_skip(output_tokens, can_del_word), encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_del_word), ) word_del_pred = word_del_score.max(-1)[1].bool() _tokens, _scores, _attn = _apply_del_words( output_tokens[can_del_word], output_scores[can_del_word], word_del_attn, word_del_pred, self.pad, self.bos, self.eos, ) output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad) output_scores = _fill(output_scores, can_del_word, _scores, 0) attn = _fill(attn, can_del_word, _attn, 0.0) if history is not None: history.append(output_tokens.clone()) # insert placeholders can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens if can_ins_mask.sum() != 0: mask_ins_score, _ = self.decoder.forward_mask_ins( normalize=True, prev_output_tokens=_skip(output_tokens, can_ins_mask), encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_mask), ) if eos_penalty > 0.0: mask_ins_score[:, :, 0] = mask_ins_score[:, :, 0] - eos_penalty mask_ins_pred = mask_ins_score.max(-1)[1] mask_ins_pred = torch.min( mask_ins_pred, 
max_lens[can_ins_mask, None].expand_as(mask_ins_pred) ) _tokens, _scores = _apply_ins_masks( output_tokens[can_ins_mask], output_scores[can_ins_mask], mask_ins_pred, self.pad, self.unk, self.eos, ) output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_mask, _scores, 0) if history is not None: history.append(output_tokens.clone()) # insert words can_ins_word = output_tokens.eq(self.unk).sum(1) > 0 if can_ins_word.sum() != 0: word_ins_score, word_ins_attn = self.decoder.forward_word_ins( normalize=True, prev_output_tokens=_skip(output_tokens, can_ins_word), encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_word), ) word_ins_score, word_ins_pred = word_ins_score.max(-1) _tokens, _scores = _apply_ins_words( output_tokens[can_ins_word], output_scores[can_ins_word], word_ins_pred, word_ins_score, self.unk, ) output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_word, _scores, 0) attn = _fill(attn, can_ins_word, word_ins_attn, 0.0) if history is not None: history.append(output_tokens.clone()) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() output_tokens = output_tokens[:, :cut_off] output_scores = output_scores[:, :cut_off] attn = None if attn is None else attn[:, :cut_off, :] return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=attn, history=history, ) def initialize_output_tokens(self, encoder_out, src_tokens): initial_output_tokens = src_tokens.new_zeros(src_tokens.size(0), 2) initial_output_tokens[:, 0] = self.bos initial_output_tokens[:, 1] = self.eos initial_output_scores = initial_output_tokens.new_zeros( *initial_output_tokens.size() ).type_as(encoder_out["encoder_out"][0]) return DecoderOut( output_tokens=initial_output_tokens, output_scores=initial_output_scores, attn=None, step=0, max_step=0, history=None, ) class LevenshteinTransformerDecoder(FairseqNATDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.sampling_for_deletion = getattr(args, "sampling_for_deletion", False) self.embed_mask_ins = Embedding(256, self.output_embed_dim * 2, None) self.embed_word_del = Embedding(2, self.output_embed_dim, None) # del_word, ins_mask, ins_word self.early_exit = [int(i) for i in args.early_exit.split(",")] assert len(self.early_exit) == 3 # copy layers for mask-predict/deletion self.layers_msk = None if getattr(args, "no_share_maskpredictor", False): self.layers_msk = nn.ModuleList( [ TransformerDecoderLayer(args, no_encoder_attn) for _ in range(self.early_exit[1]) ] ) self.layers_del = None if getattr(args, "no_share_discriminator", False): self.layers_del = nn.ModuleList( [ TransformerDecoderLayer(args, no_encoder_attn) for _ in range(self.early_exit[0]) ] ) if getattr(args, "share_discriminator_maskpredictor", False): assert getattr( args, "no_share_discriminator", False ), "must set saperate discriminator" self.layers_msk = self.layers_del def extract_features( self, prev_output_tokens, encoder_out=None, early_exit=None, layers=None, **unused ): """ Similar to *forward* but only return features. 
Inputs: prev_output_tokens: Tensor(B, T) encoder_out: a dictionary of hidden states and masks Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs the LevenshteinTransformer decoder has full-attention to all generated tokens """ # embed positions positions = ( self.embed_positions(prev_output_tokens) if self.embed_positions is not None else None ) # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers decoder_padding_mask = prev_output_tokens.eq(self.padding_idx) layers = self.layers if layers is None else layers early_exit = len(layers) if early_exit is None else early_exit for _, layer in enumerate(layers[:early_exit]): x, attn, _ = layer( x, encoder_out["encoder_out"][0] if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0) else None, encoder_out["encoder_padding_mask"][0] if ( encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0 ) else None, self_attn_mask=None, self_attn_padding_mask=decoder_padding_mask, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": attn, "inner_states": inner_states} @ensemble_decoder def forward_mask_ins(self, normalize, encoder_out, prev_output_tokens, **unused): features, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[1], layers=self.layers_msk, **unused ) features_cat = torch.cat([features[:, :-1, :], features[:, 1:, :]], 2) decoder_out = F.linear(features_cat, self.embed_mask_ins.weight) if normalize: return F.log_softmax(decoder_out, -1), extra["attn"] return decoder_out, extra["attn"] @ensemble_decoder def forward_word_ins(self, normalize, encoder_out, prev_output_tokens, **unused): features, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[2], layers=self.layers, **unused ) decoder_out = self.output_layer(features) if normalize: return F.log_softmax(decoder_out, -1), extra["attn"] return decoder_out, extra["attn"] @ensemble_decoder def forward_word_del(self, normalize, encoder_out, prev_output_tokens, **unused): features, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[0], layers=self.layers_del, **unused ) decoder_out = F.linear(features, self.embed_word_del.weight) if normalize: return F.log_softmax(decoder_out, -1), extra["attn"] return decoder_out, extra["attn"] @register_model_architecture("levenshtein_transformer", "levenshtein_transformer") def levenshtein_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, 
"decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.sampling_for_deletion = getattr(args, "sampling_for_deletion", False) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.early_exit = getattr(args, "early_exit", "6,6,6") args.no_share_discriminator = getattr(args, "no_share_discriminator", False) args.no_share_maskpredictor = getattr(args, "no_share_maskpredictor", False) args.share_discriminator_maskpredictor = getattr( args, "share_discriminator_maskpredictor", False ) args.no_share_last_layer = getattr(args, "no_share_last_layer", False) @register_model_architecture( "levenshtein_transformer", "levenshtein_transformer_wmt_en_de" ) def levenshtein_transformer_wmt_en_de(args): levenshtein_base_architecture(args) # similar parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture( "levenshtein_transformer", "levenshtein_transformer_vaswani_wmt_en_de_big" ) def levenshtein_transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) levenshtein_base_architecture(args) # default parameters used in tensor2tensor implementation @register_model_architecture( "levenshtein_transformer", "levenshtein_transformer_wmt_en_de_big" ) def levenshtein_transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_dropout = getattr(args, "activation_dropout", 0.1) levenshtein_transformer_vaswani_wmt_en_de_big(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/nat/levenshtein_transformer.py
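The refinement loop in `forward_decoder` above repeatedly selects only the sentences that still need an operation (`can_del_word`, `can_ins_mask`, `can_ins_word`), runs the decoder on that sub-batch via `_skip`/`_skip_encoder_out`, and writes the possibly longer results back with `_fill`. A toy stand-in for that select-compute-write-back pattern; `toy_fill` below is a simplified, hypothetical version that only right-pads (the real `_fill` in levenshtein_utils.py handles more cases):

import torch

PAD = 1

def toy_fill(x, mask, y, pad=PAD):
    # write rows computed on the sub-batch x[mask] back into x,
    # growing the batch with padding when the refreshed rows got longer
    if y.size(1) > x.size(1):
        x = torch.cat([x, x.new_full((x.size(0), y.size(1) - x.size(1)), pad)], 1)
    elif y.size(1) < x.size(1):
        y = torch.cat([y, y.new_full((y.size(0), x.size(1) - y.size(1)), pad)], 1)
    x = x.clone()
    x[mask] = y
    return x

tokens = torch.tensor([[0, 5, 6, 2, PAD],
                       [0, 7, 2, PAD, PAD]])
can_ins = tokens.ne(PAD).sum(1) < 5                              # rows that may still grow
sub = tokens[can_ins]                                            # "_skip": keep active rows only
sub = torch.cat([sub, sub.new_full((sub.size(0), 2), PAD)], 1)   # pretend the decoder inserted here
tokens = toy_fill(tokens, can_ins, sub)                          # "_fill": scatter back, widen batch
print(tokens.size())                                             # torch.Size([2, 7])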
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq.models.nat import ( _apply_del_words, _apply_ins_masks, _apply_ins_words, _fill, _skip, _skip_encoder_out, ) class _EnsembleModelEncoder(object): def __init__(self, models): self.models = models def reorder_encoder_out(self, encoder_outs, new_order): encoder_outs = [ model.encoder.reorder_encoder_out(encoder_out, new_order) for model, encoder_out in zip(self.models, encoder_outs) ] return encoder_outs class BasicEnsembleModel(torch.nn.Module): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__() self.models = torch.nn.ModuleList(models) self.bos = self.models[0].decoder.dictionary.bos() self.eos = self.models[0].decoder.dictionary.eos() self.pad = self.models[0].decoder.dictionary.pad() self.unk = self.models[0].decoder.dictionary.unk() self.encoder = _EnsembleModelEncoder(self.models) def has_encoder(self): return hasattr(self.models[0], "encoder") def max_decoder_positions(self): return min(m.max_decoder_positions() for m in self.models) @torch.no_grad() def forward_encoder(self, encoder_input): if not self.has_encoder(): return None return [model.forward_encoder(encoder_input) for model in self.models] @torch.no_grad() def forward_decoder(self, *inputs): raise NotImplementedError def initialize_output_tokens(self, *inputs): raise NotImplementedError class EnsembleLevT(BasicEnsembleModel): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__(models) @torch.no_grad() def forward_decoder( self, decoder_out, encoder_outs, eos_penalty=0.0, max_ratio=None, **kwargs ): # LevT ensembling # A pipeline of three steps: deletion, placeholder, and word insertion. # We need to average scores in each step in a pipeline way because of dependence. 
# deletion output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores attn = decoder_out.attn bsz = output_tokens.size(0) if max_ratio is None: max_lens = output_tokens.new().fill_(255) else: if not encoder_outs[0]["encoder_padding_mask"]: src_lens = ( encoder_outs[0]["encoder_out"][0].new(bsz) .fill_(encoder_outs[0]["encoder_out"][0].size(1)) ) else: src_lens = (~encoder_outs[0]["encoder_padding_mask"][0]).sum(1) max_lens = (src_lens * max_ratio).clamp(min=10).long() # delete words # do not delete tokens if it is <s> </s> can_del_word = output_tokens.ne(self.pad).sum(1) > 2 if can_del_word.sum() != 0: # we cannot delete, skip output_tokens, output_scores, attn = self.forward_word_del( encoder_outs, output_tokens, output_scores, attn, can_del_word, ) # insert placeholders can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens if can_ins_mask.sum() != 0: output_tokens, output_scores = self.forward_mask_ins( encoder_outs, output_tokens, output_scores, can_ins_mask, eos_penalty, max_lens, ) # insert words can_ins_word = output_tokens.eq(self.unk).sum(1) > 0 if can_ins_word.sum() != 0: output_tokens, output_scores, attn = self.forward_word_ins( encoder_outs, output_tokens, output_scores, attn, can_ins_word, ) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() output_tokens = output_tokens[:, :cut_off] output_scores = output_scores[:, :cut_off] attn = None if attn is None else attn[:, :cut_off, :] return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=attn, history=None, ) def forward_word_del( self, encoder_outs, output_tokens, output_scores, attn, can_del_word ): word_del_score_avg = [] word_del_attn_avg = [] for model, encoder_out in zip(self.models, encoder_outs): word_del_out, word_del_attn = model.decoder.forward_word_del( _skip(output_tokens, can_del_word), _skip_encoder_out(model.encoder, encoder_out, can_del_word), ) word_del_score = F.log_softmax(word_del_out, 2) word_del_score_avg.append(word_del_score) word_del_attn_avg.append(word_del_attn) word_del_score_avg = torch.logsumexp( torch.stack(word_del_score_avg, dim=0), dim=0 ) - math.log(len(self.models)) word_del_pred = word_del_score_avg.max(-1)[1].bool() if word_del_attn_avg[0] is not None: word_del_attn_avg = torch.stack(word_del_attn_avg, dim=0) / len(self.models) else: word_del_attn_avg = None _tokens, _scores, _attn = _apply_del_words( output_tokens[can_del_word], output_scores[can_del_word], word_del_attn_avg, word_del_pred, self.pad, self.bos, self.eos, ) output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad) output_scores = _fill(output_scores, can_del_word, _scores, 0) attn = _fill(attn, can_del_word, _attn, 0.0) return output_tokens, output_scores, attn def forward_mask_ins( self, encoder_outs, output_tokens, output_scores, can_ins_mask, eos_penalty, max_lens, ): mask_ins_score_avg = [] for model, encoder_out in zip(self.models, encoder_outs): mask_ins_out, _ = model.decoder.forward_mask_ins( _skip(output_tokens, can_ins_mask), _skip_encoder_out(model.encoder, encoder_out, can_ins_mask), ) mask_ins_score = F.log_softmax(mask_ins_out, 2) if eos_penalty > 0.0: mask_ins_score[:, :, 0] -= eos_penalty mask_ins_score_avg.append(mask_ins_score) mask_ins_score_avg = torch.logsumexp( torch.stack(mask_ins_score_avg, dim=0), dim=0 ) - math.log(len(self.models)) mask_ins_pred = mask_ins_score_avg.max(-1)[1] mask_ins_pred = torch.min( mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred) ) _tokens, _scores 
= _apply_ins_masks( output_tokens[can_ins_mask], output_scores[can_ins_mask], mask_ins_pred, self.pad, self.unk, self.eos, ) output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_mask, _scores, 0) return output_tokens, output_scores def forward_word_ins( self, encoder_outs, output_tokens, output_scores, attn, can_ins_word ): word_ins_score_avg = [] word_ins_attn_avg = [] for model, encoder_out in zip(self.models, encoder_outs): word_ins_out, word_ins_attn = model.decoder.forward_word_ins( _skip(output_tokens, can_ins_word), _skip_encoder_out(model.encoder, encoder_out, can_ins_word), ) word_ins_score = F.log_softmax(word_ins_out, 2) word_ins_score_avg.append(word_ins_score) word_ins_attn_avg.append(word_ins_attn) word_ins_score_avg = torch.logsumexp( torch.stack(word_ins_score_avg, dim=0), dim=0 ) - math.log(len(self.models)) if word_ins_attn_avg[0] is not None: word_ins_attn_avg = torch.stack(word_ins_attn_avg, dim=0) / len(self.models) else: word_ins_attn_avg = None word_ins_score_max, word_ins_pred = word_ins_score_avg.max(-1) _tokens, _scores = _apply_ins_words( output_tokens[can_ins_word], output_scores[can_ins_word], word_ins_pred, word_ins_score_max, self.unk, ) output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_word, _scores, 0) attn = _fill(attn, can_ins_word, word_ins_attn, 0.0) return output_tokens, output_scores, attn def initialize_output_tokens(self, encoder_outs, src_tokens): # LevT doesn't do length prediction. return self.models[0].initialize_output_tokens(encoder_outs[0], src_tokens)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/nat/nonautoregressive_ensembles.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn.functional as F from fairseq import utils from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder from fairseq.models.transformer import Embedding from fairseq.modules.transformer_sentence_encoder import init_bert_params def _mean_pooling(enc_feats, src_masks): # enc_feats: T x B x C # src_masks: B x T or None if src_masks is None: enc_feats = enc_feats.mean(0) else: src_masks = (~src_masks).transpose(0, 1).type_as(enc_feats) enc_feats = ( (enc_feats / src_masks.sum(0)[None, :, None]) * src_masks[:, :, None] ).sum(0) return enc_feats def _argmax(x, dim): return (x == x.max(dim, keepdim=True)[0]).type_as(x) def _uniform_assignment(src_lens, trg_lens): max_trg_len = trg_lens.max() steps = (src_lens.float() - 1) / (trg_lens.float() - 1) # step-size # max_trg_len index_t = utils.new_arange(trg_lens, max_trg_len).float() index_t = steps[:, None] * index_t[None, :] # batch_size X max_trg_len index_t = torch.round(index_t).long().detach() return index_t @register_model("nonautoregressive_transformer") class NATransformerModel(FairseqNATModel): @property def allow_length_beam(self): return True @staticmethod def add_args(parser): FairseqNATModel.add_args(parser) # length prediction parser.add_argument( "--src-embedding-copy", action="store_true", help="copy encoder word embeddings as the initial input of the decoder", ) parser.add_argument( "--pred-length-offset", action="store_true", help="predicting the length difference between the target and source sentences", ) parser.add_argument( "--sg-length-pred", action="store_true", help="stop the gradients back-propagated from the length predictor", ) parser.add_argument( "--length-loss-factor", type=float, help="weights on the length prediction loss", ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = NATransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # length prediction length_out = self.decoder.forward_length( normalize=False, encoder_out=encoder_out ) length_tgt = self.decoder.forward_length_prediction( length_out, encoder_out, tgt_tokens ) # decoding word_ins_out = self.decoder( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, ) return { "word_ins": { "out": word_ins_out, "tgt": tgt_tokens, "mask": tgt_tokens.ne(self.pad), "ls": self.args.label_smoothing, "nll_loss": True, }, "length": { "out": length_out, "tgt": length_tgt, "factor": self.decoder.length_loss_factor, }, } def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs): step = decoder_out.step output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores history = decoder_out.history # execute the decoder output_masks = output_tokens.ne(self.pad) _scores, _tokens = self.decoder( normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out, step=step, ).max(-1) output_tokens.masked_scatter_(output_masks, _tokens[output_masks]) 
output_scores.masked_scatter_(output_masks, _scores[output_masks]) if history is not None: history.append(output_tokens.clone()) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history, ) def initialize_output_tokens(self, encoder_out, src_tokens): # length prediction length_tgt = self.decoder.forward_length_prediction( self.decoder.forward_length(normalize=True, encoder_out=encoder_out), encoder_out=encoder_out, ) max_length = length_tgt.clamp_(min=2).max() idx_length = utils.new_arange(src_tokens, max_length) initial_output_tokens = src_tokens.new_zeros( src_tokens.size(0), max_length ).fill_(self.pad) initial_output_tokens.masked_fill_( idx_length[None, :] < length_tgt[:, None], self.unk ) initial_output_tokens[:, 0] = self.bos initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos) initial_output_scores = initial_output_tokens.new_zeros( *initial_output_tokens.size() ).type_as(encoder_out["encoder_out"][0]) return DecoderOut( output_tokens=initial_output_tokens, output_scores=initial_output_scores, attn=None, step=0, max_step=0, history=None, ) def regenerate_length_beam(self, decoder_out, beam_size): output_tokens = decoder_out.output_tokens length_tgt = output_tokens.ne(self.pad).sum(1) length_tgt = ( length_tgt[:, None] + utils.new_arange(length_tgt, 1, beam_size) - beam_size // 2 ) length_tgt = length_tgt.view(-1).clamp_(min=2) max_length = length_tgt.max() idx_length = utils.new_arange(length_tgt, max_length) initial_output_tokens = output_tokens.new_zeros( length_tgt.size(0), max_length ).fill_(self.pad) initial_output_tokens.masked_fill_( idx_length[None, :] < length_tgt[:, None], self.unk ) initial_output_tokens[:, 0] = self.bos initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos) initial_output_scores = initial_output_tokens.new_zeros( *initial_output_tokens.size() ).type_as(decoder_out.output_scores) return decoder_out._replace( output_tokens=initial_output_tokens, output_scores=initial_output_scores ) class NATransformerDecoder(FairseqNATDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.encoder_embed_dim = args.encoder_embed_dim self.sg_length_pred = getattr(args, "sg_length_pred", False) self.pred_length_offset = getattr(args, "pred_length_offset", False) self.length_loss_factor = getattr(args, "length_loss_factor", 0.1) self.src_embedding_copy = getattr(args, "src_embedding_copy", False) self.embed_length = Embedding(256, self.encoder_embed_dim, None) @ensemble_decoder def forward(self, normalize, encoder_out, prev_output_tokens, step=0, **unused): features, _ = self.extract_features( prev_output_tokens, encoder_out=encoder_out, embedding_copy=(step == 0) & self.src_embedding_copy, ) decoder_out = self.output_layer(features) return F.log_softmax(decoder_out, -1) if normalize else decoder_out @ensemble_decoder def forward_length(self, normalize, encoder_out): enc_feats = encoder_out["encoder_out"][0] # T x B x C if len(encoder_out["encoder_padding_mask"]) > 0: src_masks = encoder_out["encoder_padding_mask"][0] # B x T else: src_masks = None enc_feats = _mean_pooling(enc_feats, src_masks) if self.sg_length_pred: enc_feats = enc_feats.detach() length_out = F.linear(enc_feats, self.embed_length.weight) return F.log_softmax(length_out, -1) if normalize else 
length_out def extract_features( self, prev_output_tokens, encoder_out=None, early_exit=None, embedding_copy=False, **unused ): """ Similar to *forward* but only return features. Inputs: prev_output_tokens: Tensor(B, T) encoder_out: a dictionary of hidden states and masks Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs the LevenshteinTransformer decoder has full-attention to all generated tokens """ # embedding if embedding_copy: src_embd = encoder_out["encoder_embedding"][0] if len(encoder_out["encoder_padding_mask"]) > 0: src_mask = encoder_out["encoder_padding_mask"][0] else: src_mask = None src_mask = ( ~src_mask if src_mask is not None else prev_output_tokens.new_ones(*src_embd.size()[:2]).bool() ) x, decoder_padding_mask = self.forward_embedding( prev_output_tokens, self.forward_copying_source( src_embd, src_mask, prev_output_tokens.ne(self.padding_idx) ), ) else: x, decoder_padding_mask = self.forward_embedding(prev_output_tokens) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for i, layer in enumerate(self.layers): # early exit from the decoder. if (early_exit is not None) and (i >= early_exit): break x, attn, _ = layer( x, encoder_out["encoder_out"][0] if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0) else None, encoder_out["encoder_padding_mask"][0] if ( encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0 ) else None, self_attn_mask=None, self_attn_padding_mask=decoder_padding_mask, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": attn, "inner_states": inner_states} def forward_embedding(self, prev_output_tokens, states=None): # embed positions positions = ( self.embed_positions(prev_output_tokens) if self.embed_positions is not None else None ) # embed tokens and positions if states is None: x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) else: x = states if positions is not None: x += positions x = self.dropout_module(x) decoder_padding_mask = prev_output_tokens.eq(self.padding_idx) return x, decoder_padding_mask def forward_copying_source(self, src_embeds, src_masks, tgt_masks): length_sources = src_masks.sum(1) length_targets = tgt_masks.sum(1) mapped_inputs = _uniform_assignment(length_sources, length_targets).masked_fill( ~tgt_masks, 0 ) copied_embedding = torch.gather( src_embeds, 1, mapped_inputs.unsqueeze(-1).expand( *mapped_inputs.size(), src_embeds.size(-1) ), ) return copied_embedding def forward_length_prediction(self, length_out, encoder_out, tgt_tokens=None): enc_feats = encoder_out["encoder_out"][0] # T x B x C if len(encoder_out["encoder_padding_mask"]) > 0: src_masks = encoder_out["encoder_padding_mask"][0] # B x T else: src_masks = None if self.pred_length_offset: if src_masks is None: src_lengs = enc_feats.new_ones(enc_feats.size(1)).fill_( enc_feats.size(0) ) else: src_lengs = (~src_masks).transpose(0, 1).type_as(enc_feats).sum(0) src_lengs = src_lengs.long() if tgt_tokens is not None: # obtain the length target tgt_lengs = tgt_tokens.ne(self.padding_idx).sum(1).long() if self.pred_length_offset: length_tgt = tgt_lengs - src_lengs + 128 else: length_tgt = tgt_lengs length_tgt = length_tgt.clamp(min=0, max=255) else: # predict the length target (greedy for now) # TODO: 
implementing length-beam pred_lengs = length_out.max(-1)[1] if self.pred_length_offset: length_tgt = pred_lengs - 128 + src_lengs else: length_tgt = pred_lengs return length_tgt @register_model_architecture( "nonautoregressive_transformer", "nonautoregressive_transformer" ) def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.sg_length_pred = getattr(args, "sg_length_pred", False) args.pred_length_offset = getattr(args, "pred_length_offset", False) args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) args.src_embedding_copy = getattr(args, "src_embedding_copy", False) @register_model_architecture( "nonautoregressive_transformer", "nonautoregressive_transformer_wmt_en_de" ) def nonautoregressive_transformer_wmt_en_de(args): base_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/nat/nonautoregressive_transformer.py
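When `--src-embedding-copy` is on, `forward_copying_source` above initializes the decoder input by copying source embeddings, and `_uniform_assignment` decides which source position each target position copies by stretching the source length evenly over the target length. A standalone rewrite of that mapping, using `torch.arange` in place of `utils.new_arange`:

import torch

def uniform_assignment(src_lens, tgt_lens):
    # map every target position to a source position by uniform stretching,
    # mirroring _uniform_assignment above
    max_tgt_len = int(tgt_lens.max())
    steps = (src_lens.float() - 1) / (tgt_lens.float() - 1)       # per-sentence step size
    index_t = torch.arange(max_tgt_len, dtype=torch.float)
    index_t = steps[:, None] * index_t[None, :]                   # batch x max_tgt_len
    return torch.round(index_t).long()

print(uniform_assignment(torch.tensor([5, 4]), torch.tensor([3, 6])))
# tensor([[ 0,  2,  4,  6,  8, 10],
#         [ 0,  1,  1,  2,  2,  3]])
# positions past each sentence's own target length are masked out by the caller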
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ This file implements: Ghazvininejad, Marjan, et al. "Constant-time machine translation with conditional masked language models." arXiv preprint arXiv:1904.09324 (2019). """ from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import NATransformerModel from fairseq.utils import new_arange def _skeptical_unmasking(output_scores, output_masks, p): sorted_index = output_scores.sort(-1)[1] boundary_len = ( (output_masks.sum(1, keepdim=True).type_as(output_scores) - 2) * p ).long() skeptical_mask = new_arange(output_masks) < boundary_len return skeptical_mask.scatter(1, sorted_index, skeptical_mask) @register_model("cmlm_transformer") class CMLMNATransformerModel(NATransformerModel): @staticmethod def add_args(parser): NATransformerModel.add_args(parser) def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): assert not self.decoder.src_embedding_copy, "do not support embedding copy." # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # length prediction length_out = self.decoder.forward_length( normalize=False, encoder_out=encoder_out ) length_tgt = self.decoder.forward_length_prediction( length_out, encoder_out, tgt_tokens ) # decoding word_ins_out = self.decoder( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, ) word_ins_mask = prev_output_tokens.eq(self.unk) return { "word_ins": { "out": word_ins_out, "tgt": tgt_tokens, "mask": word_ins_mask, "ls": self.args.label_smoothing, "nll_loss": True, }, "length": { "out": length_out, "tgt": length_tgt, "factor": self.decoder.length_loss_factor, }, } def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs): step = decoder_out.step max_step = decoder_out.max_step output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores history = decoder_out.history # execute the decoder output_masks = output_tokens.eq(self.unk) _scores, _tokens = self.decoder( normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out, ).max(-1) output_tokens.masked_scatter_(output_masks, _tokens[output_masks]) output_scores.masked_scatter_(output_masks, _scores[output_masks]) if history is not None: history.append(output_tokens.clone()) # skeptical decoding (depend on the maximum decoding steps.) 
if (step + 1) < max_step: skeptical_mask = _skeptical_unmasking( output_scores, output_tokens.ne(self.pad), 1 - (step + 1) / max_step ) output_tokens.masked_fill_(skeptical_mask, self.unk) output_scores.masked_fill_(skeptical_mask, 0.0) if history is not None: history.append(output_tokens.clone()) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history, ) @register_model_architecture("cmlm_transformer", "cmlm_transformer") def cmlm_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", True) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.sg_length_pred = getattr(args, "sg_length_pred", False) args.pred_length_offset = getattr(args, "pred_length_offset", False) args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) args.ngram_predictor = getattr(args, "ngram_predictor", 1) args.src_embedding_copy = getattr(args, "src_embedding_copy", False) @register_model_architecture("cmlm_transformer", "cmlm_transformer_wmt_en_de") def cmlm_wmt_en_de(args): cmlm_base_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/nat/cmlm_transformer.py
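The mask-predict loop above re-masks a fraction `1 - (step + 1) / max_step` of the lowest-scoring tokens after each iteration via `_skeptical_unmasking`. A small concrete run of that selection rule, with `new_arange` replaced by a plain `torch.arange` (the scores are toy values):

import torch

p = 0.5                                                           # fraction to re-mask this step
scores = torch.tensor([[0.90, 0.20, 0.80, 0.10, 0.70, 0.95]])     # toy per-token confidences
output_masks = torch.ones_like(scores, dtype=torch.bool)          # all positions are non-pad here

sorted_index = scores.sort(-1)[1]
boundary_len = ((output_masks.sum(1, keepdim=True).type_as(scores) - 2) * p).long()
skeptical = torch.arange(scores.size(1)).expand_as(scores).lt(boundary_len)
skeptical = skeptical.scatter(1, sorted_index, skeptical)

print(skeptical)   # tensor([[False,  True, False,  True, False, False]])
# the two least-confident positions (scores 0.20 and 0.10) are turned back into <unk>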
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel, base_architecture
from fairseq.modules import DynamicCRF


@register_model("nacrf_transformer")
class NACRFTransformerModel(NATransformerModel):
    def __init__(self, args, encoder, decoder):
        super().__init__(args, encoder, decoder)
        self.crf_layer = DynamicCRF(
            num_embedding=len(self.tgt_dict),
            low_rank=args.crf_lowrank_approx,
            beam_size=args.crf_beam_approx,
        )

    @property
    def allow_ensemble(self):
        return False

    @staticmethod
    def add_args(parser):
        NATransformerModel.add_args(parser)
        parser.add_argument(
            "--crf-lowrank-approx",
            type=int,
            help="the dimension of the low-rank approximation of the transition matrix",
        )
        parser.add_argument(
            "--crf-beam-approx",
            type=int,
            help="the beam size for approximating the normalizing factor",
        )
        parser.add_argument(
            "--word-ins-loss-factor",
            type=float,
            help="weight on the NAT loss used for co-training with the CRF loss",
        )

    def forward(
        self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
    ):
        # encoding
        encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)

        # length prediction
        length_out = self.decoder.forward_length(
            normalize=False, encoder_out=encoder_out
        )
        length_tgt = self.decoder.forward_length_prediction(
            length_out, encoder_out, tgt_tokens
        )

        # decoding
        word_ins_out = self.decoder(
            normalize=False,
            prev_output_tokens=prev_output_tokens,
            encoder_out=encoder_out,
        )
        word_ins_tgt, word_ins_mask = tgt_tokens, tgt_tokens.ne(self.pad)

        # compute the log-likelihood of CRF
        crf_nll = -self.crf_layer(word_ins_out, word_ins_tgt, word_ins_mask)
        crf_nll = (crf_nll / word_ins_mask.type_as(crf_nll).sum(-1)).mean()

        return {
            "word_ins": {
                "out": word_ins_out,
                "tgt": word_ins_tgt,
                "mask": word_ins_mask,
                "ls": self.args.label_smoothing,
                "nll_loss": True,
                "factor": self.args.word_ins_loss_factor,
            },
            "word_crf": {"loss": crf_nll},
            "length": {
                "out": length_out,
                "tgt": length_tgt,
                "factor": self.decoder.length_loss_factor,
            },
        }

    def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
        output_tokens = decoder_out.output_tokens
        output_scores = decoder_out.output_scores
        history = decoder_out.history

        # execute the decoder and get emission scores
        output_masks = output_tokens.ne(self.pad)
        word_ins_out = self.decoder(
            normalize=False, prev_output_tokens=output_tokens, encoder_out=encoder_out
        )

        # run viterbi decoding through CRF
        _scores, _tokens = self.crf_layer.forward_decoder(word_ins_out, output_masks)
        output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
        output_scores.masked_scatter_(output_masks, _scores[output_masks])
        if history is not None:
            history.append(output_tokens.clone())

        return decoder_out._replace(
            output_tokens=output_tokens,
            output_scores=output_scores,
            attn=None,
            history=history,
        )


@register_model_architecture("nacrf_transformer", "nacrf_transformer")
def nacrf_base_architecture(args):
    args.crf_lowrank_approx = getattr(args, "crf_lowrank_approx", 32)
    args.crf_beam_approx = getattr(args, "crf_beam_approx", 64)
    args.word_ins_loss_factor = getattr(args, "word_ins_loss_factor", 0.5)
    args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
    args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
    base_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/nat/nat_crf_transformer.py
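nat_crf_transformer.py puts a `DynamicCRF` on top of the NAT decoder; as I read it, `--crf-lowrank-approx` sets the rank used to factor the (vocab x vocab) transition matrix and `--crf-beam-approx` bounds the candidate set when approximating the normalizing factor. A tiny sketch of the low-rank idea only, with hypothetical sizes and no dependence on the DynamicCRF API:

import torch

V, rank = 8, 3                       # toy vocabulary size and low-rank dimension
E1 = torch.randn(V, rank)            # "from-token" factor
E2 = torch.randn(V, rank)            # "to-token" factor

transition = E1 @ E2.t()             # V x V transition scores from only 2 * V * rank parameters
print(transition.shape)              # torch.Size([8, 8])
# with a 32k vocabulary and rank 32 this stores ~2M numbers instead of ~1e9 for the full matrix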
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import torch.nn.functional as F from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import ( FairseqNATModel, LevenshteinTransformerDecoder, LevenshteinTransformerModel, ensemble_decoder, ) from fairseq.models.transformer import Linear from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange class NegativeDistanceScore(object): def __init__(self): # pre-compute some values self.scores = {} self.scores[0.5] = self.compute_score_full(50, 0.5) self.scores[1.0] = self.compute_score_full(50, 1.0) self.scores[2.0] = self.compute_score_full(50, 2.0) def __call__(self, i, L, tau): if (tau is None) or (tau > 1000): return 1 / L if tau in self.scores: if L < self.scores[tau].shape[0]: return self.scores[tau][L - 1, i] return self.compute_score(L, tau)[i] def compute_score(self, L, tau): s = np.array([-abs(L / 2 - i) / tau for i in range(L)]) s = np.exp(s - s.max()) return s / s.sum() def compute_score_full(self, L, tau): s = -abs(np.arange(0, L - 1)[:, None] / 2 - np.arange(L)[None, :]) / tau s = np.tril(s, 0) + np.triu(s - float("inf"), 1) s = np.exp(s - s.max(1, keepdims=True)) return s / s.sum(1, keepdims=True) neg_scorer = NegativeDistanceScore() def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None): try: from fairseq import libnat except ImportError as e: import sys sys.stderr.write("ERROR: missing libnat. run `pip install --editable .`\n") raise e B = in_tokens.size(0) T = in_tokens.size(1) V = vocab_size with torch.cuda.device_of(in_tokens): in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) insert_labels = [a[:-1] for a in full_labels] # numericalize1 insert_label_tensors = in_tokens.new_zeros(B * (T - 1) * V).float() insert_index, insert_labels = zip( *[ (w + (j + i * (T - 1)) * V, neg_scorer(k, len(label), tau)) for i, labels in enumerate(insert_labels) for j, label in enumerate(labels[1:-1]) for k, w in enumerate(label) ] ) # HACK 1:-1 insert_index, insert_labels = [ torch.tensor(list(a), device=in_tokens.device) for a in [insert_index, insert_labels] ] insert_label_tensors.scatter_(0, insert_index.long(), insert_labels) insert_label_tensors = insert_label_tensors.view(B, T - 1, V) return insert_label_tensors def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, padding_idx): padding_masks = in_tokens[:, 1:].eq(padding_idx) word_ins_scores.masked_fill_(padding_masks, 0.0) word_ins_pred.masked_fill_(padding_masks, padding_idx) in_coords = new_arange(in_tokens).type_as(in_scores) # shift all padding predictions to infinite out_coords = (in_coords[:, 1:] - 0.5).masked_fill( word_ins_pred.eq(padding_idx), float("inf") ) out_coords = torch.cat([in_coords, out_coords], 1).sort(-1)[1] out_tokens = torch.cat([in_tokens, word_ins_pred], 1).gather(1, out_coords) out_scores = torch.cat([in_scores, word_ins_scores], 1).gather(1, out_coords) return out_tokens, out_scores @register_model("insertion_transformer") class InsertionTransformerModel(LevenshteinTransformerModel): def __init__(self, args, encoder, decoder): 
super().__init__(args, encoder, decoder) @staticmethod def add_args(parser): FairseqNATModel.add_args(parser) parser.add_argument("--label-tau", default=None, type=float) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = InsertionTransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): assert tgt_tokens is not None, "forward function only supports training." # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # generate training labels for insertion word_ins_out = self.decoder.forward_word_ins( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, ) word_ins_tgt = _get_ins_targets( prev_output_tokens, tgt_tokens, self.pad, self.unk, len(self.tgt_dict), tau=self.decoder.label_tau, ).type_as(word_ins_out) word_ins_masks = prev_output_tokens[:, 1:].ne(self.pad) return { "word_ins": { "out": word_ins_out, "tgt": word_ins_tgt, "mask": word_ins_masks, "ls": self.args.label_smoothing, "nll_loss": True, } } def forward_decoder( self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs ): output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores history = decoder_out.history # TODO: decoding for InsertionTransformer word_ins_score = self.decoder.forward_word_ins( normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out ) if eos_penalty > 0.0: word_ins_score[:, :, self.pad] -= eos_penalty word_ins_score, word_ins_pred = word_ins_score.max(-1) output_tokens, output_scores = _apply_ins_words( output_tokens, output_scores, word_ins_pred, word_ins_score, self.pad ) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() output_tokens = output_tokens[:, :cut_off] output_scores = output_scores[:, :cut_off] if history is not None: history.append(output_tokens.clone()) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history, ) class InsertionTransformerDecoder(LevenshteinTransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): # use the TransformerDecoder's __init__ super(LevenshteinTransformerDecoder, self).__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.pool_out = Linear(self.output_embed_dim * 2, self.output_embed_dim) self.label_tau = getattr(args, "label_tau", None) @ensemble_decoder def forward_word_ins(self, normalize, encoder_out, prev_output_tokens): features = self.extract_features(prev_output_tokens, encoder_out=encoder_out)[0] features = self.pool_out( torch.cat([features[:, :-1, :], features[:, 1:, :]], 2) ) decoder_out = self.output_layer(features) return F.log_softmax(decoder_out, -1) if normalize else decoder_out def forward_mask_ins(self, *args, **kwargs): raise NotImplementedError def forward_word_del(self, *args, **kwargs): raise NotImplementedError @register_model_architecture("insertion_transformer", "insertion_transformer") def insertion_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, 
"encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # special for insertion transformer args.label_tau = getattr(args, "label_tau", None)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/nat/insertion_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .hub_interface import *  # noqa
from .model import *  # noqa
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/bart/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension """ from typing import Optional import logging import torch import torch.nn as nn from fairseq import utils from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import TransformerModel from fairseq.modules.transformer_sentence_encoder import init_bert_params from .hub_interface import BARTHubInterface logger = logging.getLogger(__name__) @register_model("bart") class BARTModel(TransformerModel): __jit_unused_properties__ = ["supported_targets"] @classmethod def hub_models(cls): return { "bart.base": "http://dl.fbaipublicfiles.com/fairseq/models/bart.base.tar.gz", "bart.large": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz", "bart.large.mnli": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz", "bart.large.cnn": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.cnn.tar.gz", "bart.large.xsum": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.xsum.tar.gz", } def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) # We follow BERT's random weight initialization self.apply(init_bert_params) self.classification_heads = nn.ModuleDict() if hasattr(self.encoder, "dictionary"): self.eos: int = self.encoder.dictionary.eos() @staticmethod def add_args(parser): super(BARTModel, BARTModel).add_args(parser) parser.add_argument( "--pooler-dropout", type=float, metavar="D", help="dropout probability in the masked_lm pooler layers", ) parser.add_argument( "--pooler-activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use for pooler layer", ) parser.add_argument( "--spectral-norm-classification-head", action="store_true", help="Apply spectral normalization on the classification head", ) @property def supported_targets(self): return {"self"} def forward( self, src_tokens, src_lengths, prev_output_tokens, features_only: bool = False, classification_head_name: Optional[str] = None, token_embeddings: Optional[torch.Tensor] = None, return_all_hiddens: bool = True, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): if classification_head_name is not None: features_only = True encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, token_embeddings=token_embeddings, return_all_hiddens=return_all_hiddens ) x, extra = self.decoder( prev_output_tokens, encoder_out=encoder_out, features_only=features_only, alignment_layer=alignment_layer, alignment_heads=alignment_heads, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens, ) eos: int = self.eos if classification_head_name is not None: sentence_representation = x[ src_tokens.eq(eos), : ].view(x.size(0), -1, x.size(-1))[:, -1, :] for k, head in self.classification_heads.items(): # for torch script only supports iteration if k == classification_head_name: x = head(sentence_representation) break return x, extra @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", bpe="gpt2", sample_break_mode="eos", **kwargs, ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, 
sample_break_mode=sample_break_mode, **kwargs, ) return BARTHubInterface(x["args"], x["task"], x["models"][0]) def register_classification_head( self, name, num_classes=None, inner_dim=None, **kwargs ): """Register a classification head.""" logger.info("Registering classification head: {0}".format(name)) if name in self.classification_heads: prev_num_classes = self.classification_heads[name].out_proj.out_features prev_inner_dim = self.classification_heads[name].dense.out_features if num_classes != prev_num_classes or inner_dim != prev_inner_dim: logger.warning( 're-registering head "{}" with num_classes {} (prev: {}) ' "and inner_dim {} (prev: {})".format( name, num_classes, prev_num_classes, inner_dim, prev_inner_dim ) ) self.classification_heads[name] = BARTClassificationHead( input_dim=self.args.encoder_embed_dim, inner_dim=inner_dim or self.args.encoder_embed_dim, num_classes=num_classes, activation_fn=self.args.pooler_activation_fn, pooler_dropout=self.args.pooler_dropout, do_spectral_norm=getattr( self.args, "spectral_norm_classification_head", False ), ) def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) prefix = name + "." if name != "" else "" current_head_names = ( [] if not hasattr(self, "classification_heads") else self.classification_heads.keys() ) # Handle new classification heads present in the state dict. keys_to_delete = [] for k in state_dict.keys(): if not k.startswith(prefix + "classification_heads."): continue head_name = k[len(prefix + "classification_heads.") :].split(".")[0] num_classes = state_dict[ prefix + "classification_heads." + head_name + ".out_proj.weight" ].size(0) inner_dim = state_dict[ prefix + "classification_heads." + head_name + ".dense.weight" ].size(0) if getattr(self.args, "load_checkpoint_heads", False): if head_name not in current_head_names: self.register_classification_head(head_name, num_classes, inner_dim) else: if head_name not in current_head_names: logger.warning( "deleting classification head ({}) from checkpoint " "not present in current model: {}".format(head_name, k) ) keys_to_delete.append(k) elif ( num_classes != self.classification_heads[head_name].out_proj.out_features or inner_dim != self.classification_heads[head_name].dense.out_features ): logger.warning( "deleting classification head ({}) from checkpoint " "with different dimensions than current model: {}".format( head_name, k ) ) keys_to_delete.append(k) for k in keys_to_delete: del state_dict[k] def truncate_emb(key): if key in state_dict: state_dict[key] = state_dict[key][:-1, :] # When finetuning on translation task, remove last row of # embedding matrix that corresponds to mask_idx token. loaded_dict_size = state_dict["encoder.embed_tokens.weight"].size(0) if ( loaded_dict_size == len(self.encoder.dictionary) + 1 and "<mask>" not in self.encoder.dictionary ): truncate_emb("encoder.embed_tokens.weight") truncate_emb("decoder.embed_tokens.weight") truncate_emb("encoder.output_projection.weight") truncate_emb("decoder.output_projection.weight") # When continued pretraining on new set of languages for mbart, # add extra lang embeddings at the end of embed_tokens. # Note: newly added languages are assumed to have been added at the end. if self.args.task == "multilingual_denoising" and loaded_dict_size < len( self.encoder.dictionary ): logger.info( "Adding extra language embeddings not found in pretrained model for " "continued pretraining of MBART on new set of languages." 
) loaded_mask_token_embedding = state_dict["encoder.embed_tokens.weight"][ -1, : ] num_langids_to_add = len(self.encoder.dictionary) - loaded_dict_size embed_dim = state_dict["encoder.embed_tokens.weight"].size(1) new_lang_embed_to_add = torch.zeros(num_langids_to_add, embed_dim) nn.init.normal_(new_lang_embed_to_add, mean=0, std=embed_dim ** -0.5) new_lang_embed_to_add = new_lang_embed_to_add.to( dtype=state_dict["encoder.embed_tokens.weight"].dtype, ) state_dict["encoder.embed_tokens.weight"] = torch.cat( [ state_dict["encoder.embed_tokens.weight"][ : loaded_dict_size - 1, : ], new_lang_embed_to_add, loaded_mask_token_embedding.unsqueeze(0), ] ) state_dict["decoder.embed_tokens.weight"] = torch.cat( [ state_dict["decoder.embed_tokens.weight"][ : loaded_dict_size - 1, : ], new_lang_embed_to_add, loaded_mask_token_embedding.unsqueeze(0), ] ) # Copy any newly-added classification heads into the state dict # with their current weights. if hasattr(self, "classification_heads"): cur_state = self.classification_heads.state_dict() for k, v in cur_state.items(): if prefix + "classification_heads." + k not in state_dict: logger.info("Overwriting " + prefix + "classification_heads." + k) state_dict[prefix + "classification_heads." + k] = v class BARTClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout, do_spectral_norm=False, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.activation_fn = utils.get_activation_fn(activation_fn) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) if do_spectral_norm: self.out_proj = torch.nn.utils.spectral_norm(self.out_proj) def forward(self, features, **kwargs): x = features x = self.dropout(x) x = self.dense(x) x = self.activation_fn(x) x = self.dropout(x) x = self.out_proj(x) return x @register_model_architecture("bart", "bart_large") def bart_large_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 1024) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 12) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.relu_dropout = getattr(args, "relu_dropout", 0.0) args.dropout = getattr(args, "dropout", 0.1) args.max_target_positions = getattr(args, "max_target_positions", 1024) args.max_source_positions = getattr(args, "max_source_positions", 1024) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( 
args, "share_decoder_input_output_embed", True ) args.share_all_embeddings = getattr(args, "share_all_embeddings", True) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", True) args.layernorm_embedding = getattr(args, "layernorm_embedding", True) args.activation_fn = getattr(args, "activation_fn", "gelu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.pooler_dropout = getattr(args, "pooler_dropout", 0.0) @register_model_architecture("bart", "bart_base") def bart_base_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 768) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12) bart_large_architecture(args) @register_model_architecture("bart", "mbart_large") def mbart_large_architecture(args): args.no_scale_embedding = getattr(args, "no_scale_embedding", False) bart_large_architecture(args) @register_model_architecture("bart", "mbart_base") def mbart_base_architecture(args): args.no_scale_embedding = getattr(args, "no_scale_embedding", False) bart_base_architecture(args) @register_model_architecture("bart", "mbart_base_wmt20") def mbart_base_wmt20_architecture(args): args.layernorm_embedding = getattr(args, "layernorm_embedding", False) mbart_base_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/bart/model.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import copy import logging from typing import Dict, List import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.data import encoders from fairseq.hub_utils import GeneratorHubInterface from omegaconf import open_dict logger = logging.getLogger(__name__) class BARTHubInterface(GeneratorHubInterface): """A simple PyTorch Hub interface to BART. Usage: https://github.com/pytorch/fairseq/tree/main/examples/bart """ def __init__(self, cfg, task, model): super().__init__(cfg, task, [model]) self.model = self.models[0] def encode( self, sentence: str, *addl_sentences, no_separator=True ) -> torch.LongTensor: """ BPE-encode a sentence (or multiple sentences). Every sequence begins with a beginning-of-sentence (`<s>`) symbol. Every sentence ends with an end-of-sentence (`</s>`). Example (single sentence): `<s> a b c </s>` Example (sentence pair): `<s> d e f </s> 1 2 3 </s>` The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE requires leading spaces. For example:: >>> bart.encode('Hello world').tolist() [0, 31414, 232, 2] >>> bart.encode(' world').tolist() [0, 232, 2] >>> bart.encode('world').tolist() [0, 8331, 2] """ tokens = self.bpe.encode(sentence) if len(tokens.split(" ")) > min(self.max_positions) - 2: tokens = " ".join(tokens.split(" ")[: min(self.max_positions) - 2]) bpe_sentence = "<s> " + tokens + " </s>" for s in addl_sentences: bpe_sentence += " </s>" if not no_separator else "" bpe_sentence += " " + self.bpe.encode(s) + " </s>" tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=False) return tokens.long() def decode(self, tokens: torch.LongTensor): assert tokens.dim() == 1 tokens = tokens.cpu().numpy() if tokens[0] == self.task.source_dictionary.bos(): tokens = tokens[1:] # remove <s> eos_mask = tokens == self.task.source_dictionary.eos() doc_mask = eos_mask[1:] & eos_mask[:-1] sentences = np.split(tokens, doc_mask.nonzero()[0] + 1) sentences = [ self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences ] if len(sentences) == 1: return sentences[0] return sentences def _build_sample(self, src_tokens: List[torch.LongTensor]): # assert torch.is_tensor(src_tokens) dataset = self.task.build_dataset_for_inference( src_tokens, [x.numel() for x in src_tokens], ) sample = dataset.collater(dataset) sample = utils.apply_to_sample(lambda tensor: tensor.to(self.device), sample) return sample def generate( self, tokenized_sentences: List[torch.LongTensor], *args, inference_step_args=None, skip_invalid_size_inputs=False, **kwargs ) -> List[List[Dict[str, torch.Tensor]]]: inference_step_args = inference_step_args or {} if "prefix_tokens" in inference_step_args: raise NotImplementedError("prefix generation not implemented for BART") res = [] for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs): src_tokens = batch['net_input']['src_tokens'] inference_step_args["prefix_tokens"] =src_tokens.new_full( (src_tokens.size(0), 1), fill_value=self.task.source_dictionary.bos() ).to(device=self.device) results = super().generate( src_tokens, *args, inference_step_args=inference_step_args, skip_invalid_size_inputs=skip_invalid_size_inputs, **kwargs ) for id, hypos in zip(batch['id'].tolist(), results): res.append((id, hypos)) res = [hypos for _, hypos in sorted(res, key=lambda x: x[0])] 
return res def extract_features( self, tokens: torch.LongTensor, return_all_hiddens: bool = False ) -> torch.Tensor: if tokens.dim() == 1: tokens = tokens.unsqueeze(0) if tokens.size(-1) > min(self.model.max_positions()): raise ValueError( "tokens exceeds maximum length: {} > {}".format( tokens.size(-1), self.model.max_positions() ) ) tokens.to(device=self.device), prev_output_tokens = tokens.clone() prev_output_tokens[:, 0] = tokens.gather( 1, (tokens.ne(self.task.source_dictionary.pad()).sum(dim=1) - 1).unsqueeze(-1), ).squeeze() prev_output_tokens[:, 1:] = tokens[:, :-1] features, extra = self.model( src_tokens=tokens, src_lengths=None, prev_output_tokens=prev_output_tokens, features_only=True, return_all_hiddens=return_all_hiddens, ) if return_all_hiddens: # convert from T x B x C -> B x T x C inner_states = extra["inner_states"] return [inner_state.transpose(0, 1) for inner_state in inner_states] else: return features # just the last layer's features def register_classification_head( self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs ): self.model.register_classification_head( name, num_classes=num_classes, embedding_size=embedding_size, **kwargs ) def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False): if tokens.dim() == 1: tokens = tokens.unsqueeze(0) features = self.extract_features(tokens.to(device=self.device)) sentence_representation = features[ tokens.eq(self.task.source_dictionary.eos()), : ].view(features.size(0), -1, features.size(-1))[:, -1, :] logits = self.model.classification_heads[head](sentence_representation) if return_logits: return logits return F.log_softmax(logits, dim=-1) def fill_mask( self, masked_inputs: List[str], topk: int = 5, match_source_len: bool = True, **generate_kwargs ): masked_token = '<mask>' batch_tokens = [] for masked_input in masked_inputs: assert masked_token in masked_input, \ "please add one {} token for the input".format(masked_token) text_spans = masked_input.split(masked_token) text_spans_bpe = (' {0} '.format(masked_token)).join( [self.bpe.encode(text_span.rstrip()) for text_span in text_spans] ).strip() tokens = self.task.source_dictionary.encode_line( '<s> ' + text_spans_bpe + ' </s>', append_eos=False, add_if_not_exist=False, ).long() batch_tokens.append(tokens) # ensure beam size is at least as big as topk generate_kwargs['beam'] = max( topk, generate_kwargs.get('beam', -1), ) generate_kwargs['match_source_len'] = match_source_len batch_hypos = self.generate(batch_tokens, **generate_kwargs) return [ [(self.decode(hypo['tokens']), hypo['score']) for hypo in hypos[:topk]] for hypos in batch_hypos ]
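# --- illustrative sketch (not part of hub_interface.py) ---
# BARTHubInterface.decode() above strips a leading <s>, then splits the token
# stream wherever two consecutive </s> symbols occur (a document boundary).
# A numpy-only re-creation of that splitting step with made-up token ids
# (bos == 0 and eos == 2 here only for illustration); the BPE decoding itself
# is omitted.
import numpy as np


def split_at_double_eos(tokens: np.ndarray, bos: int = 0, eos: int = 2):
    if tokens[0] == bos:
        tokens = tokens[1:]                      # remove <s>
    eos_mask = tokens == eos
    doc_mask = eos_mask[1:] & eos_mask[:-1]      # True where </s> </s> occurs
    return np.split(tokens, doc_mask.nonzero()[0] + 1)


if __name__ == "__main__":
    toks = np.array([0, 11, 12, 2, 2, 21, 22, 23, 2])
    # -> [array([11, 12,  2]), array([ 2, 21, 22, 23,  2])]
    print(split_at_double_eos(toks))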
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/bart/hub_interface.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .wav2vec import *  # noqa
from .wav2vec2 import *  # noqa
from .wav2vec2_asr import *  # noqa
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/wav2vec/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from argparse import Namespace import contextlib import copy import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from dataclasses import dataclass, field from omegaconf import MISSING, II, open_dict from typing import Any, Optional from fairseq import checkpoint_utils, tasks, utils from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.tasks import FairseqTask from fairseq.models import ( BaseFairseqModel, FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, ) from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES from fairseq.modules import ( LayerNorm, PositionalEmbedding, TransformerDecoderLayer, ) @dataclass class Wav2Vec2AsrConfig(FairseqDataclass): w2v_path: str = field( default=MISSING, metadata={"help": "path to wav2vec 2.0 model"} ) no_pretrained_weights: bool = field( default=False, metadata={"help": "if true, does not load pretrained weights"} ) dropout_input: float = field( default=0.0, metadata={"help": "dropout to apply to the input (after feat extr)"}, ) final_dropout: float = field( default=0.0, metadata={"help": "dropout after transformer and before final projection"}, ) dropout: float = field( default=0.0, metadata={"help": "dropout probability inside wav2vec 2.0 model"} ) attention_dropout: float = field( default=0.0, metadata={ "help": "dropout probability for attention weights inside wav2vec 2.0 model" }, ) activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN inside wav2vec 2.0 model" }, ) conv_feature_layers: Optional[str] = field( default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]", metadata={ "help": ( "string describing convolutional feature extraction " "layers in form of a python list that contains " "[(dim, kernel_size, stride), ...]" ), }, ) encoder_embed_dim: Optional[int] = field( default=768, metadata={"help": "encoder embedding dimension"} ) # masking apply_mask: bool = field( default=False, metadata={"help": "apply masking during fine-tuning"} ) mask_length: int = field( default=10, metadata={"help": "repeat the mask indices multiple times"} ) mask_prob: float = field( default=0.5, metadata={ "help": "probability of replacing a token with mask (normalized by length)" }, ) mask_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose masks"} ) mask_other: float = field( default=0, metadata={ "help": "secondary mask argument (used for more complex distributions), " "see help in compute_mask_indices" }, ) no_mask_overlap: bool = field( default=False, metadata={"help": "whether to allow masks to overlap"} ) mask_min_space: Optional[int] = field( default=1, metadata={"help": "min space between spans (if no overlap is enabled)"}, ) # channel masking mask_channel_length: int = field( default=10, metadata={"help": "length of the mask for features (channels)"} ) mask_channel_prob: float = field( default=0.0, metadata={"help": "probability of replacing a feature with 0"} ) mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length for channel masking"}, ) mask_channel_other: float = field( default=0, metadata={ "help": "secondary mask 
argument (used for more complex distributions), " "see help in compute_mask_indicesh" }, ) no_mask_channel_overlap: bool = field( default=False, metadata={"help": "whether to allow channel masks to overlap"} ) freeze_finetune_updates: int = field( default=0, metadata={"help": "dont finetune wav2vec for this many updates"} ) feature_grad_mult: float = field( default=0.0, metadata={"help": "reset feature grad mult in wav2vec 2.0 to this"} ) layerdrop: float = field( default=0.0, metadata={"help": "probability of dropping a layer in wav2vec 2.0"} ) mask_channel_min_space: Optional[int] = field( default=1, metadata={"help": "min space between spans (if no overlap is enabled)"}, ) mask_channel_before: bool = False normalize: bool = II("task.normalize") data: str = II("task.data") # this holds the loaded wav2vec args w2v_args: Any = None @dataclass class Wav2Vec2CtcConfig(Wav2Vec2AsrConfig): blank_weight: float = 0 blank_mode: str = "add" @register_model("wav2vec_ctc", dataclass=Wav2Vec2CtcConfig) class Wav2VecCtc(BaseFairseqModel): def __init__(self, cfg: Wav2Vec2CtcConfig, w2v_encoder: BaseFairseqModel): super().__init__() self.cfg = cfg self.w2v_encoder = w2v_encoder self.blank_weight = cfg.blank_weight self.blank_mode = cfg.blank_mode def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) return state_dict @classmethod def build_model(cls, cfg: Wav2Vec2CtcConfig, task: FairseqTask): """Build a new model instance.""" w2v_encoder = Wav2VecEncoder(cfg, len(task.target_dictionary)) return cls(cfg, w2v_encoder) def get_logits(self, net_output, normalize=False): logits = net_output["encoder_out"] if self.blank_weight != 0: if self.blank_mode == "add": logits[..., 0] += self.blank_weight elif self.blank_mode == "set": logits[..., 0] = self.blank_weight else: raise Exception(f"invalid blank mode {self.blank_mode}") if net_output["padding_mask"] is not None and net_output["padding_mask"].any(): logits[net_output["padding_mask"].T][..., 0] = float("inf") logits[net_output["padding_mask"].T][..., 1:] = float("-inf") if normalize: logits = utils.log_softmax(logits.float(), dim=-1) return logits def get_normalized_probs(self, net_output, log_probs): """Get normalized probabilities (or log probs) from a net's output.""" logits = self.get_logits(net_output) if log_probs: return utils.log_softmax(logits.float(), dim=-1) else: return utils.softmax(logits.float(), dim=-1) def forward(self, **kwargs): x = self.w2v_encoder(**kwargs) return x @dataclass class Wav2Vec2Seq2SeqConfig(Wav2Vec2AsrConfig): decoder_embed_dim: int = field( default=768, metadata={"help": "decoder embedding dimension"} ) decoder_ffn_embed_dim: int = field( default=3072, metadata={"help": "decoder embedding dimension for FFN"} ) decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"}) decoder_layerdrop: float = field( default=0.0, metadata={"help": "decoder layerdrop chance"} ) decoder_attention_heads: int = field( default=4, metadata={"help": "num decoder attention heads"} ) decoder_learned_pos: bool = field( default=False, metadata={"help": "use learned positional embeddings in the decoder"}, ) decoder_normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each decoder block"} ) no_token_positional_embeddings: bool = field( default=False, metadata={ "help": "if set, disables positional embeddings (outside self attention)" }, ) decoder_dropout: float = field( default=0.0, metadata={"help": "dropout probability in the decoder"} ) 
decoder_attention_dropout: float = field( default=0.0, metadata={ "help": "dropout probability for attention weights inside the decoder" }, ) decoder_activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN inside the decoder" }, ) max_target_positions: int = field( default=2048, metadata={"help": "max target positions"} ) share_decoder_input_output_embed: bool = field( default=False, metadata={"help": "share decoder input and output embeddings"} ) autoregressive: bool = II("task.autoregressive") @register_model("wav2vec_seq2seq", dataclass=Wav2Vec2Seq2SeqConfig) class Wav2Vec2Seq2SeqModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, cfg: Wav2Vec2Seq2SeqConfig, task: FairseqTask): """Build a new model instance.""" assert ( cfg.autoregressive ), "Please set task.autoregressive=true for seq2seq asr models" src_dict, tgt_dict = task.source_dictionary, task.target_dictionary def build_embedding(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) return emb decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim) encoder = cls.build_encoder(cfg) decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens) return Wav2Vec2Seq2SeqModel(encoder, decoder) @classmethod def build_encoder(cls, cfg: Wav2Vec2AsrConfig): return Wav2VecEncoder(cfg) @classmethod def build_decoder(cls, cfg: Wav2Vec2Seq2SeqConfig, tgt_dict, embed_tokens): return TransformerDecoder(cfg, tgt_dict, embed_tokens) def forward(self, **kwargs): encoder_out = self.encoder(**kwargs) decoder_out = self.decoder(encoder_out=encoder_out, **kwargs) return decoder_out def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) return state_dict class Wav2VecEncoder(FairseqEncoder): def __init__(self, cfg: Wav2Vec2AsrConfig, output_size=None): self.apply_mask = cfg.apply_mask arg_overrides = { "dropout": cfg.dropout, "activation_dropout": cfg.activation_dropout, "dropout_input": cfg.dropout_input, "attention_dropout": cfg.attention_dropout, "mask_length": cfg.mask_length, "mask_prob": cfg.mask_prob, "mask_selection": cfg.mask_selection, "mask_other": cfg.mask_other, "no_mask_overlap": cfg.no_mask_overlap, "mask_channel_length": cfg.mask_channel_length, "mask_channel_prob": cfg.mask_channel_prob, "mask_channel_before": cfg.mask_channel_before, "mask_channel_selection": cfg.mask_channel_selection, "mask_channel_other": cfg.mask_channel_other, "no_mask_channel_overlap": cfg.no_mask_channel_overlap, "encoder_layerdrop": cfg.layerdrop, "feature_grad_mult": cfg.feature_grad_mult, } if cfg.w2v_args is None: state = checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides) w2v_args = state.get("cfg", None) if w2v_args is None: w2v_args = convert_namespace_to_omegaconf(state["args"]) w2v_args.criterion = None w2v_args.lr_scheduler = None cfg.w2v_args = w2v_args else: state = None w2v_args = cfg.w2v_args if isinstance(w2v_args, Namespace): cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args) assert cfg.normalize == w2v_args.task.normalize, ( "Fine-tuning works best when data normalization is the same. 
" "Please check that --normalize is set or unset for both pre-training and here" ) w2v_args.task.data = cfg.data task = tasks.setup_task(w2v_args.task) model = task.build_model(w2v_args.model) if state is not None and not cfg.no_pretrained_weights: model.load_state_dict(state["model"], strict=True) model.remove_pretraining_modules() super().__init__(task.source_dictionary) d = w2v_args.model.encoder_embed_dim self.w2v_model = model self.final_dropout = nn.Dropout(cfg.final_dropout) self.freeze_finetune_updates = cfg.freeze_finetune_updates self.num_updates = 0 targ_d = None self.proj = None if output_size is not None: targ_d = output_size elif getattr(cfg, "decoder_embed_dim", d) != d: targ_d = cfg.decoder_embed_dim if targ_d is not None: self.proj = Linear(d, targ_d) def set_num_updates(self, num_updates): """Set the number of parameters updates.""" super().set_num_updates(num_updates) self.num_updates = num_updates def forward(self, source, padding_mask, **kwargs): w2v_args = { "source": source, "padding_mask": padding_mask, "mask": self.apply_mask and self.training, } ft = self.freeze_finetune_updates <= self.num_updates with torch.no_grad() if not ft else contextlib.ExitStack(): res = self.w2v_model.extract_features(**w2v_args) x = res["x"] padding_mask = res["padding_mask"] # B x T x C -> T x B x C x = x.transpose(0, 1) x = self.final_dropout(x) if self.proj: x = self.proj(x) return { "encoder_out": x, # T x B x C "padding_mask": padding_mask, # B x T, "layer_results": res["layer_results"], } def forward_torchscript(self, net_input): if torch.jit.is_scripting(): return self.forward(net_input["source"], net_input["padding_mask"]) else: return self.forward_non_torchscript(net_input) def reorder_encoder_out(self, encoder_out, new_order): if encoder_out["encoder_out"] is not None: encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( 1, new_order ) if encoder_out["padding_mask"] is not None: encoder_out["padding_mask"] = encoder_out[ "padding_mask" ].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return None def upgrade_state_dict_named(self, state_dict, name): return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__( self, cfg: Wav2Vec2Seq2SeqConfig, dictionary, embed_tokens, no_encoder_attn=False, ): super().__init__(dictionary) self.dropout = cfg.decoder_dropout self.share_input_output_embed = cfg.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = cfg.decoder_embed_dim self.output_embed_dim = cfg.decoder_embed_dim self.layerdrop = cfg.decoder_layerdrop self.padding_idx = embed_tokens.padding_idx self.max_target_positions = cfg.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = ( Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None ) self.embed_positions = ( PositionalEmbedding( cfg.max_target_positions, embed_dim, self.padding_idx, learned=cfg.decoder_learned_pos, ) if not cfg.no_token_positional_embeddings else None ) # TODO: update this when transformer gets converted to dataclass configs transformer_cfg = copy.deepcopy(cfg) with open_dict(transformer_cfg): transformer_cfg.dropout = transformer_cfg.decoder_dropout transformer_cfg.attention_dropout = ( transformer_cfg.decoder_attention_dropout ) transformer_cfg.activation_dropout = ( transformer_cfg.decoder_activation_dropout ) self.layers = nn.ModuleList([]) self.layers.extend( [ TransformerDecoderLayer(transformer_cfg, no_encoder_attn) for _ in range(transformer_cfg.decoder_layers) ] ) if not self.share_input_output_embed: self.embed_out = nn.Parameter( torch.Tensor(len(dictionary), self.output_embed_dim) ) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if transformer_cfg.decoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ prev_output_tokens = prev_output_tokens.long() x, extra = self.extract_features( prev_output_tokens, encoder_out, incremental_state ) x = self.output_layer(x) return x, extra def extract_features( self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused ): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers self_attn_padding_mask = None if prev_output_tokens.eq(self.padding_idx).any(): self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx) for layer in self.layers: dropout_probability = np.random.random() if not self.training or (dropout_probability > self.layerdrop): x, attn, _ = layer( x, encoder_out["encoder_out"] if encoder_out is not None else None, encoder_out["padding_mask"] if encoder_out is not None else None, incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, self_attn_padding_mask=self_attn_padding_mask ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) return x, {"attn": attn, "inner_states": inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions) def buffered_future_mask(self, tensor): dim = tensor.size(0) if ( not hasattr(self, "_future_mask") or self._future_mask is None or self._future_mask.device != tensor.device or self._future_mask.size(0) < dim ): self._future_mask = torch.triu( utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 ) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): return state_dict def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m
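# --- illustrative sketch (not part of wav2vec2_asr.py) ---
# Wav2VecEncoder.forward() above keeps the pretrained wav2vec 2.0 trunk frozen
# for the first `freeze_finetune_updates` updates by choosing between
# torch.no_grad() and a no-op contextlib.ExitStack().  A toy reproduction of
# that gating pattern with a dummy linear "trunk"; the class and attribute
# names here are ours, not fairseq's.
import contextlib

import torch
import torch.nn as nn


class FrozenThenFinetuned(nn.Module):
    def __init__(self, freeze_updates: int = 2):
        super().__init__()
        self.trunk = nn.Linear(4, 4)
        self.freeze_updates = freeze_updates
        self.num_updates = 0

    def forward(self, x):
        finetune = self.num_updates >= self.freeze_updates
        ctx = contextlib.ExitStack() if finetune else torch.no_grad()
        with ctx:
            return self.trunk(x)


if __name__ == "__main__":
    m = FrozenThenFinetuned()
    x = torch.randn(1, 4)
    for step in range(3):
        m.num_updates = step
        out = m(x)
        # while frozen, the output does not require grad, so no gradients
        # flow back into the trunk's parameters
        print(step, out.requires_grad)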
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/wav2vec/wav2vec2_asr.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass, field from typing import List, Tuple import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.data.data_utils import compute_mask_indices from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import BaseFairseqModel, register_model from fairseq.modules import ( Fp32GroupNorm, Fp32LayerNorm, GradMultiply, GumbelVectorQuantizer, LayerNorm, MultiheadAttention, SamePad, TransposeLast, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import buffered_arange, index_put, is_xla_tensor EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"]) MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"]) @dataclass class Wav2Vec2Config(FairseqDataclass): extractor_mode: EXTRACTOR_MODE_CHOICES = field( default="default", metadata={ "help": "mode for feature extractor. default has a single group norm with d " "groups in the first conv block, whereas layer_norm has layer norms in " "every block (meant to use with normalize=True)" }, ) encoder_layers: int = field( default=12, metadata={"help": "num encoder layers in the transformer"} ) encoder_embed_dim: int = field( default=768, metadata={"help": "encoder embedding dimension"} ) encoder_ffn_embed_dim: int = field( default=3072, metadata={"help": "encoder embedding dimension for FFN"} ) encoder_attention_heads: int = field( default=12, metadata={"help": "num encoder attention heads"} ) activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( default="gelu", metadata={"help": "activation function to use"} ) # dropouts dropout: float = field( default=0.1, metadata={"help": "dropout probability for the transformer"} ) attention_dropout: float = field( default=0.1, metadata={"help": "dropout probability for attention weights"} ) activation_dropout: float = field( default=0.0, metadata={"help": "dropout probability after activation in FFN"} ) encoder_layerdrop: float = field( default=0.0, metadata={"help": "probability of dropping a tarnsformer layer"} ) dropout_input: float = field( default=0.0, metadata={"help": "dropout to apply to the input (after feat extr)"}, ) dropout_features: float = field( default=0.0, metadata={"help": "dropout to apply to the features (after feat extr)"}, ) final_dim: int = field( default=0, metadata={ "help": "project final representations and targets to this many dimensions." 
"set to encoder_embed_dim is <= 0" }, ) layer_norm_first: bool = field( default=False, metadata={"help": "apply layernorm first in the transformer"} ) conv_feature_layers: str = field( default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]", metadata={ "help": "string describing convolutional feature extraction layers in form of a python list that contains " "[(dim, kernel_size, stride), ...]" }, ) conv_bias: bool = field( default=False, metadata={"help": "include bias in conv encoder"} ) logit_temp: float = field( default=0.1, metadata={"help": "temperature to divide logits by"} ) quantize_targets: bool = field( default=False, metadata={"help": "use quantized targets"} ) quantize_input: bool = field( default=False, metadata={"help": "use quantized inputs"} ) same_quantizer: bool = field( default=False, metadata={"help": "use same quantizer for inputs and targets"} ) target_glu: bool = field( default=False, metadata={"help": "adds projection + glu to targets"} ) feature_grad_mult: float = field( default=1.0, metadata={"help": "multiply feature extractor var grads by this"} ) quantizer_depth: int = field( default=1, metadata={"help": "number of quantizer layers"}, ) quantizer_factor: int = field( default=3, metadata={ "help": "dimensionality increase for inner quantizer layers (if depth > 1)" }, ) latent_vars: int = field( default=320, metadata={"help": "number of latent variables V in each group of the codebook"}, ) latent_groups: int = field( default=2, metadata={"help": "number of groups G of latent variables in the codebook"}, ) latent_dim: int = field( default=0, metadata={ "help": "if > 0, uses this dimensionality for latent variables. " "otherwise uses final_dim / latent_groups" }, ) # masking mask_length: int = field(default=10, metadata={"help": "mask length"}) mask_prob: float = field( default=0.65, metadata={"help": "probability of replacing a token with mask"} ) mask_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length"} ) mask_other: float = field( default=0, metadata={ "help": "secondary mask argument (used for more complex distributions), " "see help in compute_mask_indices" }, ) no_mask_overlap: bool = field( default=False, metadata={"help": "whether to allow masks to overlap"} ) mask_min_space: int = field( default=1, metadata={"help": "min space between spans (if no overlap is enabled)"}, ) # channel masking mask_channel_length: int = field( default=10, metadata={"help": "length of the mask for features (channels)"} ) mask_channel_prob: float = field( default=0.0, metadata={"help": "probability of replacing a feature with 0"} ) mask_channel_before: bool = False mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length for channel masking"}, ) mask_channel_other: float = field( default=0, metadata={ "help": "secondary mask argument (used for more complex distributions), " "see help in compute_mask_indicesh" }, ) no_mask_channel_overlap: bool = field( default=False, metadata={"help": "whether to allow channel masks to overlap"} ) mask_channel_min_space: int = field( default=1, metadata={"help": "min space between spans (if no overlap is enabled)"}, ) # negative selection num_negatives: int = field( default=100, metadata={"help": "number of negative examples from the same sample"}, ) negatives_from_everywhere: bool = field( default=False, metadata={"help": "sample negatives from everywhere, not just masked states"}, ) 
cross_sample_negatives: int = field( default=0, metadata={"help": "number of negative examples from the any sample"} ) codebook_negatives: int = field( default=0, metadata={"help": "number of negative examples codebook"} ) # positional embeddings conv_pos: int = field( default=128, metadata={"help": "number of filters for convolutional positional embeddings"}, ) conv_pos_groups: int = field( default=16, metadata={"help": "number of groups for convolutional positional embedding"}, ) latent_temp: Tuple[float, float, float] = field( default=(2, 0.5, 0.999995), metadata={ "help": "temperature for latent variable sampling. " "can be tuple of 3 values (start, end, decay)" }, ) @register_model("wav2vec2", dataclass=Wav2Vec2Config) class Wav2Vec2Model(BaseFairseqModel): def __init__(self, cfg: Wav2Vec2Config): super().__init__() self.cfg = cfg feature_enc_layers = eval(cfg.conv_feature_layers) self.embed = feature_enc_layers[-1][0] self.feature_extractor = ConvFeatureExtractionModel( conv_layers=feature_enc_layers, dropout=0.0, mode=cfg.extractor_mode, conv_bias=cfg.conv_bias, ) self.post_extract_proj = ( nn.Linear(self.embed, cfg.encoder_embed_dim) if self.embed != cfg.encoder_embed_dim and not cfg.quantize_input else None ) self.mask_prob = cfg.mask_prob self.mask_selection = cfg.mask_selection self.mask_other = cfg.mask_other self.mask_length = cfg.mask_length self.no_mask_overlap = cfg.no_mask_overlap self.mask_min_space = cfg.mask_min_space self.mask_channel_prob = cfg.mask_channel_prob self.mask_channel_before = cfg.mask_channel_before self.mask_channel_selection = cfg.mask_channel_selection self.mask_channel_other = cfg.mask_channel_other self.mask_channel_length = cfg.mask_channel_length self.no_mask_channel_overlap = cfg.no_mask_channel_overlap self.mask_channel_min_space = cfg.mask_channel_min_space self.dropout_input = nn.Dropout(cfg.dropout_input) self.dropout_features = nn.Dropout(cfg.dropout_features) self.feature_grad_mult = cfg.feature_grad_mult self.quantizer = None self.input_quantizer = None self.n_negatives = cfg.num_negatives self.cross_sample_negatives = cfg.cross_sample_negatives self.codebook_negatives = cfg.codebook_negatives self.negatives_from_everywhere = cfg.negatives_from_everywhere self.logit_temp = cfg.logit_temp final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim if cfg.quantize_targets: vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else final_dim self.quantizer = GumbelVectorQuantizer( dim=self.embed, num_vars=cfg.latent_vars, temp=cfg.latent_temp, groups=cfg.latent_groups, combine_groups=False, vq_dim=vq_dim, time_first=True, weight_proj_depth=cfg.quantizer_depth, weight_proj_factor=cfg.quantizer_factor, ) self.project_q = nn.Linear(vq_dim, final_dim) else: self.project_q = nn.Linear(self.embed, final_dim) if cfg.quantize_input: if cfg.same_quantizer and self.quantizer is not None: vq_dim = final_dim self.input_quantizer = self.quantizer else: vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else cfg.encoder_embed_dim self.input_quantizer = GumbelVectorQuantizer( dim=self.embed, num_vars=cfg.latent_vars, temp=cfg.latent_temp, groups=cfg.latent_groups, combine_groups=False, vq_dim=vq_dim, time_first=True, weight_proj_depth=cfg.quantizer_depth, weight_proj_factor=cfg.quantizer_factor, ) self.project_inp = nn.Linear(vq_dim, cfg.encoder_embed_dim) self.mask_emb = nn.Parameter( torch.FloatTensor(cfg.encoder_embed_dim).uniform_() ) self.encoder = TransformerEncoder(cfg) self.layer_norm = LayerNorm(self.embed) self.target_glu = None if cfg.target_glu: 
self.target_glu = nn.Sequential( nn.Linear(final_dim, final_dim * 2), nn.GLU() ) self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim) def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) """Upgrade a (possibly old) state dict for new versions of fairseq.""" return state_dict @classmethod def build_model(cls, cfg: Wav2Vec2Config, task=None): """Build a new model instance.""" return cls(cfg) def apply_mask( self, x, padding_mask, mask_indices=None, mask_channel_indices=None, ): B, T, C = x.shape if self.mask_channel_prob > 0 and self.mask_channel_before: mask_channel_indices = compute_mask_indices( (B, C), None, self.mask_channel_prob, self.mask_channel_length, self.mask_channel_selection, self.mask_channel_other, no_overlap=self.no_mask_channel_overlap, min_space=self.mask_channel_min_space, ) mask_channel_indices = ( torch.from_numpy(mask_channel_indices) .to(x.device) .unsqueeze(1) .expand(-1, T, -1) ) x[mask_channel_indices] = 0 if self.mask_prob > 0: if mask_indices is None: mask_indices = compute_mask_indices( (B, T), padding_mask, self.mask_prob, self.mask_length, self.mask_selection, self.mask_other, min_masks=2, no_overlap=self.no_mask_overlap, min_space=self.mask_min_space, ) mask_indices = torch.from_numpy(mask_indices).to(x.device) x = index_put(x, mask_indices, self.mask_emb) else: mask_indices = None if self.mask_channel_prob > 0 and not self.mask_channel_before: if mask_channel_indices is None: mask_channel_indices = compute_mask_indices( (B, C), None, self.mask_channel_prob, self.mask_channel_length, self.mask_channel_selection, self.mask_channel_other, no_overlap=self.no_mask_channel_overlap, min_space=self.mask_channel_min_space, ) mask_channel_indices = ( torch.from_numpy(mask_channel_indices) .to(x.device) .unsqueeze(1) .expand(-1, T, -1) ) x = index_put(x, mask_channel_indices, 0) return x, mask_indices def sample_negatives(self, y, num, padding_count=None): if self.n_negatives == 0 and self.cross_sample_negatives == 0: return y.new(0) bsz, tsz, fsz = y.shape y = y.view(-1, fsz) # BTC => (BxT)C # FIXME: what happens if padding_count is specified? 
cross_high = tsz * bsz high = tsz - (padding_count or 0) with torch.no_grad(): assert high > 1, f"{bsz,tsz,fsz}" if self.n_negatives > 0: tszs = ( buffered_arange(num) .unsqueeze(-1) .expand(-1, self.n_negatives) .flatten() ) neg_idxs = torch.randint( low=0, high=high - 1, size=(bsz, self.n_negatives * num) ) neg_idxs[neg_idxs >= tszs] += 1 if self.cross_sample_negatives > 0: tszs = ( buffered_arange(num) .unsqueeze(-1) .expand(-1, self.cross_sample_negatives) .flatten() ) cross_neg_idxs = torch.randint( low=0, high=cross_high - 1, size=(bsz, self.cross_sample_negatives * num), ) cross_neg_idxs[cross_neg_idxs >= tszs] += 1 if self.n_negatives > 0: for i in range(1, bsz): neg_idxs[i] += i * high else: neg_idxs = cross_neg_idxs if self.cross_sample_negatives > 0 and self.n_negatives > 0: neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1) negs = y[neg_idxs.view(-1)] negs = negs.view( bsz, num, self.n_negatives + self.cross_sample_negatives, fsz ).permute( 2, 0, 1, 3 ) # to NxBxTxC return negs, neg_idxs def compute_preds(self, x, y, negatives): neg_is_pos = (y == negatives).all(-1) y = y.unsqueeze(0) targets = torch.cat([y, negatives], dim=0) logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x) logits = logits / self.logit_temp if is_xla_tensor(logits) or neg_is_pos.any(): fillval = -float(2 ** 30) if not hasattr(self, "_inftensor"): self._inftensor = ( torch.tensor(fillval).to(x.device) if is_xla_tensor(logits) else float("-inf") ) logits[1:] = index_put(logits[1:], neg_is_pos, self._inftensor) return logits def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): """ Computes the output length of the convolutional layers """ def _conv_out_length(input_length, kernel_size, stride): return torch.floor((input_length - kernel_size) / stride + 1) conv_cfg_list = eval(self.cfg.conv_feature_layers) for i in range(len(conv_cfg_list)): input_lengths = _conv_out_length( input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2] ) return input_lengths.to(torch.long) def forward( self, source, padding_mask=None, mask=True, features_only=False, layer=None, mask_indices=None, mask_channel_indices=None, padding_count=None, ): if self.feature_grad_mult > 0: features = self.feature_extractor(source) if self.feature_grad_mult != 1.0: features = GradMultiply.apply(features, self.feature_grad_mult) else: with torch.no_grad(): features = self.feature_extractor(source) features_pen = features.float().pow(2).mean() features = features.transpose(1, 2) features = self.layer_norm(features) unmasked_features = features.clone() if padding_mask is not None and padding_mask.any(): input_lengths = (1 - padding_mask.long()).sum(-1) # apply conv formula to get real output_lengths output_lengths = self._get_feat_extract_output_lengths(input_lengths) padding_mask = torch.zeros( features.shape[:2], dtype=features.dtype, device=features.device ) # these two operations makes sure that all values # before the output lengths indices are attended to padding_mask[ ( torch.arange(padding_mask.shape[0], device=padding_mask.device), output_lengths - 1, ) ] = 1 padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool() else: padding_mask = None if self.post_extract_proj is not None: features = self.post_extract_proj(features) features = self.dropout_input(features) unmasked_features = self.dropout_features(unmasked_features) num_vars = None code_ppl = None prob_ppl = None curr_temp = None if self.input_quantizer: q = self.input_quantizer(features, produce_targets=False) 
features = q["x"] num_vars = q["num_vars"] code_ppl = q["code_perplexity"] prob_ppl = q["prob_perplexity"] curr_temp = q["temp"] features = self.project_inp(features) if mask: x, mask_indices = self.apply_mask( features, padding_mask, mask_indices=mask_indices, mask_channel_indices=mask_channel_indices, ) if not is_xla_tensor(x) and mask_indices is not None: # tpu-comment: reducing the size in a dynamic way causes # too many recompilations on xla. y = unmasked_features[mask_indices].view( unmasked_features.size(0), -1, unmasked_features.size(-1) ) else: y = unmasked_features else: x = features y = unmasked_features mask_indices = None x, layer_results = self.encoder(x, padding_mask=padding_mask, layer=layer) if features_only: return { "x": x, "padding_mask": padding_mask, "features": unmasked_features, "layer_results": layer_results, } if self.quantizer: q = self.quantizer(y, produce_targets=False) y = q["x"] num_vars = q["num_vars"] code_ppl = q["code_perplexity"] prob_ppl = q["prob_perplexity"] curr_temp = q["temp"] y = self.project_q(y) if self.negatives_from_everywhere: neg_cands = self.quantizer(unmasked_features, produce_targets=False)[ "x" ] negs, _ = self.sample_negatives( neg_cands, y.size(1), padding_count=padding_count, ) negs = self.project_q(negs) else: negs, _ = self.sample_negatives( y, y.size(1), padding_count=padding_count, ) if self.codebook_negatives > 0: cb_negs = self.quantizer.sample_from_codebook( y.size(0) * y.size(1), self.codebook_negatives ) cb_negs = cb_negs.view( self.codebook_negatives, y.size(0), y.size(1), -1 ) # order doesnt matter cb_negs = self.project_q(cb_negs) negs = torch.cat([negs, cb_negs], dim=0) else: y = self.project_q(y) if self.negatives_from_everywhere: negs, _ = self.sample_negatives( unmasked_features, y.size(1), padding_count=padding_count, ) negs = self.project_q(negs) else: negs, _ = self.sample_negatives( y, y.size(1), padding_count=padding_count, ) if not is_xla_tensor(x): # tpu-comment: reducing the size in a dynamic way causes # too many recompilations on xla. 
x = x[mask_indices].view(x.size(0), -1, x.size(-1)) if self.target_glu: y = self.target_glu(y) negs = self.target_glu(negs) x = self.final_proj(x) x = self.compute_preds(x, y, negs) result = { "x": x, "padding_mask": padding_mask, "features_pen": features_pen, } if prob_ppl is not None: result["prob_perplexity"] = prob_ppl result["code_perplexity"] = code_ppl result["num_vars"] = num_vars result["temp"] = curr_temp return result def quantize(self, x): assert self.quantizer is not None x = self.feature_extractor(x) x = x.transpose(1, 2) x = self.layer_norm(x) return self.quantizer.forward_idx(x) def extract_features(self, source, padding_mask, mask=False, layer=None): res = self.forward( source, padding_mask, mask=mask, features_only=True, layer=layer ) return res def get_logits(self, net_output): logits = net_output["x"] logits = logits.transpose(0, 2) logits = logits.reshape(-1, logits.size(-1)) return logits def get_targets(self, sample, net_output, expand_steps=True): x = net_output["x"] return x.new_zeros(x.size(1) * x.size(2), dtype=torch.long) def get_extra_losses(self, net_output): pen = [] if "prob_perplexity" in net_output: pen.append( (net_output["num_vars"] - net_output["prob_perplexity"]) / net_output["num_vars"] ) if "features_pen" in net_output: pen.append(net_output["features_pen"]) return pen def remove_pretraining_modules(self): self.quantizer = None self.project_q = None self.target_glu = None self.final_proj = None class ConvFeatureExtractionModel(nn.Module): def __init__( self, conv_layers: List[Tuple[int, int, int]], dropout: float = 0.0, mode: str = "default", conv_bias: bool = False, ): super().__init__() assert mode in {"default", "layer_norm"} def block( n_in, n_out, k, stride, is_layer_norm=False, is_group_norm=False, conv_bias=False, ): def make_conv(): conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias) nn.init.kaiming_normal_(conv.weight) return conv assert ( is_layer_norm and is_group_norm ) == False, "layer norm and group norm are exclusive" if is_layer_norm: return nn.Sequential( make_conv(), nn.Dropout(p=dropout), nn.Sequential( TransposeLast(), Fp32LayerNorm(dim, elementwise_affine=True), TransposeLast(), ), nn.GELU(), ) elif is_group_norm: return nn.Sequential( make_conv(), nn.Dropout(p=dropout), Fp32GroupNorm(dim, dim, affine=True), nn.GELU(), ) else: return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU()) in_d = 1 self.conv_layers = nn.ModuleList() for i, cl in enumerate(conv_layers): assert len(cl) == 3, "invalid conv definition: " + str(cl) (dim, k, stride) = cl self.conv_layers.append( block( in_d, dim, k, stride, is_layer_norm=mode == "layer_norm", is_group_norm=mode == "default" and i == 0, conv_bias=conv_bias, ) ) in_d = dim def forward(self, x): # BxT -> BxCxT x = x.unsqueeze(1) for conv in self.conv_layers: x = conv(x) return x class TransformerEncoder(nn.Module): def __init__(self, args): super().__init__() self.dropout = args.dropout self.embedding_dim = args.encoder_embed_dim self.pos_conv = nn.Conv1d( self.embedding_dim, self.embedding_dim, kernel_size=args.conv_pos, padding=args.conv_pos // 2, groups=args.conv_pos_groups, ) dropout = 0 std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim)) nn.init.normal_(self.pos_conv.weight, mean=0, std=std) nn.init.constant_(self.pos_conv.bias, 0) self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2) self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU()) self.layers = nn.ModuleList( [ 
TransformerSentenceEncoderLayer( embedding_dim=self.embedding_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=self.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_fn=args.activation_fn, layer_norm_first=args.layer_norm_first, ) for _ in range(args.encoder_layers) ] ) self.layer_norm_first = args.layer_norm_first self.layer_norm = LayerNorm(self.embedding_dim) self.layerdrop = args.encoder_layerdrop self.apply(init_bert_params) def forward(self, x, padding_mask=None, layer=None): x, layer_results = self.extract_features(x, padding_mask, layer) if self.layer_norm_first and layer is None: x = self.layer_norm(x) return x, layer_results def extract_features(self, x, padding_mask=None, tgt_layer=None): if padding_mask is not None: x = index_put(x, padding_mask, 0) x_conv = self.pos_conv(x.transpose(1, 2)) x_conv = x_conv.transpose(1, 2) x = x + x_conv if not self.layer_norm_first: x = self.layer_norm(x) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) layer_results = [] r = None for i, layer in enumerate(self.layers): dropout_probability = np.random.random() if not self.training or (dropout_probability > self.layerdrop): x, z = layer(x, self_attn_padding_mask=padding_mask, need_weights=False) if tgt_layer is not None: layer_results.append((x, z)) if i == tgt_layer: r = x break if r is not None: x = r # T x B x C -> B x T x C x = x.transpose(0, 1) return x, layer_results def max_positions(self): """Maximum output length supported by the encoder.""" return self.args.max_positions def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" return state_dict class TransformerSentenceEncoderLayer(nn.Module): """ Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained models. """ def __init__( self, embedding_dim: float = 768, ffn_embedding_dim: float = 3072, num_attention_heads: float = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, activation_fn: str = "relu", layer_norm_first: bool = False, ) -> None: super().__init__() # Initialize parameters self.embedding_dim = embedding_dim self.dropout = dropout self.activation_dropout = activation_dropout # Initialize blocks self.activation_fn = utils.get_activation_fn(activation_fn) self.self_attn = MultiheadAttention( self.embedding_dim, num_attention_heads, dropout=attention_dropout, self_attention=True, ) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(self.activation_dropout) self.dropout3 = nn.Dropout(dropout) self.layer_norm_first = layer_norm_first # layer norm associated with the self attention layer self.self_attn_layer_norm = LayerNorm(self.embedding_dim) self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim) self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim) # layer norm associated with the position wise feed-forward NN self.final_layer_norm = LayerNorm(self.embedding_dim) def forward( self, x: torch.Tensor, self_attn_mask: torch.Tensor = None, self_attn_padding_mask: torch.Tensor = None, need_weights: bool = False, att_args=None, ): """ LayerNorm is applied either before or after the self-attention/ffn modules similar to the original Transformer imlementation. 
""" residual = x if self.layer_norm_first: x = self.self_attn_layer_norm(x) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, attn_mask=self_attn_mask, ) x = self.dropout1(x) x = residual + x residual = x x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) x = self.dropout3(x) x = residual + x else: x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, ) x = self.dropout1(x) x = residual + x x = self.self_attn_layer_norm(x) residual = x x = self.activation_fn(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) x = self.dropout3(x) x = residual + x x = self.final_layer_norm(x) return x, attn
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/wav2vec/wav2vec2.py
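A minimal usage sketch for the wav2vec 2.0 model dumped above (not part of the repository): it assumes the class defined in this file is the registered Wav2Vec2Model with its Wav2Vec2Config dataclass (both declared earlier in the same file), that fairseq and torch are installed, and that the dataclass defaults are mutually consistent; the shapes in the comments are illustrative.

import torch
from fairseq.models.wav2vec.wav2vec2 import Wav2Vec2Config, Wav2Vec2Model

# Build the model straight from the config defaults; build_model() simply calls cls(cfg).
cfg = Wav2Vec2Config()
model = Wav2Vec2Model.build_model(cfg)

# Two one-second waveforms at 16 kHz: (batch, samples).
source = torch.randn(2, 16000)

# Full pre-training forward pass: masking, negative sampling, contrastive logits.
out = model(source, mask=True)
print(out["x"].shape)  # (1 + num_negatives, batch, masked_frames) cosine-similarity logits

# features_only skips quantization/negatives and returns contextual encoder states.
feats = model.extract_features(source, padding_mask=None, mask=False)
print(feats["x"].shape)  # (batch, frames, encoder_embed_dim)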
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field import logging import math from typing import Optional, Tuple from omegaconf import II import sys import torch import torch.nn as nn import torch.nn.functional as F from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import BaseFairseqModel, register_model from fairseq.modules import ( Fp32GroupNorm, Fp32LayerNorm, GumbelVectorQuantizer, KmeansVectorQuantizer, TransposeLast, ) from fairseq.tasks import FairseqTask from fairseq.utils import buffered_arange logger = logging.getLogger(__name__) AGGREGATOR_CHOICES = ChoiceEnum(["cnn", "gru"]) PROJECT_FEATURES_CHOICES = ChoiceEnum(["none", "same", "new"]) ACTIVATION_CHOICES = ChoiceEnum(["relu", "gelu"]) VQ_TYPE_CHOICES = ChoiceEnum(["none", "gumbel", "kmeans"]) @dataclass class Wav2VecConfig(FairseqDataclass): prediction_steps: int = field( default=12, metadata={"help": "number of steps ahead to predict"} ) sample_distance: Optional[int] = field( default=None, metadata={ "help": "sample distance from target. does not work properly with cross-sampling" }, ) cross_sample_negatives: int = field( default=0, metadata={"help": "num of cross sampled negatives"} ) num_negatives: int = field( default=10, metadata={"help": "num of sampled negatives"} ) conv_feature_layers: str = field( default="[(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1), (512, 1, 1)]", metadata={ "help": "convolutional feature extraction layers [(dim, kernel_size, stride), ...]" }, ) conv_aggregator_layers: str = field( default="[(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)]", metadata={ "help": "convolutional aggregator layers [(dim, kernel_size, stride), ...]" }, ) dropout: float = field( default=0.0, metadata={"help": "dropout to apply within the model"} ) dropout_features: float = field( default=0.0, metadata={"help": "dropout to apply to the features"} ) dropout_agg: float = field( default=0.0, metadata={"help": "dropout to apply after aggregation step"} ) aggregator: AGGREGATOR_CHOICES = field( default="cnn", metadata={"help": "type of aggregator to use"} ) gru_dim: int = field(default=512, metadata={"help": "GRU dimensionality"}) no_conv_bias: bool = field( default=False, metadata={"help": "if set, does not learn bias for conv layers"} ) agg_zero_pad: bool = field( default=False, metadata={"help": "if set, zero pads in aggregator instead of repl pad"}, ) skip_connections_feat: bool = field( default=False, metadata={"help": "if set, adds skip connections to the feature extractor"}, ) skip_connections_agg: bool = field( default=True, metadata={"help": "if set, adds skip connections to the aggregator"}, ) residual_scale: float = field( default=0.5, metadata={"help": "scales residual by sqrt(value)"} ) log_compression: bool = field( default=True, metadata={"help": "if set, adds a log compression to feature extractor"}, ) balanced_classes: bool = field( default=False, metadata={"help": "if set, loss is scaled to balance for number of negatives"}, ) project_features: PROJECT_FEATURES_CHOICES = field( default="none", metadata={ "help": "if not none, features are projected using the (same or new) aggregator" }, ) non_affine_group_norm: bool = field( default=False, metadata={"help": "if 
set, group norm is not affine"} ) offset: str = field( default="auto", metadata={ "help": "if set to 'auto', it is computed automatically from the receptive field, else set to int value" }, ) activation: ACTIVATION_CHOICES = field( default="relu", metadata={ "help": "if set to 'auto', it is computed automatically from the receptive field, else set to int value" }, ) vq_type: VQ_TYPE_CHOICES = field( default="none", metadata={"help": "which type of quantizer to use"} ) vq_vars: int = field( default=320, metadata={"help": "project to this many vector quantized variables per group"}, ) vq_groups: int = field( default=2, metadata={"help": "number of groups of latent variables"} ) vq_dim: int = field( default=0, metadata={ "help": "uses this dimensionality for quantized vectors. 0 to use model dim // groups" }, ) vq_depth: int = field( default=1, metadata={"help": "number of layers for vq weight projection"} ) combine_groups: bool = field( default=False, metadata={"help": "if set, variables are shared among groups"} ) vq_temp: Tuple[float, float, float] = field( default=(2.0, 0.5, 0.999995), metadata={ "help": "temperature for latent variable sampling with gumbel softmax. should be a tuple of 3 values (start, end, decay)" }, ) vq_gamma: float = field( default=0.25, metadata={"help": "gamma parameter for kmeans style vector quantization"}, ) infonce: bool = II("criterion.infonce") @register_model("wav2vec", dataclass=Wav2VecConfig) class Wav2VecModel(BaseFairseqModel): @classmethod def build_model(cls, cfg: Wav2VecConfig, task: FairseqTask): """Build a new model instance.""" model = Wav2VecModel(cfg) logger.info(model) return model def __init__(self, cfg: Wav2VecConfig): super().__init__() self.prediction_steps = cfg.prediction_steps offset = cfg.offset if cfg.activation == "relu": activation = nn.ReLU() elif cfg.activation == "gelu": activation = nn.GELU() else: raise Exception("unknown activation " + cfg.activation) feature_enc_layers = eval(cfg.conv_feature_layers) self.feature_extractor = ConvFeatureExtractionModel( conv_layers=feature_enc_layers, dropout=0.0, log_compression=cfg.log_compression, skip_connections=cfg.skip_connections_feat, residual_scale=cfg.residual_scale, non_affine_group_norm=cfg.non_affine_group_norm, activation=activation, ) embed = feature_enc_layers[-1][0] self.vector_quantizer = None if cfg.vq_type == "gumbel": self.vector_quantizer = GumbelVectorQuantizer( dim=embed, num_vars=cfg.vq_vars, temp=cfg.vq_temp, groups=cfg.vq_groups, combine_groups=cfg.combine_groups, vq_dim=cfg.vq_dim if cfg.vq_dim > 0 else embed, time_first=False, activation=activation, weight_proj_depth=cfg.vq_depth, weight_proj_factor=2, ) elif cfg.vq_type == "kmeans": self.vector_quantizer = KmeansVectorQuantizer( dim=embed, num_vars=cfg.vq_vars, groups=cfg.vq_groups, combine_groups=cfg.combine_groups, vq_dim=cfg.vq_dim if cfg.vq_dim > 0 else embed, time_first=False, gamma=cfg.vq_gamma, ) else: assert ( cfg.vq_type == "none" or cfg.vq_type is None ), "Unknown quantizer type" if cfg.offset == "auto": jin = 0 rin = 0 for _, k, stride in feature_enc_layers: if rin == 0: rin = k rin = rin + (k - 1) * jin if jin == 0: jin = stride else: jin *= stride offset = math.ceil(rin / jin) offset = int(offset) def make_aggregator(): if cfg.aggregator == "cnn": agg_layers = eval(cfg.conv_aggregator_layers) agg_dim = agg_layers[-1][0] feature_aggregator = ConvAggegator( conv_layers=agg_layers, embed=embed, dropout=cfg.dropout, skip_connections=cfg.skip_connections_agg, residual_scale=cfg.residual_scale, 
non_affine_group_norm=cfg.non_affine_group_norm, conv_bias=not cfg.no_conv_bias, zero_pad=cfg.agg_zero_pad, activation=activation, ) elif cfg.aggregator == "gru": agg_dim = cfg.gru_dim feature_aggregator = nn.Sequential( TransposeLast(), nn.GRU( input_size=embed, hidden_size=agg_dim, num_layers=1, dropout=cfg.dropout, ), TransposeLast(deconstruct_idx=0), ) else: raise Exception("unknown aggregator type " + cfg.aggregator) return feature_aggregator, agg_dim self.feature_aggregator, agg_dim = make_aggregator() self.wav2vec_predictions = Wav2VecPredictionsModel( in_dim=agg_dim, out_dim=embed, prediction_steps=cfg.prediction_steps, n_negatives=cfg.num_negatives, cross_sample_negatives=cfg.cross_sample_negatives, sample_distance=cfg.sample_distance, dropout=cfg.dropout, offset=offset, balanced_classes=cfg.balanced_classes, infonce=cfg.infonce, ) self.dropout_feats = nn.Dropout(p=cfg.dropout_features) self.dropout_agg = nn.Dropout(p=cfg.dropout_agg) if cfg.project_features == "none": self.project_features = None elif cfg.project_features == "same": self.project_features = self.feature_aggregator elif cfg.project_features == "new": self.project_features, _ = make_aggregator() def forward(self, source): result = {} features = self.feature_extractor(source) if self.vector_quantizer: q_res = self.vector_quantizer(features) features = q_res["x"] for k in q_res.keys(): if k != "x": result[k] = q_res[k] x = self.dropout_feats(features) x = self.feature_aggregator(x) x = self.dropout_agg(x) if self.project_features is not None: features = self.project_features(features) x, targets = self.wav2vec_predictions(x, features) result["cpc_logits"] = x result["cpc_targets"] = targets return result def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) def max_positions(self): """Maximum length supported by the model.""" return sys.maxsize def get_logits(self, net_output): logits = net_output["cpc_logits"] return logits def get_targets(self, sample, net_output): t = net_output["cpc_targets"] if isinstance(t, tuple): t = t[0] return t.contiguous() def get_target_weights(self, targets, net_output): targets = net_output["cpc_targets"] if isinstance(targets, tuple) and targets[-1] is not None: return targets[-1] return None def get_extra_losses(self, net_output): loss = None if "prob_perplexity" in net_output: loss = net_output["num_vars"] - net_output["prob_perplexity"] elif "kmeans_loss" in net_output: loss = net_output["kmeans_loss"] return loss def norm_block(is_layer_norm, dim, affine=True): if is_layer_norm: mod = nn.Sequential( TransposeLast(), Fp32LayerNorm(dim, elementwise_affine=affine), TransposeLast(), ) else: mod = Fp32GroupNorm(1, dim, affine=affine) return mod class ConvFeatureExtractionModel(nn.Module): def __init__( self, conv_layers, dropout, log_compression, skip_connections, residual_scale, non_affine_group_norm, activation, ): super().__init__() def block(n_in, n_out, k, stride): return nn.Sequential( nn.Conv1d(n_in, n_out, k, stride=stride, bias=False), nn.Dropout(p=dropout), norm_block( is_layer_norm=False, dim=n_out, affine=not non_affine_group_norm ), activation, ) in_d = 1 self.conv_layers = nn.ModuleList() for dim, k, stride in conv_layers: self.conv_layers.append(block(in_d, dim, k, stride)) in_d = dim self.log_compression = log_compression self.skip_connections = skip_connections self.residual_scale = math.sqrt(residual_scale) def forward(self, x): # BxT -> BxCxT x = x.unsqueeze(1) for conv in self.conv_layers: residual = x x = conv(x) if 
self.skip_connections and x.size(1) == residual.size(1): tsz = x.size(2) r_tsz = residual.size(2) residual = residual[..., :: r_tsz // tsz][..., :tsz] x = (x + residual) * self.residual_scale if self.log_compression: x = x.abs() x = x + 1 x = x.log() return x class ZeroPad1d(nn.Module): def __init__(self, pad_left, pad_right): super().__init__() self.pad_left = pad_left self.pad_right = pad_right def forward(self, x): return F.pad(x, (self.pad_left, self.pad_right)) class ConvAggegator(nn.Module): def __init__( self, conv_layers, embed, dropout, skip_connections, residual_scale, non_affine_group_norm, conv_bias, zero_pad, activation, ): super().__init__() def block(n_in, n_out, k, stride): # padding dims only really make sense for stride = 1 ka = k // 2 kb = ka - 1 if k % 2 == 0 else ka pad = ( ZeroPad1d(ka + kb, 0) if zero_pad else nn.ReplicationPad1d((ka + kb, 0)) ) return nn.Sequential( pad, nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias), nn.Dropout(p=dropout), norm_block(False, n_out, affine=not non_affine_group_norm), activation, ) in_d = embed self.conv_layers = nn.ModuleList() self.residual_proj = nn.ModuleList() for dim, k, stride in conv_layers: if in_d != dim and skip_connections: self.residual_proj.append(nn.Conv1d(in_d, dim, 1, bias=False)) else: self.residual_proj.append(None) self.conv_layers.append(block(in_d, dim, k, stride)) in_d = dim self.conv_layers = nn.Sequential(*self.conv_layers) self.skip_connections = skip_connections self.residual_scale = math.sqrt(residual_scale) def forward(self, x): for rproj, conv in zip(self.residual_proj, self.conv_layers): residual = x x = conv(x) if self.skip_connections: if rproj is not None: residual = rproj(residual) x = (x + residual) * self.residual_scale return x class Wav2VecPredictionsModel(nn.Module): def __init__( self, in_dim, out_dim, prediction_steps, n_negatives, cross_sample_negatives, sample_distance, dropout, offset, balanced_classes, infonce, ): super().__init__() self.n_negatives = n_negatives self.cross_sample_negatives = cross_sample_negatives self.sample_distance = sample_distance self.project_to_steps = nn.ConvTranspose2d( in_dim, out_dim, (1, prediction_steps) ) self.dropout = nn.Dropout(p=dropout) self.offset = offset self.balanced_classes = balanced_classes self.infonce = infonce def sample_negatives(self, y): bsz, fsz, tsz = y.shape y = y.transpose(0, 1) # BCT -> CBT y = y.contiguous().view(fsz, -1) # CBT => C(BxT) cross_high = tsz * bsz high = tsz if self.sample_distance is None else min(tsz, self.sample_distance) assert high > 1 neg_idxs = torch.randint(low=0, high=high, size=(bsz, self.n_negatives * tsz)) with torch.no_grad(): if self.n_negatives > 0: tszs = ( buffered_arange(tsz) .unsqueeze(-1) .expand(-1, self.n_negatives) .flatten() ) neg_idxs = torch.randint( low=0, high=high - 1, size=(bsz, self.n_negatives * tsz) ) neg_idxs[neg_idxs >= tszs] += 1 if self.cross_sample_negatives > 0: tszs = ( buffered_arange(tsz) .unsqueeze(-1) .expand(-1, self.cross_sample_negatives) .flatten() ) cross_neg_idxs = torch.randint( low=0, high=cross_high - 1, size=(bsz, self.cross_sample_negatives * tsz), ) cross_neg_idxs[cross_neg_idxs >= tszs] += 1 if self.n_negatives > 0: for i in range(1, bsz): neg_idxs[i] += i * high else: neg_idxs = cross_neg_idxs if self.cross_sample_negatives > 0 and self.n_negatives > 0: neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1) negs = y[..., neg_idxs.view(-1)] negs = negs.view( fsz, bsz, self.n_negatives + self.cross_sample_negatives, tsz ).permute( 2, 1, 0, 3 ) # to NxBxCxT 
return negs def forward(self, x, y): x = x.unsqueeze(-1) x = self.project_to_steps(x) # BxCxTxS x = self.dropout(x) negatives = self.sample_negatives(y) y = y.unsqueeze(0) targets = torch.cat([y, negatives], dim=0) # Copies x B x C x T copies = targets.size(0) bsz, dim, tsz, steps = x.shape steps = min(steps, tsz - self.offset) predictions = x.new( bsz * copies * (tsz - self.offset + 1) * steps - ((steps + 1) * steps // 2) * copies * bsz ) if self.infonce: labels = predictions.new_full( (predictions.shape[0] // copies,), 0, dtype=torch.long ) else: labels = torch.zeros_like(predictions) weights = ( torch.full_like(labels, 1 / self.n_negatives) if self.balanced_classes and not self.infonce else None ) start = end = 0 for i in range(steps): offset = i + self.offset end = start + (tsz - offset) * bsz * copies if self.infonce: predictions[start:end] = torch.einsum( "bct,nbct->tbn", x[..., :-offset, i], targets[..., offset:] ).flatten() else: pos_num = (end - start) // copies predictions[start:end] = torch.einsum( "bct,nbct->nbt", x[..., :-offset, i], targets[..., offset:] ).flatten() labels[start : start + pos_num] = 1.0 if weights is not None: weights[start : start + pos_num] = 1.0 start = end assert end == predictions.numel(), "{} != {}".format(end, predictions.numel()) if self.infonce: predictions = predictions.view(-1, copies) else: if weights is not None: labels = (labels, weights) return predictions, labels
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/wav2vec/wav2vec.py
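A similar hedged sketch for the original wav2vec model above (not from the repository). The infonce field is normally filled in from the criterion config via the II("criterion.infonce") interpolation, so it is set explicitly when the dataclass is built by hand; everything else uses the field defaults.

import torch
from fairseq.models.wav2vec.wav2vec import Wav2VecConfig, Wav2VecModel

# infonce would otherwise stay an unresolved "${criterion.infonce}" placeholder.
cfg = Wav2VecConfig(infonce=True)
model = Wav2VecModel(cfg)

wav = torch.randn(2, 16000)             # (batch, samples) of raw audio
out = model(wav)                        # {"cpc_logits": ..., "cpc_targets": ...}

logits = model.get_logits(out)          # scores for the positive and sampled negatives
targets = model.get_targets(None, out)  # matching labels consumed by the criterion
print(logits.shape, targets.shape)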
import argparse import logging import torch.nn as nn import fairseq.checkpoint_utils from fairseq.models import ( FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import TransformerDecoder from fairseq.models.roberta import model as roberta logger = logging.getLogger(__name__) @register_model("roberta_enc_dec") class RobertaEncDecModel(FairseqEncoderDecoderModel): @staticmethod def add_args(parser): parser.add_argument( "--pretrained-mlm-checkpoint", default=None, type=str, metavar="PRETRAINED", help="path to pretrained mlm checkpoint", ) parser.add_argument( "--pretrained-decoder", action="store_true", help="reload decoder" ) parser.add_argument( "--hack-layernorm-embedding", action="store_true", help="hack to reload old models trained with encoder-normalize-before=False (no equivalent to encoder-normalize-before=False and layernorm_embedding=False", ) parser.add_argument( "--share-decoder-input-output-embed", action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--share-all-embeddings", action="store_true", help="share encoder, decoder and output embeddings" " (requires shared dictionary and embed dim)", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present base_enc_dec_architecture(args) if args.pretrained_mlm_checkpoint: arg_overrides = None if args.hack_layernorm_embedding: arg_overrides = {"layernorm_embedding": False} loaded = fairseq.checkpoint_utils.load_model_ensemble_and_task( [args.pretrained_mlm_checkpoint], arg_overrides=arg_overrides ) ([roberta_enc], _cfg, _task) = loaded else: # Do we need to edit untie_weights here ? share_in_out = ( args.share_decoder_input_output_embed or args.share_all_embeddings ) args.untie_weights_roberta = not share_in_out if args.hack_layernorm_embedding: args.layernorm_embedding = False args.encoder_normalize_before = False roberta_enc = roberta.RobertaModel.build_model(args, task) return cls.from_roberta(roberta_enc, args, task.source_dictionary) @staticmethod def from_roberta(roberta_enc: roberta.RobertaModel, args, dictionary): encoder = roberta_enc.encoder.sentence_encoder vocab_size, embed_dim = encoder.embed_tokens.weight.shape if args.share_all_embeddings: lm_head = roberta_enc.encoder.lm_head assert encoder.embed_tokens.weight is lm_head.weight, ( "Can't use --share-all-embeddings with a model " "that was pretraiend with --untie-weights-roberta_enc" ) else: lm_head = roberta.RobertaLMHead( embed_dim, vocab_size, roberta_enc.args.activation_fn ) dec_embs = nn.Embedding(vocab_size, embed_dim, dictionary.pad()) if args.share_all_embeddings or args.share_decoder_input_output_embed: # Note: I wasn't able to use Embedding _weight parameter to achive this sharing. dec_embs.weight = lm_head.weight decoder = TransformerDecoder( RobertaEncDecModel.read_args_from_roberta(roberta_enc.args), dictionary, dec_embs, no_encoder_attn=False, output_projection=lm_head, ) if getattr(args, "pretrained_decoder", False): decoder_dict = encoder.state_dict() # TODO: hide setting "encoder_attn" layers behind a flag. for k, w in list(decoder_dict.items()): if ".self_attn" in k: k_enc_attn = k.replace(".self_attn", ".encoder_attn") decoder_dict[k_enc_attn] = w.detach().clone() for k, w in lm_head.state_dict().items(): decoder_dict["output_projection." 
+ k] = w missing_keys, unexpected_keys = decoder.load_state_dict( decoder_dict, strict=False ) # missing_keys = [m for m in missing_keys if ".encoder_attn" not in m] assert not missing_keys and not unexpected_keys, ( "Failed to load state dict. " f"Missing keys: {missing_keys}. " f"Unexpected keys: {unexpected_keys}." ) if args.share_all_embeddings: assert decoder.output_projection.weight is decoder.embed_tokens.weight assert encoder.embed_tokens.weight is decoder.embed_tokens.weight elif args.share_decoder_input_output_embed: assert decoder.output_projection.weight is decoder.embed_tokens.weight assert encoder.embed_tokens.weight is not decoder.embed_tokens.weight else: assert decoder.output_projection.weight is not decoder.embed_tokens.weight assert encoder.embed_tokens.weight is not decoder.embed_tokens.weight return RobertaEncDecModel(encoder, decoder) @staticmethod def read_args_from_roberta(roberta_args: argparse.Namespace): # TODO: this would become easier if encoder/decoder where using a similar # TransformerConfig object args = argparse.Namespace(**vars(roberta_args)) attr_map = [ ("encoder_attention_heads", "decoder_attention_heads"), ("encoder_embed_dim", "decoder_embed_dim"), ("encoder_embed_dim", "decoder_output_dim"), ("encoder_normalize_before", "decoder_normalize_before"), ("encoder_layers_to_keep", "decoder_layers_to_keep"), ("encoder_ffn_embed_dim", "decoder_ffn_embed_dim"), ("encoder_layerdrop", "decoder_layerdrop"), ("encoder_layers", "decoder_layers"), ("encoder_learned_pos", "decoder_learned_pos"), # should this be set from here ? ("max_positions", "max_target_positions"), ] for k1, k2 in attr_map: setattr(args, k2, getattr(roberta_args, k1)) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = not roberta_args.untie_weights_roberta return args def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." if name != "" else "" super().upgrade_state_dict_named(state_dict, name) old_keys = list(state_dict.keys()) # rename decoder -> encoder before upgrading children modules for k in old_keys: if k.startswith(prefix + "encoder.lm_head"): state_dict.pop(k) continue new_k = k new_k = new_k.replace(".sentence_encoder.", ".") new_k = new_k.replace("decoder.lm_head.", "decoder.output_projection.") if k == new_k: continue # print(k, "->", new_k) state_dict[new_k] = state_dict.pop(k) @register_model_architecture("roberta_enc_dec", "roberta_enc_dec") def base_enc_dec_architecture(args): args.hack_layernorm_embedding = getattr(args, "hack_layernorm_embedding", False) args.pretrained_mlm_checkpoint = getattr(args, "pretrained_mlm_checkpoint", None) args.pretrained_decoder = getattr(args, "pretrained_decoder", None) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) roberta.base_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/roberta/enc_dec.py
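The decoder in RobertaEncDecModel reuses the encoder's hyper-parameters through read_args_from_roberta. A small illustrative sketch; the namespace values are hypothetical stand-ins for a real RoBERTa args object, not taken from the repository.

import argparse
from fairseq.models.roberta.enc_dec import RobertaEncDecModel

# Only the fields touched by read_args_from_roberta are filled in here.
roberta_args = argparse.Namespace(
    encoder_layers=12,
    encoder_embed_dim=768,
    encoder_ffn_embed_dim=3072,
    encoder_attention_heads=12,
    encoder_normalize_before=False,
    encoder_layers_to_keep=None,
    encoder_layerdrop=0.0,
    encoder_learned_pos=True,
    max_positions=512,
    untie_weights_roberta=False,
)

dec_args = RobertaEncDecModel.read_args_from_roberta(roberta_args)
print(dec_args.decoder_embed_dim)                 # 768, mirrored from encoder_embed_dim
print(dec_args.share_decoder_input_output_embed)  # True, i.e. not untie_weights_roberta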
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ GottBERT: a pure German Language Model """ from fairseq.models import register_model from .hub_interface import RobertaHubInterface from .model import RobertaModel @register_model('gottbert') class GottbertModel(RobertaModel): @classmethod def hub_models(cls): return { 'gottbert-base': 'https://dl.gottbert.de/fairseq/models/gottbert-base.tar.gz', } @classmethod def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='hf_byte_bpe', bpe_vocab='vocab.json', bpe_merges='merges.txt', bpe_add_prefix_space=False, **kwargs ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, bpe_vocab=bpe_vocab, bpe_merges=bpe_merges, bpe_add_prefix_space=bpe_add_prefix_space, **kwargs, ) return RobertaHubInterface(x['args'], x['task'], x['models'][0])
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/roberta/model_gottbert.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Unsupervised Cross-lingual Representation Learning at Scale """ from fairseq.models import register_model from .hub_interface import RobertaHubInterface from .model import RobertaModel @register_model("xlmr") class XLMRModel(RobertaModel): @classmethod def hub_models(cls): return { "xlmr.base": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz", "xlmr.large": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz", "xlmr.xl": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xl.tar.gz", "xlmr.xxl": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xxl.tar.gz", } @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", bpe="sentencepiece", **kwargs ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs, ) return RobertaHubInterface(x["args"], x["task"], x["models"][0])
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/roberta/model_xlmr.py
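A hedged usage sketch (not from the repository) for these thin hub wrappers: from_pretrained resolves the name through hub_models() and downloads the archive, assuming the fbaipublicfiles URLs are still reachable and the sentencepiece package is installed. The GottBERT wrapper above and the CamemBERT wrapper later in this dump follow the same pattern with their own archive maps and BPE settings.

from fairseq.models.roberta import XLMRModel

# The name is looked up in hub_models(); a local path to an extracted archive also works.
xlmr = XLMRModel.from_pretrained("xlmr.base")
xlmr.eval()

tokens = xlmr.encode("Bonjour le monde !")  # sentencepiece BPE ids, <s> ... </s>
features = xlmr.extract_features(tokens)    # (1, seq_len, embed_dim), last layer
print(tokens.tolist(), features.shape)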
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import Counter from typing import List import torch def align_bpe_to_words(roberta, bpe_tokens: torch.LongTensor, other_tokens: List[str]): """ Helper to align GPT-2 BPE to other tokenization formats (e.g., spaCy). Args: roberta (RobertaHubInterface): RoBERTa instance bpe_tokens (torch.LongTensor): GPT-2 BPE tokens of shape `(T_bpe)` other_tokens (List[str]): other tokens of shape `(T_words)` Returns: List[str]: mapping from *other_tokens* to corresponding *bpe_tokens*. """ assert bpe_tokens.dim() == 1 assert bpe_tokens[0] == 0 def clean(text): return text.strip() # remove whitespaces to simplify alignment bpe_tokens = [roberta.task.source_dictionary.string([x]) for x in bpe_tokens] bpe_tokens = [ clean(roberta.bpe.decode(x) if x not in {"<s>", ""} else x) for x in bpe_tokens ] other_tokens = [clean(str(o)) for o in other_tokens] # strip leading <s> bpe_tokens = bpe_tokens[1:] assert "".join(bpe_tokens) == "".join(other_tokens) # create alignment from every word to a list of BPE tokens alignment = [] bpe_toks = filter(lambda item: item[1] != "", enumerate(bpe_tokens, start=1)) j, bpe_tok = next(bpe_toks) for other_tok in other_tokens: bpe_indices = [] while True: if other_tok.startswith(bpe_tok): bpe_indices.append(j) other_tok = other_tok[len(bpe_tok) :] try: j, bpe_tok = next(bpe_toks) except StopIteration: j, bpe_tok = None, None elif bpe_tok.startswith(other_tok): # other_tok spans multiple BPE tokens bpe_indices.append(j) bpe_tok = bpe_tok[len(other_tok) :] other_tok = "" else: raise Exception('Cannot align "{}" and "{}"'.format(other_tok, bpe_tok)) if other_tok == "": break assert len(bpe_indices) > 0 alignment.append(bpe_indices) assert len(alignment) == len(other_tokens) return alignment def align_features_to_words(roberta, features, alignment): """ Align given features to words. Args: roberta (RobertaHubInterface): RoBERTa instance features (torch.Tensor): features to align of shape `(T_bpe x C)` alignment: alignment between BPE tokens and words returned by func:`align_bpe_to_words`. """ assert features.dim() == 2 bpe_counts = Counter(j for bpe_indices in alignment for j in bpe_indices) assert bpe_counts[0] == 0 # <s> shouldn't be aligned denom = features.new([bpe_counts.get(j, 1) for j in range(len(features))]) weighted_features = features / denom.unsqueeze(-1) output = [weighted_features[0]] largest_j = -1 for bpe_indices in alignment: output.append(weighted_features[bpe_indices].sum(dim=0)) largest_j = max(largest_j, *bpe_indices) for j in range(largest_j + 1, len(features)): output.append(weighted_features[j]) output = torch.stack(output) assert torch.all(torch.abs(output.sum(dim=0) - features.sum(dim=0)) < 1e-4) return output def spacy_nlp(): if getattr(spacy_nlp, "_nlp", None) is None: try: from spacy.lang.en import English spacy_nlp._nlp = English() except ImportError: raise ImportError("Please install spacy with: pip install spacy") return spacy_nlp._nlp def spacy_tokenizer(): if getattr(spacy_tokenizer, "_tokenizer", None) is None: try: nlp = spacy_nlp() spacy_tokenizer._tokenizer = nlp.Defaults.create_tokenizer(nlp) except ImportError: raise ImportError("Please install spacy with: pip install spacy") return spacy_tokenizer._tokenizer
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/roberta/alignment_utils.py
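A sketch of how the two alignment helpers above are typically combined; this is my own illustration mirroring what extract_features_aligned_to_words does with spaCy, and it assumes a pretrained RoBERTa checkpoint can be downloaded.

from fairseq.models.roberta import RobertaModel
from fairseq.models.roberta.alignment_utils import (
    align_bpe_to_words,
    align_features_to_words,
)

roberta = RobertaModel.from_pretrained("roberta.base")
roberta.eval()

sentence = "Hello world again"
other_tokens = sentence.split()        # any external word-level tokenization
bpe_tokens = roberta.encode(sentence)  # GPT-2 BPE ids including <s>/</s>

# Map every word to the BPE positions that cover it, then pool features per word.
alignment = align_bpe_to_words(roberta, bpe_tokens, other_tokens)
features = roberta.extract_features(bpe_tokens)[0]  # (T_bpe, C), drop the batch dim
word_features = align_features_to_words(roberta, features, alignment)
print(len(alignment), word_features.shape)  # words, and (words + <s>/</s>, C)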
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .hub_interface import * # noqa from .model import * # noqa from .enc_dec import * # noqa from .model_camembert import * # noqa from .model_gottbert import * # noqa from .model_xlmr import * # noqa
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/roberta/__init__.py
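Importing this package is what runs the @register_model and @register_model_architecture decorators in the files above, making the names visible in fairseq's model registries. A quick check, as a sketch:

import fairseq.models.roberta  # noqa: F401  (importing runs the registration decorators)
from fairseq.models import ARCH_MODEL_REGISTRY, MODEL_REGISTRY

print("roberta" in MODEL_REGISTRY)             # model class registered
print("roberta_large" in ARCH_MODEL_REGISTRY)  # named architecture registered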
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ RoBERTa: A Robustly Optimized BERT Pretraining Approach. """ import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, TransformerEncoder from fairseq.modules import LayerNorm from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import safe_getattr, safe_hasattr from .hub_interface import RobertaHubInterface logger = logging.getLogger(__name__) @register_model("roberta") class RobertaModel(FairseqEncoderModel): @classmethod def hub_models(cls): return { "roberta.base": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.base.tar.gz", "roberta.large": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.tar.gz", "roberta.large.mnli": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.mnli.tar.gz", "roberta.large.wsc": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.wsc.tar.gz", } def __init__(self, args, encoder): super().__init__(encoder) self.args = args # We follow BERT's random weight initialization self.apply(init_bert_params) self.classification_heads = nn.ModuleDict() @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--encoder-layers", type=int, metavar="L", help="num encoder layers" ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="H", help="encoder embedding dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="F", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="A", help="num encoder attention heads", ) parser.add_argument( "--activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--pooler-activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use for pooler layer", ) parser.add_argument( "--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) parser.add_argument( "--layernorm-embedding", action="store_true", help="add layernorm to embedding", ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--activation-dropout", type=float, metavar="D", help="dropout probability after activation in FFN", ) parser.add_argument( "--pooler-dropout", type=float, metavar="D", help="dropout probability in the masked_lm pooler layers", ) parser.add_argument( "--max-positions", type=int, help="number of positional embeddings to learn" ) parser.add_argument( "--load-checkpoint-heads", action="store_true", help="(re-)register and load heads when loading checkpoints", ) parser.add_argument( "--untie-weights-roberta", action="store_true", help="Untie weights between embeddings and classifiers in RoBERTa", ) # args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) parser.add_argument( "--encoder-layerdrop", type=float, metavar="D", 
default=0, help="LayerDrop probability for encoder", ) parser.add_argument( "--encoder-layers-to-keep", default=None, help="which layers to *keep* when pruning as a comma-separated list", ) # args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020) parser.add_argument( "--quant-noise-pq", type=float, metavar="D", default=0, help="iterative PQ quantization noise at training time", ) parser.add_argument( "--quant-noise-pq-block-size", type=int, metavar="D", default=8, help="block size of quantization noise at training time", ) parser.add_argument( "--quant-noise-scalar", type=float, metavar="D", default=0, help="scalar quantization noise and scalar quantization at training time", ) # args for "Better Fine-Tuning by Reducing Representational Collapse" (Aghajanyan et al. 2020) parser.add_argument( "--spectral-norm-classification-head", action="store_true", default=False, help="Apply spectral normalization on the classification head", ) # args for Fully Sharded Data Parallel (FSDP) training parser.add_argument( "--min-params-to-wrap", type=int, metavar="D", default=DEFAULT_MIN_PARAMS_TO_WRAP, help=( "minimum number of params for a layer to be wrapped with FSDP() when " "training with --ddp-backend=fully_sharded. Smaller values will " "improve memory efficiency, but may make torch.distributed " "communication less efficient due to smaller input sizes. This option " "is set to 0 (i.e., always wrap) when --checkpoint-activations or " "--offload-activations are passed." ) ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" from omegaconf import OmegaConf if OmegaConf.is_config(args): OmegaConf.set_struct(args, False) # make sure all arguments are present base_architecture(args) if not safe_hasattr(args, "max_positions"): if not safe_hasattr(args, "tokens_per_sample"): args.tokens_per_sample = task.max_positions() args.max_positions = args.tokens_per_sample encoder = RobertaEncoder(args, task.source_dictionary) if OmegaConf.is_config(args): OmegaConf.set_struct(args, True) return cls(args, encoder) def forward( self, src_tokens, features_only=False, return_all_hiddens=False, classification_head_name=None, **kwargs, ): if classification_head_name is not None: features_only = True x, extra = self.encoder(src_tokens, features_only, return_all_hiddens, **kwargs) if classification_head_name is not None: x = self.classification_heads[classification_head_name](x) return x, extra def get_normalized_probs(self, net_output, log_probs, sample=None): """Get normalized probabilities (or log probs) from a net's output.""" logits = net_output[0].float() if log_probs: return F.log_softmax(logits, dim=-1) else: return F.softmax(logits, dim=-1) def register_classification_head( self, name, num_classes=None, inner_dim=None, **kwargs ): """Register a classification head.""" if name in self.classification_heads: prev_num_classes = self.classification_heads[name].out_proj.out_features prev_inner_dim = self.classification_heads[name].dense.out_features if num_classes != prev_num_classes or inner_dim != prev_inner_dim: logger.warning( 're-registering head "{}" with num_classes {} (prev: {}) ' "and inner_dim {} (prev: {})".format( name, num_classes, prev_num_classes, inner_dim, prev_inner_dim ) ) self.classification_heads[name] = RobertaClassificationHead( input_dim=self.args.encoder_embed_dim, inner_dim=inner_dim or self.args.encoder_embed_dim, num_classes=num_classes, activation_fn=self.args.pooler_activation_fn, 
pooler_dropout=self.args.pooler_dropout, q_noise=self.args.quant_noise_pq, qn_block_size=self.args.quant_noise_pq_block_size, do_spectral_norm=self.args.spectral_norm_classification_head, ) @property def supported_targets(self): return {"self"} @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", bpe="gpt2", **kwargs, ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs, ) logger.info(x["args"]) return RobertaHubInterface(x["args"], x["task"], x["models"][0]) def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." if name != "" else "" # rename decoder -> encoder before upgrading children modules for k in list(state_dict.keys()): if k.startswith(prefix + "decoder"): new_k = prefix + "encoder" + k[len(prefix + "decoder") :] state_dict[new_k] = state_dict[k] del state_dict[k] # rename emb_layer_norm -> layernorm_embedding for k in list(state_dict.keys()): if ".emb_layer_norm." in k: new_k = k.replace(".emb_layer_norm.", ".layernorm_embedding.") state_dict[new_k] = state_dict[k] del state_dict[k] # upgrade children modules super().upgrade_state_dict_named(state_dict, name) # Handle new classification heads present in the state dict. current_head_names = ( [] if not hasattr(self, "classification_heads") else self.classification_heads.keys() ) keys_to_delete = [] for k in state_dict.keys(): if not k.startswith(prefix + "classification_heads."): continue head_name = k[len(prefix + "classification_heads.") :].split(".")[0] num_classes = state_dict[ prefix + "classification_heads." + head_name + ".out_proj.weight" ].size(0) inner_dim = state_dict[ prefix + "classification_heads." + head_name + ".dense.weight" ].size(0) if getattr(self.args, "load_checkpoint_heads", False): if head_name not in current_head_names: self.register_classification_head(head_name, num_classes, inner_dim) else: if head_name not in current_head_names: logger.warning( "deleting classification head ({}) from checkpoint " "not present in current model: {}".format(head_name, k) ) keys_to_delete.append(k) elif ( num_classes != self.classification_heads[head_name].out_proj.out_features or inner_dim != self.classification_heads[head_name].dense.out_features ): logger.warning( "deleting classification head ({}) from checkpoint " "with different dimensions than current model: {}".format( head_name, k ) ) keys_to_delete.append(k) for k in keys_to_delete: del state_dict[k] # Copy any newly-added classification heads into the state dict # with their current weights. if hasattr(self, "classification_heads"): cur_state = self.classification_heads.state_dict() for k, v in cur_state.items(): if prefix + "classification_heads." + k not in state_dict: logger.info("Overwriting " + prefix + "classification_heads." + k) state_dict[prefix + "classification_heads." 
+ k] = v class RobertaLMHead(nn.Module): """Head for masked language modeling.""" def __init__(self, embed_dim, output_dim, activation_fn, weight=None): super().__init__() self.dense = nn.Linear(embed_dim, embed_dim) self.activation_fn = utils.get_activation_fn(activation_fn) self.layer_norm = LayerNorm(embed_dim) if weight is None: weight = nn.Linear(embed_dim, output_dim, bias=False).weight self.weight = weight self.bias = nn.Parameter(torch.zeros(output_dim)) def forward(self, features, masked_tokens=None, **kwargs): # Only project the masked tokens while training, # saves both memory and computation if masked_tokens is not None: features = features[masked_tokens, :] x = self.dense(features) x = self.activation_fn(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = F.linear(x, self.weight) + self.bias return x class RobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout, q_noise=0, qn_block_size=8, do_spectral_norm=False, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.activation_fn = utils.get_activation_fn(activation_fn) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = apply_quant_noise_( nn.Linear(inner_dim, num_classes), q_noise, qn_block_size ) if do_spectral_norm: if q_noise != 0: raise NotImplementedError( "Attempting to use Spectral Normalization with Quant Noise. This is not officially supported" ) self.out_proj = torch.nn.utils.spectral_norm(self.out_proj) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = self.activation_fn(x) x = self.dropout(x) x = self.out_proj(x) return x class RobertaEncoder(FairseqEncoder): """RoBERTa encoder.""" def __init__(self, args, dictionary): super().__init__(dictionary) # set any missing default values base_architecture(args) self.args = args if args.encoder_layers_to_keep: args.encoder_layers = len(args.encoder_layers_to_keep.split(",")) embed_tokens = self.build_embedding( len(dictionary), args.encoder_embed_dim, dictionary.pad() ) self.sentence_encoder = self.build_encoder(args, dictionary, embed_tokens) self.lm_head = self.build_lm_head( embed_dim=args.encoder_embed_dim, output_dim=len(dictionary), activation_fn=args.activation_fn, weight=( self.sentence_encoder.embed_tokens.weight if not args.untie_weights_roberta else None ), ) def build_embedding(self, vocab_size, embedding_dim, padding_idx): return nn.Embedding(vocab_size, embedding_dim, padding_idx) def build_encoder(self, args, dictionary, embed_tokens): encoder = TransformerEncoder(args, dictionary, embed_tokens) encoder.apply(init_bert_params) return encoder def build_lm_head(self, embed_dim, output_dim, activation_fn, weight): return RobertaLMHead(embed_dim, output_dim, activation_fn, weight) def forward( self, src_tokens, features_only=False, return_all_hiddens=False, masked_tokens=None, **unused, ): """ Args: src_tokens (LongTensor): input tokens of shape `(batch, src_len)` features_only (bool, optional): skip LM head and just return features. If True, the output will be of shape `(batch, src_len, embed_dim)`. return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False). Returns: tuple: - the LM output of shape `(batch, src_len, vocab)` - a dictionary of additional data, where 'inner_states' is a list of hidden states. 
Note that the hidden states have shape `(src_len, batch, vocab)`. """ x, extra = self.extract_features( src_tokens, return_all_hiddens=return_all_hiddens ) if not features_only: x = self.output_layer(x, masked_tokens=masked_tokens) return x, extra def extract_features(self, src_tokens, return_all_hiddens=False, **kwargs): encoder_out = self.sentence_encoder( src_tokens, return_all_hiddens=return_all_hiddens, token_embeddings=kwargs.get("token_embeddings", None), ) # T x B x C -> B x T x C features = encoder_out["encoder_out"][0].transpose(0, 1) inner_states = encoder_out["encoder_states"] if return_all_hiddens else None return features, {"inner_states": inner_states} def output_layer(self, features, masked_tokens=None, **unused): return self.lm_head(features, masked_tokens) def max_positions(self): """Maximum output length supported by the encoder.""" return self.args.max_positions @register_model_architecture("roberta", "roberta") def base_architecture(args): args.encoder_layers = safe_getattr(args, "encoder_layers", 12) args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 768) args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 3072) args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 12) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_dropout = safe_getattr(args, "activation_dropout", 0.0) args.pooler_dropout = safe_getattr(args, "pooler_dropout", 0.0) args.max_source_positions = safe_getattr(args, "max_positions", 512) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) # BERT has a few structural differences compared to the original Transformer args.encoder_learned_pos = safe_getattr(args, "encoder_learned_pos", True) args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", True) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", True) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.encoder_normalize_before = safe_getattr(args, "encoder_normalize_before", False) args.pooler_activation_fn = safe_getattr(args, "pooler_activation_fn", "tanh") args.untie_weights_roberta = safe_getattr(args, "untie_weights_roberta", False) # Adaptive input config args.adaptive_input = safe_getattr(args, "adaptive_input", False) # LayerDrop config args.encoder_layerdrop = safe_getattr(args, "encoder_layerdrop", 0.0) args.encoder_layers_to_keep = safe_getattr(args, "encoder_layers_to_keep", None) # Quantization noise config args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) # R4F config args.spectral_norm_classification_head = safe_getattr( args, "spectral_norm_classification_head", False ) @register_model_architecture("roberta", "roberta_prenorm") def roberta_prenorm_architecture(args): args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", False) args.encoder_normalize_before = safe_getattr(args, "encoder_normalize_before", True) base_architecture(args) @register_model_architecture("roberta", "roberta_base") def roberta_base_architecture(args): base_architecture(args) @register_model_architecture("roberta", "roberta_large") def roberta_large_architecture(args): args.encoder_layers = safe_getattr(args, "encoder_layers", 24) args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 
1024) args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 16) base_architecture(args) @register_model_architecture("roberta", "xlm") def xlm_architecture(args): args.encoder_layers = safe_getattr(args, "encoder_layers", 16) args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 1280) args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 1280 * 4) args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 16) base_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/roberta/model.py
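The two head modules defined above are self-contained, so they can be exercised without a task or checkpoint. A minimal sketch with hypothetical dimensions (not from the repository):

import torch
from fairseq.models.roberta.model import RobertaClassificationHead, RobertaLMHead

features = torch.randn(4, 20, 768)  # (batch, seq_len, embed_dim) encoder output

# Sentence-level head: pools the <s> position and projects to class logits.
cls_head = RobertaClassificationHead(
    input_dim=768,
    inner_dim=768,
    num_classes=3,
    activation_fn="tanh",
    pooler_dropout=0.1,
)
print(cls_head(features).shape)  # (4, 3)

# Masked-LM head: projects every position back onto the vocabulary.
lm_head = RobertaLMHead(embed_dim=768, output_dim=50265, activation_fn="gelu")
print(lm_head(features).shape)   # (4, 20, 50265)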
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ CamemBERT: a Tasty French Language Model """ from fairseq.models import register_model from .hub_interface import RobertaHubInterface from .model import RobertaModel @register_model("camembert") class CamembertModel(RobertaModel): @classmethod def hub_models(cls): return { "camembert": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz", "camembert.v0": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz", "camembert-base": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz", "camembert-large": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-large.tar.gz", "camembert-base-ccnet": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-ccnet.tar.gz", "camembert-base-ccnet-4gb": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-ccnet-4gb.tar.gz", "camembert-base-wikipedia-4gb": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-wikipedia-4gb.tar.gz", "camembert-base-oscar-4gb": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-oscar-4gb.tar.gz", } @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", bpe="sentencepiece", **kwargs ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs, ) return RobertaHubInterface(x["args"], x["task"], x["models"][0])
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/roberta/model_camembert.py
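Since from_pretrained above returns a RobertaHubInterface, CamemBERT inherits the masked-LM helpers directly. A usage sketch, assuming fairseq and sentencepiece are installed and the "camembert" checkpoint listed in hub_models() can be downloaded:

# Usage sketch for CamembertModel.from_pretrained above; requires network
# access to fetch the checkpoint named in hub_models().
from fairseq.models.roberta import CamembertModel

camembert = CamembertModel.from_pretrained("camembert")
camembert.eval()  # disable dropout for deterministic predictions

# fill_mask comes from the RobertaHubInterface returned above.
for filled, score, token in camembert.fill_mask("Le camembert est <mask> :)", topk=3):
    print(f"{score:.3f}\t{token}\t{filled}")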
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.data import encoders class RobertaHubInterface(nn.Module): """A simple PyTorch Hub interface to RoBERTa. Usage: https://github.com/pytorch/fairseq/tree/main/examples/roberta """ def __init__(self, cfg, task, model): super().__init__() self.cfg = cfg self.task = task self.model = model self.bpe = encoders.build_bpe(cfg.bpe) # this is useful for determining the device self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float)) @property def device(self): return self._float_tensor.device def encode( self, sentence: str, *addl_sentences, no_separator=False ) -> torch.LongTensor: """ BPE-encode a sentence (or multiple sentences). Every sequence begins with a beginning-of-sentence (`<s>`) symbol. Every sentence ends with an end-of-sentence (`</s>`) and we use an extra end-of-sentence (`</s>`) as a separator. Example (single sentence): `<s> a b c </s>` Example (sentence pair): `<s> d e f </s> </s> 1 2 3 </s>` The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE requires leading spaces. For example:: >>> roberta.encode('Hello world').tolist() [0, 31414, 232, 2] >>> roberta.encode(' world').tolist() [0, 232, 2] >>> roberta.encode('world').tolist() [0, 8331, 2] """ bpe_sentence = "<s> " + self.bpe.encode(sentence) + " </s>" for s in addl_sentences: bpe_sentence += " </s>" if not no_separator else "" bpe_sentence += " " + self.bpe.encode(s) + " </s>" tokens = self.task.source_dictionary.encode_line( bpe_sentence, append_eos=False, add_if_not_exist=False ) return tokens.long() def decode(self, tokens: torch.LongTensor): assert tokens.dim() == 1 tokens = tokens.numpy() if tokens[0] == self.task.source_dictionary.bos(): tokens = tokens[1:] # remove <s> eos_mask = tokens == self.task.source_dictionary.eos() doc_mask = eos_mask[1:] & eos_mask[:-1] sentences = np.split(tokens, doc_mask.nonzero()[0] + 1) sentences = [ self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences ] if len(sentences) == 1: return sentences[0] return sentences def extract_features( self, tokens: torch.LongTensor, return_all_hiddens: bool = False ) -> torch.Tensor: if tokens.dim() == 1: tokens = tokens.unsqueeze(0) if tokens.size(-1) > self.model.max_positions(): raise ValueError( "tokens exceeds maximum length: {} > {}".format( tokens.size(-1), self.model.max_positions() ) ) features, extra = self.model( tokens.to(device=self.device), features_only=True, return_all_hiddens=return_all_hiddens, ) if return_all_hiddens: # convert from T x B x C -> B x T x C inner_states = extra["inner_states"] return [inner_state.transpose(0, 1) for inner_state in inner_states] else: return features # just the last layer's features def register_classification_head( self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs ): self.model.register_classification_head( name, num_classes=num_classes, embedding_size=embedding_size, **kwargs ) def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False): features = self.extract_features(tokens.to(device=self.device)) logits = self.model.classification_heads[head](features) if return_logits: return logits return F.log_softmax(logits, dim=-1) def extract_features_aligned_to_words( self, sentence: str, 
return_all_hiddens: bool = False ) -> torch.Tensor: """Extract RoBERTa features, aligned to spaCy's word-level tokenizer.""" from fairseq.models.roberta import alignment_utils from spacy.tokens import Doc nlp = alignment_utils.spacy_nlp() tokenizer = alignment_utils.spacy_tokenizer() # tokenize both with GPT-2 BPE and spaCy bpe_toks = self.encode(sentence) spacy_toks = tokenizer(sentence) spacy_toks_ws = [t.text_with_ws for t in tokenizer(sentence)] alignment = alignment_utils.align_bpe_to_words(self, bpe_toks, spacy_toks_ws) # extract features and align them features = self.extract_features( bpe_toks, return_all_hiddens=return_all_hiddens ) features = features.squeeze(0) aligned_feats = alignment_utils.align_features_to_words( self, features, alignment ) # wrap in spaCy Doc doc = Doc( nlp.vocab, words=["<s>"] + [x.text for x in spacy_toks] + ["</s>"], spaces=[True] + [x.endswith(" ") for x in spacy_toks_ws[:-1]] + [True, False], ) assert len(doc) == aligned_feats.size(0) doc.user_token_hooks["vector"] = lambda token: aligned_feats[token.i] return doc def fill_mask(self, masked_input: str, topk: int = 5): masked_token = "<mask>" assert ( masked_token in masked_input and masked_input.count(masked_token) == 1 ), "Please add one {0} token for the input, eg: 'He is a {0} guy'".format( masked_token ) text_spans = masked_input.split(masked_token) text_spans_bpe = ( (" {0} ".format(masked_token)) .join([self.bpe.encode(text_span.rstrip()) for text_span in text_spans]) .strip() ) tokens = self.task.source_dictionary.encode_line( "<s> " + text_spans_bpe + " </s>", append_eos=False, add_if_not_exist=False, ) masked_index = (tokens == self.task.mask_idx).nonzero(as_tuple=False) if tokens.dim() == 1: tokens = tokens.unsqueeze(0) with utils.model_eval(self.model): features, extra = self.model( tokens.long().to(device=self.device), features_only=False, return_all_hiddens=False, ) logits = features[0, masked_index, :].squeeze() prob = logits.softmax(dim=0) values, index = prob.topk(k=topk, dim=0) topk_predicted_token_bpe = self.task.source_dictionary.string(index) topk_filled_outputs = [] for index, predicted_token_bpe in enumerate( topk_predicted_token_bpe.split(" ") ): predicted_token = self.bpe.decode(predicted_token_bpe) # Quick hack to fix https://github.com/pytorch/fairseq/issues/1306 if predicted_token_bpe.startswith("\u2581"): predicted_token = " " + predicted_token if " {0}".format(masked_token) in masked_input: topk_filled_outputs.append( ( masked_input.replace( " {0}".format(masked_token), predicted_token ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(masked_token, predicted_token), values[index].item(), predicted_token, ) ) return topk_filled_outputs def disambiguate_pronoun(self, sentence: str) -> bool: """ Usage:: >>> disambiguate_pronoun('The _trophy_ would not fit in the brown suitcase because [it] was too big.') True >>> disambiguate_pronoun('The trophy would not fit in the brown suitcase because [it] was too big.') 'The trophy' """ assert hasattr( self.task, "disambiguate_pronoun" ), "roberta.disambiguate_pronoun() requires a model trained with the WSC task." with utils.model_eval(self.model): return self.task.disambiguate_pronoun( self.model, sentence, use_cuda=self.device.type == "cuda" )
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/roberta/hub_interface.py
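A short round-trip sketch for the hub interface above, assuming a pretrained checkpoint such as roberta.base (and its GPT-2 BPE files) can be fetched through torch.hub:

# Round-trip sketch for RobertaHubInterface: encode/decode plus feature
# extraction on a single sentence.
import torch

roberta = torch.hub.load("pytorch/fairseq", "roberta.base")
roberta.eval()

tokens = roberta.encode("Hello world!")          # 1-D LongTensor starting with <s>
assert roberta.decode(tokens) == "Hello world!"  # decode() inverts encode()

features = roberta.extract_features(tokens)      # last layer only: 1 x T x C
print(features.shape)

all_layers = roberta.extract_features(tokens, return_all_hiddens=True)
print(len(all_layers), all_layers[0].shape)      # list of B x T x C tensors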
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import importlib import os # automatically import any Python files in the models/huggingface/ directory models_dir = os.path.dirname(__file__) for file in os.listdir(models_dir): path = os.path.join(models_dir, file) if ( not file.startswith("_") and not file.startswith(".") and (file.endswith(".py") or os.path.isdir(path)) ): model_name = file[: file.find(".py")] if file.endswith(".py") else file module = importlib.import_module("fairseq.models.huggingface." + model_name)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/huggingface/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys from typing import Dict, List, Optional import torch from fairseq.models import ( FairseqIncrementalDecoder, FairseqLanguageModel, register_model, register_model_architecture, ) logger = logging.getLogger(__name__) DEFAULT_MAX_TARGET_POSITIONS = 1024 @register_model("hf_gpt2") class HuggingFaceGPT2LanguageModel(FairseqLanguageModel): def __init__(self, decoder): super().__init__(decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--embed-dim', type=int, metavar='N', help='embedding dimension') parser.add_argument('--num-attention-heads', type=int, metavar='N', help='num attention heads') parser.add_argument('--num-layers', type=int, metavar='N', help='num layers') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability for all fully connected layers ' 'in the embeddings, encoder, and pooler') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" default_architecture(args) return cls(HuggingFaceGPT2Decoder(args, task)) class HuggingFaceGPT2Decoder(FairseqIncrementalDecoder): def __init__(self, args, task): try: from transformers import GPT2Config, GPT2LMHeadModel except ImportError: raise ImportError( "\n\nPlease install huggingface/transformers with:" "\n\n pip install transformers" ) super().__init__(task.target_dictionary) config = GPT2Config( vocab_size=len(task.target_dictionary), n_positions=args.max_target_positions + 1, n_ctx=args.max_target_positions, n_embd=args.embed_dim, n_layer=args.num_layers, n_head=args.num_attention_heads, resid_pdrop=args.dropout, embd_pdrop=args.dropout, attn_pdrop=args.attention_dropout, layer_norm_epsilon=1e-6, ) self.model = GPT2LMHeadModel(config) # set zero embedding for padding symbol self.pad_idx = task.target_dictionary.pad() self.model.transformer.wte.weight.data[self.pad_idx].zero_() self.model.transformer.wpe.weight.data[0].zero_() def forward( self, prev_output_tokens, src_lengths=None, incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None, encoder_out=None, ): features = self.extract_features(prev_output_tokens, incremental_state) lm_logits = self.model.lm_head(features) return (lm_logits,) def extract_features( self, prev_output_tokens, incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None, ): if incremental_state: past = self.get_incremental_state("past") else: past = None # don't attend to padding symbols attention_mask = prev_output_tokens.ne(self.pad_idx).int() # set position ids to exclude padding symbols position_ids = attention_mask * ( torch.arange(1, 1 + prev_output_tokens.size(1)) .to(prev_output_tokens) .repeat(prev_output_tokens.size(0), 1) ) outputs = self.model.transformer( input_ids=prev_output_tokens, past=past, attention_mask=attention_mask, position_ids=position_ids, ) last_hidden_states = outputs[0] if incremental_state: self.set_incremental_state(incremental_state, "past", outputs[1]) return last_hidden_states def max_positions(self): return self.model.config.n_positions - 1 @register_model_architecture("hf_gpt2", "hf_gpt2") def default_architecture(args): if getattr(args, "max_target_positions", None) is None: args.max_target_positions 
= getattr( args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS ) args.embed_dim = getattr(args, "embed_dim", 768) args.num_attention_heads = getattr(args, "num_attention_heads", 12) args.num_layers = getattr(args, "num_layers", 12) args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.1) @register_model_architecture("hf_gpt2", "hf_gpt2_medium") def hf_gpt2_medium(args): args.embed_dim = getattr(args, "embed_dim", 1024) args.num_attention_heads = getattr(args, "num_attention_heads", 16) args.num_layers = getattr(args, "num_layers", 24) default_architecture(args) @register_model_architecture("hf_gpt2", "hf_gpt2_large") def hf_gpt2_large(args): args.embed_dim = getattr(args, "embed_dim", 1280) args.num_attention_heads = getattr(args, "num_attention_heads", 20) args.num_layers = getattr(args, "num_layers", 36) default_architecture(args) @register_model_architecture("hf_gpt2", "hf_gpt2_xl") def hf_gpt2_xl(args): args.embed_dim = getattr(args, "embed_dim", 1600) args.num_attention_heads = getattr(args, "num_attention_heads", 25) args.num_layers = getattr(args, "num_layers", 48) default_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/huggingface/hf_gpt2.py
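Because default_architecture above only fills in attributes that are still unset, the GPT-2 presets compose with user overrides. A small sketch, assuming fairseq is importable (no model is actually built, so the transformers dependency is not needed just to resolve defaults):

# Sketch: default_architecture above keeps explicit settings and fills the
# rest with GPT-2 "small" defaults.
import argparse

from fairseq.models.huggingface.hf_gpt2 import default_architecture

args = argparse.Namespace(num_layers=6, tokens_per_sample=512)
default_architecture(args)

print(args.num_layers)            # 6    (kept from the explicit setting above)
print(args.embed_dim)             # 768  (GPT-2 small default)
print(args.max_target_positions)  # 512  (falls back to tokens_per_sample)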
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import json from typing import Dict import numpy as np import torch from torch import nn import torch.nn.functional as F from fairseq.data.audio.audio_utils import ( get_window, get_fourier_basis, get_mel_filters, TTSSpectrogram ) from fairseq.data.audio.speech_to_text_dataset import S2TDataConfig from fairseq.models.text_to_speech.hifigan import Generator as HiFiGANModel logger = logging.getLogger(__name__) class PseudoInverseMelScale(torch.nn.Module): def __init__(self, n_stft, n_mels, sample_rate, f_min, f_max) -> None: super(PseudoInverseMelScale, self).__init__() self.n_mels = n_mels basis = get_mel_filters( sample_rate, (n_stft - 1) * 2, n_mels, f_min, f_max ) basis = torch.pinverse(basis) # F x F_mel self.register_buffer('basis', basis) def forward(self, melspec: torch.Tensor) -> torch.Tensor: # pack batch shape = melspec.shape # B_1 x ... x B_K x F_mel x T n_mels, time = shape[-2], shape[-1] melspec = melspec.view(-1, n_mels, time) freq, _ = self.basis.size() # F x F_mel assert self.n_mels == n_mels, (self.n_mels, n_mels) specgram = self.basis.matmul(melspec).clamp(min=0) # unpack batch specgram = specgram.view(shape[:-2] + (freq, time)) return specgram class GriffinLim(torch.nn.Module): def __init__( self, n_fft: int, win_length: int, hop_length: int, n_iter: int, window_fn=torch.hann_window ): super(GriffinLim, self).__init__() self.transform = TTSSpectrogram( n_fft, win_length, hop_length, return_phase=True ) basis = get_fourier_basis(n_fft) basis = torch.pinverse(n_fft / hop_length * basis).T[:, None, :] basis *= get_window(window_fn, n_fft, win_length) self.register_buffer('basis', basis) self.n_fft = n_fft self.win_length = win_length self.hop_length = hop_length self.n_iter = n_iter self.tiny = 1.1754944e-38 @classmethod def get_window_sum_square( cls, n_frames, hop_length, win_length, n_fft, window_fn=torch.hann_window ) -> torch.Tensor: w_sq = get_window(window_fn, n_fft, win_length) ** 2 n = n_fft + hop_length * (n_frames - 1) x = torch.zeros(n, dtype=torch.float32) for i in range(n_frames): ofst = i * hop_length x[ofst: min(n, ofst + n_fft)] += w_sq[:max(0, min(n_fft, n - ofst))] return x def inverse(self, magnitude: torch.Tensor, phase) -> torch.Tensor: x = torch.cat( [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1 ) x = F.conv_transpose1d(x, self.basis, stride=self.hop_length) win_sum_sq = self.get_window_sum_square( magnitude.shape[-1], hop_length=self.hop_length, win_length=self.win_length, n_fft=self.n_fft ).to(magnitude.device) # remove modulation effects approx_nonzero_indices = win_sum_sq > self.tiny x[:, :, approx_nonzero_indices] /= win_sum_sq[approx_nonzero_indices] x *= self.n_fft / self.hop_length x = x[:, :, self.n_fft // 2:] x = x[:, :, :-self.n_fft // 2:] return x def forward(self, specgram: torch.Tensor) -> torch.Tensor: angles = np.angle(np.exp(2j * np.pi * np.random.rand(*specgram.shape))) angles = torch.from_numpy(angles).to(specgram) _specgram = specgram.view(-1, specgram.shape[-2], specgram.shape[-1]) waveform = self.inverse(_specgram, angles).squeeze(1) for _ in range(self.n_iter): _, angles = self.transform(waveform) waveform = self.inverse(_specgram, angles).squeeze(1) return waveform.squeeze(0) class GriffinLimVocoder(nn.Module): def __init__(self, sample_rate, win_size, hop_size, n_fft, n_mels, f_min, f_max, window_fn, 
spec_bwd_max_iter=32, fp16=False): super().__init__() self.inv_mel_transform = PseudoInverseMelScale( n_stft=n_fft // 2 + 1, n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max ) self.gl_transform = GriffinLim( n_fft=n_fft, win_length=win_size, hop_length=hop_size, window_fn=window_fn, n_iter=spec_bwd_max_iter ) if fp16: self.half() self.inv_mel_transform.half() self.gl_transform.half() else: self.float() self.inv_mel_transform.float() self.gl_transform.float() def forward(self, x): # x: (B x) T x D -> (B x) 1 x T # NOTE: batched forward produces noisier waveform. recommend running # one utterance at a time self.eval() x = x.exp().transpose(-1, -2) x = self.inv_mel_transform(x) x = self.gl_transform(x) return x @classmethod def from_data_cfg(cls, args, data_cfg: S2TDataConfig): feat_cfg = data_cfg.config["features"] window_fn = getattr(torch, feat_cfg["window_fn"] + "_window") return cls( sample_rate=feat_cfg["sample_rate"], win_size=int(feat_cfg["win_len_t"] * feat_cfg["sample_rate"]), hop_size=int(feat_cfg["hop_len_t"] * feat_cfg["sample_rate"]), n_fft=feat_cfg["n_fft"], n_mels=feat_cfg["n_mels"], f_min=feat_cfg["f_min"], f_max=feat_cfg["f_max"], window_fn=window_fn, spec_bwd_max_iter=args.spec_bwd_max_iter, fp16=args.fp16 ) class HiFiGANVocoder(nn.Module): def __init__( self, checkpoint_path: str, model_cfg: Dict[str, str], fp16: bool = False ) -> None: super().__init__() self.model = HiFiGANModel(model_cfg) state_dict = torch.load(checkpoint_path) self.model.load_state_dict(state_dict["generator"]) if fp16: self.model.half() logger.info(f"loaded HiFiGAN checkpoint from {checkpoint_path}") def forward(self, x: torch.Tensor) -> torch.Tensor: # (B x) T x D -> (B x) 1 x T model = self.model.eval() if len(x.shape) == 2: return model(x.unsqueeze(0).transpose(1, 2)).detach().squeeze(0) else: return model(x.transpose(-1, -2)).detach() @classmethod def from_data_cfg(cls, args, data_cfg: S2TDataConfig): vocoder_cfg = data_cfg.vocoder assert vocoder_cfg.get("type", "griffin_lim") == "hifigan" with open(vocoder_cfg["config"]) as f: model_cfg = json.load(f) return cls(vocoder_cfg["checkpoint"], model_cfg, fp16=args.fp16) def get_vocoder(args, data_cfg: S2TDataConfig): if args.vocoder == "griffin_lim": return GriffinLimVocoder.from_data_cfg(args, data_cfg) elif args.vocoder == "hifigan": return HiFiGANVocoder.from_data_cfg(args, data_cfg) else: raise ValueError("Unknown vocoder")
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/text_to_speech/vocoder.py
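A hedged usage sketch for the HiFiGANVocoder wrapper above; the checkpoint and config paths are placeholders for a real HiFi-GAN generator release, and the checkpoint is assumed to store its weights under a "generator" key, as the loader above expects:

# Sketch: run the HiFiGANVocoder wrapper on one utterance of log-mel features.
# Paths are hypothetical placeholders.
import json

import torch

from fairseq.models.text_to_speech.vocoder import HiFiGANVocoder

with open("hifigan/config.json") as f:                       # hypothetical path
    model_cfg = json.load(f)

vocoder = HiFiGANVocoder("hifigan/generator.pt", model_cfg)  # hypothetical path

mel = torch.randn(200, 80)   # T x D log-mel spectrogram for a single utterance
wav = vocoder(mel)           # 1 x T' waveform (batch dim squeezed away)
print(wav.shape)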
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch from torch import nn from torch.nn import functional as F from fairseq.models import (FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture) from fairseq.modules import LSTMCellWithZoneOut, LocationAttention logger = logging.getLogger(__name__) def encoder_init(m): if isinstance(m, nn.Conv1d): nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu")) class Tacotron2Encoder(FairseqEncoder): def __init__(self, args, src_dict, embed_speaker): super().__init__(src_dict) self.padding_idx = src_dict.pad() self.embed_speaker = embed_speaker self.spk_emb_proj = None if embed_speaker is not None: self.spk_emb_proj = nn.Linear( args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim ) self.embed_tokens = nn.Embedding(len(src_dict), args.encoder_embed_dim, padding_idx=self.padding_idx) assert(args.encoder_conv_kernel_size % 2 == 1) self.convolutions = nn.ModuleList( nn.Sequential( nn.Conv1d(args.encoder_embed_dim, args.encoder_embed_dim, kernel_size=args.encoder_conv_kernel_size, padding=((args.encoder_conv_kernel_size - 1) // 2)), nn.BatchNorm1d(args.encoder_embed_dim), nn.ReLU(), nn.Dropout(args.encoder_dropout) ) for _ in range(args.encoder_conv_layers) ) self.lstm = nn.LSTM(args.encoder_embed_dim, args.encoder_embed_dim // 2, num_layers=args.encoder_lstm_layers, batch_first=True, bidirectional=True) self.apply(encoder_init) def forward(self, src_tokens, src_lengths=None, speaker=None, **kwargs): x = self.embed_tokens(src_tokens) x = x.transpose(1, 2).contiguous() # B x T x C -> B x C x T for conv in self.convolutions: x = conv(x) x = x.transpose(1, 2).contiguous() # B x C x T -> B x T x C src_lengths = src_lengths.cpu().long() x = nn.utils.rnn.pack_padded_sequence(x, src_lengths, batch_first=True) x = self.lstm(x)[0] x = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)[0] encoder_padding_mask = src_tokens.eq(self.padding_idx) if self.embed_speaker is not None: seq_len, bsz, _ = x.size() emb = self.embed_speaker(speaker).expand(seq_len, bsz, -1) x = self.spk_emb_proj(torch.cat([x, emb], dim=2)) return { "encoder_out": [x], # B x T x C "encoder_padding_mask": encoder_padding_mask, # B x T } class Prenet(nn.Module): def __init__(self, in_dim, n_layers, n_units, dropout): super().__init__() self.layers = nn.ModuleList( nn.Sequential(nn.Linear(in_dim if i == 0 else n_units, n_units), nn.ReLU()) for i in range(n_layers) ) self.dropout = dropout def forward(self, x): for layer in self.layers: x = F.dropout(layer(x), p=self.dropout) # always applies dropout return x class Postnet(nn.Module): def __init__(self, in_dim, n_channels, kernel_size, n_layers, dropout): super(Postnet, self).__init__() self.convolutions = nn.ModuleList() assert(kernel_size % 2 == 1) for i in range(n_layers): cur_layers = [ nn.Conv1d(in_dim if i == 0 else n_channels, n_channels if i < n_layers - 1 else in_dim, kernel_size=kernel_size, padding=((kernel_size - 1) // 2)), nn.BatchNorm1d(n_channels if i < n_layers - 1 else in_dim) ] + ([nn.Tanh()] if i < n_layers - 1 else []) + [nn.Dropout(dropout)] nn.init.xavier_uniform_( cur_layers[0].weight, torch.nn.init.calculate_gain( "tanh" if i < n_layers - 1 else "linear" ) ) self.convolutions.append(nn.Sequential(*cur_layers)) def forward(self, x): x = x.transpose(1, 2) # B x T x C -> B x C x T 
for conv in self.convolutions: x = conv(x) return x.transpose(1, 2) def decoder_init(m): if isinstance(m, torch.nn.Conv1d): nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("tanh")) class Tacotron2Decoder(FairseqIncrementalDecoder): def __init__(self, args, src_dict): super().__init__(None) self.args = args self.n_frames_per_step = args.n_frames_per_step self.out_dim = args.output_frame_dim * args.n_frames_per_step self.prenet = Prenet(self.out_dim, args.prenet_layers, args.prenet_dim, args.prenet_dropout) # take prev_context, prev_frame, (speaker embedding) as input self.attention_lstm = LSTMCellWithZoneOut( args.zoneout, args.prenet_dim + args.encoder_embed_dim, args.decoder_lstm_dim ) # take attention_lstm output, attention_state, encoder_out as input self.attention = LocationAttention( args.attention_dim, args.encoder_embed_dim, args.decoder_lstm_dim, (1 + int(args.attention_use_cumprob)), args.attention_conv_dim, args.attention_conv_kernel_size ) # take attention_lstm output, context, (gated_latent) as input self.lstm = nn.ModuleList( LSTMCellWithZoneOut( args.zoneout, args.encoder_embed_dim + args.decoder_lstm_dim, args.decoder_lstm_dim ) for i in range(args.decoder_lstm_layers) ) proj_in_dim = args.encoder_embed_dim + args.decoder_lstm_dim self.feat_proj = nn.Linear(proj_in_dim, self.out_dim) self.eos_proj = nn.Linear(proj_in_dim, 1) self.postnet = Postnet(self.out_dim, args.postnet_conv_dim, args.postnet_conv_kernel_size, args.postnet_layers, args.postnet_dropout) self.ctc_proj = None if getattr(args, "ctc_weight", 0.) > 0.: self.ctc_proj = nn.Linear(self.out_dim, len(src_dict)) self.apply(decoder_init) def _get_states(self, incremental_state, enc_out): bsz, in_len, _ = enc_out.size() alstm_h = self.get_incremental_state(incremental_state, "alstm_h") if alstm_h is None: alstm_h = enc_out.new_zeros(bsz, self.args.decoder_lstm_dim) alstm_c = self.get_incremental_state(incremental_state, "alstm_c") if alstm_c is None: alstm_c = enc_out.new_zeros(bsz, self.args.decoder_lstm_dim) lstm_h = self.get_incremental_state(incremental_state, "lstm_h") if lstm_h is None: lstm_h = [enc_out.new_zeros(bsz, self.args.decoder_lstm_dim) for _ in range(self.args.decoder_lstm_layers)] lstm_c = self.get_incremental_state(incremental_state, "lstm_c") if lstm_c is None: lstm_c = [enc_out.new_zeros(bsz, self.args.decoder_lstm_dim) for _ in range(self.args.decoder_lstm_layers)] attn_w = self.get_incremental_state(incremental_state, "attn_w") if attn_w is None: attn_w = enc_out.new_zeros(bsz, in_len) attn_w_cum = self.get_incremental_state(incremental_state, "attn_w_cum") if attn_w_cum is None: attn_w_cum = enc_out.new_zeros(bsz, in_len) return alstm_h, alstm_c, lstm_h, lstm_c, attn_w, attn_w_cum def _get_init_attn_c(self, enc_out, enc_mask): bsz = enc_out.size(0) if self.args.init_attn_c == "zero": return enc_out.new_zeros(bsz, self.args.encoder_embed_dim) elif self.args.init_attn_c == "avg": enc_w = (~enc_mask).type(enc_out.type()) enc_w = enc_w / enc_w.sum(dim=1, keepdim=True) return torch.sum(enc_out * enc_w.unsqueeze(2), dim=1) else: raise ValueError(f"{self.args.init_attn_c} not supported") def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, target_lengths=None, **kwargs): enc_mask = encoder_out["encoder_padding_mask"] enc_out = encoder_out["encoder_out"][0] in_len = enc_out.size(1) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:, :] bsz, out_len, _ = prev_output_tokens.size() prenet_out = self.prenet(prev_output_tokens) 
(alstm_h, alstm_c, lstm_h, lstm_c, attn_w, attn_w_cum) = self._get_states(incremental_state, enc_out) attn_ctx = self._get_init_attn_c(enc_out, enc_mask) attn_out = enc_out.new_zeros(bsz, in_len, out_len) feat_out = enc_out.new_zeros(bsz, out_len, self.out_dim) eos_out = enc_out.new_zeros(bsz, out_len) for t in range(out_len): alstm_in = torch.cat((attn_ctx, prenet_out[:, t, :]), dim=1) alstm_h, alstm_c = self.attention_lstm(alstm_in, (alstm_h, alstm_c)) attn_state = attn_w.unsqueeze(1) if self.args.attention_use_cumprob: attn_state = torch.stack((attn_w, attn_w_cum), dim=1) attn_ctx, attn_w = self.attention( enc_out, enc_mask, alstm_h, attn_state ) attn_w_cum = attn_w_cum + attn_w attn_out[:, :, t] = attn_w for i, cur_lstm in enumerate(self.lstm): if i == 0: lstm_in = torch.cat((attn_ctx, alstm_h), dim=1) else: lstm_in = torch.cat((attn_ctx, lstm_h[i - 1]), dim=1) lstm_h[i], lstm_c[i] = cur_lstm(lstm_in, (lstm_h[i], lstm_c[i])) proj_in = torch.cat((attn_ctx, lstm_h[-1]), dim=1) feat_out[:, t, :] = self.feat_proj(proj_in) eos_out[:, t] = self.eos_proj(proj_in).squeeze(1) self.attention.clear_cache() self.set_incremental_state(incremental_state, "alstm_h", alstm_h) self.set_incremental_state(incremental_state, "alstm_c", alstm_c) self.set_incremental_state(incremental_state, "lstm_h", lstm_h) self.set_incremental_state(incremental_state, "lstm_c", lstm_c) self.set_incremental_state(incremental_state, "attn_w", attn_w) self.set_incremental_state(incremental_state, "attn_w_cum", attn_w_cum) post_feat_out = feat_out + self.postnet(feat_out) eos_out = eos_out.view(bsz, out_len, 1) return post_feat_out, eos_out, {"attn": attn_out, "feature_out": feat_out} @register_model("tacotron_2") class Tacotron2Model(FairseqEncoderDecoderModel): """ Implementation for https://arxiv.org/pdf/1712.05884.pdf """ @staticmethod def add_args(parser): # encoder parser.add_argument("--encoder-dropout", type=float) parser.add_argument("--encoder-embed-dim", type=int) parser.add_argument("--encoder-conv-layers", type=int) parser.add_argument("--encoder-conv-kernel-size", type=int) parser.add_argument("--encoder-lstm-layers", type=int) # decoder parser.add_argument("--attention-dim", type=int) parser.add_argument("--attention-conv-dim", type=int) parser.add_argument("--attention-conv-kernel-size", type=int) parser.add_argument("--prenet-dropout", type=float) parser.add_argument("--prenet-layers", type=int) parser.add_argument("--prenet-dim", type=int) parser.add_argument("--postnet-dropout", type=float) parser.add_argument("--postnet-layers", type=int) parser.add_argument("--postnet-conv-dim", type=int) parser.add_argument("--postnet-conv-kernel-size", type=int) parser.add_argument("--init-attn-c", type=str) parser.add_argument("--attention-use-cumprob", action='store_true') parser.add_argument("--zoneout", type=float) parser.add_argument("--decoder-lstm-layers", type=int) parser.add_argument("--decoder-lstm-dim", type=int) parser.add_argument("--output-frame-dim", type=int) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._num_updates = 0 @classmethod def build_model(cls, args, task): embed_speaker = task.get_speaker_embeddings(args) encoder = Tacotron2Encoder(args, task.src_dict, embed_speaker) decoder = Tacotron2Decoder(args, task.src_dict) return cls(encoder, decoder) def forward_encoder(self, src_tokens, src_lengths, **kwargs): return self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) def set_num_updates(self, num_updates): super().set_num_updates(num_updates) self._num_updates 
= num_updates @register_model_architecture("tacotron_2", "tacotron_2") def base_architecture(args): # encoder args.encoder_dropout = getattr(args, "encoder_dropout", 0.5) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_conv_layers = getattr(args, "encoder_conv_layers", 3) args.encoder_conv_kernel_size = getattr(args, "encoder_conv_kernel_size", 5) args.encoder_lstm_layers = getattr(args, "encoder_lstm_layers", 1) # decoder args.attention_dim = getattr(args, "attention_dim", 128) args.attention_conv_dim = getattr(args, "attention_conv_dim", 32) args.attention_conv_kernel_size = getattr(args, "attention_conv_kernel_size", 15) args.prenet_dropout = getattr(args, "prenet_dropout", 0.5) args.prenet_layers = getattr(args, "prenet_layers", 2) args.prenet_dim = getattr(args, "prenet_dim", 256) args.postnet_dropout = getattr(args, "postnet_dropout", 0.5) args.postnet_layers = getattr(args, "postnet_layers", 5) args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512) args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5) args.init_attn_c = getattr(args, "init_attn_c", "zero") args.attention_use_cumprob = getattr(args, "attention_use_cumprob", True) args.zoneout = getattr(args, "zoneout", 0.1) args.decoder_lstm_layers = getattr(args, "decoder_lstm_layers", 2) args.decoder_lstm_dim = getattr(args, "decoder_lstm_dim", 1024) args.output_frame_dim = getattr(args, "output_frame_dim", 80)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/text_to_speech/tacotron2.py
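Prenet and Postnet above operate on plain B x T x C tensors, so their shape behaviour can be checked in isolation. A small sketch with arbitrary dimensions, assuming fairseq is importable:

# Shape sketch for the Prenet / Postnet modules defined above.
import torch

from fairseq.models.text_to_speech.tacotron2 import Postnet, Prenet

frames = torch.randn(2, 50, 80)                        # B x T x n_mels

prenet = Prenet(in_dim=80, n_layers=2, n_units=256, dropout=0.5)
print(prenet(frames).shape)                            # torch.Size([2, 50, 256])

postnet = Postnet(in_dim=80, n_channels=512, kernel_size=5,
                  n_layers=5, dropout=0.5)
refined = frames + postnet(frames)                     # residual refinement,
print(refined.shape)                                   # as in the decoder above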
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .tacotron2 import * # noqa from .tts_transformer import * # noqa from .fastspeech2 import * # noqa
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/text_to_speech/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch from torch import nn from fairseq.models import (FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture) from fairseq.modules import ( LayerNorm, PositionalEmbedding, FairseqDropout, MultiheadAttention ) from fairseq import utils from fairseq.data.data_utils import lengths_to_padding_mask logger = logging.getLogger(__name__) def model_init(m): if isinstance(m, nn.Conv1d): nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu")) def Embedding(num_embeddings, embedding_dim, padding_idx=None): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) return m class PositionwiseFeedForward(nn.Module): def __init__(self, in_dim, hidden_dim, kernel_size, dropout): super().__init__() self.ffn = nn.Sequential( nn.Conv1d(in_dim, hidden_dim, kernel_size=kernel_size, padding=(kernel_size - 1) // 2), nn.ReLU(), nn.Conv1d(hidden_dim, in_dim, kernel_size=kernel_size, padding=(kernel_size - 1) // 2) ) self.layer_norm = LayerNorm(in_dim) self.dropout = self.dropout_module = FairseqDropout( p=dropout, module_name=self.__class__.__name__ ) def forward(self, x): # B x T x C residual = x x = self.ffn(x.transpose(1, 2)).transpose(1, 2) x = self.dropout(x) return self.layer_norm(x + residual) class FFTLayer(torch.nn.Module): def __init__( self, embed_dim, n_heads, hidden_dim, kernel_size, dropout, attention_dropout ): super().__init__() self.self_attn = MultiheadAttention( embed_dim, n_heads, dropout=attention_dropout, self_attention=True ) self.layer_norm = LayerNorm(embed_dim) self.ffn = PositionwiseFeedForward( embed_dim, hidden_dim, kernel_size, dropout=dropout ) def forward(self, x, padding_mask=None): # B x T x C residual = x x = x.transpose(0, 1) x, _ = self.self_attn( query=x, key=x, value=x, key_padding_mask=padding_mask, need_weights=False ) x = x.transpose(0, 1) x = self.layer_norm(x + residual) return self.ffn(x) class LengthRegulator(nn.Module): def forward(self, x, durations): # x: B x T x C out_lens = durations.sum(dim=1) max_len = out_lens.max() bsz, seq_len, dim = x.size() out = x.new_zeros((bsz, max_len, dim)) for b in range(bsz): indices = [] for t in range(seq_len): indices.extend([t] * utils.item(durations[b, t])) indices = torch.tensor(indices, dtype=torch.long).to(x.device) out_len = utils.item(out_lens[b]) out[b, :out_len] = x[b].index_select(0, indices) return out, out_lens class VariancePredictor(nn.Module): def __init__(self, args): super().__init__() self.conv1 = nn.Sequential( nn.Conv1d( args.encoder_embed_dim, args.var_pred_hidden_dim, kernel_size=args.var_pred_kernel_size, padding=(args.var_pred_kernel_size - 1) // 2 ), nn.ReLU() ) self.ln1 = nn.LayerNorm(args.var_pred_hidden_dim) self.dropout_module = FairseqDropout( p=args.var_pred_dropout, module_name=self.__class__.__name__ ) self.conv2 = nn.Sequential( nn.Conv1d( args.var_pred_hidden_dim, args.var_pred_hidden_dim, kernel_size=args.var_pred_kernel_size, padding=1 ), nn.ReLU() ) self.ln2 = nn.LayerNorm(args.var_pred_hidden_dim) self.proj = nn.Linear(args.var_pred_hidden_dim, 1) def forward(self, x): # Input: B x T x C; Output: B x T x = self.conv1(x.transpose(1, 2)).transpose(1, 2) x = self.dropout_module(self.ln1(x)) x = self.conv2(x.transpose(1, 2)).transpose(1, 2) x = self.dropout_module(self.ln2(x)) return 
self.proj(x).squeeze(dim=2) class VarianceAdaptor(nn.Module): def __init__(self, args): super().__init__() self.args = args self.length_regulator = LengthRegulator() self.duration_predictor = VariancePredictor(args) self.pitch_predictor = VariancePredictor(args) self.energy_predictor = VariancePredictor(args) n_bins, steps = self.args.var_pred_n_bins, self.args.var_pred_n_bins - 1 self.pitch_bins = torch.linspace(args.pitch_min, args.pitch_max, steps) self.embed_pitch = Embedding(n_bins, args.encoder_embed_dim) self.energy_bins = torch.linspace(args.energy_min, args.energy_max, steps) self.embed_energy = Embedding(n_bins, args.encoder_embed_dim) def get_pitch_emb(self, x, tgt=None, factor=1.0): out = self.pitch_predictor(x) bins = self.pitch_bins.to(x.device) if tgt is None: out = out * factor emb = self.embed_pitch(torch.bucketize(out, bins)) else: emb = self.embed_pitch(torch.bucketize(tgt, bins)) return out, emb def get_energy_emb(self, x, tgt=None, factor=1.0): out = self.energy_predictor(x) bins = self.energy_bins.to(x.device) if tgt is None: out = out * factor emb = self.embed_energy(torch.bucketize(out, bins)) else: emb = self.embed_energy(torch.bucketize(tgt, bins)) return out, emb def forward( self, x, padding_mask, durations=None, pitches=None, energies=None, d_factor=1.0, p_factor=1.0, e_factor=1.0 ): # x: B x T x C log_dur_out = self.duration_predictor(x) dur_out = torch.clamp( torch.round((torch.exp(log_dur_out) - 1) * d_factor).long(), min=0 ) dur_out.masked_fill_(padding_mask, 0) pitch_out, pitch_emb = self.get_pitch_emb(x, pitches, p_factor) x = x + pitch_emb energy_out, energy_emb = self.get_energy_emb(x, energies, e_factor) x = x + energy_emb x, out_lens = self.length_regulator( x, dur_out if durations is None else durations ) return x, out_lens, log_dur_out, pitch_out, energy_out class FastSpeech2Encoder(FairseqEncoder): def __init__(self, args, src_dict, embed_speaker): super().__init__(src_dict) self.args = args self.padding_idx = src_dict.pad() self.n_frames_per_step = args.n_frames_per_step self.out_dim = args.output_frame_dim * args.n_frames_per_step self.embed_speaker = embed_speaker self.spk_emb_proj = None if embed_speaker is not None: self.spk_emb_proj = nn.Linear( args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim ) self.dropout_module = FairseqDropout( p=args.dropout, module_name=self.__class__.__name__ ) self.embed_tokens = Embedding( len(src_dict), args.encoder_embed_dim, padding_idx=self.padding_idx ) self.embed_positions = PositionalEmbedding( args.max_source_positions, args.encoder_embed_dim, self.padding_idx ) self.pos_emb_alpha = nn.Parameter(torch.ones(1)) self.dec_pos_emb_alpha = nn.Parameter(torch.ones(1)) self.encoder_fft_layers = nn.ModuleList( FFTLayer( args.encoder_embed_dim, args.encoder_attention_heads, args.fft_hidden_dim, args.fft_kernel_size, dropout=args.dropout, attention_dropout=args.attention_dropout ) for _ in range(args.encoder_layers) ) self.var_adaptor = VarianceAdaptor(args) self.decoder_fft_layers = nn.ModuleList( FFTLayer( args.decoder_embed_dim, args.decoder_attention_heads, args.fft_hidden_dim, args.fft_kernel_size, dropout=args.dropout, attention_dropout=args.attention_dropout ) for _ in range(args.decoder_layers) ) self.out_proj = nn.Linear(args.decoder_embed_dim, self.out_dim) self.apply(model_init) def forward(self, src_tokens, src_lengths=None, speaker=None, durations=None, pitches=None, energies=None, **kwargs): x = self.embed_tokens(src_tokens) enc_padding_mask = src_tokens.eq(self.padding_idx) x += 
self.pos_emb_alpha * self.embed_positions(enc_padding_mask) x = self.dropout_module(x) for layer in self.encoder_fft_layers: x = layer(x, enc_padding_mask) if self.embed_speaker is not None: bsz, seq_len, _ = x.size() emb = self.embed_speaker(speaker).expand(bsz, seq_len, -1) x = self.spk_emb_proj(torch.cat([x, emb], dim=2)) x, out_lens, log_dur_out, pitch_out, energy_out = \ self.var_adaptor(x, enc_padding_mask, durations, pitches, energies) dec_padding_mask = lengths_to_padding_mask(out_lens) x += self.dec_pos_emb_alpha * self.embed_positions(dec_padding_mask) for layer in self.decoder_fft_layers: x = layer(x, dec_padding_mask) x = self.out_proj(x) return x, out_lens, log_dur_out, pitch_out, energy_out @register_model("fastspeech2") class FastSpeech2Model(FairseqEncoderModel): """ Implementation for https://arxiv.org/abs/2006.04558 """ NON_AUTOREGRESSIVE = True @staticmethod def add_args(parser): parser.add_argument("--dropout", type=float) parser.add_argument("--output-frame-dim", type=int) parser.add_argument("--speaker-embed-dim", type=int) # FFT blocks parser.add_argument("--fft-hidden-dim", type=int) parser.add_argument("--fft-kernel-size", type=int) parser.add_argument("--attention-dropout", type=float) parser.add_argument("--encoder-layers", type=int) parser.add_argument("--encoder-embed-dim", type=int) parser.add_argument("--encoder-attention-heads", type=int) parser.add_argument("--decoder-layers", type=int) parser.add_argument("--decoder-embed-dim", type=int) parser.add_argument("--decoder-attention-heads", type=int) # variance predictor parser.add_argument("--var-pred-n-bins", type=int) parser.add_argument("--var-pred-hidden-dim", type=int) parser.add_argument("--var-pred-kernel-size", type=int) parser.add_argument("--var-pred-dropout", type=float) def __init__(self, encoder, args, src_dict): super().__init__(encoder) self._num_updates = 0 out_dim = args.output_frame_dim * args.n_frames_per_step self.ctc_proj = None if getattr(args, "ctc_weight", 0.) 
> 0.: self.ctc_proj = nn.Linear(out_dim, len(src_dict)) @classmethod def build_model(cls, args, task): embed_speaker = task.get_speaker_embeddings(args) encoder = FastSpeech2Encoder(args, task.src_dict, embed_speaker) return cls(encoder, args, task.src_dict) def set_num_updates(self, num_updates): super().set_num_updates(num_updates) self._num_updates = num_updates def get_normalized_probs(self, net_output, log_probs, sample=None): logits = self.ctc_proj(net_output[0]) if log_probs: return utils.log_softmax(logits.float(), dim=-1) else: return utils.softmax(logits.float(), dim=-1) @register_model_architecture("fastspeech2", "fastspeech2") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.2) args.output_frame_dim = getattr(args, "output_frame_dim", 80) args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 64) # FFT blocks args.fft_hidden_dim = getattr(args, "fft_hidden_dim", 1024) args.fft_kernel_size = getattr(args, "fft_kernel_size", 9) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.encoder_layers = getattr(args, "encoder_layers", 4) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2) args.decoder_layers = getattr(args, "decoder_layers", 4) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2) # variance predictor args.var_pred_n_bins = getattr(args, "var_pred_n_bins", 256) args.var_pred_hidden_dim = getattr(args, "var_pred_hidden_dim", 256) args.var_pred_kernel_size = getattr(args, "var_pred_kernel_size", 3) args.var_pred_dropout = getattr(args, "var_pred_dropout", 0.5)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/text_to_speech/fastspeech2.py
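The LengthRegulator above expands each encoder position by its integer duration. A worked sketch with hand-written durations, assuming fairseq is importable:

# Worked sketch of the LengthRegulator above: every input position is repeated
# according to its duration, and per-sample output lengths are returned.
import torch

from fairseq.models.text_to_speech.fastspeech2 import LengthRegulator

regulator = LengthRegulator()

x = torch.arange(6, dtype=torch.float).view(1, 3, 2)    # B=1, T=3, C=2
durations = torch.tensor([[2, 0, 3]])                   # repeats per position

out, out_lens = regulator(x, durations)
print(out_lens)    # tensor([5])
print(out.shape)   # torch.Size([1, 5, 2])
print(out[0])      # positions 0, 0, 2, 2, 2 of the input sequence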
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Conv1d, ConvTranspose1d from torch.nn.utils import weight_norm, remove_weight_norm LRELU_SLOPE = 0.1 def init_weights(m, mean=0.0, std=0.01): classname = m.__class__.__name__ if classname.find("Conv") != -1: m.weight.data.normal_(mean, std) def get_padding(kernel_size, dilation=1): return (kernel_size * dilation - dilation) // 2 class ResBlock(torch.nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): super(ResBlock, self).__init__() self.convs1 = nn.ModuleList( [ weight_norm( Conv1d( channels, channels, kernel_size, 1, dilation=dilation[0], padding=get_padding(kernel_size, dilation[0]), ) ), weight_norm( Conv1d( channels, channels, kernel_size, 1, dilation=dilation[1], padding=get_padding(kernel_size, dilation[1]), ) ), weight_norm( Conv1d( channels, channels, kernel_size, 1, dilation=dilation[2], padding=get_padding(kernel_size, dilation[2]), ) ), ] ) self.convs1.apply(init_weights) self.convs2 = nn.ModuleList( [ weight_norm( Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1), ) ), weight_norm( Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1), ) ), weight_norm( Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1), ) ), ] ) self.convs2.apply(init_weights) def forward(self, x): for c1, c2 in zip(self.convs1, self.convs2): xt = F.leaky_relu(x, LRELU_SLOPE) xt = c1(xt) xt = F.leaky_relu(xt, LRELU_SLOPE) xt = c2(xt) x = xt + x return x def remove_weight_norm(self): for layer in self.convs1: remove_weight_norm(layer) for layer in self.convs2: remove_weight_norm(layer) class Generator(torch.nn.Module): def __init__(self, cfg): super(Generator, self).__init__() self.num_kernels = len(cfg["resblock_kernel_sizes"]) self.num_upsamples = len(cfg["upsample_rates"]) self.conv_pre = weight_norm( Conv1d(80, cfg["upsample_initial_channel"], 7, 1, padding=3) ) self.ups = nn.ModuleList() for i, (u, k) in enumerate( zip(cfg["upsample_rates"], cfg["upsample_kernel_sizes"]) ): self.ups.append( weight_norm( ConvTranspose1d( cfg["upsample_initial_channel"] // (2 ** i), cfg["upsample_initial_channel"] // (2 ** (i + 1)), k, u, padding=(k - u) // 2, ) ) ) self.resblocks = nn.ModuleList() for i in range(len(self.ups)): ch = cfg["upsample_initial_channel"] // (2 ** (i + 1)) for k, d in zip( cfg["resblock_kernel_sizes"], cfg["resblock_dilation_sizes"] ): self.resblocks.append(ResBlock(ch, k, d)) self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) self.ups.apply(init_weights) self.conv_post.apply(init_weights) def forward(self, x): x = self.conv_pre(x) for i in range(self.num_upsamples): x = F.leaky_relu(x, LRELU_SLOPE) x = self.ups[i](x) xs = None for j in range(self.num_kernels): if xs is None: xs = self.resblocks[i * self.num_kernels + j](x) else: xs += self.resblocks[i * self.num_kernels + j](x) x = xs / self.num_kernels x = F.leaky_relu(x) x = self.conv_post(x) x = torch.tanh(x) return x def remove_weight_norm(self): print("Removing weight norm...") for layer in self.ups: remove_weight_norm(layer) for layer in self.resblocks: layer.remove_weight_norm() remove_weight_norm(self.conv_pre) remove_weight_norm(self.conv_post)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/text_to_speech/hifigan.py
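The Generator above reads its topology from a plain dict. A runnable sketch with a small hand-written config that loosely follows the published HiFi-GAN v1 layout (the values are illustrative, not a released configuration):

# Sketch: build the Generator from a hand-written config and verify that the
# output length equals the input length times the product of the upsample rates.
import torch

from fairseq.models.text_to_speech.hifigan import Generator

cfg = {
    "upsample_rates": [8, 8, 2, 2],               # total upsampling: 256x
    "upsample_kernel_sizes": [16, 16, 4, 4],
    "upsample_initial_channel": 128,
    "resblock_kernel_sizes": [3, 7, 11],
    "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
}

generator = Generator(cfg).eval()

mel = torch.randn(1, 80, 50)                      # B x n_mels x T
with torch.no_grad():
    wav = generator(mel)                          # B x 1 x (T * 256)
print(wav.shape)                                  # torch.Size([1, 1, 12800])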
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import List, Optional import torch from torch import nn from fairseq.models import (FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture) from fairseq.modules import ( TransformerEncoderLayer, TransformerDecoderLayer ) from fairseq.models.text_to_speech.tacotron2 import Prenet, Postnet from fairseq.modules import LayerNorm, PositionalEmbedding, FairseqDropout from fairseq.data.data_utils import lengths_to_padding_mask from fairseq import utils logger = logging.getLogger(__name__) def encoder_init(m): if isinstance(m, nn.Conv1d): nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu")) def Embedding(num_embeddings, embedding_dim): m = nn.Embedding(num_embeddings, embedding_dim) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) return m class TTSTransformerEncoder(FairseqEncoder): def __init__(self, args, src_dict, embed_speaker): super().__init__(src_dict) self.padding_idx = src_dict.pad() self.embed_speaker = embed_speaker self.spk_emb_proj = None if embed_speaker is not None: self.spk_emb_proj = nn.Linear( args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim ) self.dropout_module = FairseqDropout( p=args.dropout, module_name=self.__class__.__name__ ) self.embed_tokens = nn.Embedding(len(src_dict), args.encoder_embed_dim, padding_idx=self.padding_idx) assert(args.encoder_conv_kernel_size % 2 == 1) self.prenet = nn.ModuleList( nn.Sequential( nn.Conv1d(args.encoder_embed_dim, args.encoder_embed_dim, kernel_size=args.encoder_conv_kernel_size, padding=((args.encoder_conv_kernel_size - 1) // 2)), nn.BatchNorm1d(args.encoder_embed_dim), nn.ReLU(), nn.Dropout(args.encoder_dropout), ) for _ in range(args.encoder_conv_layers) ) self.prenet_proj = nn.Linear( args.encoder_embed_dim, args.encoder_embed_dim ) self.embed_positions = PositionalEmbedding( args.max_source_positions, args.encoder_embed_dim, self.padding_idx ) self.pos_emb_alpha = nn.Parameter(torch.ones(1)) self.transformer_layers = nn.ModuleList( TransformerEncoderLayer(args) for _ in range(args.encoder_transformer_layers) ) if args.encoder_normalize_before: self.layer_norm = LayerNorm(args.encoder_embed_dim) else: self.layer_norm = None self.apply(encoder_init) def forward(self, src_tokens, src_lengths=None, speaker=None, **kwargs): x = self.embed_tokens(src_tokens) x = x.transpose(1, 2).contiguous() # B x T x C -> B x C x T for conv in self.prenet: x = conv(x) x = x.transpose(1, 2).contiguous() # B x C x T -> B x T x C x = self.prenet_proj(x) padding_mask = src_tokens.eq(self.padding_idx) positions = self.embed_positions(padding_mask) x += self.pos_emb_alpha * positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) for layer in self.transformer_layers: x = layer(x, padding_mask) if self.layer_norm is not None: x = self.layer_norm(x) if self.embed_speaker is not None: seq_len, bsz, _ = x.size() emb = self.embed_speaker(speaker).transpose(0, 1) emb = emb.expand(seq_len, bsz, -1) x = self.spk_emb_proj(torch.cat([x, emb], dim=2)) return { "encoder_out": [x], # T x B x C "encoder_padding_mask": [padding_mask] if padding_mask.any() else [], # B x T "encoder_embedding": [], # B x T x C "encoder_states": [], # List[T x B x C] "src_tokens": [], "src_lengths": [], } def decoder_init(m): if isinstance(m, 
torch.nn.Conv1d): nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("tanh")) class TTSTransformerDecoder(FairseqIncrementalDecoder): def __init__(self, args, src_dict): super().__init__(None) self._future_mask = torch.empty(0) self.args = args self.padding_idx = src_dict.pad() self.n_frames_per_step = args.n_frames_per_step self.out_dim = args.output_frame_dim * args.n_frames_per_step self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) self.embed_positions = PositionalEmbedding( args.max_target_positions, args.decoder_embed_dim, self.padding_idx ) self.pos_emb_alpha = nn.Parameter(torch.ones(1)) self.prenet = nn.Sequential( Prenet(self.out_dim, args.prenet_layers, args.prenet_dim, args.prenet_dropout), nn.Linear(args.prenet_dim, args.decoder_embed_dim), ) self.n_transformer_layers = args.decoder_transformer_layers self.transformer_layers = nn.ModuleList( TransformerDecoderLayer(args) for _ in range(self.n_transformer_layers) ) if args.decoder_normalize_before: self.layer_norm = LayerNorm(args.decoder_embed_dim) else: self.layer_norm = None self.feat_proj = nn.Linear(args.decoder_embed_dim, self.out_dim) self.eos_proj = nn.Linear(args.decoder_embed_dim, 1) self.postnet = Postnet(self.out_dim, args.postnet_conv_dim, args.postnet_conv_kernel_size, args.postnet_layers, args.postnet_dropout) self.ctc_proj = None if getattr(args, "ctc_weight", 0.) > 0.: self.ctc_proj = nn.Linear(self.out_dim, len(src_dict)) self.apply(decoder_init) def extract_features( self, prev_outputs, encoder_out=None, incremental_state=None, target_lengths=None, speaker=None, **kwargs ): alignment_layer = self.n_transformer_layers - 1 self_attn_padding_mask = lengths_to_padding_mask(target_lengths) positions = self.embed_positions( self_attn_padding_mask, incremental_state=incremental_state ) if incremental_state is not None: prev_outputs = prev_outputs[:, -1:, :] self_attn_padding_mask = self_attn_padding_mask[:, -1:] if positions is not None: positions = positions[:, -1:] x = self.prenet(prev_outputs) x += self.pos_emb_alpha * positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) if not self_attn_padding_mask.any(): self_attn_padding_mask = None attn: Optional[torch.Tensor] = None inner_states: List[Optional[torch.Tensor]] = [x] for idx, transformer_layer in enumerate(self.transformer_layers): if incremental_state is None: self_attn_mask = self.buffered_future_mask(x) else: self_attn_mask = None x, layer_attn, _ = transformer_layer( x, encoder_out["encoder_out"][0] if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0) else None, encoder_out["encoder_padding_mask"][0] if ( encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0 ) else None, incremental_state, self_attn_mask=self_attn_mask, self_attn_padding_mask=self_attn_padding_mask, need_attn=bool((idx == alignment_layer)), need_head_weights=bool((idx == alignment_layer)), ) inner_states.append(x) if layer_attn is not None and idx == alignment_layer: attn = layer_attn.float().to(x) if attn is not None: # average probabilities over heads, transpose to # (B, src_len, tgt_len) attn = attn.mean(dim=0).transpose(2, 1) if self.layer_norm is not None: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) return x, {"attn": attn, "inner_states": inner_states} def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, target_lengths=None, speaker=None, **kwargs): x, extra = self.extract_features( prev_output_tokens, 
encoder_out=encoder_out, incremental_state=incremental_state, target_lengths=target_lengths, speaker=speaker, **kwargs ) attn = extra["attn"] feat_out = self.feat_proj(x) bsz, seq_len, _ = x.size() eos_out = self.eos_proj(x) post_feat_out = feat_out + self.postnet(feat_out) return post_feat_out, eos_out, {"attn": attn, "feature_out": feat_out} def get_normalized_probs(self, net_output, log_probs, sample): logits = self.ctc_proj(net_output[2]["feature_out"]) if log_probs: return utils.log_softmax(logits.float(), dim=-1) else: return utils.softmax(logits.float(), dim=-1) def buffered_future_mask(self, tensor): dim = tensor.size(0) # self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround. if ( self._future_mask.size(0) == 0 or (not self._future_mask.device == tensor.device) or self._future_mask.size(0) < dim ): self._future_mask = torch.triu( utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1 ) self._future_mask = self._future_mask.to(tensor) return self._future_mask[:dim, :dim] @register_model("tts_transformer") class TTSTransformerModel(FairseqEncoderDecoderModel): """ Implementation for https://arxiv.org/pdf/1809.08895.pdf """ @staticmethod def add_args(parser): parser.add_argument("--dropout", type=float) parser.add_argument("--output-frame-dim", type=int) parser.add_argument("--speaker-embed-dim", type=int) # encoder prenet parser.add_argument("--encoder-dropout", type=float) parser.add_argument("--encoder-conv-layers", type=int) parser.add_argument("--encoder-conv-kernel-size", type=int) # encoder transformer layers parser.add_argument("--encoder-transformer-layers", type=int) parser.add_argument("--encoder-embed-dim", type=int) parser.add_argument("--encoder-ffn-embed-dim", type=int) parser.add_argument("--encoder-normalize-before", action="store_true") parser.add_argument("--encoder-attention-heads", type=int) parser.add_argument("--attention-dropout", type=float) parser.add_argument("--activation-dropout", "--relu-dropout", type=float) parser.add_argument("--activation-fn", type=str, default="relu") # decoder prenet parser.add_argument("--prenet-dropout", type=float) parser.add_argument("--prenet-layers", type=int) parser.add_argument("--prenet-dim", type=int) # decoder postnet parser.add_argument("--postnet-dropout", type=float) parser.add_argument("--postnet-layers", type=int) parser.add_argument("--postnet-conv-dim", type=int) parser.add_argument("--postnet-conv-kernel-size", type=int) # decoder transformer layers parser.add_argument("--decoder-transformer-layers", type=int) parser.add_argument("--decoder-embed-dim", type=int) parser.add_argument("--decoder-ffn-embed-dim", type=int) parser.add_argument("--decoder-normalize-before", action="store_true") parser.add_argument("--decoder-attention-heads", type=int) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._num_updates = 0 @classmethod def build_model(cls, args, task): embed_speaker = task.get_speaker_embeddings(args) encoder = TTSTransformerEncoder(args, task.src_dict, embed_speaker) decoder = TTSTransformerDecoder(args, task.src_dict) return cls(encoder, decoder) def forward_encoder(self, src_tokens, src_lengths, speaker=None, **kwargs): return self.encoder(src_tokens, src_lengths=src_lengths, speaker=speaker, **kwargs) def set_num_updates(self, num_updates): super().set_num_updates(num_updates) self._num_updates = num_updates @register_model_architecture("tts_transformer", "tts_transformer") def base_architecture(args): args.dropout = getattr(args, "dropout", 
0.1) args.output_frame_dim = getattr(args, "output_frame_dim", 80) args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 64) # encoder prenet args.encoder_dropout = getattr(args, "encoder_dropout", 0.5) args.encoder_conv_layers = getattr(args, "encoder_conv_layers", 3) args.encoder_conv_kernel_size = getattr(args, "encoder_conv_kernel_size", 5) # encoder transformer layers args.encoder_transformer_layers = getattr(args, "encoder_transformer_layers", 6) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * args.encoder_embed_dim) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") # decoder prenet args.prenet_dropout = getattr(args, "prenet_dropout", 0.5) args.prenet_layers = getattr(args, "prenet_layers", 2) args.prenet_dim = getattr(args, "prenet_dim", 256) # decoder postnet args.postnet_dropout = getattr(args, "postnet_dropout", 0.5) args.postnet_layers = getattr(args, "postnet_layers", 5) args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512) args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5) # decoder transformer layers args.decoder_transformer_layers = getattr(args, "decoder_transformer_layers", 6) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4 * args.decoder_embed_dim) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/models/text_to_speech/tts_transformer.py
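Note: a minimal sketch (not part of the repository file above) of how the registered architecture defaults behave. base_architecture only fills attributes that are missing from args, so any field set beforehand is kept and the dependent defaults follow from it. The import path is assumed from the file path above.

# Sketch only: apply the tts_transformer defaults to a bare Namespace.
import argparse

from fairseq.models.text_to_speech.tts_transformer import base_architecture  # assumed module path

args = argparse.Namespace(encoder_embed_dim=256)  # override one field, leave the rest unset
base_architecture(args)

print(args.encoder_embed_dim)       # 256 (kept, because it was already set)
print(args.encoder_ffn_embed_dim)   # 1024 (= 4 * encoder_embed_dim)
print(args.decoder_transformer_layers, args.postnet_layers)  # 6 5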
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
Train a network across multiple GPUs.
"""

from fairseq.dataclass.configs import FairseqConfig
from fairseq.distributed import utils as distributed_utils
from fairseq.trainer import Trainer

try:
    from fairseq.model_parallel.megatron.mpu import (
        get_data_parallel_rank,
        get_data_parallel_world_size,
        get_model_parallel_src_rank,
        get_cuda_rng_tracker,
    )

    has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
    has_megatron_submodule = False


class MegatronTrainer(Trainer):
    """Main class for model parallel with data parallel training."""

    def __init__(self, cfg: FairseqConfig, task, model, criterion, **kwargs):
        if not has_megatron_submodule:
            raise ImportError(
                "\n\nPlease install the megatron submodule:"
                "\n\n git submodule update --init "
                "fairseq/model_parallel/megatron"
            )
        super().__init__(cfg, task, model, criterion, **kwargs)

    def clip_grad_norm(self, clip_norm):
        def _aggregate_model_parallel_grad_norm(total_norm):
            total_norm = total_norm ** 2
            distributed_utils.all_reduce(
                total_norm, group=distributed_utils.get_model_parallel_group()
            )
            total_norm = total_norm ** 0.5
            return total_norm

        return self.optimizer.clip_grad_norm(
            clip_norm,
            aggregate_norm_fn=_aggregate_model_parallel_grad_norm,
        )

    def save_checkpoint(self, filename, extra_state):
        """Save all training state in a checkpoint file."""
        extra_state['rng_tracker_states'] = get_cuda_rng_tracker().get_states()
        super().save_checkpoint(filename, extra_state)

    def load_checkpoint(
        self,
        filename,
        reset_optimizer=False,
        reset_lr_scheduler=False,
        optimizer_overrides=None,
        reset_meters=False,
    ):
        extra_state = super().load_checkpoint(
            filename,
            reset_optimizer=reset_optimizer,
            reset_lr_scheduler=reset_lr_scheduler,
            optimizer_overrides=optimizer_overrides,
            reset_meters=reset_meters,
        )
        if extra_state is not None and 'rng_tracker_states' in extra_state:
            get_cuda_rng_tracker().set_states(extra_state['rng_tracker_states'])
        return extra_state
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/model_parallel/megatron_trainer.py
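Note: the aggregate_norm_fn passed to clip_grad_norm above combines the per-rank gradient norms of the model-parallel shards by all-reducing their squares and taking a square root. A self-contained sketch of that arithmetic, with made-up local norms and no distributed calls:

# Illustration only: per-rank (partial) norms -> global norm.
import math

local_norms = [3.0, 4.0]  # norm of each model-parallel rank's parameter shard
global_norm = math.sqrt(sum(n ** 2 for n in local_norms))
assert global_norm == 5.0  # sqrt(3^2 + 4^2)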
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import criterions, models, modules  # noqa
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/model_parallel/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
    path = os.path.join(models_dir, file)
    if (
        not file.startswith("_")
        and not file.startswith(".")
        and (file.endswith(".py") or os.path.isdir(path))
    ):
        model_name = file[: file.find(".py")] if file.endswith(".py") else file
        module = importlib.import_module("fairseq.model_parallel.models." + model_name)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/model_parallel/models/__init__.py
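Note: a small sketch (with hypothetical directory entries) of the filename-to-module-name rule applied by the loop above before importlib.import_module is called:

# Sketch only: how a directory entry is mapped to a module name.
def to_model_name(entry):
    return entry[: entry.find(".py")] if entry.endswith(".py") else entry

assert to_model_name("transformer.py") == "transformer"  # plain module file: strip the extension
assert to_model_name("pipeline_parallel_transformer") == "pipeline_parallel_transformer"  # package dir: used as-is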
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch.nn as nn from fairseq.model_parallel.modules import ( ModelParallelTransformerDecoderLayer, ModelParallelTransformerEncoderLayer, ) from fairseq.models import register_model from fairseq.models.transformer import ( TransformerDecoder, TransformerEncoder, TransformerModel, ) try: from fairseq.model_parallel.megatron.mpu import ( copy_to_model_parallel_region, gather_from_model_parallel_region, VocabParallelEmbedding, ) has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False logger = logging.getLogger(__name__) @register_model("model_parallel_transformer") class ModelParallelTransformerModel(TransformerModel): """ Model parallel Transformer model. """ @classmethod def build_embedding(cls, args, dictionary, embed_dim, path=None): if not has_megatron_submodule: raise ImportError( "\n\nPlease install the megatron submodule:" "\n\n git submodule update --init " "fairseq/model_parallel/megatron" ) dictionary.pad_to_multiple_(args.model_parallel_size * 8) num_embeddings = len(dictionary) padding_idx = dictionary.pad() def _vocab_init(tensor, **kwargs): nn.init.normal_(tensor, mean=0, std=num_embeddings ** -0.5) nn.init.constant_(tensor[1], 0) emb = VocabParallelEmbedding( num_embeddings, embed_dim, padding_idx, init_method=_vocab_init ) # if provided, load from preloaded dictionaries if path: raise NotImplementedError( "Loading of embedding from path is not supported for model parallel" ) return emb @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return ModelParallelTransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return ModelParallelTransformerDecoder( args, tgt_dict, embed_tokens, no_encoder_attn=getattr(args, "no_cross_attention", False), ) class ModelParallelTransformerEncoder(TransformerEncoder): """ Model parallel Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`ModelParallelTransformerEncoderLayer`. """ def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) if args.no_final_layer_norm: self.layer_norm = None def build_encoder_layer(self, args): return ModelParallelTransformerEncoderLayer(args) class ModelParallelTransformerDecoder(TransformerDecoder): """ Model Parallel Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`ModelParallelTransformerDecoderLayer`. """ def build_decoder_layer(self, args, no_encoder_attn=False): return ModelParallelTransformerDecoderLayer(args, no_encoder_attn) def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if not self.share_input_output_embed: raise NotImplementedError( "Model parallel training currently requires --share-decoder-input-output-embed" ) features = copy_to_model_parallel_region(features) # project back to size of vocabulary x = self.output_projection(features) if getattr(self.args, "criterion") != "vocab_parallel_cross_entropy": x = gather_from_model_parallel_region(x).contiguous() return x
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/model_parallel/models/transformer.py
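Note: build_embedding above pads the dictionary to a multiple of model_parallel_size * 8 so that VocabParallelEmbedding can split the vocabulary evenly across model-parallel ranks. Illustrative arithmetic with made-up sizes:

# Illustration only: effect of dictionary.pad_to_multiple_(model_parallel_size * 8).
model_parallel_size = 4
vocab_size = 50261                                # raw dictionary size (made up)

multiple = model_parallel_size * 8                # padding target
padded = -(-vocab_size // multiple) * multiple    # round up to the next multiple of 32
per_rank = padded // model_parallel_size          # rows held by each rank's embedding shard

print(padded, per_rank)  # 50272 12568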
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from fairseq.model_parallel.models.transformer import ModelParallelTransformerDecoder from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer_lm import TransformerLanguageModel try: from fairseq.model_parallel.megatron.mpu import VocabParallelEmbedding has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False DEFAULT_MAX_TARGET_POSITIONS = 1024 @register_model("model_parallel_transformer_lm") class ModelParallelTransformerLanguageModel(TransformerLanguageModel): @staticmethod def add_args(parser): TransformerLanguageModel.add_args(parser) @classmethod def build_model(cls, args, task): """Build a new model instance.""" if not has_megatron_submodule: raise ImportError( "\n\nPlease install the megatron submodule:" "\n\n git submodule update --init " "fairseq/model_parallel/megatron" ) # make sure all arguments are present in older models base_lm_architecture(args) task.source_dictionary.pad_to_multiple_(args.model_parallel_size * 8) task.target_dictionary.pad_to_multiple_(args.model_parallel_size * 8) if args.decoder_layers_to_keep: args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) if getattr(args, "max_target_positions", None) is None: args.max_target_positions = getattr( args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS ) if args.character_embeddings: raise NotImplementedError( "Character embeddings is not supported for model parallel" ) elif args.adaptive_input: raise NotImplementedError( "Adaptive input is not supported for model parallel" ) else: embed_tokens = cls.build_embedding( args, task.source_dictionary, args.decoder_input_dim ) decoder = ModelParallelTransformerDecoder( args, task.target_dictionary, embed_tokens, no_encoder_attn=True, ) return cls(decoder) @staticmethod def add_args(parser): TransformerLanguageModel.add_args(parser) @classmethod def build_embedding(cls, args, dictionary, embed_dim, path=None): def _vocab_init(tensor, **kwargs): nn.init.normal_(tensor, mean=0, std=embed_dim ** -0.5) nn.init.constant_(tensor[1], 0) embed_tokens = VocabParallelEmbedding( len(dictionary), embed_dim, dictionary.pad(), init_method=_vocab_init ) return embed_tokens def base_lm_architecture(args): # backward compatibility for older model checkpoints if hasattr(args, "no_tie_adaptive_proj"): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if hasattr(args, "decoder_final_norm"): args.no_decoder_final_norm = not args.decoder_final_norm args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.relu_dropout = getattr(args, "relu_dropout", 0.0) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = 
getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.character_embeddings = getattr(args, "character_embeddings", False) args.character_filters = getattr( args, "character_filters", "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]", ) args.character_embedding_dim = getattr(args, "character_embedding_dim", 4) args.char_embedder_highway_layers = getattr(args, "char_embedder_highway_layers", 2) args.adaptive_input = getattr(args, "adaptive_input", False) args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0.0) args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0.0) args.add_bos_token = getattr(args, "add_bos_token", False) @register_model_architecture("model_parallel_transformer_lm", "transformer_lm_megatron") def transformer_lm_megatron(args): args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 3072) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 3072 * 4) args.decoder_layers = getattr(args, "decoder_layers", 72) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 32) args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_fn = getattr(args, "activation_fn", "gelu") base_lm_architecture(args) @register_model_architecture( "model_parallel_transformer_lm", "transformer_lm_megatron_11b" ) def transformer_lm_megatron_11b(args): args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 3072) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 3072 * 6) args.decoder_layers = getattr(args, "decoder_layers", 72) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 32) args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_fn = getattr(args, "activation_fn", "gelu") base_lm_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/model_parallel/models/transformer_lm.py
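Note: a rough, back-of-the-envelope parameter estimate for the transformer_lm_megatron_11b preset above, counting only the attention and FFN weight matrices (embeddings, biases, and layer norms are ignored), which is roughly where the "11b" in the name comes from:

# Approximation only: attention + FFN weights for the 11b preset.
d_model, d_ffn, n_layers = 3072, 3072 * 6, 72

attn_params_per_layer = 4 * d_model * d_model   # q, k, v and output projections
ffn_params_per_layer = 2 * d_model * d_ffn      # fc1 and fc2
total = n_layers * (attn_params_per_layer + ffn_params_per_layer)

print(f"{total / 1e9:.1f}B parameters")  # ~10.9B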
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .model import *  # noqa
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/model_parallel/models/pipeline_parallel_transformer/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.model_parallel.models.pipeline_parallel_transformer.layers import ( Embedding, TransformerDecoderEmbedding, TransformerDecoderLayer, TransformerDecoderOutputLayer, TransformerEncoderEmbedding, TransformerEncoderLayer, TransformerEncoderLayerNorm, ) from fairseq.models import ( BaseFairseqModel, FairseqDecoder, FairseqEncoder, register_model, register_model_architecture, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.models.transformer import ( base_architecture, transformer_iwslt_de_en, transformer_wmt_en_de_big, ) from fairseq.modules import SinusoidalPositionalEmbedding logger = logging.getLogger(__name__) DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 TORCH_PIPE = False RPC_INIT = False def import_pipe(): global TORCH_PIPE global RPC_INIT try: from torch.distributed.pipeline.sync import Pipe # noqa global Pipe from torch.distributed.pipeline.sync.utils import partition_model global partition_model from torch.distributed import rpc import tempfile TORCH_PIPE = True # Initialize single process RPC agent since TORCH_PIPE requires # RRef. RRef depends on RPC being initialized and as a result we initialize # RPC with a single node. tmpfile = tempfile.NamedTemporaryFile() if not RPC_INIT: rpc.init_rpc( name="worker", rank=0, world_size=1, rpc_backend_options=rpc.TensorPipeRpcBackendOptions( init_method="file://{}".format(tmpfile.name), ) ) RPC_INIT = True logger.info('Using torch pipe') except ImportError: try: from fairscale.nn import Pipe # noqa logger.info('Using fairscale pipe') except ImportError: raise ImportError("Please install fairscale with: pip install fairscale") @register_model("pipeline_parallel_transformer") class PipelineParallelTransformerModel(BaseFairseqModel): def __init__(self, encoder, decoder, balance, devices, chunks, checkpoint): import_pipe() super().__init__() assert isinstance(encoder, FairseqEncoder) assert isinstance(decoder, FairseqDecoder) encoder_module_list = ( [encoder.embedding_layer] + list(encoder.encoder_layers) + [encoder.final_layer_norm] ) self.num_encoder_modules = len(encoder_module_list) decoder_module_list = ( [decoder.embedding_layer] + list(decoder.decoder_layers) + [decoder.decoder_output_layer] ) self.num_decoder_modules = len(decoder_module_list) module_list = encoder_module_list + decoder_module_list self.devices = devices if TORCH_PIPE: self.model = Pipe( partition_model(nn.Sequential(*module_list), balance, devices), chunks=chunks, checkpoint=checkpoint, ) else: self.model = Pipe( nn.Sequential(*module_list), balance=balance, devices=devices, chunks=chunks, checkpoint=checkpoint, ) self.encoder_max_positions = self.max_positions_helper( encoder.embedding_layer, "max_source_positions" ) self.decoder_max_positions = self.max_positions_helper( decoder.embedding_layer, "max_target_positions" ) self.adaptive_softmax = getattr(decoder, "adaptive_softmax", None) # Note: To be populated during inference self.encoder = None self.decoder = None def forward(self, src_tokens, src_lengths, prev_output_tokens): if self.training: input_lst = [src_tokens, src_lengths, prev_output_tokens] input = tuple(i.to(self.devices[0], non_blocking=True) for i in input_lst) if TORCH_PIPE: return self.model(input).local_value() 
else: return self.model(input) else: assert self.encoder is not None and self.decoder is not None, ( "encoder and decoder need to be initialized by " + "calling the `prepare_for_inference_()` method" ) encoder_output_tuple = self.encoder(input) return self.decoder(encoder_output_tuple) def prepare_for_inference_(self, cfg): if self.encoder is not None and self.decoder is not None: logger.info("Encoder and Decoder already initialized") return encoder_module_list = [] decoder_module_list = [] module_count = 0 for partition in self.model.partitions: for module in partition: if module_count < self.num_encoder_modules: encoder_module_list.append(module) else: decoder_module_list.append(module) module_count += 1 self.model = None self.encoder = TransformerEncoder(cfg.distributed_training, None, None, encoder_module_list) self.decoder = TransformerDecoder( cfg.distributed_training, None, None, decoder_module_list=decoder_module_list ) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') 
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') parser.add_argument('--num-embedding-chunks', type=int, metavar='N', default=1, help='Number of embedding layer chunks (enables more even distribution' 'of optimizer states across data parallel nodes' 'when using optimizer state sharding and' 'a big embedding vocabulary)') # fmt: on @classmethod def build_model_base(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, "max_source_positions"): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, "max_target_positions"): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary def build_embedding(dictionary, embed_dim, path=None, num_embed_chunks=1): assert embed_dim % num_embed_chunks == 0, ( f"Number of embedding chunks = {num_embed_chunks} should be " + f"divisible by the embedding dimension = {embed_dim}" ) assert path is None or num_embed_chunks == 1, ( "Loading embedding from a path with number of embedding chunks > 1" + " is not yet supported" ) num_embeddings = len(dictionary) padding_idx = dictionary.pad() # if provided, load from preloaded dictionaries if path: emb = Embedding(num_embeddings, embed_dim, padding_idx) embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) else: embed_chunk_dim = embed_dim // num_embed_chunks emb = nn.ModuleList() for i in range(num_embed_chunks): emb.append(Embedding(num_embeddings, embed_chunk_dim, padding_idx)) return emb num_embed_chunks = args.num_embedding_chunks if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError("--share-all-embeddings requires a joined dictionary") if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise ValueError( "--share-all-embeddings not compatible with --decoder-embed-path" ) encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path, num_embed_chunks, ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: assert args.share_decoder_input_output_embed or num_embed_chunks == 1, ( "Not sharing decoder I/O embeddings is not yet supported with number of " + "embedding chunks > 1" ) encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path, num_embed_chunks, ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path, num_embed_chunks, ) encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return (encoder, decoder) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) @classmethod def build_model(cls, args, task): encoder, decoder = cls.build_model_base(args, task) return PipelineParallelTransformerModel( 
encoder=encoder, decoder=decoder, balance=utils.eval_str_list(args.pipeline_balance, type=int), devices=utils.eval_str_list(args.pipeline_devices, type=int), chunks=args.pipeline_chunks, checkpoint=args.pipeline_checkpoint, ) def output_layer(self, features, **kwargs): """Project features to the default output size (typically vocabulary size).""" return self.decoder.output_layer(features, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return (self.encoder_max_positions, self.decoder_max_positions) def max_positions_helper( self, embedding_layer, max_positions_field="max_source_positions" ): """Maximum input length supported by the encoder or decoder.""" if embedding_layer.embed_positions is None: return getattr(embedding_layer, max_positions_field) return min( getattr(embedding_layer, max_positions_field), embedding_layer.embed_positions.max_positions, ) def get_normalized_probs(self, net_output, log_probs, sample=None): """Get normalized probabilities (or log probs) from a net's output.""" if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None: if sample is not None: assert "target" in sample target = sample["target"] else: target = None out = self.adaptive_softmax.get_log_prob(net_output, target=target) return out.exp_() if not log_probs else out # A Pipe() module returns a tuple of tensors as the output. # In this case, the tuple has one element - the output tensor of logits logits = net_output if isinstance(net_output, torch.Tensor) else net_output[0] if log_probs: return utils.log_softmax(logits, dim=-1, onnx_trace=False) else: return utils.softmax(logits, dim=-1, onnx_trace=False) def max_decoder_positions(self): """Maximum length supported by the decoder.""" return self.decoder_max_positions def load_state_dict(self, state_dict, strict=True, model_cfg=None): """Copies parameters and buffers from *state_dict* into this module and its descendants. Overrides the method in :class:`nn.Module`. Compared with that method this additionally "upgrades" *state_dicts* from old checkpoints. 
""" self.upgrade_state_dict(state_dict) is_regular_transformer = not any("model.partitions" in k for k in state_dict) if is_regular_transformer: state_dict = self.convert_to_pipeline_parallel_state_dict(state_dict) return super().load_state_dict(state_dict, strict) def convert_to_pipeline_parallel_state_dict(self, state_dict): new_state_dict = self.state_dict() encoder_layer_idx = 0 decoder_layer_idx = 0 encoder_key_suffixes = [ "self_attn.k_proj.weight", "self_attn.k_proj.bias", "self_attn.v_proj.weight", "self_attn.v_proj.bias", "self_attn.q_proj.weight", "self_attn.q_proj.bias", "self_attn.out_proj.weight", "self_attn.out_proj.bias", "self_attn_layer_norm.weight", "self_attn_layer_norm.bias", "fc1.weight", "fc1.bias", "fc2.weight", "fc2.bias", "final_layer_norm.weight", "final_layer_norm.bias", ] decoder_key_suffixes = [ "self_attn.k_proj.weight", "self_attn.k_proj.bias", "self_attn.v_proj.weight", "self_attn.v_proj.bias", "self_attn.q_proj.weight", "self_attn.q_proj.bias", "self_attn.out_proj.weight", "self_attn.out_proj.bias", "self_attn_layer_norm.weight", "self_attn_layer_norm.bias", "encoder_attn.k_proj.weight", "encoder_attn.k_proj.bias", "encoder_attn.v_proj.weight", "encoder_attn.v_proj.bias", "encoder_attn.q_proj.weight", "encoder_attn.q_proj.bias", "encoder_attn.out_proj.weight", "encoder_attn.out_proj.bias", "encoder_attn_layer_norm.weight", "encoder_attn_layer_norm.bias", "fc1.weight", "fc1.bias", "fc2.weight", "fc2.bias", "final_layer_norm.weight", "final_layer_norm.bias", ] for pid, partition in enumerate(self.model.partitions): logger.info(f"Begin Partition {pid}") for mid, module in enumerate(partition): # fmt: off if isinstance(module, TransformerEncoderEmbedding): new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['encoder.embed_tokens.weight'] new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['encoder.embed_positions._float_tensor'] if isinstance(module, TransformerEncoderLayer): for suffix in encoder_key_suffixes: new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'encoder.layers.{encoder_layer_idx}.{suffix}'] encoder_layer_idx += 1 if isinstance(module, TransformerDecoderLayer): for suffix in decoder_key_suffixes: new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'decoder.layers.{decoder_layer_idx}.{suffix}'] decoder_layer_idx += 1 if isinstance(module, TransformerEncoderLayerNorm): if 'encoder.layer_norm.weight' in state_dict: new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.weight'] = state_dict['encoder.layer_norm.weight'] new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.bias'] = state_dict['encoder.layer_norm.bias'] if isinstance(module, TransformerDecoderEmbedding): new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['decoder.embed_tokens.weight'] new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['decoder.embed_positions._float_tensor'] if isinstance(module, TransformerDecoderOutputLayer): new_state_dict[f'model.partitions.{pid}.{mid}.output_projection.weight'] = state_dict['decoder.output_projection.weight'] # fmt: on return new_state_dict class TransformerEncoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens, encoder_module_list=None): super().__init__(dictionary) self.register_buffer("version", torch.Tensor([3])) import_pipe() self.use_pipeline = encoder_module_list is not None if not self.use_pipeline: self.embedding_layer = TransformerEncoderEmbedding(args, embed_tokens) self.encoder_layers = nn.Sequential(*[TransformerEncoderLayer(args) for i in range(args.encoder_layers)]) if isinstance(embed_tokens, nn.ModuleList): emb_dim = sum(e.embedding_dim for e in embed_tokens) else: emb_dim = embed_tokens.embedding_dim self.final_layer_norm = TransformerEncoderLayerNorm(args, emb_dim) else: encoder_balance = utils.eval_str_list( args.pipeline_encoder_balance, type=int ) encoder_devices = utils.eval_str_list( args.pipeline_encoder_devices, type=int ) assert sum(encoder_balance) == len(encoder_module_list), ( f"Sum of encoder_balance={encoder_balance} is not equal " + f"to num_encoder_modules={len(encoder_module_list)}" ) if TORCH_PIPE: self.model = Pipe( module=partition_model(nn.Sequential(*encoder_module_list), encoder_balance, encoder_devices), chunks=args.pipeline_chunks, checkpoint=args.pipeline_checkpoint, ) else: self.model = Pipe( module=nn.Sequential(*encoder_module_list), balance=encoder_balance, devices=encoder_devices, chunks=args.pipeline_chunks, checkpoint=args.pipeline_checkpoint, ) def forward(self, src_tokens, src_lengths): """ Args: input_tuple( src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` ) Returns: output_tuple( - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` - prev_output_tokens - **encoder_states** (List[Tensor]): all intermediate hidden states of shape `(src_len, batch, embed_dim)`. Only populated if *return_all_hiddens* is True. ) """ dummy_prev_output_tokens = torch.zeros( 1, dtype=src_tokens.dtype, device=src_tokens.device ) input_tuple = (src_tokens, src_lengths, dummy_prev_output_tokens) if self.use_pipeline: input_tuple = tuple(i.to(self.model.devices[0]) for i in input_tuple) if TORCH_PIPE: encoder_out = self.model(input_tuple).local_value() else: encoder_out = self.model(input_tuple) else: encoder_embed_output_tuple = self.embedding_layer(input_tuple) encoder_layers_output = self.encoder_layers(encoder_embed_output_tuple) encoder_out = self.final_layer_norm(encoder_layers_output) # first element is the encoder output # second element is the encoder padding mask # the remaining elements of EncoderOut are not computed by # the PipelineParallelTransformer return EncoderOut(encoder_out[0], encoder_out[1], None, None, None, None) def reorder_encoder_out(self, encoder_out, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out.encoder_out is not None: encoder_out = encoder_out._replace( encoder_out=encoder_out.encoder_out.index_select(1, new_order) ) if encoder_out.encoder_padding_mask is not None: encoder_out = encoder_out._replace( encoder_padding_mask=encoder_out.encoder_padding_mask.index_select( 0, new_order ) ) if encoder_out.encoder_embedding is not None: encoder_out = encoder_out._replace( encoder_embedding=encoder_out.encoder_embedding.index_select( 0, new_order ) ) if encoder_out.encoder_states is not None: for idx, state in enumerate(encoder_out.encoder_states): encoder_out.encoder_states[idx] = state.index_select(1, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" if self.embedding_layer.embed_positions is None: return self.embedding_layer.max_source_positions return min( self.embedding_layer.max_source_positions, self.embedding_layer.embed_positions.max_positions, ) class TransformerDecoder(FairseqDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__( self, args, dictionary, embed_tokens, no_encoder_attn=False, decoder_module_list=None, ): super().__init__(dictionary) self.register_buffer("version", torch.Tensor([3])) import_pipe() self.use_pipeline = decoder_module_list is not None if not self.use_pipeline: self.embedding_layer = TransformerDecoderEmbedding(args, embed_tokens) self.decoder_layers = nn.Sequential(*[ TransformerDecoderLayer(args, no_encoder_attn) for _ in range(args.decoder_layers) ]) self.decoder_output_layer = TransformerDecoderOutputLayer( args, embed_tokens, dictionary ) else: decoder_balance = utils.eval_str_list( args.pipeline_decoder_balance, type=int ) decoder_devices = utils.eval_str_list( args.pipeline_decoder_devices, type=int ) assert sum(decoder_balance) == len(decoder_module_list), ( f"Sum of decoder_balance={decoder_balance} is not equal " + f"to num_decoder_modules={len(decoder_module_list)}" ) if TORCH_PIPE: self.model = Pipe( module=partition_model(nn.Sequential(*decoder_module_list), decoder_balance, decoder_devices), chunks=args.pipeline_chunks, checkpoint=args.pipeline_checkpoint, ) else: self.model = Pipe( module=nn.Sequential(*decoder_module_list), balance=decoder_balance, devices=decoder_devices, chunks=args.pipeline_chunks, checkpoint=args.pipeline_checkpoint, ) def forward( self, prev_output_tokens, encoder_out=None, ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` features_only (bool, optional): only return features without applying output layer (default: False). 
Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ input_tuple = ( encoder_out.encoder_out, encoder_out.encoder_padding_mask, prev_output_tokens, ) if self.use_pipeline: input_tuple = tuple(i.to(self.model.devices[0]) for i in input_tuple) if TORCH_PIPE: return (self.model(input_tuple).local_value(),) else: return (self.model(input_tuple),) else: embed_layer_output = self.embedding_layer(input_tuple) state = self.decoder_layers(embed_layer_output) return (self.decoder_output_layer(state),) def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embedding_layer.embed_positions is None: return self.embedding_layer.max_target_positions return min( self.embedding_layer.max_target_positions, self.embedding_layer.embed_positions.max_positions, ) def buffered_future_mask(self, tensor): dim = tensor.size(0) if ( not hasattr(self, "_future_mask") or self._future_mask is None or self._future_mask.device != tensor.device or self._future_mask.size(0) < dim ): self._future_mask = torch.triu( utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 ) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = "{}.embed_positions.weights".format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict[ "{}.embed_positions._float_tensor".format(name) ] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { "0": "self_attn_layer_norm", "1": "encoder_attn_layer_norm", "2": "final_layer_norm", } for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m) if k in state_dict: state_dict[ "{}.layers.{}.{}.{}".format(name, i, new, m) ] = state_dict[k] del state_dict[k] version_key = "{}.version".format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict @register_model_architecture( "pipeline_parallel_transformer", "transformer_iwslt_de_en_pipeline_parallel" ) def transformer_iwslt_de_en_dist(args): transformer_iwslt_de_en(args) @register_model_architecture( "pipeline_parallel_transformer", "transformer_wmt_en_de_big_pipeline_parallel" ) def transformer_wmt_en_de_big_dist(args): transformer_wmt_en_de_big(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/model_parallel/models/pipeline_parallel_transformer/model.py
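Note: the PipelineParallelTransformerModel constructor above flattens the network into a plain list of modules, and the encoder/decoder wrappers assert that the pipeline balance sums to the number of modules. A sketch of that accounting with illustrative layer counts:

# Sketch only: module accounting behind --pipeline-balance (layer counts are illustrative).
# The model is flattened into
#   [enc embedding] + enc layers + [enc final layer norm]
#   + [dec embedding] + dec layers + [dec output layer]
encoder_layers, decoder_layers = 6, 6

num_encoder_modules = 1 + encoder_layers + 1   # embedding, layers, final layer norm
num_decoder_modules = 1 + decoder_layers + 1   # embedding, layers, output layer
total_modules = num_encoder_modules + num_decoder_modules  # 16

balance = [4, 4, 4, 4]  # e.g. four pipeline stages of four modules each
assert sum(balance) == total_modules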
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from collections import namedtuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.modules import ( AdaptiveSoftmax, LayerNorm, MultiheadAttention, PositionalEmbedding, ) EncoderOut = namedtuple( "TransformerEncoderOut", [ "encoder_out", # T x B x C "encoder_padding_mask", # B x T "encoder_embedding", # B x T x C "encoder_states", # List[T x B x C] ], ) class TransformerEncoderEmbedding(nn.Module): """ Encoder Embedding + Positional Embedding """ def __init__(self, args, embed_tokens): super().__init__() self.dropout = args.dropout self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens if isinstance(embed_tokens, nn.ModuleList): self.padding_idx = embed_tokens[0].padding_idx embed_dim = sum(e.embedding_dim for e in embed_tokens) else: self.padding_idx = embed_tokens.padding_idx embed_dim = embed_tokens.embedding_dim self.embed_scale = math.sqrt(embed_dim) self.embed_positions = ( PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None ) if getattr(args, "layernorm_embedding", False): self.layernorm_embedding = LayerNorm(embed_dim) else: self.layernorm_embedding = None def forward(self, input): # embed tokens and positions src_tokens = input[0] prev_output_tokens = input[2] if isinstance(self.embed_tokens, nn.ModuleList): x_embed_list = [] for embed_tokens_part in self.embed_tokens: x_embed_list.append(embed_tokens_part(src_tokens)) embedded = torch.cat(x_embed_list, dim=-1) else: embedded = self.embed_tokens(src_tokens) x = embed = self.embed_scale * embedded if self.embed_positions is not None: x = embed + self.embed_positions(src_tokens) if self.layernorm_embedding: x = self.layernorm_embedding(x) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) return (x, encoder_padding_mask, prev_output_tokens) class TransformerEncoderLayerNorm(nn.Module): """ Layer norm at the the end of all encoder layers if args.encoder_enormalize_before = True """ def __init__(self, args, embed_dim): super().__init__() if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, input): x = input[0] encoder_padding_mask = input[1] prev_output_tokens = input[2] if self.layer_norm: x = self.layer_norm(x) # keeping track of the incremental_state is not supported yet return (x, encoder_padding_mask, prev_output_tokens) class TransformerDecoderEmbedding(nn.Module): """ Decoder Embedding + Positional Embedding """ def __init__(self, args, embed_tokens): super().__init__() self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = ( sum(e.embedding_dim for e in embed_tokens) if isinstance(embed_tokens, nn.ModuleList) else embed_tokens.embedding_dim ) embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = ( embed_tokens[0].padding_idx if isinstance(embed_tokens, nn.ModuleList) else embed_tokens.padding_idx ) self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try 
with input_embed_dim self.project_in_dim = ( Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None ) self.embed_positions = ( PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None ) def forward(self, input): mt_task = False if isinstance(input, tuple): if len(input) == 3: encoder_out = input[0] encoder_padding_mask = input[1] prev_output_tokens = input[2] incremental_state = None # Hardcoding to avoid passing of None objects mt_task = True else: # HACK for now, need to fix (TODO sidgoyal) prev_output_tokens = input[0] # discard "src_lengths" encoder_out = None encoder_padding_mask = None incremental_state = None else: prev_output_tokens = input encoder_out = None encoder_padding_mask = None incremental_state = None positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions if isinstance(self.embed_tokens, nn.ModuleList): x_embed_list = [] for embed_tokens_part in self.embed_tokens: x_embed_list.append(embed_tokens_part(prev_output_tokens)) x = self.embed_scale * torch.cat(x_embed_list, dim=-1) else: x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) if mt_task: return (x, encoder_out, encoder_padding_mask) return x class TransformerDecoderOutputLayer(nn.Module): def __init__(self, args, embed_tokens, dictionary): super().__init__() self.share_input_output_embed = args.share_decoder_input_output_embed self.embed_tokens = embed_tokens self.output_embed_dim = args.decoder_output_dim embed_dim = args.decoder_embed_dim self.project_out_dim = ( Linear(embed_dim, self.output_embed_dim, bias=False) if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None ) self.adaptive_softmax = None if args.adaptive_softmax_cutoff is not None: assert not isinstance(embed_tokens, nn.ModuleList) self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_tokens = nn.Parameter( torch.Tensor(len(dictionary), self.output_embed_dim) ) nn.init.normal_( self.embed_tokens, mean=0, std=self.output_embed_dim ** -0.5 ) if args.decoder_normalize_before and not getattr( args, "no_decoder_final_norm", False ): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, input, apply_final_proj=True): if isinstance(input, tuple): x = input[0] else: x = input if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) if apply_final_proj: x = self.output_layer(x) return x def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: if 
isinstance(self.embed_tokens, nn.ModuleList): output = None for i, emb in enumerate(self.embed_tokens): sidx = i * emb.embedding_dim eidx = (i + 1) * emb.embedding_dim if output is None: output = F.linear(features[:, :, sidx:eidx], emb.weight) else: output += F.linear(features[:, :, sidx:eidx], emb.weight) return output else: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_tokens) else: return features class TransformerEncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True, ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) self.activation_dropout = getattr(args, "activation_dropout", 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, "relu_dropout", 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"} for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layer_norms.{}.{}".format(name, old, m) if k in state_dict: state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k] del state_dict[k] def forward(self, input): """ Args: input (Tuple): input[0] (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` input[1] (ByteTensor/FloatTensor): encoder padding mask - binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
input[2] (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing) Returns: output (Tuple): output[0] (Tensor): encoded output of shape `(batch, src_len, embed_dim)` output[1] (ByteTensor/FloatTensor): encoder padding mask output[2] (LongTensor): previous decoder outputs """ x = input[0] encoder_padding_mask = input[1] prev_output_tokens = input[2] residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x, _ = self.self_attn( query=x, key=x, value=x, key_padding_mask=encoder_padding_mask ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return (x, encoder_padding_mask, prev_output_tokens) def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__( self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False ): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True, ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) self.activation_dropout = getattr(args, "activation_dropout", 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, "relu_dropout", 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. 
# TODO remove this once we update apex with the fix export = getattr(args, "char_inputs", False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=getattr(args, "encoder_embed_dim", None), vdim=getattr(args, "encoder_embed_dim", None), dropout=args.attention_dropout, encoder_decoder_attention=True, ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward(self, input): """ Args: input (Tuple): input[0] (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` input[1] (Tensor): encoder output of shape `(batch, src_len, embed_dim)` input[2] (ByteTensor/FloatTensor): encoder padding mask - binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. Returns: output (Tuple): output[0] (Tensor): encoded output of shape `(batch, src_len, embed_dim)` output[1] (ByteTensor/FloatTensor): encoder padding mask output[2] (LongTensor): previous decoder outputs """ # Note: incremental state is not yet supported mt_task = False if isinstance(input, tuple): x = input[0] encoder_out = input[1] encoder_padding_mask = input[2] incremental_state = None mt_task = True else: x = input encoder_out = None encoder_padding_mask = None incremental_state = None if incremental_state is None: self_attn_mask = self.buffered_future_mask(x) else: self_attn_mask = None # TODO: add back prev_self_attn_state, prev_attn_state, # self_attn_padding_mask prev_self_attn_state = None prev_attn_state = None self_attn_padding_mask = None residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, 
training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if mt_task: return (x, encoder_out, encoder_padding_mask) return x def buffered_future_mask(self, tensor): dim = tensor.size(0) if ( not hasattr(self, "_future_mask") or self._future_mask is None or self._future_mask.device != tensor.device ): self._future_mask = torch.triu( utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 ) if self._future_mask.size(0) < dim: self._future_mask = torch.triu( utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1 ) return self._future_mask[:dim, :dim] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/model_parallel/models/pipeline_parallel_transformer/layers.py
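Note: every layer in the file above consumes and returns a single tuple (x, encoder_padding_mask, prev_output_tokens) so that nn.Sequential and Pipe can chain the stages blindly. A minimal, hypothetical pass-through module following the same convention (not part of the file):

# Sketch only: the tuple-in / tuple-out convention used by the pipeline layers.
import torch
import torch.nn as nn


class TuplePassthrough(nn.Module):
    def forward(self, input):
        x, encoder_padding_mask, prev_output_tokens = input
        # ... transform x here, keep the other elements untouched ...
        return (x, encoder_padding_mask, prev_output_tokens)


stages = nn.Sequential(TuplePassthrough(), TuplePassthrough())
x = torch.zeros(10, 2, 512)                    # T x B x C, as in the encoder layers
mask = torch.zeros(2, 10, dtype=torch.bool)    # B x T padding mask
prev = torch.zeros(2, 7, dtype=torch.long)     # dummy previous decoder outputs
out = stages((x, mask, prev))
assert out[0].shape == x.shape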
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .model import *  # noqa
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/model_parallel/models/roberta/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ RoBERTa: A Robustly Optimized BERT Pretraining Approach. """ import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.model_parallel.models.transformer import ModelParallelTransformerEncoder from fairseq.models import register_model, register_model_architecture from fairseq.models.roberta import ( roberta_base_architecture, roberta_prenorm_architecture, RobertaEncoder, RobertaModel, ) from fairseq.modules import LayerNorm try: from fairseq.model_parallel.megatron.mpu import ( copy_to_model_parallel_region, gather_from_model_parallel_region, ColumnParallelLinear, VocabParallelEmbedding, ) has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False logger = logging.getLogger(__name__) @register_model("model_parallel_roberta") class ModelParallelRobertaModel(RobertaModel): def __init__(self, args, encoder): super().__init__(args, encoder) self.classification_heads = nn.ModuleDict() @staticmethod def add_args(parser): RobertaModel.add_args(parser) parser.add_argument( "--no-final-layer-norm", action="store_true", help=( "don't add final layernorm (only applicable when " "--encoder-normalize-before=True" ), ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present base_architecture(args) task.source_dictionary.pad_to_multiple_(args.model_parallel_size * 8) task.target_dictionary.pad_to_multiple_(args.model_parallel_size * 8) if not hasattr(args, "max_positions"): args.max_positions = args.tokens_per_sample if getattr(args, "untie_weights_roberta", False): raise NotImplementedError( "--untie-weights-roberta is not supported in model parallel mode" ) encoder = ModelParallelRobertaEncoder(args, task.source_dictionary) return cls(args, encoder) def forward( self, src_tokens, features_only=False, return_all_hiddens=False, classification_head_name=None, **kwargs ): if classification_head_name is not None: features_only = True x, extra = self.encoder(src_tokens, features_only, return_all_hiddens, **kwargs) if classification_head_name is not None: x = self.classification_heads[classification_head_name](x) return x, extra def register_classification_head( self, name, num_classes=None, inner_dim=None, **kwargs ): """Register a classification head.""" if name in self.classification_heads: prev_num_classes = self.classification_heads[name].out_proj.out_features prev_inner_dim = self.classification_heads[name].dense.out_features if num_classes != prev_num_classes or inner_dim != prev_inner_dim: logger.warning( 're-registering head "{}" with num_classes {} (prev: {}) ' "and inner_dim {} (prev: {})".format( name, num_classes, prev_num_classes, inner_dim, prev_inner_dim ) ) self.classification_heads[name] = ModelParallelRobertaClassificationHead( self.args.encoder_embed_dim, inner_dim or self.args.encoder_embed_dim, num_classes, self.args.pooler_activation_fn, self.args.pooler_dropout, ) class ModelParallelRobertaLMHead(nn.Module): """Head for masked language modeling.""" def __init__(self, embed_dim, output_dim, activation_fn, weight=None): super().__init__() self.dense = ColumnParallelLinear(embed_dim, embed_dim, gather_output=True) self.activation_fn = utils.get_activation_fn(activation_fn) self.layer_norm = LayerNorm(embed_dim) if weight is None: weight = 
nn.Linear(embed_dim, output_dim, bias=False).weight self.weight = weight self.bias = nn.Parameter(torch.zeros(output_dim)) def forward(self, features, masked_tokens=None, **kwargs): # Only project the unmasked tokens while training, # saves both memory and computation if masked_tokens is not None: features = features[masked_tokens, :] x = self.dense(features) x = self.activation_fn(x) x = self.layer_norm(x) x = copy_to_model_parallel_region(x) # project back to size of vocabulary with bias x = F.linear(x, self.weight) x = gather_from_model_parallel_region(x).contiguous() x = x + self.bias return x class ModelParallelRobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout ): super().__init__() self.dense = ColumnParallelLinear(input_dim, inner_dim, gather_output=True) self.activation_fn = utils.get_activation_fn(activation_fn) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = self.activation_fn(x) x = self.dropout(x) x = self.out_proj(x) return x class ModelParallelRobertaEncoder(RobertaEncoder): """RoBERTa encoder.""" def __init__(self, args, dictionary): super().__init__(args, dictionary) assert not self.args.untie_weights_roberta def build_embedding(self, vocab_size, embedding_dim, padding_idx): return VocabParallelEmbedding(vocab_size, embedding_dim, padding_idx) def build_encoder(self, args, dictionary, embed_tokens): return ModelParallelTransformerEncoder(args, dictionary, embed_tokens) def build_lm_head(self, embed_dim, output_dim, activation_fn, weight): return ModelParallelRobertaLMHead(embed_dim, output_dim, activation_fn, weight) @register_model_architecture("model_parallel_roberta", "model_parallel_roberta") def base_architecture(args): args.no_final_layer_norm = getattr(args, "no_final_layer_norm", False) # model parallel RoBERTa defaults to "Pre-LN" formulation roberta_prenorm_architecture(args) # earlier versions of model parallel RoBERTa removed the final layer norm @register_model_architecture("model_parallel_roberta", "model_parallel_roberta_v1") def model_parallel_roberta_v1_architecture(args): args.no_final_layer_norm = getattr(args, "no_final_layer_norm", True) base_architecture(args) @register_model_architecture( "model_parallel_roberta", "model_parallel_roberta_postnorm" ) def model_parallel_roberta_postnorm_architecture(args): # the original BERT/RoBERTa uses the "Post-LN" formulation roberta_base_architecture(args) @register_model_architecture("model_parallel_roberta", "model_parallel_roberta_base") def model_parallel_roberta_base_architecture(args): base_architecture(args) @register_model_architecture("model_parallel_roberta", "model_parallel_roberta_large") def model_parallel_roberta_large_architecture(args): args.encoder_layers = getattr(args, "encoder_layers", 24) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) base_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/model_parallel/models/roberta/model.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, Optional, Tuple import torch import torch.nn.functional as F from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.fairseq_dropout import FairseqDropout from torch import Tensor, nn try: from fairseq.model_parallel.megatron.mpu import ( get_cuda_rng_tracker, get_model_parallel_world_size, ColumnParallelLinear, RowParallelLinear, ) has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False @with_incremental_state class ModelParallelMultiheadAttention(nn.Module): """Model parallel Multi-headed attention. This performs the Multi-headed attention over multiple gpus. See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, self_attention=False, encoder_decoder_attention=False, ): super().__init__() if not has_megatron_submodule: raise ImportError( "\n\nPlease install the megatron submodule:" "\n\n git submodule update --init " "fairseq/model_parallel/megatron" ) self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.model_parallel_size = get_model_parallel_world_size() self.num_heads_partition = num_heads // self.model_parallel_size assert ( self.num_heads_partition * self.model_parallel_size == num_heads ), "Number of heads must be divisible by model parallel size" self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert ( not self.self_attention or self.qkv_same_dim ), "Self-attention requires query, key and value to be of the same size" self.k_proj = ColumnParallelLinear( self.kdim, embed_dim, bias=bias, gather_output=False ) self.v_proj = ColumnParallelLinear( self.vdim, embed_dim, bias=bias, gather_output=False ) self.q_proj = ColumnParallelLinear( embed_dim, embed_dim, bias=bias, gather_output=False ) self.out_proj = RowParallelLinear( embed_dim, embed_dim, bias=bias, input_is_parallel=True ) def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, static_kv: bool = False, attn_mask: Optional[Tensor] = None, **unused_kwargs, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). 
""" tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] is_tpu = query.device.type == "xla" if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if saved_state is not None and "prev_key" in saved_state: # previous time steps are cached - no need to recompute # key and value if they are static if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) elif self.encoder_decoder_attention: # encoder-decoder attention q = self.q_proj(query) if key is None: assert value is None k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q *= self.scaling q = ( q.contiguous() .view(tgt_len, bsz * self.num_heads_partition, self.head_dim) .transpose(0, 1) ) if k is not None: k = ( k.contiguous() .view(-1, bsz * self.num_heads_partition, self.head_dim) .transpose(0, 1) ) if v is not None: v = ( v.contiguous() .view(-1, bsz * self.num_heads_partition, self.head_dim) .transpose(0, 1) ) if saved_state is not None: # saved states are stored with shape (bsz, num_heads_partition, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view( bsz * self.num_heads_partition, -1, self.head_dim ) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view( bsz * self.num_heads_partition, -1, self.head_dim ) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: Optional[Tensor] = None if "prev_key_padding_mask" in saved_state: prev_key_padding_mask = saved_state["prev_key_padding_mask"] assert k is not None and v is not None key_padding_mask = ( ModelParallelMultiheadAttention._append_prev_key_padding_mask( key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv, ) ) saved_state["prev_key"] = k.view( bsz, self.num_heads_partition, -1, self.head_dim ) saved_state["prev_value"] = v.view( bsz, self.num_heads_partition, -1, self.head_dim ) saved_state["prev_key_padding_mask"] = key_padding_mask # In this branch incremental_state is never None assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) assert k is not None src_len = k.size(1) # This is part of a workaround to get around fork/join parallelism # not supporting Optional types. 
if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len attn_weights = torch.bmm(q, k.transpose(1, 2)) assert list(attn_weights.size()) == [ bsz * self.num_heads_partition, tgt_len, src_len, ] if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) attn_weights += attn_mask if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view( bsz, self.num_heads_partition, tgt_len, src_len ) if not is_tpu: attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf"), ) else: attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf")) attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.view( bsz * self.num_heads_partition, tgt_len, src_len ) attn_weights_float = utils.softmax(attn_weights, dim=-1) attn_weights = attn_weights_float.type_as(attn_weights) with get_cuda_rng_tracker().fork(): attn_probs = self.dropout_module(attn_weights) assert v is not None attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [ bsz * self.num_heads_partition, tgt_len, self.head_dim, ] embed_dim_partition = embed_dim // self.model_parallel_size attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim_partition) attn = self.out_proj(attn) # return attn_weights None to keep the return type same as single gpu multihead attention # This will be deprecated. attn_weights: Optional[Tensor] = None return attn, attn_weights @staticmethod def _append_prev_key_padding_mask( key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool, ) -> Optional[Tensor]: # saved key padding masks have shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 ) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: filler = torch.zeros(batch_size, src_len - prev_key_padding_mask.size(1)) if prev_key_padding_mask.is_cuda: filler = filler.cuda() new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), filler.float()], dim=1 ) elif key_padding_mask is not None: filler = torch.zeros(batch_size, src_len - key_padding_mask.size(1)) if key_padding_mask.is_cuda: filler = filler.cuda() new_key_padding_mask = torch.cat( [filler.float(), key_padding_mask.float()], dim=1 ) else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order ): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): if input_buffer[k] is not None: input_buffer[k] = input_buffer[k].index_select(0, new_order) incremental_state = self._set_input_buffer(incremental_state, input_buffer) return incremental_state def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = 
self.get_incremental_state(incremental_state, "attn_state") if result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, "attn_state", buffer)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/model_parallel/modules/multihead_attention.py
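A small arithmetic sketch of how `ModelParallelMultiheadAttention` above splits work across ranks (the sizes below are assumptions chosen for illustration): each rank keeps `num_heads // model_parallel_size` heads and the matching slice of the projection output, which is why the head count must be divisible by the model-parallel world size.

# Hypothetical sizes for illustration only.
embed_dim = 1024
num_heads = 16
model_parallel_size = 4

head_dim = embed_dim // num_heads                       # 64, per-head dimension
num_heads_partition = num_heads // model_parallel_size  # 4 heads kept on each rank
assert num_heads_partition * model_parallel_size == num_heads
embed_dim_partition = embed_dim // model_parallel_size  # 256-dim output slice per rank

# Per-rank attention output has shape (tgt_len, bsz, embed_dim_partition);
# the RowParallelLinear out_proj combines the slices back to the full embed_dim.
print(head_dim, num_heads_partition, embed_dim_partition)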
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""

from .multihead_attention import ModelParallelMultiheadAttention
from .transformer_layer import (
    ModelParallelTransformerEncoderLayer,
    ModelParallelTransformerDecoderLayer,
)

__all__ = [
    "ModelParallelMultiheadAttention",
    "ModelParallelTransformerEncoderLayer",
    "ModelParallelTransformerDecoderLayer",
]
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/model_parallel/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.model_parallel.modules import ModelParallelMultiheadAttention
from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer

try:
    from fairseq.model_parallel.megatron.mpu import (
        ColumnParallelLinear,
        RowParallelLinear,
    )

    has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
    has_megatron_submodule = False


class ModelParallelTransformerEncoderLayer(TransformerEncoderLayer):
    """Encoder layer block over multiple gpus.

    See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
    """

    def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
        if q_noise > 0:
            raise NotImplementedError
        return ColumnParallelLinear(input_dim, output_dim, gather_output=False)

    def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
        if q_noise > 0:
            raise NotImplementedError
        return RowParallelLinear(input_dim, output_dim, input_is_parallel=True)

    def build_self_attention(self, embed_dim, args, **unused_kwargs):
        return ModelParallelMultiheadAttention(
            embed_dim,
            args.encoder_attention_heads,
            dropout=args.attention_dropout,
            self_attention=True,
        )


class ModelParallelTransformerDecoderLayer(TransformerDecoderLayer):
    """Decoder layer block.

    See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
    """

    def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
        if q_noise > 0:
            raise NotImplementedError
        return ColumnParallelLinear(input_dim, output_dim, gather_output=False)

    def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
        if q_noise > 0:
            raise NotImplementedError
        return RowParallelLinear(input_dim, output_dim, input_is_parallel=True)

    def build_self_attention(self, embed_dim, args, **unused_kwargs):
        return ModelParallelMultiheadAttention(
            embed_dim=embed_dim,
            num_heads=args.decoder_attention_heads,
            dropout=args.attention_dropout,
            self_attention=not getattr(args, "cross_self_attention", False),
        )

    def build_encoder_attention(self, embed_dim, args, **unused_kwargs):
        return ModelParallelMultiheadAttention(
            embed_dim=embed_dim,
            num_heads=args.decoder_attention_heads,
            kdim=getattr(args, "encoder_embed_dim", None),
            vdim=getattr(args, "encoder_embed_dim", None),
            dropout=args.attention_dropout,
            encoder_decoder_attention=True,
        )
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/model_parallel/modules/transformer_layer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math

from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion

try:
    from fairseq.model_parallel.megatron.mpu.cross_entropy import (
        vocab_parallel_cross_entropy,
    )

    has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
    has_megatron_submodule = False


@register_criterion("vocab_parallel_cross_entropy")
class VocabParallelCrossEntropyCriterion(FairseqCriterion):
    def __init__(self, task, sentence_avg):
        super().__init__(task)
        self.sentence_avg = sentence_avg
        if not has_megatron_submodule:
            raise ImportError(
                "\n\nPlease install the megatron submodule:"
                "\n\n git submodule update --init "
                "fairseq/model_parallel/megatron"
            )

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample["net_input"])
        target = sample["target"]

        loss = vocab_parallel_cross_entropy(net_output[0].float(), target)
        loss = (loss * (target != self.padding_idx)).sum()
        sample_size = (
            sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
        )
        logging_output = {
            "loss": utils.item(loss.data) if reduce else loss.data,
            "ntokens": sample["ntokens"],
            "nsentences": sample["target"].size(0),
            "sample_size": sample_size,
        }
        return loss, sample_size, logging_output

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
        ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
        sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)

        metrics.log_scalar(
            "loss", loss_sum / sample_size / math.log(2), sample_size, round=3
        )
        if sample_size != ntokens:
            metrics.log_scalar(
                "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
            )
            metrics.log_derived(
                "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
            )
        else:
            metrics.log_derived(
                "ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
            )

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
        """
        return True
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/model_parallel/criterions/vocab_parallel_cross_entropy.py
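A minimal sketch of the padding-masking step in the criterion above, using plain torch as a stand-in for the Megatron vocab-parallel loss (which is not importable without the submodule): per-token losses at positions equal to `padding_idx` are zeroed before summation, so padding never contributes to the gradient.

import torch

padding_idx = 1  # assumed pad index, as in fairseq dictionaries
target = torch.tensor([[4, 5, 6, 1], [7, 8, 1, 1]])      # 1 marks padding
per_token_loss = torch.rand(target.shape)                 # stand-in for vocab_parallel_cross_entropy
loss = (per_token_loss * (target != padding_idx)).sum()   # same masking as in the criterion
ntokens = int((target != padding_idx).sum())               # denominator when sentence_avg=False
print(loss.item(), ntokens)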
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os


# automatically import any Python files in the criterions/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
    if file.endswith(".py") and not file.startswith("_"):
        module = file[: file.find(".py")]
        importlib.import_module("fairseq.model_parallel.criterions." + module)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/model_parallel/criterions/__init__.py
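A hedged sketch of the auto-import pattern above: any new `*.py` file dropped into this directory is imported when the package loads, so a `@register_criterion` decorator inside it is enough to make the criterion selectable by name. The file name, criterion name, and placeholder loss below are hypothetical.

# Hypothetical file: fairseq/model_parallel/criterions/example_criterion.py
from fairseq.criterions import FairseqCriterion, register_criterion


@register_criterion("example_parallel_criterion")
class ExampleParallelCriterion(FairseqCriterion):
    def forward(self, model, sample, reduce=True):
        net_output = model(**sample["net_input"])
        # Placeholder loss for illustration only; a real criterion would
        # compute a proper (model-parallel) cross entropy here.
        loss = net_output[0].float().sum()
        sample_size = sample["ntokens"]
        logging_output = {"loss": loss.item(), "sample_size": sample_size}
        return loss, sample_size, logging_output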
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import ctypes import math import sys from dataclasses import dataclass, field import torch from fairseq.dataclass import FairseqDataclass from fairseq.scoring import BaseScorer, register_scorer from fairseq.scoring.tokenizer import EvaluationTokenizer class BleuStat(ctypes.Structure): _fields_ = [ ("reflen", ctypes.c_size_t), ("predlen", ctypes.c_size_t), ("match1", ctypes.c_size_t), ("count1", ctypes.c_size_t), ("match2", ctypes.c_size_t), ("count2", ctypes.c_size_t), ("match3", ctypes.c_size_t), ("count3", ctypes.c_size_t), ("match4", ctypes.c_size_t), ("count4", ctypes.c_size_t), ] @dataclass class SacrebleuConfig(FairseqDataclass): sacrebleu_tokenizer: EvaluationTokenizer.ALL_TOKENIZER_TYPES = field( default="13a", metadata={"help": "tokenizer"} ) sacrebleu_lowercase: bool = field( default=False, metadata={"help": "apply lowercasing"} ) sacrebleu_char_level: bool = field( default=False, metadata={"help": "evaluate at character level"} ) @register_scorer("sacrebleu", dataclass=SacrebleuConfig) class SacrebleuScorer(BaseScorer): def __init__(self, cfg): super(SacrebleuScorer, self).__init__(cfg) import sacrebleu self.sacrebleu = sacrebleu self.tokenizer = EvaluationTokenizer( tokenizer_type=cfg.sacrebleu_tokenizer, lowercase=cfg.sacrebleu_lowercase, character_tokenization=cfg.sacrebleu_char_level, ) def add_string(self, ref, pred): self.ref.append(self.tokenizer.tokenize(ref)) self.pred.append(self.tokenizer.tokenize(pred)) def score(self, order=4): return self.result_string(order).score def result_string(self, order=4): if order != 4: raise NotImplementedError # tokenization and lowercasing are performed by self.tokenizer instead. return self.sacrebleu.corpus_bleu( self.pred, [self.ref], tokenize="none" ).format() @dataclass class BleuConfig(FairseqDataclass): pad: int = field(default=1, metadata={"help": "padding index"}) eos: int = field(default=2, metadata={"help": "eos index"}) unk: int = field(default=3, metadata={"help": "unk index"}) @register_scorer("bleu", dataclass=BleuConfig) class Scorer(object): def __init__(self, cfg): self.stat = BleuStat() self.pad = cfg.pad self.eos = cfg.eos self.unk = cfg.unk try: from fairseq import libbleu except ImportError as e: sys.stderr.write( "ERROR: missing libbleu.so. 
run `pip install --editable .`\n" ) raise e self.C = ctypes.cdll.LoadLibrary(libbleu.__file__) self.reset() def reset(self, one_init=False): if one_init: self.C.bleu_one_init(ctypes.byref(self.stat)) else: self.C.bleu_zero_init(ctypes.byref(self.stat)) def add(self, ref, pred): if not isinstance(ref, torch.IntTensor): raise TypeError("ref must be a torch.IntTensor (got {})".format(type(ref))) if not isinstance(pred, torch.IntTensor): raise TypeError("pred must be a torch.IntTensor(got {})".format(type(pred))) # don't match unknown words rref = ref.clone() assert not rref.lt(0).any() rref[rref.eq(self.unk)] = -999 rref = rref.contiguous().view(-1) pred = pred.contiguous().view(-1) self.C.bleu_add( ctypes.byref(self.stat), ctypes.c_size_t(rref.size(0)), ctypes.c_void_p(rref.data_ptr()), ctypes.c_size_t(pred.size(0)), ctypes.c_void_p(pred.data_ptr()), ctypes.c_int(self.pad), ctypes.c_int(self.eos), ) def score(self, order=4): psum = sum( math.log(p) if p > 0 else float("-Inf") for p in self.precision()[:order] ) return self.brevity() * math.exp(psum / order) * 100 def precision(self): def ratio(a, b): return a / b if b > 0 else 0 return [ ratio(self.stat.match1, self.stat.count1), ratio(self.stat.match2, self.stat.count2), ratio(self.stat.match3, self.stat.count3), ratio(self.stat.match4, self.stat.count4), ] def brevity(self): r = self.stat.reflen / self.stat.predlen return min(1, math.exp(1 - r)) def result_string(self, order=4): assert order <= 4, "BLEU scores for order > 4 aren't supported" fmt = "BLEU{} = {:2.2f}, {:2.1f}" for _ in range(1, order): fmt += "/{:2.1f}" fmt += " (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})" bleup = [p * 100 for p in self.precision()[:order]] return fmt.format( order, self.score(order=order), *bleup, self.brevity(), self.stat.predlen / self.stat.reflen, self.stat.predlen, self.stat.reflen )
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/scoring/bleu.py
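An illustrative usage sketch for the scorers above, assuming `sacrebleu` is installed (the native `Scorer` additionally needs the compiled `libbleu` extension, so the sketch uses the sacreBLEU-backed one). Note that in this snapshot `SacrebleuScorer.score()` appears to call `.score` on the formatted string returned by `result_string()`, so only `result_string()` is exercised here.

from fairseq.scoring.bleu import SacrebleuConfig, SacrebleuScorer

# Default config: "13a" tokenization, no lowercasing, word-level evaluation.
scorer = SacrebleuScorer(SacrebleuConfig())
scorer.add_string(ref="the cat sat on the mat", pred="the cat sat on a mat")
print(scorer.result_string())  # formatted corpus-level sacreBLEU result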
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os
from abc import ABC, abstractmethod

from fairseq import registry
from omegaconf import DictConfig


class BaseScorer(ABC):
    def __init__(self, cfg):
        self.cfg = cfg
        self.ref = []
        self.pred = []

    def add_string(self, ref, pred):
        self.ref.append(ref)
        self.pred.append(pred)

    @abstractmethod
    def score(self) -> float:
        pass

    @abstractmethod
    def result_string(self) -> str:
        pass


_build_scorer, register_scorer, SCORER_REGISTRY, _ = registry.setup_registry(
    "--scoring", default="bleu"
)


def build_scorer(choice, tgt_dict):
    _choice = choice._name if isinstance(choice, DictConfig) else choice

    if _choice == "bleu":
        from fairseq.scoring import bleu

        return bleu.Scorer(
            bleu.BleuConfig(pad=tgt_dict.pad(), eos=tgt_dict.eos(), unk=tgt_dict.unk())
        )
    return _build_scorer(choice)


# automatically import any Python files in the current directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
    if file.endswith(".py") and not file.startswith("_"):
        module = file[: file.find(".py")]
        importlib.import_module("fairseq.scoring." + module)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/scoring/__init__.py
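A hedged sketch of `build_scorer`'s dispatch above: the string "bleu" is special-cased to build the native `Scorer` from a target dictionary's special indices (this path needs the compiled `libbleu` extension), while other registered names go through the scoring registry. `StubDict` is a hypothetical stand-in for a fairseq `Dictionary`, exposing only the three accessors used here.

from fairseq.scoring import build_scorer


class StubDict:
    # Hypothetical stand-in exposing only what build_scorer("bleu", ...) needs.
    def pad(self):
        return 1

    def eos(self):
        return 2

    def unk(self):
        return 3


bleu_scorer = build_scorer("bleu", StubDict())  # native Scorer with these indices
# Other choices, e.g. "sacrebleu" or "wer", are dispatched through _build_scorer
# and built from their registered dataclass defaults.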
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from dataclasses import dataclass, field

from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer
from fairseq.scoring.tokenizer import EvaluationTokenizer


@dataclass
class WerScorerConfig(FairseqDataclass):
    wer_tokenizer: EvaluationTokenizer.ALL_TOKENIZER_TYPES = field(
        default="none", metadata={"help": "sacreBLEU tokenizer to use for evaluation"}
    )
    wer_remove_punct: bool = field(
        default=False, metadata={"help": "remove punctuation"}
    )
    wer_char_level: bool = field(
        default=False, metadata={"help": "evaluate at character level"}
    )
    wer_lowercase: bool = field(default=False, metadata={"help": "lowercasing"})


@register_scorer("wer", dataclass=WerScorerConfig)
class WerScorer(BaseScorer):
    def __init__(self, cfg):
        super().__init__(cfg)
        self.reset()
        try:
            import editdistance as ed
        except ImportError:
            raise ImportError("Please install editdistance to use WER scorer")
        self.ed = ed
        self.tokenizer = EvaluationTokenizer(
            tokenizer_type=self.cfg.wer_tokenizer,
            lowercase=self.cfg.wer_lowercase,
            punctuation_removal=self.cfg.wer_remove_punct,
            character_tokenization=self.cfg.wer_char_level,
        )

    def reset(self):
        self.distance = 0
        self.ref_length = 0

    def add_string(self, ref, pred):
        ref_items = self.tokenizer.tokenize(ref).split()
        pred_items = self.tokenizer.tokenize(pred).split()
        self.distance += self.ed.eval(ref_items, pred_items)
        self.ref_length += len(ref_items)

    def result_string(self):
        return f"WER: {self.score():.2f}"

    def score(self):
        return 100.0 * self.distance / self.ref_length if self.ref_length > 0 else 0
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/scoring/wer.py
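An illustrative usage sketch for the WER scorer above (assumes the `editdistance` and `sacrebleu` packages are installed; the default config skips sacreBLEU tokenization, lowercasing, and punctuation removal).

from fairseq.scoring.wer import WerScorer, WerScorerConfig

scorer = WerScorer(WerScorerConfig())
scorer.add_string(ref="the cat sat on the mat", pred="the cat sat on mat")
# One deletion against a six-word reference -> WER of about 16.67
print(scorer.result_string())  # e.g. "WER: 16.67"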
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unicodedata

from fairseq.dataclass import ChoiceEnum


class EvaluationTokenizer(object):
    """A generic evaluation-time tokenizer, which leverages built-in tokenizers
    in sacreBLEU (https://github.com/mjpost/sacrebleu). It additionally provides
    lowercasing, punctuation removal and character tokenization, which are
    applied after sacreBLEU tokenization.

    Args:
        tokenizer_type (str): the type of sacreBLEU tokenizer to apply.
        lowercase (bool): lowercase the text.
        punctuation_removal (bool): remove punctuation (based on unicode
            category) from text.
        character_tokenization (bool): tokenize the text to characters.
    """

    SPACE = chr(32)
    SPACE_ESCAPE = chr(9601)
    ALL_TOKENIZER_TYPES = ChoiceEnum(["none", "13a", "intl", "zh", "ja-mecab"])

    def __init__(
        self,
        tokenizer_type: str = "13a",
        lowercase: bool = False,
        punctuation_removal: bool = False,
        character_tokenization: bool = False,
    ):
        from sacrebleu.tokenizers import TOKENIZERS

        assert tokenizer_type in TOKENIZERS, f"{tokenizer_type}, {TOKENIZERS}"
        self.lowercase = lowercase
        self.punctuation_removal = punctuation_removal
        self.character_tokenization = character_tokenization
        self.tokenizer = TOKENIZERS[tokenizer_type]

    @classmethod
    def remove_punctuation(cls, sent: str):
        """Remove punctuation based on Unicode category."""
        return cls.SPACE.join(
            t
            for t in sent.split(cls.SPACE)
            if not all(unicodedata.category(c)[0] == "P" for c in t)
        )

    def tokenize(self, sent: str):
        tokenized = self.tokenizer()(sent)

        if self.punctuation_removal:
            tokenized = self.remove_punctuation(tokenized)

        if self.character_tokenization:
            tokenized = self.SPACE.join(
                list(tokenized.replace(self.SPACE, self.SPACE_ESCAPE))
            )

        if self.lowercase:
            tokenized = tokenized.lower()

        return tokenized
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/scoring/tokenizer.py
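A short usage sketch for `EvaluationTokenizer` above (assumes `sacrebleu` is installed, since the tokenizers are looked up from `sacrebleu.tokenizers`).

from fairseq.scoring.tokenizer import EvaluationTokenizer

tok = EvaluationTokenizer(
    tokenizer_type="13a",          # sacreBLEU's default English tokenizer
    lowercase=True,
    punctuation_removal=True,
    character_tokenization=False,
)
# Punctuation-only tokens are removed and the text is lowercased after tokenization.
print(tok.tokenize("Hello, World!"))  # -> "hello world"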
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.scoring import BaseScorer, register_scorer


@register_scorer("chrf")
class ChrFScorer(BaseScorer):
    def __init__(self, args):
        super(ChrFScorer, self).__init__(args)
        import sacrebleu

        self.sacrebleu = sacrebleu

    def add_string(self, ref, pred):
        self.ref.append(ref)
        self.pred.append(pred)

    def score(self, order=4):
        return self.result_string(order).score

    def result_string(self, order=4):
        if order != 4:
            raise NotImplementedError
        return self.sacrebleu.corpus_chrf(self.pred, [self.ref]).format()
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/scoring/chrf.py
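A usage sketch for the chrF scorer above (assumes `sacrebleu` is installed). Unlike the BLEU scorers it is registered without a dataclass, and the base class only stores the args object, so passing `None` suffices for a standalone check.

from fairseq.scoring.chrf import ChrFScorer

scorer = ChrFScorer(None)  # args object is only stored on the base class
scorer.add_string(ref="the cat sat on the mat", pred="the cat sat on a mat")
print(scorer.result_string())  # formatted corpus-level chrF result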
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys from dataclasses import _MISSING_TYPE, dataclass, field from typing import Any, List, Optional import torch from fairseq.dataclass.constants import ( DATASET_IMPL_CHOICES, DDP_BACKEND_CHOICES, DDP_COMM_HOOK_CHOICES, GENERATION_CONSTRAINTS_CHOICES, GENERATION_DECODING_FORMAT_CHOICES, LOG_FORMAT_CHOICES, PIPELINE_CHECKPOINT_CHOICES, PRINT_ALIGNMENT_CHOICES, ZERO_SHARDING_CHOICES, ) from omegaconf import II, MISSING @dataclass class FairseqDataclass: """fairseq base dataclass that supported fetching attributes and metas""" _name: Optional[str] = None @staticmethod def name(): return None def _get_all_attributes(self) -> List[str]: return [k for k in self.__dataclass_fields__.keys()] def _get_meta( self, attribute_name: str, meta: str, default: Optional[Any] = None ) -> Any: return self.__dataclass_fields__[attribute_name].metadata.get(meta, default) def _get_name(self, attribute_name: str) -> str: return self.__dataclass_fields__[attribute_name].name def _get_default(self, attribute_name: str) -> Any: if hasattr(self, attribute_name): if str(getattr(self, attribute_name)).startswith("${"): return str(getattr(self, attribute_name)) elif str(self.__dataclass_fields__[attribute_name].default).startswith( "${" ): return str(self.__dataclass_fields__[attribute_name].default) elif ( getattr(self, attribute_name) != self.__dataclass_fields__[attribute_name].default ): return getattr(self, attribute_name) f = self.__dataclass_fields__[attribute_name] if not isinstance(f.default_factory, _MISSING_TYPE): return f.default_factory() return f.default def _get_type(self, attribute_name: str) -> Any: return self.__dataclass_fields__[attribute_name].type def _get_help(self, attribute_name: str) -> Any: return self._get_meta(attribute_name, "help") def _get_argparse_const(self, attribute_name: str) -> Any: return self._get_meta(attribute_name, "argparse_const") def _get_argparse_alias(self, attribute_name: str) -> Any: return self._get_meta(attribute_name, "argparse_alias") def _get_choices(self, attribute_name: str) -> Any: return self._get_meta(attribute_name, "choices") @classmethod def from_namespace(cls, args): if isinstance(args, cls): return args else: config = cls() for k in config.__dataclass_fields__.keys(): if k.startswith("_"): # private member, skip continue if hasattr(args, k): setattr(config, k, getattr(args, k)) return config @dataclass class CommonConfig(FairseqDataclass): # This is the core dataclass including common parameters shared by all different jobs. Please append your params to other dataclasses if they were # used for a particular purpose or task, such as those dedicated for `distributed training`, `optimization`, etc. 
no_progress_bar: bool = field( default=False, metadata={"help": "disable progress bar"} ) log_interval: int = field( default=100, metadata={ "help": "log progress every N batches (when progress bar is disabled)" }, ) log_format: Optional[LOG_FORMAT_CHOICES] = field( default=None, metadata={"help": "log format to use"} ) log_file: Optional[str] = field( default=None, metadata={"help": "log file to copy metrics to."} ) tensorboard_logdir: Optional[str] = field( default=None, metadata={ "help": "path to save logs for tensorboard, should match --logdir " "of running tensorboard (default: no tensorboard logging)" }, ) wandb_project: Optional[str] = field( default=None, metadata={"help": "Weights and Biases project name to use for logging"}, ) azureml_logging: Optional[bool] = field( default=False, metadata={"help": "Log scalars to AzureML context"}, ) seed: int = field( default=1, metadata={"help": "pseudo random number generator seed"} ) cpu: bool = field(default=False, metadata={"help": "use CPU instead of CUDA"}) tpu: bool = field(default=False, metadata={"help": "use TPU instead of CUDA"}) bf16: bool = field(default=False, metadata={"help": "use bfloat16; implies --tpu"}) memory_efficient_bf16: bool = field( default=False, metadata={ "help": "use a memory-efficient version of BF16 training; implies --bf16" }, ) fp16: bool = field(default=False, metadata={"help": "use FP16"}) memory_efficient_fp16: bool = field( default=False, metadata={ "help": "use a memory-efficient version of FP16 training; implies --fp16" }, ) fp16_no_flatten_grads: bool = field( default=False, metadata={"help": "don't flatten FP16 grads tensor"} ) fp16_init_scale: int = field( default=2 ** 7, metadata={"help": "default FP16 loss scale"} ) fp16_scale_window: Optional[int] = field( default=None, metadata={"help": "number of updates before increasing loss scale"}, ) fp16_scale_tolerance: float = field( default=0.0, metadata={ "help": "pct of updates that can overflow before decreasing the loss scale" }, ) on_cpu_convert_precision: bool = field( default=False, metadata={ "help": "if set, the floating point conversion to fp16/bf16 runs on CPU. " "This reduces bus transfer time and GPU memory usage." 
} ) min_loss_scale: float = field( default=1e-4, metadata={"help": "minimum FP16/AMP loss scale, after which training is stopped"}, ) threshold_loss_scale: Optional[float] = field( default=None, metadata={"help": "threshold FP16 loss scale from below"} ) amp: bool = field(default=False, metadata={"help": "use automatic mixed precision"}) amp_batch_retries: int = field( default=2, metadata={"help": "number of retries of same batch after reducing loss scale with AMP"}, ) amp_init_scale: int = field( default=2 ** 7, metadata={"help": "default AMP loss scale"} ) amp_scale_window: Optional[int] = field( default=None, metadata={"help": "number of updates before increasing AMP loss scale"}, ) user_dir: Optional[str] = field( default=None, metadata={ "help": "path to a python module containing custom extensions (tasks and/or architectures)" }, ) empty_cache_freq: int = field( default=0, metadata={"help": "how often to clear the PyTorch CUDA cache (0 to disable)"}, ) all_gather_list_size: int = field( default=16384, metadata={"help": "number of bytes reserved for gathering stats from workers"}, ) model_parallel_size: int = field( default=1, metadata={"help": "total number of GPUs to parallelize model over"} ) quantization_config_path: Optional[str] = field( default=None, metadata={"help": "path to quantization config file"} ) profile: bool = field( default=False, metadata={"help": "enable autograd profiler emit_nvtx"} ) reset_logging: bool = field( default=False, metadata={ "help": "when using Hydra, reset the logging at the beginning of training" }, ) suppress_crashes: bool = field( default=False, metadata={ "help": "suppress crashes when training with the hydra_train entry point so that the " "main method can return a value (useful for sweeps)" }, ) use_plasma_view: bool = field( default=False, metadata={"help": "Store indices and sizes in shared memory"} ) plasma_path: Optional[str] = field( default="/tmp/plasma", metadata={ "help": "path to run plasma_store, defaults to /tmp/plasma. Paths outside /tmp tend to fail." 
}, ) @dataclass class DistributedTrainingConfig(FairseqDataclass): distributed_world_size: int = field( default=max(1, torch.cuda.device_count()), metadata={ "help": "total number of GPUs across all nodes (default: all visible GPUs)" }, ) distributed_num_procs: Optional[int] = field( default=max(1, torch.cuda.device_count()), metadata={ "help": "total number of processes to fork (default: all visible GPUs)" }, ) distributed_rank: Optional[int] = field( default=0, metadata={"help": "rank of the current worker"} ) distributed_backend: str = field( default="nccl", metadata={"help": "distributed backend"} ) distributed_init_method: Optional[str] = field( default=None, metadata={ "help": "typically tcp://hostname:port that will be used to " "establish initial connetion" }, ) distributed_port: int = field( default=-1, metadata={ "help": "port number (not required if using --distributed-init-method)" }, ) device_id: int = field( default=0, metadata={ "help": "which GPU to use (usually configured automatically)", "argparse_alias": "--local_rank", }, ) distributed_no_spawn: bool = field( default=False, metadata={ "help": "do not spawn multiple processes even if multiple GPUs are visible" }, ) ddp_backend: DDP_BACKEND_CHOICES = field( default="pytorch_ddp", metadata={"help": "DistributedDataParallel backend"} ) ddp_comm_hook: DDP_COMM_HOOK_CHOICES = field( default="none", metadata={"help": "communication hook"} ) bucket_cap_mb: int = field( default=25, metadata={"help": "bucket size for reduction"} ) fix_batches_to_gpus: bool = field( default=False, metadata={ "help": "don't shuffle batches between GPUs; this reduces overall " "randomness and may affect precision but avoids the cost of re-reading the data" }, ) find_unused_parameters: bool = field( default=False, metadata={ "help": "disable unused parameter detection (not applicable to " "--ddp-backend=legacy_ddp)" }, ) gradient_as_bucket_view: bool = field( default=False, metadata={ "help": "when set to True, gradients will be views pointing to different offsets of allreduce communication buckets. This can reduce peak memory usage, where the saved memory size will be equal to the total gradients size. " "--gradient-as-bucket-view=gradient_as_bucket_view)" }, ) fast_stat_sync: bool = field( default=False, metadata={"help": "[deprecated] this is now defined per Criterion"}, ) heartbeat_timeout: int = field( default=-1, metadata={ "help": "kill the job if no progress is made in N seconds; " "set to -1 to disable" }, ) broadcast_buffers: bool = field( default=False, metadata={ "help": "Copy non-trainable parameters between GPUs, such as " "batchnorm population statistics" }, ) slowmo_momentum: Optional[float] = field( default=None, metadata={ "help": "SlowMo momentum term; by default use 0.0 for 16 GPUs, " "0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs" }, ) slowmo_algorithm: str = field( default="LocalSGD", metadata={"help": "whether to use LocalSGD or SGP"} ) localsgd_frequency: int = field( default=3, metadata={"help": "Local SGD allreduce frequency"} ) nprocs_per_node: int = field( default=max(1, torch.cuda.device_count()), metadata={ "help": "number of GPUs in each node. An allreduce operation across GPUs in " "a node is very fast. 
Hence, we do allreduce across GPUs in a node, " "and gossip across different nodes" }, ) pipeline_model_parallel: bool = field( default=False, metadata={"help": "if set, use pipeline model parallelism across GPUs"}, ) pipeline_balance: Optional[str] = field( default=None, metadata={ "help": "partition the model into N_K pieces, where each piece " "contains N_i layers. The sum(args.pipeline_balance) " "should equal the total number of layers in the model" }, ) pipeline_devices: Optional[str] = field( default=None, metadata={ "help": "a list of device indices indicating which device to place " "each of the N_K partitions. The length of this list should " "equal the length of the --pipeline-balance argument" }, ) pipeline_chunks: Optional[int] = field( default=0, metadata={"help": "microbatch count for pipeline model parallelism"} ) pipeline_encoder_balance: Optional[str] = field( default=None, metadata={ "help": "partition the pipeline parallel encoder into N_K pieces, where each piece " "contains N_i layers. The sum(args.pipeline_encoder_balance) " "should equal the total number of encoder layers in the model" }, ) pipeline_encoder_devices: Optional[str] = field( default=None, metadata={ "help": "a list of device indices indicating which device to place " "each of the N_K partitions. The length of this list should " "equal the length of the --pipeline-encoder-balance argument" }, ) pipeline_decoder_balance: Optional[str] = field( default=None, metadata={ "help": "partition the pipeline parallel decoder into N_K pieces, where each piece " "contains N_i layers. The sum(args.pipeline_decoder_balance) " "should equal the total number of decoder layers in the model" }, ) pipeline_decoder_devices: Optional[str] = field( default=None, metadata={ "help": "a list of device indices indicating which device to place " "each of the N_K partitions. 
The length of this list should " "equal the length of the --pipeline-decoder-balance argument" }, ) pipeline_checkpoint: PIPELINE_CHECKPOINT_CHOICES = field( default="never", metadata={"help": "checkpointing mode for pipeline model parallelism"}, ) zero_sharding: ZERO_SHARDING_CHOICES = field( default="none", metadata={"help": "ZeRO sharding"} ) fp16: bool = II("common.fp16") memory_efficient_fp16: bool = II("common.memory_efficient_fp16") tpu: bool = II("common.tpu") # configuration for --ddp-backend=fully_sharded no_reshard_after_forward: bool = field( default=False, metadata={"help": "don't reshard parameters after forward pass"}, ) fp32_reduce_scatter: bool = field( default=False, metadata={"help": "reduce-scatter grads in FP32"}, ) cpu_offload: bool = field( default=False, metadata={"help": "offload FP32 params to CPU"} ) use_sharded_state: bool = field( default=False, metadata={"help": "use sharded checkpoint files"}, ) @dataclass class DatasetConfig(FairseqDataclass): num_workers: int = field( default=1, metadata={"help": "how many subprocesses to use for data loading"} ) skip_invalid_size_inputs_valid_test: bool = field( default=False, metadata={"help": "ignore too long or too short lines in valid and test set"}, ) max_tokens: Optional[int] = field( default=None, metadata={"help": "maximum number of tokens in a batch"} ) batch_size: Optional[int] = field( default=None, metadata={ "help": "number of examples in a batch", "argparse_alias": "--max-sentences", }, ) required_batch_size_multiple: int = field( default=8, metadata={"help": "batch size will be a multiplier of this value"} ) required_seq_len_multiple: int = field( default=1, metadata={ "help": "maximum sequence length in batch will be a multiplier of this value" }, ) dataset_impl: Optional[DATASET_IMPL_CHOICES] = field( default=None, metadata={"help": "output dataset implementation"} ) data_buffer_size: int = field( default=10, metadata={"help": "Number of batches to preload"} ) train_subset: str = field( default="train", metadata={"help": "data subset to use for training (e.g. train, valid, test)"}, ) valid_subset: str = field( default="valid", metadata={ "help": "comma separated list of data subsets to use for validation" " (e.g. train, valid, test)" }, ) combine_valid_subsets: Optional[bool] = field( default=None, metadata={ "help": "comma separated list of data subsets to use for validation" " (e.g. 
train, valid, test)", "argparse_alias": "--combine-val", }, ) ignore_unused_valid_subsets: Optional[bool] = field( default=False, metadata={"help": "do not raise error if valid subsets are ignored"}, ) validate_interval: int = field( default=1, metadata={"help": "validate every N epochs"} ) validate_interval_updates: int = field( default=0, metadata={"help": "validate every N updates"} ) validate_after_updates: int = field( default=0, metadata={"help": "dont validate until reaching this many updates"} ) fixed_validation_seed: Optional[int] = field( default=None, metadata={"help": "specified random seed for validation"} ) disable_validation: bool = field( default=False, metadata={"help": "disable validation"} ) max_tokens_valid: Optional[int] = field( default=II("dataset.max_tokens"), metadata={ "help": "maximum number of tokens in a validation batch" " (defaults to --max-tokens)" }, ) batch_size_valid: Optional[int] = field( default=II("dataset.batch_size"), metadata={ "help": "batch size of the validation batch (defaults to --batch-size)", "argparse_alias": "--max-sentences-valid", }, ) max_valid_steps: Optional[int] = field(default=None, metadata={'help': 'How many batches to evaluate', "argparse_alias": "--nval"}) curriculum: int = field( default=0, metadata={"help": "don't shuffle batches for first N epochs"} ) gen_subset: str = field( default="test", metadata={"help": "data subset to generate (train, valid, test)"}, ) num_shards: int = field( default=1, metadata={"help": "shard generation over N shards"} ) shard_id: int = field( default=0, metadata={"help": "id of the shard to generate (id < num_shards)"} ) @dataclass class OptimizationConfig(FairseqDataclass): max_epoch: int = field( default=0, metadata={"help": "force stop training at specified epoch"} ) max_update: int = field( default=0, metadata={"help": "force stop training at specified update"} ) stop_time_hours: float = field( default=0, metadata={ "help": "force stop training after specified cumulative time (if >0)" }, ) clip_norm: float = field( default=0.0, metadata={"help": "clip threshold of gradients"} ) sentence_avg: bool = field( default=False, metadata={ "help": "normalize gradients by the number of sentences in a batch" " (default is to normalize by number of tokens)" }, ) update_freq: List[int] = field( default_factory=lambda: [1], metadata={"help": "update parameters every N_i batches, when in epoch i"}, ) lr: List[float] = field( default_factory=lambda: [0.25], metadata={ "help": "learning rate for the first N epochs; all epochs >N using LR_N" " (note: this may be interpreted differently depending on --lr-scheduler)" }, ) stop_min_lr: float = field( default=-1.0, metadata={"help": "stop training when the learning rate reaches this minimum"}, ) use_bmuf: bool = field( default=False, metadata={ "help": "specify global optimizer for syncing models on different GPUs/shards" }, ) @dataclass class CheckpointConfig(FairseqDataclass): save_dir: str = field( default="checkpoints", metadata={"help": "path to save checkpoints"} ) restore_file: str = field( default="checkpoint_last.pt", metadata={ "help": "filename from which to load checkpoint " "(default: <save-dir>/checkpoint_last.pt" }, ) finetune_from_model: Optional[str] = field( default=None, metadata={ "help": "finetune from a pretrained model; note that meters and lr scheduler will be reset" }, ) reset_dataloader: bool = field( default=False, metadata={ "help": "if set, does not reload dataloader state from the checkpoint" }, ) reset_lr_scheduler: bool = field( 
default=False, metadata={ "help": "if set, does not load lr scheduler state from the checkpoint" }, ) reset_meters: bool = field( default=False, metadata={"help": "if set, does not load meters from the checkpoint"}, ) reset_optimizer: bool = field( default=False, metadata={"help": "if set, does not load optimizer state from the checkpoint"}, ) optimizer_overrides: str = field( default="{}", metadata={ "help": "a dictionary used to override optimizer args when loading a checkpoint" }, ) save_interval: int = field( default=1, metadata={"help": "save a checkpoint every N epochs"} ) save_interval_updates: int = field( default=0, metadata={"help": "save a checkpoint (and validate) every N updates"} ) keep_interval_updates: int = field( default=-1, metadata={ "help": "keep the last N checkpoints saved with --save-interval-updates" }, ) keep_interval_updates_pattern: int = field( default=-1, metadata={ "help": "when used with --keep-interval-updates, skips deleting " "any checkpoints with update X where " "X %% keep_interval_updates_pattern == 0" }, ) keep_last_epochs: int = field( default=-1, metadata={"help": "keep last N epoch checkpoints"} ) keep_best_checkpoints: int = field( default=-1, metadata={"help": "keep best N checkpoints based on scores"} ) no_save: bool = field( default=False, metadata={"help": "don't save models or checkpoints"} ) no_epoch_checkpoints: bool = field( default=False, metadata={"help": "only store last and best checkpoints"} ) no_last_checkpoints: bool = field( default=False, metadata={"help": "don't store last checkpoints"} ) no_save_optimizer_state: bool = field( default=False, metadata={"help": "don't save optimizer-state as part of checkpoint"}, ) best_checkpoint_metric: str = field( default="loss", metadata={"help": 'metric to use for saving "best" checkpoints'} ) maximize_best_checkpoint_metric: bool = field( default=False, metadata={ "help": 'select the largest metric value for saving "best" checkpoints' }, ) patience: int = field( default=-1, metadata={ "help": ( "early stop training if valid performance doesn't " "improve for N consecutive validation runs; note " "that this is influenced by --validate-interval" ) }, ) checkpoint_suffix: str = field( default="", metadata={"help": "suffix to add to the checkpoint file name"} ) checkpoint_shard_count: int = field( default=1, metadata={ "help": "Number of shards containing the checkpoint - " "if the checkpoint is over 300GB, it is preferable " "to split it into shards to prevent OOM on CPU while loading " "the checkpoint" }, ) load_checkpoint_on_all_dp_ranks: bool = field( default=False, metadata={ "help": "load checkpoints on all data parallel devices " "(default: only load on rank 0 and broadcast to other devices)" }, ) write_checkpoints_asynchronously: bool = field( default=False, metadata={ "help": ( "Write checkpoints asynchronously in a separate " "thread. NOTE: This feature is currently being tested." 
), "argparse_alias": "--save-async", }, ) model_parallel_size: int = II("common.model_parallel_size") @dataclass class FairseqBMUFConfig(FairseqDataclass): block_lr: float = field( default=1, metadata={"help": "block learning rate for bmuf"} ) block_momentum: float = field( default=0.875, metadata={"help": "block momentum for bmuf"} ) global_sync_iter: int = field( default=50, metadata={"help": "Iteration for syncing global model"} ) warmup_iterations: int = field( default=500, metadata={"help": "warmup iterations for model to broadcast"} ) use_nbm: bool = field( default=False, metadata={"help": "Specify whether you want to use classical BM / Nesterov BM"}, ) average_sync: bool = field( default=False, metadata={ "help": "Specify whether you want to average the local momentum after each sync" }, ) distributed_world_size: int = II("distributed_training.distributed_world_size") @dataclass class GenerationConfig(FairseqDataclass): truncate_length: int = field( default=0, metadata={"help": "Truncate the source to the length; 0 means disabled."}, ) beam: int = field( default=5, metadata={"help": "beam size"}, ) nbest: int = field( default=1, metadata={"help": "number of hypotheses to output"}, ) max_len_a: float = field( default=0, metadata={ "help": "generate sequences of maximum length ax + b, where x is the source length" }, ) max_len_b: int = field( default=200, metadata={ "help": "generate sequences of maximum length ax + b, where x is the source length" }, ) min_len: int = field( default=1, metadata={"help": "minimum generation length"}, ) match_source_len: bool = field( default=False, metadata={"help": "generations should match the source length"}, ) unnormalized: bool = field( default=False, metadata={"help": "compare unnormalized hypothesis scores"}, ) no_early_stop: bool = field( default=False, metadata={"help": "deprecated"}, ) no_beamable_mm: bool = field( default=False, metadata={"help": "don't use BeamableMM in attention layers"}, ) lenpen: float = field( default=1, metadata={ "help": "length penalty: <1.0 favors shorter, >1.0 favors longer sentences" }, ) unkpen: float = field( default=0, metadata={ "help": "unknown word penalty: <0 produces more unks, >0 produces fewer" }, ) replace_unk: Optional[str] = field( default=None, metadata={ "help": "perform unknown replacement (optionally with alignment dictionary)", "argparse_const": "@@ ", }, ) sacrebleu: bool = field( default=False, metadata={"help": "score with sacrebleu"}, ) score_reference: bool = field( default=False, metadata={"help": "just score the reference translation"}, ) prefix_size: int = field( default=0, metadata={"help": "initialize generation by target prefix of given length"}, ) no_repeat_ngram_size: int = field( default=0, metadata={ "help": "ngram blocking such that this size ngram cannot be repeated in the generation" }, ) sampling: bool = field( default=False, metadata={"help": "sample hypotheses instead of using beam search"}, ) sampling_topk: int = field( default=-1, metadata={"help": "sample from top K likely next words instead of all words"}, ) sampling_topp: float = field( default=-1.0, metadata={ "help": "sample from the smallest set whose cumulative probability mass exceeds p for next words" }, ) constraints: Optional[GENERATION_CONSTRAINTS_CHOICES] = field( default=None, metadata={ "help": "enables lexically constrained decoding", "argparse_const": "ordered", }, ) temperature: float = field( default=1.0, metadata={"help": "temperature for generation"}, ) diverse_beam_groups: int = field( default=-1, 
metadata={"help": "number of groups for Diverse Beam Search"}, ) diverse_beam_strength: float = field( default=0.5, metadata={"help": "strength of diversity penalty for Diverse Beam Search"}, ) diversity_rate: float = field( default=-1.0, metadata={"help": "strength of diversity penalty for Diverse Siblings Search"}, ) print_alignment: Optional[PRINT_ALIGNMENT_CHOICES] = field( default=None, metadata={ "help": "if set, uses attention feedback to compute and print alignment to source tokens " "(valid options are: hard, soft, otherwise treated as hard alignment)", "argparse_const": "hard", }, ) print_step: bool = field( default=False, metadata={"help": "print steps"}, ) lm_path: Optional[str] = field( default=None, metadata={"help": "path to lm checkpoint for lm fusion"}, ) lm_weight: float = field( default=0.0, metadata={"help": "weight for lm probs for lm fusion"}, ) # arguments for iterative refinement generator iter_decode_eos_penalty: float = field( default=0.0, metadata={"help": "if > 0.0, it penalized early-stopping in decoding."}, ) iter_decode_max_iter: int = field( default=10, metadata={"help": "maximum iterations for iterative refinement."}, ) iter_decode_force_max_iter: bool = field( default=False, metadata={ "help": "if set, run exact the maximum number of iterations without early stop" }, ) iter_decode_with_beam: int = field( default=1, metadata={ "help": "if > 1, model will generate translations varying by the lengths." }, ) iter_decode_with_external_reranker: bool = field( default=False, metadata={ "help": "if set, the last checkpoint are assumed to be a reranker to rescore the translations" }, ) retain_iter_history: bool = field( default=False, metadata={ "help": "if set, decoding returns the whole history of iterative refinement" }, ) retain_dropout: bool = field( default=False, metadata={"help": "Use dropout at inference time"}, ) # temporarily set to Any until https://github.com/facebookresearch/hydra/issues/1117 is fixed # retain_dropout_modules: Optional[List[str]] = field( retain_dropout_modules: Any = field( default=None, metadata={ "help": "if set, only retain dropout for the specified modules; " "if not set, then dropout will be retained for all modules" }, ) # special decoding format for advanced decoding. decoding_format: Optional[GENERATION_DECODING_FORMAT_CHOICES] = field( default=None, metadata={"help": "special decoding format for advanced decoding."}, ) no_seed_provided: bool = field( default=False, metadata={"help": "if set, dont use seed for initializing random generators"}, ) @dataclass class CommonEvalConfig(FairseqDataclass): path: Optional[str] = field( default=None, metadata={"help": "path(s) to model file(s), colon separated"}, ) post_process: Optional[str] = field( default=None, metadata={ "help": ( "post-process text by removing BPE, letter segmentation, etc. " "Valid options can be found in fairseq.data.utils.post_process." 
), "argparse_const": "subword_nmt", "argparse_alias": "--remove-bpe", }, ) quiet: bool = field(default=False, metadata={"help": "only print final scores"}) model_overrides: str = field( default="{}", metadata={ "help": "a dictionary used to override model args at generation that were used during model training" }, ) results_path: Optional[str] = field( default=None, metadata={"help": "path to save eval results (optional)"} ) @dataclass class EvalLMConfig(FairseqDataclass): output_word_probs: bool = field( default=False, metadata={ "help": "if set, outputs words and their predicted log probabilities to standard output" }, ) output_word_stats: bool = field( default=False, metadata={ "help": "if set, outputs word statistics such as word count, average probability, etc" }, ) context_window: int = field( default=0, metadata={ "help": "ensures that every evaluated token has access to a context of at least this size, if possible" }, ) softmax_batch: int = field( default=sys.maxsize, metadata={ "help": "if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory" }, ) @dataclass class InteractiveConfig(FairseqDataclass): buffer_size: int = field( default=0, metadata={ "help": "read this many sentences into a buffer before processing them" }, ) input: str = field( default="-", metadata={"help": "file to read from; use - for stdin"}, ) @dataclass class EMAConfig(FairseqDataclass): store_ema: bool = field( default=False, metadata={ help: "store exponential moving average shadow model" } ) ema_decay: float = field( default=0.9999, metadata={ "help": 'decay for exponential moving average model' } ) ema_start_update : int = field( default=0, metadata={"help": "start EMA update after this many model updates"} ) ema_seed_model : Optional[str] = field( default=None, metadata={ "help": "Seed to load EMA model from. " "Used to load EMA model separately from the actual model." } ) ema_update_freq : int = field( default=1, metadata={"help": "Do EMA update every this many model updates"} ) ema_fp32: bool = field( default=False, metadata={"help": "If true, store EMA model in fp32 even if model is in fp16"}, ) @dataclass class FairseqConfig(FairseqDataclass): common: CommonConfig = CommonConfig() common_eval: CommonEvalConfig = CommonEvalConfig() distributed_training: DistributedTrainingConfig = DistributedTrainingConfig() dataset: DatasetConfig = DatasetConfig() optimization: OptimizationConfig = OptimizationConfig() checkpoint: CheckpointConfig = CheckpointConfig() bmuf: FairseqBMUFConfig = FairseqBMUFConfig() generation: GenerationConfig = GenerationConfig() eval_lm: EvalLMConfig = EvalLMConfig() interactive: InteractiveConfig = InteractiveConfig() model: Any = MISSING task: Any = None criterion: Any = None optimizer: Any = None lr_scheduler: Any = None scoring: Any = None bpe: Any = None tokenizer: Any = None ema: EMAConfig = EMAConfig()
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/dataclass/configs.py
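The config dataclasses in the file above are ordinary Python dataclasses, so they can be instantiated and selectively overridden before being handed to fairseq components. A minimal sketch, assuming fairseq is installed; the particular override values are illustrative only:

from fairseq.dataclass.configs import EMAConfig, GenerationConfig

# override a few generation defaults; unspecified fields keep the defaults defined above
gen_cfg = GenerationConfig(beam=10, sampling=True, sampling_topp=0.9)
print(gen_cfg.beam, gen_cfg.min_len)  # 10 1

# EMA settings consumed by fairseq's exponential-moving-average wrapper
ema_cfg = EMAConfig(store_ema=True, ema_decay=0.999)
print(ema_cfg.ema_update_freq)  # 1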
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" import logging from hydra.core.config_store import ConfigStore from fairseq.dataclass.configs import FairseqConfig from omegaconf import DictConfig, OmegaConf logger = logging.getLogger(__name__) def hydra_init(cfg_name="config") -> None: cs = ConfigStore.instance() cs.store(name=f"{cfg_name}", node=FairseqConfig) for k in FairseqConfig.__dataclass_fields__: v = FairseqConfig.__dataclass_fields__[k].default try: cs.store(name=k, node=v) except BaseException: logger.error(f"{k} - {v}") raise def add_defaults(cfg: DictConfig) -> None: """This function adds default values that are stored in dataclasses that hydra doesn't know about """ from fairseq.registry import REGISTRIES from fairseq.tasks import TASK_DATACLASS_REGISTRY from fairseq.models import ARCH_MODEL_NAME_REGISTRY, MODEL_DATACLASS_REGISTRY from fairseq.dataclass.utils import merge_with_parent from typing import Any OmegaConf.set_struct(cfg, False) for k, v in FairseqConfig.__dataclass_fields__.items(): field_cfg = cfg.get(k) if field_cfg is not None and v.type == Any: dc = None if isinstance(field_cfg, str): field_cfg = DictConfig({"_name": field_cfg}) field_cfg.__dict__["_parent"] = field_cfg.__dict__["_parent"] name = getattr(field_cfg, "_name", None) if k == "task": dc = TASK_DATACLASS_REGISTRY.get(name) elif k == "model": name = ARCH_MODEL_NAME_REGISTRY.get(name, name) dc = MODEL_DATACLASS_REGISTRY.get(name) elif k in REGISTRIES: dc = REGISTRIES[k]["dataclass_registry"].get(name) if dc is not None: cfg[k] = merge_with_parent(dc, field_cfg)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/dataclass/initialize.py
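hydra_init in the file above registers the full FairseqConfig tree with Hydra's ConfigStore so that it can later be composed with command-line style overrides. A rough sketch of how that is typically driven, assuming fairseq and a Hydra 1.0-era hydra-core are installed (the override string is illustrative, and hydra.experimental was renamed in later Hydra releases):

from hydra.experimental import compose, initialize

from fairseq.dataclass.initialize import hydra_init

hydra_init("config")  # stores FairseqConfig and its top-level groups in the ConfigStore
with initialize(config_path=None):
    cfg = compose("config", overrides=["optimization.max_update=1000"])
print(cfg.optimization.max_update)  # 1000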
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from enum import Enum, EnumMeta from typing import List class StrEnumMeta(EnumMeta): # this is workaround for submitit pickling leading to instance checks failing in hydra for StrEnum, see # https://github.com/facebookresearch/hydra/issues/1156 @classmethod def __instancecheck__(cls, other): return "enum" in str(type(other)) class StrEnum(Enum, metaclass=StrEnumMeta): def __str__(self): return self.value def __eq__(self, other: str): return self.value == other def __repr__(self): return self.value def __hash__(self): return hash(str(self)) def ChoiceEnum(choices: List[str]): """return the Enum class used to enforce list of choices""" return StrEnum("Choices", {k: k for k in choices}) LOG_FORMAT_CHOICES = ChoiceEnum(["json", "none", "simple", "tqdm"]) DDP_BACKEND_CHOICES = ChoiceEnum([ "c10d", # alias for pytorch_ddp "fully_sharded", # FullyShardedDataParallel from fairscale "legacy_ddp", "no_c10d", # alias for legacy_ddp "pytorch_ddp", "slow_mo", ]) DDP_COMM_HOOK_CHOICES = ChoiceEnum(["none", "fp16"]) DATASET_IMPL_CHOICES = ChoiceEnum(["raw", "lazy", "cached", "mmap", "fasta", "huffman"]) GENERATION_CONSTRAINTS_CHOICES = ChoiceEnum(["ordered", "unordered"]) GENERATION_DECODING_FORMAT_CHOICES = ChoiceEnum( ["unigram", "ensemble", "vote", "dp", "bs"] ) ZERO_SHARDING_CHOICES = ChoiceEnum(["none", "os"]) PIPELINE_CHECKPOINT_CHOICES = ChoiceEnum(["always", "never", "except_last"]) PRINT_ALIGNMENT_CHOICES = ChoiceEnum(["hard", "soft"])
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/dataclass/constants.py
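ChoiceEnum above turns a list of strings into a StrEnum that constrains config fields to a fixed set of values. A small usage sketch, assuming fairseq is installed; MY_MODES is a hypothetical choice set that mirrors constants such as DDP_BACKEND_CHOICES:

from fairseq.dataclass.constants import ChoiceEnum

MY_MODES = ChoiceEnum(["fast", "accurate"])  # hypothetical set of allowed values

print([str(m) for m in MY_MODES])  # ['fast', 'accurate']
print(MY_MODES("fast") == "fast")  # True: StrEnum compares equal to plain strings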
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .configs import FairseqDataclass from .constants import ChoiceEnum __all__ = [ "FairseqDataclass", "ChoiceEnum", ]
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/dataclass/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import ast import inspect import logging import os import re from argparse import ArgumentError, ArgumentParser, Namespace from dataclasses import _MISSING_TYPE, MISSING, is_dataclass from enum import Enum from typing import Any, Dict, List, Optional, Tuple, Type from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.configs import FairseqConfig from hydra.core.global_hydra import GlobalHydra from hydra.experimental import compose, initialize from omegaconf import DictConfig, OmegaConf, open_dict, _utils logger = logging.getLogger(__name__) def eval_str_list(x, x_type=float): if x is None: return None if isinstance(x, str): if len(x) == 0: return [] x = ast.literal_eval(x) try: return list(map(x_type, x)) except TypeError: return [x_type(x)] def interpret_dc_type(field_type): if isinstance(field_type, str): raise RuntimeError("field should be a type") if field_type == Any: return str typestring = str(field_type) if re.match( r"(typing.|^)Union\[(.*), NoneType\]$", typestring ) or typestring.startswith("typing.Optional"): return field_type.__args__[0] return field_type def gen_parser_from_dataclass( parser: ArgumentParser, dataclass_instance: FairseqDataclass, delete_default: bool = False, with_prefix: Optional[str] = None, ) -> None: """ convert a dataclass instance to tailing parser arguments. If `with_prefix` is provided, prefix all the keys in the resulting parser with it. It means that we are building a flat namespace from a structured dataclass (see transformer_config.py for example). """ def argparse_name(name: str): if name == "data" and (with_prefix is None or with_prefix == ''): # normally data is positional args, so we don't add the -- nor the prefix return name if name == "_name": # private member, skip return None full_name = "--" + name.replace("_", "-") if with_prefix is not None and with_prefix != '': # if a prefix is specified, construct the prefixed arg name full_name = with_prefix + "-" + full_name[2:] # strip -- when composing return full_name def get_kwargs_from_dc( dataclass_instance: FairseqDataclass, k: str ) -> Dict[str, Any]: """k: dataclass attributes""" kwargs = {} field_type = dataclass_instance._get_type(k) inter_type = interpret_dc_type(field_type) field_default = dataclass_instance._get_default(k) if isinstance(inter_type, type) and issubclass(inter_type, Enum): field_choices = [t.value for t in list(inter_type)] else: field_choices = None field_help = dataclass_instance._get_help(k) field_const = dataclass_instance._get_argparse_const(k) if isinstance(field_default, str) and field_default.startswith("${"): kwargs["default"] = field_default else: if field_default is MISSING: kwargs["required"] = True if field_choices is not None: kwargs["choices"] = field_choices if ( isinstance(inter_type, type) and (issubclass(inter_type, List) or issubclass(inter_type, Tuple)) ) or ("List" in str(inter_type) or "Tuple" in str(inter_type)): if "int" in str(inter_type): kwargs["type"] = lambda x: eval_str_list(x, int) elif "float" in str(inter_type): kwargs["type"] = lambda x: eval_str_list(x, float) elif "str" in str(inter_type): kwargs["type"] = lambda x: eval_str_list(x, str) else: raise NotImplementedError( "parsing of type " + str(inter_type) + " is not implemented" ) if field_default is not MISSING: kwargs["default"] = ( ",".join(map(str, field_default)) if field_default is not 
None else None ) elif ( isinstance(inter_type, type) and issubclass(inter_type, Enum) ) or "Enum" in str(inter_type): kwargs["type"] = str if field_default is not MISSING: if isinstance(field_default, Enum): kwargs["default"] = field_default.value else: kwargs["default"] = field_default elif inter_type is bool: kwargs["action"] = ( "store_false" if field_default is True else "store_true" ) kwargs["default"] = field_default else: kwargs["type"] = inter_type if field_default is not MISSING: kwargs["default"] = field_default # build the help with the hierarchical prefix if with_prefix is not None and with_prefix != '' and field_help is not None: field_help = with_prefix[2:] + ': ' + field_help kwargs["help"] = field_help if field_const is not None: kwargs["const"] = field_const kwargs["nargs"] = "?" return kwargs for k in dataclass_instance._get_all_attributes(): field_name = argparse_name(dataclass_instance._get_name(k)) field_type = dataclass_instance._get_type(k) if field_name is None: continue elif inspect.isclass(field_type) and issubclass(field_type, FairseqDataclass): # for fields that are of type FairseqDataclass, we can recursively # add their fields to the namespace (so we add the args from model, task, etc. to the root namespace) prefix = None if with_prefix is not None: # if a prefix is specified, then we don't want to copy the subfields directly to the root namespace # but we prefix them with the name of the current field. prefix = field_name gen_parser_from_dataclass(parser, field_type(), delete_default, prefix) continue kwargs = get_kwargs_from_dc(dataclass_instance, k) field_args = [field_name] alias = dataclass_instance._get_argparse_alias(k) if alias is not None: field_args.append(alias) if "default" in kwargs: if isinstance(kwargs["default"], str) and kwargs["default"].startswith( "${" ): if kwargs["help"] is None: # this is a field with a name that will be added elsewhere continue else: del kwargs["default"] if delete_default and "default" in kwargs: del kwargs["default"] try: parser.add_argument(*field_args, **kwargs) except ArgumentError: pass def _set_legacy_defaults(args, cls): """Helper to set default arguments based on *add_args*.""" if not hasattr(cls, "add_args"): return import argparse parser = argparse.ArgumentParser( argument_default=argparse.SUPPRESS, allow_abbrev=False ) cls.add_args(parser) # copied from argparse.py: defaults = argparse.Namespace() for action in parser._actions: if action.dest is not argparse.SUPPRESS: if not hasattr(defaults, action.dest): if action.default is not argparse.SUPPRESS: setattr(defaults, action.dest, action.default) for key, default_value in vars(defaults).items(): if not hasattr(args, key): setattr(args, key, default_value) def _override_attr( sub_node: str, data_class: Type[FairseqDataclass], args: Namespace ) -> List[str]: overrides = [] if not inspect.isclass(data_class) or not issubclass(data_class, FairseqDataclass): return overrides def get_default(f): if not isinstance(f.default_factory, _MISSING_TYPE): return f.default_factory() return f.default for k, v in data_class.__dataclass_fields__.items(): if k.startswith("_"): # private member, skip continue val = get_default(v) if not hasattr(args, k) else getattr(args, k) field_type = interpret_dc_type(v.type) if ( isinstance(val, str) and not val.startswith("${") # not interpolation and field_type != str and ( not inspect.isclass(field_type) or not issubclass(field_type, Enum) ) # not choices enum ): # upgrade old models that stored complex parameters as string val = 
ast.literal_eval(val) if isinstance(val, tuple): val = list(val) v_type = getattr(v.type, "__origin__", None) if ( (v_type is List or v_type is list or v_type is Optional) # skip interpolation and not (isinstance(val, str) and val.startswith("${")) ): # if type is int but val is float, then we will crash later - try to convert here if hasattr(v.type, "__args__"): t_args = v.type.__args__ if len(t_args) == 1 and (t_args[0] is float or t_args[0] is int): val = list(map(t_args[0], val)) elif val is not None and ( field_type is int or field_type is bool or field_type is float ): try: val = field_type(val) except: pass # ignore errors here, they are often from interpolation args if val is None: overrides.append("{}.{}=null".format(sub_node, k)) elif val == "": overrides.append("{}.{}=''".format(sub_node, k)) elif isinstance(val, str): val = val.replace("'", r"\'") overrides.append("{}.{}='{}'".format(sub_node, k, val)) elif isinstance(val, FairseqDataclass): overrides += _override_attr(f"{sub_node}.{k}", type(val), args) elif isinstance(val, Namespace): sub_overrides, _ = override_module_args(val) for so in sub_overrides: overrides.append(f"{sub_node}.{k}.{so}") else: overrides.append("{}.{}={}".format(sub_node, k, val)) return overrides def migrate_registry( name, value, registry, args, overrides, deletes, use_name_as_val=False ): if value in registry: overrides.append("{}={}".format(name, value)) overrides.append("{}._name={}".format(name, value)) overrides.extend(_override_attr(name, registry[value], args)) elif use_name_as_val and value is not None: overrides.append("{}={}".format(name, value)) else: deletes.append(name) def override_module_args(args: Namespace) -> Tuple[List[str], List[str]]: """use the field in args to overrides those in cfg""" overrides = [] deletes = [] for k in FairseqConfig.__dataclass_fields__.keys(): overrides.extend( _override_attr(k, FairseqConfig.__dataclass_fields__[k].type, args) ) if args is not None: if hasattr(args, "task"): from fairseq.tasks import TASK_DATACLASS_REGISTRY migrate_registry( "task", args.task, TASK_DATACLASS_REGISTRY, args, overrides, deletes ) else: deletes.append("task") # these options will be set to "None" if they have not yet been migrated # so we can populate them with the entire flat args CORE_REGISTRIES = {"criterion", "optimizer", "lr_scheduler"} from fairseq.registry import REGISTRIES for k, v in REGISTRIES.items(): if hasattr(args, k): migrate_registry( k, getattr(args, k), v["dataclass_registry"], args, overrides, deletes, use_name_as_val=k not in CORE_REGISTRIES, ) else: deletes.append(k) no_dc = True if hasattr(args, "arch"): from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_MODEL_NAME_REGISTRY if args.arch in ARCH_MODEL_REGISTRY: m_cls = ARCH_MODEL_REGISTRY[args.arch] dc = getattr(m_cls, "__dataclass", None) if dc is not None: m_name = ARCH_MODEL_NAME_REGISTRY[args.arch] overrides.append("model={}".format(m_name)) overrides.append("model._name={}".format(args.arch)) # override model params with those exist in args overrides.extend(_override_attr("model", dc, args)) no_dc = False if no_dc: deletes.append("model") return overrides, deletes class omegaconf_no_object_check: def __init__(self): self.old_is_primitive = _utils.is_primitive_type def __enter__(self): _utils.is_primitive_type = lambda _: True def __exit__(self, type, value, traceback): _utils.is_primitive_type = self.old_is_primitive def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig: """Convert a flat argparse.Namespace to a structured DictConfig.""" 
# Here we are using field values provided in args to override counterparts inside config object overrides, deletes = override_module_args(args) # configs will be in fairseq/config after installation config_path = os.path.join("..", "config") GlobalHydra.instance().clear() with initialize(config_path=config_path): try: composed_cfg = compose("config", overrides=overrides, strict=False) except: logger.error("Error when composing. Overrides: " + str(overrides)) raise for k in deletes: composed_cfg[k] = None cfg = OmegaConf.create( OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True) ) # hack to be able to set Namespace in dict config. this should be removed when we update to newer # omegaconf version that supports object flags, or when we migrate all existing models from omegaconf import _utils with omegaconf_no_object_check(): if cfg.task is None and getattr(args, "task", None): cfg.task = Namespace(**vars(args)) from fairseq.tasks import TASK_REGISTRY _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task]) cfg.task._name = args.task if cfg.model is None and getattr(args, "arch", None): cfg.model = Namespace(**vars(args)) from fairseq.models import ARCH_MODEL_REGISTRY _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch]) cfg.model._name = args.arch if cfg.optimizer is None and getattr(args, "optimizer", None): cfg.optimizer = Namespace(**vars(args)) from fairseq.optim import OPTIMIZER_REGISTRY _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer]) cfg.optimizer._name = args.optimizer if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None): cfg.lr_scheduler = Namespace(**vars(args)) from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY _set_legacy_defaults( cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler] ) cfg.lr_scheduler._name = args.lr_scheduler if cfg.criterion is None and getattr(args, "criterion", None): cfg.criterion = Namespace(**vars(args)) from fairseq.criterions import CRITERION_REGISTRY _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion]) cfg.criterion._name = args.criterion OmegaConf.set_struct(cfg, True) return cfg def overwrite_args_by_name(cfg: DictConfig, overrides: Dict[str, any]): # this will be deprecated when we get rid of argparse and model_overrides logic from fairseq.registry import REGISTRIES with open_dict(cfg): for k in cfg.keys(): # "k in cfg" will return false if its a "mandatory value (e.g. 
???)" if k in cfg and isinstance(cfg[k], DictConfig): if k in overrides and isinstance(overrides[k], dict): for ok, ov in overrides[k].items(): if isinstance(ov, dict) and cfg[k][ok] is not None: overwrite_args_by_name(cfg[k][ok], ov) else: cfg[k][ok] = ov else: overwrite_args_by_name(cfg[k], overrides) elif k in cfg and isinstance(cfg[k], Namespace): for override_key, val in overrides.items(): setattr(cfg[k], override_key, val) elif k in overrides: if ( k in REGISTRIES and overrides[k] in REGISTRIES[k]["dataclass_registry"] ): cfg[k] = DictConfig( REGISTRIES[k]["dataclass_registry"][overrides[k]] ) overwrite_args_by_name(cfg[k], overrides) cfg[k]._name = overrides[k] else: cfg[k] = overrides[k] def merge_with_parent(dc: FairseqDataclass, cfg: DictConfig, remove_missing=True): if remove_missing: if is_dataclass(dc): target_keys = set(dc.__dataclass_fields__.keys()) else: target_keys = set(dc.keys()) with open_dict(cfg): for k in list(cfg.keys()): if k not in target_keys: del cfg[k] merged_cfg = OmegaConf.merge(dc, cfg) merged_cfg.__dict__["_parent"] = cfg.__dict__["_parent"] OmegaConf.set_struct(merged_cfg, True) return merged_cfg
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/dataclass/utils.py
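gen_parser_from_dataclass in the file above is what maps a FairseqDataclass onto argparse options, using each field's metadata["help"], default, and type. A minimal sketch, assuming fairseq is installed; ExampleConfig and its fields are hypothetical and exist only for illustration:

import argparse
from dataclasses import dataclass, field

from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import gen_parser_from_dataclass


@dataclass
class ExampleConfig(FairseqDataclass):
    dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
    use_bias: bool = field(default=False, metadata={"help": "add bias terms"})


parser = argparse.ArgumentParser()
gen_parser_from_dataclass(parser, ExampleConfig())
args = parser.parse_args(["--dropout", "0.3", "--use-bias"])
print(args.dropout, args.use_bias)  # 0.3 True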