Columns: python_code (string, lengths 0–992k), repo_name (string, lengths 8–46), file_path (string, lengths 5–162)
import numpy as np

from fairseq.data import FairseqDataset


class DummyDataset(FairseqDataset):
    def __init__(self, batch, num_items, item_size):
        super().__init__()
        self.batch = batch
        self.num_items = num_items
        self.item_size = item_size

    def __getitem__(self, index):
        return index

    def __len__(self):
        return self.num_items

    def collater(self, samples):
        return self.batch

    @property
    def sizes(self):
        return np.array([self.item_size] * self.num_items)

    def num_tokens(self, index):
        return self.item_size

    def size(self, index):
        return self.item_size

    def ordered_indices(self):
        return np.arange(self.num_items)

    @property
    def supports_prefetch(self):
        return False
KosmosX-API-main
kosmosX/fairseq/fairseq/benchmark/dummy_dataset.py
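The dummy_dataset.py row above defines a fixed-size dataset whose collater ignores its samples and always returns the same pre-built batch. A minimal usage sketch, assuming fairseq is importable (the batch contents here are placeholders):

from fairseq.benchmark.dummy_dataset import DummyDataset

# Any picklable object works as the batch; collater() returns it verbatim.
batch = {"dummy": list(range(8))}
dataset = DummyDataset(batch, num_items=1000, item_size=128)

print(len(dataset))                          # 1000
print(dataset.num_tokens(0))                 # 128
print(dataset.collater([0, 1, 2]) is batch)  # True: the samples are ignored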
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn
import torch.nn.functional as F

from fairseq.data import Dictionary
from fairseq.models import (
    FairseqDecoder,
    FairseqLanguageModel,
    register_model,
    register_model_architecture,
)


@register_model("dummy_model")
class DummyModel(FairseqLanguageModel):
    def __init__(self, args, encoder):
        super().__init__(encoder)
        self.args = args

    @staticmethod
    def add_args(parser):
        parser.add_argument("--num-layers", type=int, default=24)
        parser.add_argument("--embed-dim", type=int, default=1024)

    @classmethod
    def build_model(cls, args, task):
        encoder = DummyEncoder(
            num_embed=len(task.target_dictionary),
            embed_dim=args.embed_dim,
            num_layers=args.num_layers,
        )
        return cls(args, encoder)

    def forward(self, src_tokens, masked_tokens=None, **kwargs):
        return self.decoder(src_tokens, masked_tokens=masked_tokens)


class DummyEncoder(FairseqDecoder):
    def __init__(self, num_embed=50000, embed_dim=1024, num_layers=24):
        super().__init__(Dictionary())
        self.embed = nn.Embedding(
            num_embeddings=num_embed, embedding_dim=embed_dim, padding_idx=0
        )
        self.layers_a = nn.ModuleList(
            [
                nn.Sequential(
                    nn.LayerNorm(embed_dim),
                    nn.Linear(embed_dim, 3 * embed_dim),  # q, k, v input projection
                    nn.Linear(3 * embed_dim, embed_dim),  # skip self-attention
                    nn.Linear(embed_dim, embed_dim),  # output projection
                    nn.Dropout(),
                )
                for i in range(num_layers)
            ]
        )
        self.layers_b = nn.ModuleList(
            [
                nn.Sequential(
                    nn.LayerNorm(embed_dim),
                    nn.Linear(embed_dim, 4 * embed_dim),  # FFN
                    nn.ReLU(),
                    nn.Linear(4 * embed_dim, embed_dim),  # FFN
                    nn.Dropout(0.1),
                )
                for i in range(num_layers)
            ]
        )
        self.out_proj = nn.Linear(embed_dim, num_embed)

    def forward(self, tokens, masked_tokens=None):
        x = self.embed(tokens)
        for layer_a, layer_b in zip(self.layers_a, self.layers_b):
            x = x + layer_a(x)
            x = x + layer_b(x)
        x = self.out_proj(x)
        if masked_tokens is not None:
            x = x[masked_tokens]
        return (x,)

    def max_positions(self):
        return 1024

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        logits = net_output[0].float()
        if log_probs:
            return F.log_softmax(logits, dim=-1)
        else:
            return F.softmax(logits, dim=-1)


@register_model_architecture("dummy_model", "dummy_model")
def base_architecture(args):
    pass
KosmosX-API-main
kosmosX/fairseq/fairseq/benchmark/dummy_model.py
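The dummy_model.py row above builds a stack of LayerNorm/Linear layers shaped roughly like a Transformer block, intended purely for benchmarking. A small sketch of a forward pass through DummyEncoder, assuming torch and fairseq are installed (sizes are arbitrary):

import torch

from fairseq.benchmark.dummy_model import DummyEncoder

encoder = DummyEncoder(num_embed=1000, embed_dim=64, num_layers=2)
tokens = torch.randint(1, 1000, (4, 16))  # batch of 4 sequences, length 16
logits = encoder(tokens)[0]               # forward() returns a 1-tuple
print(logits.shape)                       # torch.Size([4, 16, 1000])

probs = encoder.get_normalized_probs((logits,), log_probs=False)
print(float(probs.sum(-1)[0, 0]))         # ~1.0 after the softmax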
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import warnings from argparse import Namespace from typing import Any, Callable, Dict, List import torch from fairseq import metrics, search, tokenizer, utils from fairseq.data import Dictionary, FairseqDataset, data_utils, encoders, iterators from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import gen_parser_from_dataclass from fairseq.optim.amp_optimizer import AMPOptimizer from omegaconf import DictConfig from deepspeed.runtime.engine import DeepSpeedEngine logger = logging.getLogger(__name__) class StatefulContainer(object): def __init__(self): self._state = dict() self._factories = dict() def add_factory(self, name, factory: Callable[[], Any]): self._factories[name] = factory def merge_state_dict(self, state_dict: Dict[str, Any]): self._state.update(state_dict) @property def state_dict(self) -> Dict[str, Any]: return self._state def __getattr__(self, name): if name not in self._state and name in self._factories: self._state[name] = self._factories[name]() if name in self._state: return self._state[name] raise AttributeError(f"Task state has no factory for attribute {name}") class FairseqTask(object): """ Tasks store dictionaries and provide helpers for loading/iterating over Datasets, initializing the Model/Criterion and calculating the loss. Tasks have limited statefulness. In particular, state that needs to be saved to/loaded from checkpoints needs to be stored in the `self.state` :class:`StatefulContainer` object. For example:: self.state.add_factory("dictionary", self.load_dictionary) print(self.state.dictionary) # calls self.load_dictionary() This is necessary so that when loading checkpoints, we can properly recreate the task state after initializing the task instance. """ @classmethod def add_args(cls, parser): """Add task-specific arguments to the parser.""" dc = getattr(cls, "__dataclass", None) if dc is not None: gen_parser_from_dataclass(parser, dc()) @staticmethod def logging_outputs_can_be_summed(criterion) -> bool: """ Whether the logging outputs returned by `train_step` and `valid_step` can be summed across workers prior to calling `aggregate_logging_outputs`. Setting this to True will improves distributed training speed. """ return criterion.logging_outputs_can_be_summed() def __init__(self, cfg: FairseqDataclass, **kwargs): self.cfg = cfg self.datasets = dict() self.dataset_to_epoch_iter = dict() self.state = StatefulContainer() @classmethod def load_dictionary(cls, filename): """Load the dictionary from the filename Args: filename (str): the filename """ return Dictionary.load(filename) @classmethod def build_dictionary( cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8 ): """Build the dictionary Args: filenames (list): list of filenames workers (int): number of concurrent workers threshold (int): defines the minimum word count nwords (int): defines the total number of words in the final dictionary, including special symbols padding_factor (int): can be used to pad the dictionary size to be a multiple of 8, which is important on some hardware (e.g., Nvidia Tensor Cores). 
""" d = Dictionary() for filename in filenames: Dictionary.add_file_to_dictionary( filename, d, tokenizer.tokenize_line, workers ) d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor) return d @classmethod def setup_task(cls, cfg: DictConfig, **kwargs): """Setup the task (e.g., load dictionaries). Args: cfg (omegaconf.DictConfig): parsed command-line arguments """ return cls(cfg, **kwargs) def has_sharded_data(self, split): return os.pathsep in getattr(self.cfg, "data", "") def load_dataset( self, split: str, combine: bool = False, task_cfg: FairseqDataclass = None, **kwargs, ): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) combine (bool): combines a split segmented into pieces into one dataset task_cfg (FairseqDataclass): optional task configuration stored in the checkpoint that can be used to load datasets """ raise NotImplementedError def dataset(self, split): """ Return a loaded dataset split. Args: split (str): name of the split (e.g., train, valid, test) Returns: a :class:`~fairseq.data.FairseqDataset` corresponding to *split* """ from fairseq.data import FairseqDataset if split not in self.datasets: raise KeyError("Dataset not loaded: " + split) if not isinstance(self.datasets[split], FairseqDataset): raise TypeError("Datasets are expected to be of type FairseqDataset") return self.datasets[split] def filter_indices_by_size( self, indices, dataset, max_positions=None, ignore_invalid_inputs=False ): """ Filter examples that are too large Args: indices (np.array): original array of sample indices dataset (~fairseq.data.FairseqDataset): dataset to batch max_positions (optional): max sentence length supported by the model (default: None). ignore_invalid_inputs (bool, optional): don't raise Exception for sentences that are too long (default: False). Returns: np.array: array of filtered sample indices """ indices, ignored = dataset.filter_indices_by_size(indices, max_positions) if len(ignored) > 0: if not ignore_invalid_inputs: raise Exception( ( "Size of sample #{} is invalid (={}) since max_positions={}, " "skip this example with --skip-invalid-size-inputs-valid-test" ).format(ignored[0], dataset.size(ignored[0]), max_positions) ) logger.warning( ( "{:,} samples have invalid sizes and will be skipped, " "max_positions={}, first few sample ids={}" ).format(len(ignored), max_positions, ignored[:10]) ) return indices def can_reuse_epoch_itr(self, dataset): # We can reuse the epoch iterator across epochs as long as the dataset # hasn't disabled it. We default to ``False`` here, although in practice # this will be ``True`` for most datasets that inherit from # ``FairseqDataset`` due to the base implementation there. return getattr(dataset, "can_reuse_epoch_itr_across_epochs", False) def get_batch_iterator( self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1, data_buffer_size=0, disable_iterator_cache=False, skip_remainder_batch=False, grouped_shuffling=False, update_epoch_batch_itr=False, ): """ Get an iterator that yields batches of data from the given dataset. Args: dataset (~fairseq.data.FairseqDataset): dataset to batch max_tokens (int, optional): max number of tokens in each batch (default: None). max_sentences (int, optional): max number of sentences in each batch (default: None). max_positions (optional): max sentence length supported by the model (default: None). 
ignore_invalid_inputs (bool, optional): don't raise Exception for sentences that are too long (default: False). required_batch_size_multiple (int, optional): require batch size to be a multiple of N (default: 1). seed (int, optional): seed for random number generator for reproducibility (default: 1). num_shards (int, optional): shard the data iterator into N shards (default: 1). shard_id (int, optional): which shard of the data iterator to return (default: 0). num_workers (int, optional): how many subprocesses to use for data loading. 0 means the data will be loaded in the main process (default: 0). epoch (int, optional): the epoch to start the iterator from (default: 1). data_buffer_size (int, optional): number of batches to preload (default: 0). disable_iterator_cache (bool, optional): don't cache the EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`) (default: False). skip_remainder_batch (bool, optional): if set, discard the last batch in each training epoch, as the last batch is often smaller than local_batch_size * distributed_word_size (default: ``True``). grouped_shuffling (bool, optional): group batches with each groups containing num_shards batches and shuffle groups. Reduces difference between sequence lengths among workers for batches sorted by length. update_epoch_batch_itr (bool optional): if true then donot use the cached batch iterator for the epoch Returns: ~fairseq.iterators.EpochBatchIterator: a batched iterator over the given dataset split """ can_reuse_epoch_itr = ( not disable_iterator_cache and not update_epoch_batch_itr and self.can_reuse_epoch_itr(dataset) ) if can_reuse_epoch_itr and dataset in self.dataset_to_epoch_iter: logger.debug("reusing EpochBatchIterator for epoch {}".format(epoch)) return self.dataset_to_epoch_iter[dataset] assert isinstance(dataset, FairseqDataset) # initialize the dataset with the correct starting epoch dataset.set_epoch(epoch) # get indices ordered by example size with data_utils.numpy_seed(seed): indices = dataset.ordered_indices() # filter examples that are too large if max_positions is not None: indices = self.filter_indices_by_size( indices, dataset, max_positions, ignore_invalid_inputs ) # create mini-batches with given size constraints batch_sampler = dataset.batch_by_size( indices, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple, ) # return a reusable, sharded iterator epoch_iter = iterators.EpochBatchIterator( dataset=dataset, collate_fn=dataset.collater, batch_sampler=batch_sampler, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch, buffer_size=data_buffer_size, skip_remainder_batch=skip_remainder_batch, grouped_shuffling=grouped_shuffling, ) if can_reuse_epoch_itr: self.dataset_to_epoch_iter[dataset] = epoch_iter return epoch_iter def build_model(self, cfg: FairseqDataclass, from_checkpoint=False): """ Build the :class:`~fairseq.models.BaseFairseqModel` instance for this task. Args: cfg (FairseqDataclass): configuration object Returns: a :class:`~fairseq.models.BaseFairseqModel` instance """ from fairseq import models, quantization_utils model = models.build_model(cfg, self, from_checkpoint) model = quantization_utils.quantize_model_scalar(model, cfg) return model def build_criterion(self, cfg: DictConfig): """ Build the :class:`~fairseq.criterions.FairseqCriterion` instance for this task. 
Args: cfg (omegaconf.DictConfig): configration object Returns: a :class:`~fairseq.criterions.FairseqCriterion` instance """ from fairseq import criterions return criterions.build_criterion(cfg, self) def build_generator( self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None, prefix_allowed_tokens_fn=None, ): """ Build a :class:`~fairseq.SequenceGenerator` instance for this task. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models args (fairseq.dataclass.configs.GenerationConfig): configuration object (dataclass) for generation extra_gen_cls_kwargs (Dict[str, Any]): extra options to pass through to SequenceGenerator prefix_allowed_tokens_fn (Callable[[int, torch.Tensor], List[int]]): If provided, this function constrains the beam search to allowed tokens only at each step. The provided function should take 2 arguments: the batch ID (`batch_id: int`) and a unidimensional tensor of token ids (`inputs_ids: torch.Tensor`). It has to return a `List[int]` with the allowed tokens for the next generation step conditioned on the previously generated tokens (`inputs_ids`) and the batch ID (`batch_id`). This argument is useful for constrained generation conditioned on the prefix, as described in "Autoregressive Entity Retrieval" (https://arxiv.org/abs/2010.00904) and https://github.com/facebookresearch/GENRE. """ if getattr(args, "score_reference", False): from fairseq.sequence_scorer import SequenceScorer return SequenceScorer( self.target_dictionary, compute_alignment=getattr(args, "print_alignment", False), ) from fairseq.sequence_generator import ( SequenceGenerator, SequenceGeneratorWithAlignment, ) # Choose search strategy. Defaults to Beam Search. sampling = getattr(args, "sampling", False) sampling_topk = getattr(args, "sampling_topk", -1) sampling_topp = getattr(args, "sampling_topp", -1.0) diverse_beam_groups = getattr(args, "diverse_beam_groups", -1) diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5) match_source_len = getattr(args, "match_source_len", False) diversity_rate = getattr(args, "diversity_rate", -1) constrained = getattr(args, "constraints", False) if prefix_allowed_tokens_fn is None: prefix_allowed_tokens_fn = getattr(args, "prefix_allowed_tokens_fn", None) if ( sum( int(cond) for cond in [ sampling, diverse_beam_groups > 0, match_source_len, diversity_rate > 0, ] ) > 1 ): raise ValueError("Provided Search parameters are mutually exclusive.") assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling" assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling" if sampling: search_strategy = search.Sampling( self.target_dictionary, sampling_topk, sampling_topp ) elif diverse_beam_groups > 0: search_strategy = search.DiverseBeamSearch( self.target_dictionary, diverse_beam_groups, diverse_beam_strength ) elif match_source_len: # this is useful for tagging applications where the output # length should match the input length, so we hardcode the # length constraints for simplicity search_strategy = search.LengthConstrainedBeamSearch( self.target_dictionary, min_len_a=1, min_len_b=0, max_len_a=1, max_len_b=0, ) elif diversity_rate > -1: search_strategy = search.DiverseSiblingsSearch( self.target_dictionary, diversity_rate ) elif constrained: search_strategy = search.LexicallyConstrainedBeamSearch( self.target_dictionary, args.constraints ) elif prefix_allowed_tokens_fn: search_strategy = search.PrefixConstrainedBeamSearch( self.target_dictionary, prefix_allowed_tokens_fn ) else: search_strategy = 
search.BeamSearch(self.target_dictionary) extra_gen_cls_kwargs = extra_gen_cls_kwargs or {} if seq_gen_cls is None: if getattr(args, "print_alignment", False): seq_gen_cls = SequenceGeneratorWithAlignment extra_gen_cls_kwargs["print_alignment"] = args.print_alignment else: seq_gen_cls = SequenceGenerator return seq_gen_cls( models, self.target_dictionary, beam_size=getattr(args, "beam", 5), max_len_a=getattr(args, "max_len_a", 0), max_len_b=getattr(args, "max_len_b", 200), min_len=getattr(args, "min_len", 1), normalize_scores=(not getattr(args, "unnormalized", False)), len_penalty=getattr(args, "lenpen", 1), unk_penalty=getattr(args, "unkpen", 0), temperature=getattr(args, "temperature", 1.0), match_source_len=getattr(args, "match_source_len", False), no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0), search_strategy=search_strategy, **extra_gen_cls_kwargs, ) def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): """ Do forward and backward, and return the loss as computed by *criterion* for the given *model* and *sample*. Args: sample (dict): the mini-batch. The format is defined by the :class:`~fairseq.data.FairseqDataset`. model (~fairseq.models.BaseFairseqModel): the model criterion (~fairseq.criterions.FairseqCriterion): the criterion optimizer (~fairseq.optim.FairseqOptimizer): the optimizer update_num (int): the current update ignore_grad (bool): multiply loss by 0 if this is set to True Returns: tuple: - the loss - the sample size, which is used as the denominator for the gradient - logging outputs to display while training """ model.train() model.set_num_updates(update_num) with torch.autograd.profiler.record_function("forward"): with torch.cuda.amp.autocast(enabled=(isinstance(optimizer, AMPOptimizer))): loss, sample_size, logging_output = criterion(model, sample) if ignore_grad: loss *= 0 with torch.autograd.profiler.record_function("backward"): if isinstance(model, DeepSpeedEngine): model.backward(loss) else: optimizer.backward(loss) return loss, sample_size, logging_output def valid_step(self, sample, model, criterion): model.eval() with torch.no_grad(): loss, sample_size, logging_output = criterion(model, sample) return loss, sample_size, logging_output def optimizer_step(self, optimizer, model, update_num): if isinstance(model, DeepSpeedEngine): model.step() else: optimizer.step() def build_dataset_for_inference( self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs ) -> torch.utils.data.Dataset: raise NotImplementedError def inference_step( self, generator, models, sample, prefix_tokens=None, constraints=None ): with torch.no_grad(): return generator.generate( models, sample, prefix_tokens=prefix_tokens, constraints=constraints ) def begin_epoch(self, epoch, model): """Hook function called before the start of each epoch.""" pass def begin_valid_epoch(self, epoch, model): """Hook function called before the start of each validation epoch.""" pass def aggregate_logging_outputs(self, logging_outputs, criterion): """[deprecated] Aggregate logging outputs from data parallel training.""" utils.deprecation_warning( "The aggregate_logging_outputs API is deprecated. " "Please use the reduce_metrics API instead." 
) with metrics.aggregate() as agg: self.reduce_metrics(logging_outputs, criterion) return agg.get_smoothed_values() def reduce_metrics(self, logging_outputs, criterion): """Aggregate logging outputs from data parallel training.""" # backward compatibility for tasks that override aggregate_logging_outputs base_func = FairseqTask.aggregate_logging_outputs self_func = getattr(self, "aggregate_logging_outputs").__func__ if self_func is not base_func: utils.deprecation_warning( "Tasks should implement the reduce_metrics API. " "Falling back to deprecated aggregate_logging_outputs API." ) agg_logging_outputs = self.aggregate_logging_outputs( logging_outputs, criterion ) for k, v in agg_logging_outputs.items(): metrics.log_scalar(k, v) return if not any("ntokens" in log for log in logging_outputs): warnings.warn( "ntokens not found in Criterion logging outputs, cannot log wpb or wps" ) else: ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) metrics.log_scalar("wpb", ntokens, priority=180, round=1) metrics.log_speed("wps", ntokens, priority=90, round=1) if not any("nsentences" in log for log in logging_outputs): warnings.warn( "nsentences not found in Criterion logging outputs, cannot log bsz" ) else: nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) metrics.log_scalar("bsz", nsentences, priority=190, round=1) criterion.__class__.reduce_metrics(logging_outputs) def state_dict(self): if self.state is not None: return self.state.state_dict return {} def load_state_dict(self, state_dict: Dict[str, Any]): if self.state is not None: self.state.merge_state_dict(state_dict) def max_positions(self): """Return the max input length allowed by the task.""" return None @property def source_dictionary(self): """Return the source :class:`~fairseq.data.Dictionary` (if applicable for this task).""" raise NotImplementedError @property def target_dictionary(self): """Return the target :class:`~fairseq.data.Dictionary` (if applicable for this task).""" raise NotImplementedError def build_tokenizer(self, args): """Build the pre-tokenizer for this task.""" return encoders.build_tokenizer(args) def build_bpe(self, args): """Build the tokenizer for this task.""" return encoders.build_bpe(args) def get_interactive_tokens_and_lengths(self, lines, encode_fn): tokens = [ self.source_dictionary.encode_line( encode_fn(src_str), add_if_not_exist=False ).long() for src_str in lines ] lengths = [t.numel() for t in tokens] return tokens, lengths class LegacyFairseqTask(FairseqTask): def __init__(self, args: Namespace): super().__init__(None) self.args = args self.datasets = {} self.dataset_to_epoch_iter = {} @classmethod def setup_task(cls, args: Namespace, **kwargs): """Setup the task (e.g., load dictionaries). Args: args (argparse.Namespace): parsed command-line arguments """ return cls(args, **kwargs) def has_sharded_data(self, split): return os.pathsep in getattr(self.args, "data", "") def build_model(self, args: Namespace, from_checkpoint=False): """ Build the :class:`~fairseq.models.BaseFairseqModel` instance for this task. Args: args (argparse.Namespace): parsed command-line arguments Returns: a :class:`~fairseq.models.BaseFairseqModel` instance """ from fairseq import models, quantization_utils model = models.build_model(args, self, from_checkpoint) model = quantization_utils.quantize_model_scalar(model, args) return model def build_criterion(self, args: Namespace): """ Build the :class:`~fairseq.criterions.FairseqCriterion` instance for this task. 
Args: args (argparse.Namespace): parsed command-line arguments Returns: a :class:`~fairseq.criterions.FairseqCriterion` instance """ from fairseq import criterions return criterions.build_criterion(args, self)
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/fairseq_task.py
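The FairseqTask docstring in the row above describes lazy task state held in a StatefulContainer: attributes are produced by registered factories on first access and cached so they can be checkpointed. A standalone sketch of that pattern, assuming fairseq is importable:

from fairseq.tasks.fairseq_task import StatefulContainer


def load_vocab():
    print("loading vocab ...")  # runs only on first access
    return {"<pad>": 0, "hello": 1}


state = StatefulContainer()
state.add_factory("vocab", load_vocab)
print(state.vocab)       # triggers the factory, result is cached
print(state.vocab)       # cached value, factory not called again
print(state.state_dict)  # {'vocab': {...}} -- what ends up in checkpoints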
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import logging import os import sys from typing import List, Optional, Tuple import numpy as np from dataclasses import dataclass, field from fairseq.data import Dictionary, HubertDataset from fairseq.dataclass.configs import FairseqDataclass from fairseq.tasks import register_task from fairseq.tasks.fairseq_task import FairseqTask from omegaconf import MISSING logger = logging.getLogger(__name__) class LabelEncoder(object): def __init__(self, dictionary: Dictionary) -> None: self.dictionary = dictionary def __call__(self, label: str) -> List[str]: return self.dictionary.encode_line( label, append_eos=False, add_if_not_exist=False, ) @dataclass class HubertPretrainingConfig(FairseqDataclass): data: str = field(default=MISSING, metadata={"help": "path to data directory"}) fine_tuning: bool = field( default=False, metadata={"help": "set to true if fine-tuning Hubert"} ) labels: List[str] = field( default_factory=lambda: ["ltr"], metadata={ "help": ( "extension of the label files to load, frame-level labels for" " pre-training, and sequence-level label for fine-tuning" ) }, ) label_dir: Optional[str] = field( default=None, metadata={ "help": "if set, looks for labels in this directory instead", }, ) label_rate: int = field( default=-1, metadata={"help": "label frame rate. -1 for sequence label"}, ) sample_rate: int = field( default=16_000, metadata={ "help": "target sample rate. audio files will be up/down " "sampled to this rate" }, ) normalize: bool = field( default=False, metadata={"help": "if set, normalizes input to have 0 mean and unit variance"}, ) enable_padding: bool = field( default=False, metadata={"help": "pad shorter samples instead of cropping"}, ) max_keep_size: Optional[int] = field( default=None, metadata={"help": "exclude sample longer than this"}, ) max_sample_size: Optional[int] = field( default=None, metadata={"help": "max sample size to crop to for batching"}, ) min_sample_size: Optional[int] = field( default=None, metadata={"help": "min sample size to crop to for batching"}, ) single_target: Optional[bool] = field( default=False, metadata={ "help": "if set, AddTargetDatasets outputs same keys " "as AddTargetDataset" }, ) random_crop: Optional[bool] = field( default=True, metadata={"help": "always crop from the beginning if false"}, ) pad_audio: Optional[bool] = field( default=False, metadata={"help": "pad audio to the longest one in the batch if true"}, ) @register_task("hubert_pretraining", dataclass=HubertPretrainingConfig) class HubertPretrainingTask(FairseqTask): cfg: HubertPretrainingConfig def __init__( self, cfg: HubertPretrainingConfig, ) -> None: super().__init__(cfg) logger.info(f"current directory is {os.getcwd()}") logger.info(f"HubertPretrainingTask Config {cfg}") self.cfg = cfg self.fine_tuning = cfg.fine_tuning if cfg.fine_tuning: self.state.add_factory("target_dictionary", self.load_dictionaries) else: self.state.add_factory("dictionaries", self.load_dictionaries) self.blank_symbol = "<s>" @property def source_dictionary(self) -> Optional[Dictionary]: return None @property def target_dictionary(self) -> Optional[Dictionary]: return self.state.target_dictionary @property def dictionaries(self) -> List[Dictionary]: return self.state.dictionaries @classmethod def setup_task( 
cls, cfg: HubertPretrainingConfig, **kwargs ) -> "HubertPretrainingTask": return cls(cfg) def load_dictionaries(self): label_dir = self.cfg.data if self.cfg.label_dir is None else self.cfg.label_dir dictionaries = [ Dictionary.load(f"{label_dir}/dict.{label}.txt") for label in self.cfg.labels ] return dictionaries[0] if self.cfg.fine_tuning else dictionaries def get_label_dir(self) -> str: if self.cfg.label_dir is None: return self.cfg.data return self.cfg.label_dir def load_dataset(self, split: str, **kwargs) -> None: manifest = f"{self.cfg.data}/{split}.tsv" dicts = [self.target_dictionary] if self.cfg.fine_tuning else self.dictionaries pad_list = [dict.pad() for dict in dicts] eos_list = [dict.eos() for dict in dicts] procs = [LabelEncoder(dict) for dict in dicts] paths = [f"{self.get_label_dir()}/{split}.{l}" for l in self.cfg.labels] # hubert v1: pad_audio=True, random_crop=False; self.datasets[split] = HubertDataset( manifest, sample_rate=self.cfg.sample_rate, label_paths=paths, label_rates=self.cfg.label_rate, pad_list=pad_list, eos_list=eos_list, label_processors=procs, max_keep_sample_size=self.cfg.max_keep_size, min_keep_sample_size=self.cfg.min_sample_size, max_sample_size=self.cfg.max_sample_size, pad_audio=self.cfg.pad_audio, normalize=self.cfg.normalize, store_labels=False, random_crop=self.cfg.random_crop, single_target=self.cfg.single_target, ) def max_positions(self) -> Tuple[int, int]: return (sys.maxsize, sys.maxsize) def filter_indices_by_size(self, indices: np.array, *args, **kwargs) -> np.array: return indices
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/hubert_pretraining.py
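The hubert_pretraining.py row above wires dataset loading to a HubertPretrainingConfig dataclass. Constructing the config and task only registers dictionary factories; manifests and label dictionaries are read later, so a sketch like the following can use placeholder paths (both directories below are hypothetical):

from fairseq.tasks.hubert_pretraining import (
    HubertPretrainingConfig,
    HubertPretrainingTask,
)

cfg = HubertPretrainingConfig(
    data="/path/to/manifests",    # hypothetical: directory holding <split>.tsv files
    label_dir="/path/to/labels",  # hypothetical: directory holding dict.<label>.txt
    labels=["km"],
    label_rate=50,
    sample_rate=16_000,
)
task = HubertPretrainingTask.setup_task(cfg)
print(task.get_label_dir())       # /path/to/labels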
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import numpy as np from fairseq.data import ( AppendTokenDataset, ConcatDataset, DenoisingDataset, Dictionary, PrependTokenDataset, ResamplingDataset, SortDataset, TokenBlockDataset, data_utils, ) from fairseq.data.encoders.utils import get_whole_word_mask from fairseq.tasks import register_task from .denoising import DenoisingTask logger = logging.getLogger(__name__) @register_task("multilingual_denoising") class MultilingualDenoisingTask(DenoisingTask): @staticmethod def add_args(parser): DenoisingTask.add_args(parser) parser.add_argument( "--multilang-sampling-alpha", type=float, default=1.0, help="smoothing alpha for sample ratios across multiple datasets", ) parser.add_argument("--add-lang-token", default=False, action="store_true") parser.add_argument( "--langs", type=str, help="language ids we are considering", default=None ) parser.add_argument( "--no-whole-word-mask-langs", type=str, default="", metavar="N", help="languages without spacing between words dont support whole word masking", ) @classmethod def setup_task(cls, args, **kwargs): """Setup the task.""" paths = args.data.split(":") assert len(paths) > 0 dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt")) data_path = paths[0] if args.langs is None: languages = sorted( [ name for name in os.listdir(data_path) if os.path.isdir(os.path.join(data_path, name)) ] ) else: languages = args.langs.split(",") if args.add_lang_token: for lang in languages: dictionary.add_symbol("[{}]".format(lang)) logger.info("dictionary: {} types".format(len(dictionary))) if not hasattr(args, "shuffle_instance"): args.shuffle_instance = False return cls(args, dictionary) def __init__(self, args, dictionary): super().__init__(args, dictionary) self.dictionary = dictionary self.seed = args.seed # add mask token self.mask_idx = self.dictionary.add_symbol("<mask>") self.langs = args.langs self.args = args def _get_sample_prob(self, dataset_lens): """ Get smoothed sampling porbability by languages. This helps low resource languages by upsampling them. """ prob = dataset_lens / dataset_lens.sum() smoothed_prob = prob ** self.args.multilang_sampling_alpha smoothed_prob = smoothed_prob / smoothed_prob.sum() return smoothed_prob def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, test) """ paths = self.args.data.split(":") assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] split_path = os.path.join(data_path, split) if self.langs is None: languages = sorted( [ name for name in os.listdir(data_path) if os.path.isdir(os.path.join(data_path, name)) ] ) else: languages = self.langs.split(",") for name in languages: p = os.path.join(data_path, name) assert os.path.exists(p), "data not found: {}".format(p) logger.info("Training on {0} languages: {1}".format(len(languages), languages)) logger.info( "Language to id mapping: ", {lang: id for id, lang in enumerate(languages)} ) mask_whole_words = get_whole_word_mask(self.args, self.dictionary) language_without_segmentations = self.args.no_whole_word_mask_langs.split(",") lang_datasets = [] for language in languages: split_path = os.path.join(data_path, language, split) dataset = data_utils.load_indexed_dataset( split_path, self.source_dictionary, self.args.dataset_impl, combine=combine, ) if dataset is None: raise FileNotFoundError( "Dataset not found: {} ({})".format(split, split_path) ) end_token = ( self.source_dictionary.index("[{}]".format(language)) if self.args.add_lang_token else self.source_dictionary.eos() ) # create continuous blocks of tokens dataset = TokenBlockDataset( dataset, dataset.sizes, self.args.tokens_per_sample - 2, # one less for <s> pad=self.source_dictionary.pad(), eos=end_token, break_mode=self.args.sample_break_mode, ) logger.info("loaded {} blocks from: {}".format(len(dataset), split_path)) # prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT) dataset = PrependTokenDataset(dataset, self.source_dictionary.bos()) dataset = AppendTokenDataset(dataset, end_token) lang_mask_whole_words = ( mask_whole_words if language not in language_without_segmentations else None ) lang_dataset = DenoisingDataset( dataset, dataset.sizes, self.dictionary, self.mask_idx, lang_mask_whole_words, shuffle=self.args.shuffle_instance, seed=self.seed, args=self.args, eos=None if not self.args.add_lang_token else self.source_dictionary.index("[{}]".format(language)), ) lang_datasets.append(lang_dataset) dataset_lengths = np.array( [len(d) for d in lang_datasets], dtype=float, ) logger.info( "loaded total {} blocks for all languages".format( int(dataset_lengths.sum()), ) ) if split == self.args.train_subset: # For train subset, additionally up or down sample languages. 
sample_probs = self._get_sample_prob(dataset_lengths) logger.info( "Sample probability by language: {}".format( { lang: "{0:.4f}".format(sample_probs[id]) for id, lang in enumerate(languages) } ) ) size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths logger.info( "Up/Down Sampling ratio by language: {}".format( { lang: "{0:.2f}".format(size_ratio[id]) for id, lang in enumerate(languages) } ) ) resampled_lang_datasets = [ ResamplingDataset( lang_datasets[i], size_ratio=size_ratio[i], seed=self.args.seed, epoch=epoch, replace=size_ratio[i] >= 1.0, ) for i, d in enumerate(lang_datasets) ] dataset = ConcatDataset( resampled_lang_datasets, ) else: dataset = ConcatDataset(lang_datasets) lang_splits = [split] for lang_id, lang_dataset in enumerate(lang_datasets): split_name = split + "_" + languages[lang_id] lang_splits.append(split_name) self.datasets[split_name] = lang_dataset if split in self.args.valid_subset: self.args.valid_subset = self.args.valid_subset.replace( split, ",".join(lang_splits) ) with data_utils.numpy_seed(self.args.seed + epoch): shuffle = np.random.permutation(len(dataset)) self.datasets[split] = SortDataset( dataset, sort_order=[ shuffle, dataset.sizes, ], )
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/multilingual_denoising.py
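The multilingual_denoising.py row above upsamples low-resource languages with temperature-style smoothing in _get_sample_prob: each language's share is raised to the power alpha (--multilang-sampling-alpha) and renormalized. A standalone numerical check of that formula (the corpus sizes are made up):

import numpy as np

dataset_lens = np.array([1_000_000.0, 10_000.0])  # hypothetical high/low-resource sizes
alpha = 0.3

prob = dataset_lens / dataset_lens.sum()
smoothed = prob ** alpha
smoothed = smoothed / smoothed.sum()

print(prob.round(4))      # [0.9901 0.0099]
print(smoothed.round(4))  # [0.7992 0.2008] -- the low-resource share grows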
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os from fairseq import utils from fairseq.data import ( AppendTokenDataset, DenoisingDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, PadDataset, PrependTokenDataset, StripTokenDataset, TokenBlockDataset, data_utils, ) from fairseq.data.encoders.utils import get_whole_word_mask from fairseq.data.shorten_dataset import maybe_shorten_dataset from fairseq.tasks import LegacyFairseqTask, register_task import numpy as np logger = logging.getLogger(__name__) @register_task("denoising") class DenoisingTask(LegacyFairseqTask): """ Denoising task for applying sequence to sequence denoising. (ie. BART) """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument("data", help="path to data directory") parser.add_argument( "--tokens-per-sample", default=512, type=int, help="max number of total tokens over all segments" " per sample for dataset", ) parser.add_argument( "--sample-break-mode", default="complete_doc", type=str, help="mode for breaking sentence", ) parser.add_argument( "--mask", default=0.0, type=float, help="fraction of words/subwords that will be masked", ) parser.add_argument( "--mask-random", default=0.0, type=float, help="instead of using [MASK], use random token this often", ) parser.add_argument( "--insert", default=0.0, type=float, help="insert this percentage of additional random tokens", ) parser.add_argument( "--permute", default=0.0, type=float, help="take this proportion of subwords and permute them", ) parser.add_argument( "--rotate", default=0.5, type=float, help="rotate this proportion of inputs", ) parser.add_argument( "--poisson-lambda", default=3.0, type=float, help="randomly shuffle sentences for this proportion of inputs", ) parser.add_argument( "--permute-sentences", default=0.0, type=float, help="shuffle this proportion of sentences in all inputs", ) parser.add_argument( "--mask-length", default="subword", type=str, choices=["subword", "word", "span-poisson"], help="mask length to choose", ) parser.add_argument( "--replace-length", default=-1, type=int, help="when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)", ) parser.add_argument( "--max-source-positions", default=1024, type=int, metavar="N", help="max number of tokens in the source sequence", ) parser.add_argument( "--max-target-positions", default=1024, type=int, metavar="N", help="max number of tokens in the target sequence", ) parser.add_argument( "--shorten-method", default="none", choices=["none", "truncate", "random_crop"], help="if not none, shorten sequences that exceed --tokens-per-sample", ) parser.add_argument( "--shorten-data-split-list", default="", help="comma-separated list of dataset splits to apply shortening to, " 'e.g., "train,valid" (default: all dataset splits)', ) def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed # add mask token self.mask_idx = self.dictionary.add_symbol("<mask>") @classmethod def setup_task(cls, args, **kwargs): """Setup the task.""" paths = utils.split_paths(args.data) assert len(paths) > 0 dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt")) logger.info("dictionary: {} types".format(len(dictionary))) if not hasattr(args, "shuffle_instance"): args.shuffle_instance = False return cls(args, dictionary) def 
load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ paths = utils.split_paths(self.args.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] split_path = os.path.join(data_path, split) dataset = data_utils.load_indexed_dataset( split_path, self.dictionary, self.args.dataset_impl, combine=combine, ) if dataset is None: raise FileNotFoundError( "Dataset not found: {} ({})".format(split, split_path) ) dataset = StripTokenDataset(dataset, self.dictionary.eos()) dataset = maybe_shorten_dataset( dataset, split, self.args.shorten_data_split_list, self.args.shorten_method, self.args.tokens_per_sample, self.args.seed, ) # create continuous blocks of tokens dataset = TokenBlockDataset( dataset, dataset.sizes, self.args.tokens_per_sample - 2, # one less for <s> and one for </s> pad=self.dictionary.pad(), eos=self.dictionary.eos(), break_mode=self.args.sample_break_mode, document_sep_len=0, ) logger.info("loaded {} blocks from: {}".format(len(dataset), split_path)) # prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT) dataset = PrependTokenDataset(dataset, self.source_dictionary.bos()) dataset = AppendTokenDataset(dataset, self.source_dictionary.eos()) mask_whole_words = ( get_whole_word_mask(self.args, self.source_dictionary) if self.args.mask_length != "subword" else None ) self.datasets[split] = DenoisingDataset( dataset, dataset.sizes, self.dictionary, self.mask_idx, mask_whole_words, shuffle=self.args.shuffle_instance, seed=self.seed, args=self.args, ) logger.info( "Split: {0}, Loaded {1} samples of denoising_dataset".format( split, len(self.datasets[split]), ) ) def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs): """ Generate batches for inference. We assume that the input begins with a bos symbol (`<s>`) and ends with an eos symbol (`</s>`). """ pad = self.source_dictionary.pad() eos = self.source_dictionary.eos() src_dataset = TokenBlockDataset( src_tokens, src_lengths, block_size=self.args.tokens_per_sample - 2, # for <s> and </s> pad=pad, eos=eos, break_mode=self.args.sample_break_mode, document_sep_len=0, ) prev_output_tokens = PrependTokenDataset( StripTokenDataset(src_dataset, eos), eos ) src_dataset = PadDataset(src_dataset, pad_idx=pad, left_pad=False) return NestedDictionaryDataset( { "id": IdDataset(), "net_input": { "src_tokens": src_dataset, "src_lengths": NumelDataset(src_dataset, reduce=False), "prev_output_tokens": PadDataset( prev_output_tokens, pad_idx=pad, left_pad=False ), }, "target": src_dataset, }, sizes=[np.array(src_lengths)], ) def max_positions(self): """Return the max sentence length allowed by the task.""" return (self.args.max_source_positions, self.args.max_target_positions) @property def source_dictionary(self): """Return the source :class:`~fairseq.data.Dictionary`.""" return self.dictionary @property def target_dictionary(self): """Return the target :class:`~fairseq.data.Dictionary`.""" return self.dictionary
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/denoising.py
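The denoising.py row above exposes the BART-style noising knobs as CLI flags via add_args. A quick illustration of how those flags land on an argparse namespace, assuming fairseq is importable (the data path is a placeholder string and is not read here):

import argparse

from fairseq.tasks.denoising import DenoisingTask

parser = argparse.ArgumentParser()
DenoisingTask.add_args(parser)
args = parser.parse_args([
    "/data/bin",                   # positional "data" argument, hypothetical path
    "--mask", "0.3",
    "--mask-length", "span-poisson",
    "--replace-length", "1",
])
print(args.tokens_per_sample, args.mask, args.mask_length)
# 512 0.3 span-poisson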
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import logging import os from collections import OrderedDict from argparse import ArgumentError import torch from fairseq import metrics, utils from fairseq.data import ( Dictionary, LanguagePairDataset, RoundRobinZipDatasets, TransformEosLangPairDataset, ) from fairseq.models import FairseqMultiModel from fairseq.tasks.translation import load_langpair_dataset from . import LegacyFairseqTask, register_task logger = logging.getLogger(__name__) def _lang_token(lang: str): return "__{}__".format(lang) def _lang_token_index(dic: Dictionary, lang: str): """Return language token index.""" idx = dic.index(_lang_token(lang)) assert idx != dic.unk_index, "cannot find language token for lang {}".format(lang) return idx @register_task("multilingual_translation") class MultilingualTranslationTask(LegacyFairseqTask): """A task for training multiple translation models simultaneously. We iterate round-robin over batches from multiple language pairs, ordered according to the `--lang-pairs` argument. The training loop is roughly: for i in range(len(epoch)): for lang_pair in args.lang_pairs: batch = next_batch_for_lang_pair(lang_pair) loss = criterion(model_for_lang_pair(lang_pair), batch) loss.backward() optimizer.step() In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset (e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that implements the `FairseqMultiModel` interface. During inference it is required to specify a single `--source-lang` and `--target-lang`, which indicates the inference langauge direction. `--lang-pairs`, `--encoder-langtok`, `--decoder-langtok` have to be set to the same value as training. """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" # fmt: off parser.add_argument('data', metavar='DIR', help='path to data directory') parser.add_argument('--lang-pairs', default=None, metavar='PAIRS', help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr') parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language (only needed for inference)') parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language (only needed for inference)') parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', help='pad the source on the left (default: True)') parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left (default: False)') try: parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence') parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence') except ArgumentError: # this might have already been defined. Once we transition this to hydra it should be fine to add it here. pass parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset') parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'], metavar='SRCTGT', help='replace beginning-of-sentence in source sentence with source or target ' 'language token. 
(src/tgt)') parser.add_argument('--decoder-langtok', action='store_true', help='replace beginning-of-sentence in target sentence with target language token') # fmt: on def __init__(self, args, dicts, training): super().__init__(args) self.dicts = dicts self.training = training if training: self.lang_pairs = args.lang_pairs else: self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)] # eval_lang_pairs for multilingual translation is usually all of the # lang_pairs. However for other multitask settings or when we want to # optimize for certain languages we want to use a different subset. Thus # the eval_lang_pairs class variable is provided for classes that extend # this class. self.eval_lang_pairs = self.lang_pairs # model_lang_pairs will be used to build encoder-decoder model pairs in # models.build_model(). This allows multitask type of sub-class can # build models other than the input lang_pairs self.model_lang_pairs = self.lang_pairs self.langs = list(dicts.keys()) @classmethod def setup_task(cls, args, **kwargs): dicts, training = cls.prepare(args, **kwargs) return cls(args, dicts, training) @classmethod def update_args(cls, args): args.left_pad_source = utils.eval_bool(args.left_pad_source) args.left_pad_target = utils.eval_bool(args.left_pad_target) if args.lang_pairs is None: raise ValueError( "--lang-pairs is required. List all the language pairs in the training objective." ) if isinstance(args.lang_pairs, str): args.lang_pairs = args.lang_pairs.split(",") @classmethod def prepare(cls, args, **kargs): cls.update_args(args) sorted_langs = sorted( list({x for lang_pair in args.lang_pairs for x in lang_pair.split("-")}) ) if args.source_lang is not None or args.target_lang is not None: training = False else: training = True # load dictionaries dicts = OrderedDict() for lang in sorted_langs: paths = utils.split_paths(args.data) assert len(paths) > 0 dicts[lang] = cls.load_dictionary( os.path.join(paths[0], "dict.{}.txt".format(lang)) ) if len(dicts) > 0: assert dicts[lang].pad() == dicts[sorted_langs[0]].pad() assert dicts[lang].eos() == dicts[sorted_langs[0]].eos() assert dicts[lang].unk() == dicts[sorted_langs[0]].unk() if args.encoder_langtok is not None or args.decoder_langtok: for lang_to_add in sorted_langs: dicts[lang].add_symbol(_lang_token(lang_to_add)) logger.info("[{}] dictionary: {} types".format(lang, len(dicts[lang]))) return dicts, training def get_encoder_langtok(self, src_lang, tgt_lang): if self.args.encoder_langtok is None: return self.dicts[src_lang].eos() if self.args.encoder_langtok == "src": return _lang_token_index(self.dicts[src_lang], src_lang) else: return _lang_token_index(self.dicts[src_lang], tgt_lang) def get_decoder_langtok(self, tgt_lang): if not self.args.decoder_langtok: return self.dicts[tgt_lang].eos() return _lang_token_index(self.dicts[tgt_lang], tgt_lang) def alter_dataset_langtok( self, lang_pair_dataset, src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None, ): if self.args.encoder_langtok is None and not self.args.decoder_langtok: return lang_pair_dataset new_src_eos = None if ( self.args.encoder_langtok is not None and src_eos is not None and src_lang is not None and tgt_lang is not None ): new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang) else: src_eos = None new_tgt_bos = None if self.args.decoder_langtok and tgt_eos is not None and tgt_lang is not None: new_tgt_bos = self.get_decoder_langtok(tgt_lang) else: tgt_eos = None return TransformEosLangPairDataset( lang_pair_dataset, src_eos=src_eos, 
new_src_eos=new_src_eos, tgt_bos=tgt_eos, new_tgt_bos=new_tgt_bos, ) def load_dataset(self, split, epoch=1, **kwargs): """Load a dataset split.""" paths = utils.split_paths(self.args.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] def language_pair_dataset(lang_pair): src, tgt = lang_pair.split("-") langpair_dataset = load_langpair_dataset( data_path, split, src, self.dicts[src], tgt, self.dicts[tgt], combine=True, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions, ) return self.alter_dataset_langtok( langpair_dataset, src_eos=self.dicts[src].eos(), src_lang=src, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt, ) self.datasets[split] = RoundRobinZipDatasets( OrderedDict( [ (lang_pair, language_pair_dataset(lang_pair)) for lang_pair in self.lang_pairs ] ), eval_key=None if self.training else "%s-%s" % (self.args.source_lang, self.args.target_lang), ) def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): if constraints is not None: raise NotImplementedError( "Constrained decoding with the multilingual_translation task is not supported" ) lang_pair = "%s-%s" % (self.args.source_lang, self.args.target_lang) return RoundRobinZipDatasets( OrderedDict( [ ( lang_pair, self.alter_dataset_langtok( LanguagePairDataset( src_tokens, src_lengths, self.source_dictionary ), src_eos=self.source_dictionary.eos(), src_lang=self.args.source_lang, tgt_eos=self.target_dictionary.eos(), tgt_lang=self.args.target_lang, ), ) ] ), eval_key=lang_pair, ) def build_model(self, args, from_checkpoint=False): def check_args(): messages = [] if ( len(set(self.args.lang_pairs).symmetric_difference(args.lang_pairs)) != 0 ): messages.append( "--lang-pairs should include all the language pairs {}.".format( args.lang_pairs ) ) if self.args.encoder_langtok != args.encoder_langtok: messages.append( "--encoder-langtok should be {}.".format(args.encoder_langtok) ) if self.args.decoder_langtok != args.decoder_langtok: messages.append( "--decoder-langtok should {} be set.".format( "" if args.decoder_langtok else "not" ) ) if len(messages) > 0: raise ValueError(" ".join(messages)) # Update args -> the fact that the constructor here # changes the args object doesn't mean you get the same one here self.update_args(args) # Check if task args are consistant with model args check_args() from fairseq import models model = models.build_model(args, self, from_checkpoint) if not isinstance(model, FairseqMultiModel): raise ValueError( "MultilingualTranslationTask requires a FairseqMultiModel architecture" ) return model def _per_lang_pair_train_loss( self, lang_pair, model, update_num, criterion, sample, optimizer, ignore_grad ): loss, sample_size, logging_output = criterion( model.models[lang_pair], sample[lang_pair] ) if ignore_grad: loss *= 0 optimizer.backward(loss) return loss, sample_size, logging_output def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): model.train() from collections import defaultdict agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, defaultdict(float) curr_lang_pairs = [ lang_pair for lang_pair in self.model_lang_pairs if sample[lang_pair] is not None and len(sample[lang_pair]) != 0 ] for idx, lang_pair in enumerate(curr_lang_pairs): def maybe_no_sync(): if ( self.args.distributed_world_size > 1 and 
hasattr(model, "no_sync") and idx < len(curr_lang_pairs) - 1 ): return model.no_sync() else: return contextlib.ExitStack() # dummy contextmanager with maybe_no_sync(): loss, sample_size, logging_output = self._per_lang_pair_train_loss( lang_pair, model, update_num, criterion, sample, optimizer, ignore_grad, ) agg_loss += loss.detach().item() # TODO make summing of the sample sizes configurable agg_sample_size += sample_size for k in logging_output: agg_logging_output[k] += logging_output[k] agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k] return agg_loss, agg_sample_size, agg_logging_output def _per_lang_pair_valid_loss(self, lang_pair, model, criterion, sample): return criterion(model.models[lang_pair], sample[lang_pair]) def valid_step(self, sample, model, criterion): model.eval() with torch.no_grad(): from collections import defaultdict agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, defaultdict(float) for lang_pair in self.eval_lang_pairs: if ( lang_pair not in sample or sample[lang_pair] is None or len(sample[lang_pair]) == 0 ): continue loss, sample_size, logging_output = self._per_lang_pair_valid_loss( lang_pair, model, criterion, sample ) agg_loss += loss.data.item() # TODO make summing of the sample sizes configurable agg_sample_size += sample_size for k in logging_output: agg_logging_output[k] += logging_output[k] agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k] return agg_loss, agg_sample_size, agg_logging_output def inference_step( self, generator, models, sample, prefix_tokens=None, constraints=None ): with torch.no_grad(): if self.args.decoder_langtok: bos_token = _lang_token_index( self.target_dictionary, self.args.target_lang ) else: bos_token = self.target_dictionary.eos() return generator.generate( models, sample, prefix_tokens=prefix_tokens, constraints=constraints, bos_token=bos_token, ) def reduce_metrics(self, logging_outputs, criterion): with metrics.aggregate(): # pass 'sample_size', 'nsentences', 'ntokens' stats to fairseq_task super().reduce_metrics(logging_outputs, criterion) for k in ["sample_size", "nsentences", "ntokens"]: metrics.log_scalar(k, sum(l[k] for l in logging_outputs)) @property def source_dictionary(self): if self.training: return next(iter(self.dicts.values())) else: return self.dicts[self.args.source_lang] @property def target_dictionary(self): if self.training: return next(iter(self.dicts.values())) else: return self.dicts[self.args.target_lang] def max_positions(self): """Return the max sentence length allowed by the task.""" if len(self.datasets.values()) == 0: return { "%s-%s" % (self.args.source_lang, self.args.target_lang): ( self.args.max_source_positions, self.args.max_target_positions, ) } return OrderedDict( [ (key, (self.args.max_source_positions, self.args.max_target_positions)) for split in self.datasets.keys() for key in self.datasets[split].datasets.keys() ] )
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/multilingual_translation.py
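The multilingual_translation.py row above marks languages with "__<lang>__" dictionary symbols via the _lang_token helpers. A minimal sketch of that convention, assuming fairseq is importable:

from fairseq.data import Dictionary
from fairseq.tasks.multilingual_translation import _lang_token, _lang_token_index

d = Dictionary()
for lang in ("en", "de"):
    d.add_symbol(_lang_token(lang))   # adds "__en__", "__de__"

print(_lang_token("de"))              # __de__
print(_lang_token_index(d, "de"))     # index of "__de__"; asserts it is not <unk>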
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq import utils from fairseq.data import LanguagePairDataset from . import register_task from .translation import TranslationTask, load_langpair_dataset @register_task("translation_from_pretrained_bart") class TranslationFromPretrainedBARTTask(TranslationTask): """ Translate from source language to target language with a model initialized with a multilingual pretrain. Args: src_dict (~fairseq.data.Dictionary): dictionary for the source language tgt_dict (~fairseq.data.Dictionary): dictionary for the target language .. note:: The translation task is compatible with :mod:`fairseq-train`, :mod:`fairseq-generate` and :mod:`fairseq-interactive`. The translation task provides the following additional command-line arguments: .. argparse:: :ref: fairseq.tasks.translation_parser :prog: """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" # fmt: off TranslationTask.add_args(parser) parser.add_argument('--langs', type=str, metavar='LANG', help='comma-separated list of monolingual language, ' 'for example, "en,de,fr". These should match the ' 'langs from pretraining (and be in the same order). ' 'You should always add all pretraining language idx ' 'during finetuning.') parser.add_argument('--prepend-bos', action='store_true', help='prepend bos token to each sentence, which matches ' 'mBART pretraining') # fmt: on def __init__(self, args, src_dict, tgt_dict): super().__init__(args, src_dict, tgt_dict) self.langs = args.langs.split(",") for d in [src_dict, tgt_dict]: for l in self.langs: d.add_symbol("[{}]".format(l)) d.add_symbol("<mask>") def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, test) """ paths = utils.split_paths(self.args.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] # infer langcode src, tgt = self.args.source_lang, self.args.target_lang self.datasets[split] = load_langpair_dataset( data_path, split, src, self.src_dict, tgt, self.tgt_dict, combine=combine, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=getattr(self.args, "max_source_positions", 1024), max_target_positions=getattr(self.args, "max_target_positions", 1024), load_alignments=self.args.load_alignments, prepend_bos=getattr(self.args, "prepend_bos", False), append_source_id=True, ) def build_generator(self, models, args, **unused): if getattr(args, "score_reference", False): from fairseq.sequence_scorer import SequenceScorer return SequenceScorer( self.target_dictionary, eos=self.tgt_dict.index("[{}]".format(self.args.target_lang)), ) else: from fairseq.sequence_generator import SequenceGenerator return SequenceGenerator( models, self.target_dictionary, beam_size=getattr(args, "beam", 5), max_len_a=getattr(args, "max_len_a", 0), max_len_b=getattr(args, "max_len_b", 200), min_len=getattr(args, "min_len", 1), normalize_scores=(not getattr(args, "unnormalized", False)), len_penalty=getattr(args, "lenpen", 1), unk_penalty=getattr(args, "unkpen", 0), temperature=getattr(args, "temperature", 1.0), match_source_len=getattr(args, "match_source_len", False), no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0), eos=self.tgt_dict.index("[{}]".format(self.args.target_lang)), ) def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): src_lang_id = self.source_dictionary.index("[{}]".format(self.args.source_lang)) source_tokens = [] for s_t in src_tokens: s_t = torch.cat([s_t, s_t.new(1).fill_(src_lang_id)]) source_tokens.append(s_t) dataset = LanguagePairDataset( source_tokens, src_lengths, self.source_dictionary, tgt_dict=self.target_dictionary, constraints=constraints, ) return dataset
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/translation_from_pretrained_bart.py
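A hedged, minimal sketch of the sentinel-token mechanism used by TranslationFromPretrainedBARTTask above: each --langs entry is added to both dictionaries as "[lang]", and build_generator looks the target sentinel up to use it as the decoding EOS. The language codes below are only examples, and the snippet assumes fairseq is importable.

from fairseq.data import Dictionary

langs = "en_XX,ro_RO".split(",")  # hypothetical --langs value
d = Dictionary()
for lang in langs:
    d.add_symbol("[{}]".format(lang))
d.add_symbol("<mask>")

# the generator decodes with the target-language sentinel instead of plain </s>
tgt_sentinel = d.index("[ro_RO]")
print(tgt_sentinel, d[tgt_sentinel])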
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field import torch from fairseq import utils from fairseq.data import LanguagePairDataset from fairseq.dataclass import ChoiceEnum from fairseq.tasks import register_task from fairseq.tasks.translation import ( TranslationConfig, TranslationTask, load_langpair_dataset, ) from fairseq.utils import new_arange NOISE_CHOICES = ChoiceEnum(["random_delete", "random_mask", "no_noise", "full_mask"]) @dataclass class TranslationLevenshteinConfig(TranslationConfig): noise: NOISE_CHOICES = field( default="random_delete", metadata={"help": "type of noise"}, ) @register_task("translation_lev", dataclass=TranslationLevenshteinConfig) class TranslationLevenshteinTask(TranslationTask): """ Translation (Sequence Generation) task for Levenshtein Transformer See `"Levenshtein Transformer" <https://arxiv.org/abs/1905.11006>`_. """ cfg: TranslationLevenshteinConfig def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ paths = utils.split_paths(self.cfg.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] # infer langcode src, tgt = self.cfg.source_lang, self.cfg.target_lang self.datasets[split] = load_langpair_dataset( data_path, split, src, self.src_dict, tgt, self.tgt_dict, combine=combine, dataset_impl=self.cfg.dataset_impl, upsample_primary=self.cfg.upsample_primary, left_pad_source=self.cfg.left_pad_source, left_pad_target=self.cfg.left_pad_target, max_source_positions=self.cfg.max_source_positions, max_target_positions=self.cfg.max_target_positions, prepend_bos=True, ) def inject_noise(self, target_tokens): def _random_delete(target_tokens): pad = self.tgt_dict.pad() bos = self.tgt_dict.bos() eos = self.tgt_dict.eos() max_len = target_tokens.size(1) target_mask = target_tokens.eq(pad) target_score = target_tokens.clone().float().uniform_() target_score.masked_fill_( target_tokens.eq(bos) | target_tokens.eq(eos), 0.0 ) target_score.masked_fill_(target_mask, 1) target_score, target_rank = target_score.sort(1) target_length = target_mask.size(1) - target_mask.float().sum( 1, keepdim=True ) # do not delete <bos> and <eos> (we assign 0 score for them) target_cutoff = ( 2 + ( (target_length - 2) * target_score.new_zeros(target_score.size(0), 1).uniform_() ).long() ) target_cutoff = target_score.sort(1)[1] >= target_cutoff prev_target_tokens = ( target_tokens.gather(1, target_rank) .masked_fill_(target_cutoff, pad) .gather(1, target_rank.masked_fill_(target_cutoff, max_len).sort(1)[1]) ) prev_target_tokens = prev_target_tokens[ :, : prev_target_tokens.ne(pad).sum(1).max() ] return prev_target_tokens def _random_mask(target_tokens): pad = self.tgt_dict.pad() bos = self.tgt_dict.bos() eos = self.tgt_dict.eos() unk = self.tgt_dict.unk() target_masks = ( target_tokens.ne(pad) & target_tokens.ne(bos) & target_tokens.ne(eos) ) target_score = target_tokens.clone().float().uniform_() target_score.masked_fill_(~target_masks, 2.0) target_length = target_masks.sum(1).float() target_length = target_length * target_length.clone().uniform_() target_length = target_length + 1 # make sure to mask at least one token. 
_, target_rank = target_score.sort(1) target_cutoff = new_arange(target_rank) < target_length[:, None].long() prev_target_tokens = target_tokens.masked_fill( target_cutoff.scatter(1, target_rank, target_cutoff), unk ) return prev_target_tokens def _full_mask(target_tokens): pad = self.tgt_dict.pad() bos = self.tgt_dict.bos() eos = self.tgt_dict.eos() unk = self.tgt_dict.unk() target_mask = ( target_tokens.eq(bos) | target_tokens.eq(eos) | target_tokens.eq(pad) ) return target_tokens.masked_fill(~target_mask, unk) if self.cfg.noise == "random_delete": return _random_delete(target_tokens) elif self.cfg.noise == "random_mask": return _random_mask(target_tokens) elif self.cfg.noise == "full_mask": return _full_mask(target_tokens) elif self.cfg.noise == "no_noise": return target_tokens else: raise NotImplementedError def build_generator(self, models, args, **unused): # add models input to match the API for SequenceGenerator from fairseq.iterative_refinement_generator import IterativeRefinementGenerator return IterativeRefinementGenerator( self.target_dictionary, eos_penalty=getattr(args, "iter_decode_eos_penalty", 0.0), max_iter=getattr(args, "iter_decode_max_iter", 10), beam_size=getattr(args, "iter_decode_with_beam", 1), reranking=getattr(args, "iter_decode_with_external_reranker", False), decoding_format=getattr(args, "decoding_format", None), adaptive=not getattr(args, "iter_decode_force_max_iter", False), retain_history=getattr(args, "retain_iter_history", False), ) def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): if constraints is not None: # Though see Susanto et al. (ACL 2020): https://www.aclweb.org/anthology/2020.acl-main.325/ raise NotImplementedError( "Constrained decoding with the translation_lev task is not supported" ) return LanguagePairDataset( src_tokens, src_lengths, self.source_dictionary, append_bos=True ) def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): model.train() sample["prev_target"] = self.inject_noise(sample["target"]) loss, sample_size, logging_output = criterion(model, sample) if ignore_grad: loss *= 0 optimizer.backward(loss) return loss, sample_size, logging_output def valid_step(self, sample, model, criterion): model.eval() with torch.no_grad(): sample["prev_target"] = self.inject_noise(sample["target"]) loss, sample_size, logging_output = criterion(model, sample) return loss, sample_size, logging_output
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/translation_lev.py
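A standalone illustration of the simplest noise type handled by inject_noise() above, "full_mask": every position that is not bos/eos/pad becomes unk. The index values are arbitrary stand-ins for a real dictionary.

import torch

bos, pad, eos, unk = 0, 1, 2, 3
target_tokens = torch.tensor([[bos, 11, 12, 13, eos, pad, pad]])

keep = target_tokens.eq(bos) | target_tokens.eq(eos) | target_tokens.eq(pad)
prev_target_tokens = target_tokens.masked_fill(~keep, unk)
print(prev_target_tokens)  # tensor([[0, 3, 3, 3, 2, 1, 1]])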
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from fairseq import utils from fairseq.data import ( AppendTokenDataset, Dictionary, IdDataset, LMContextWindowDataset, MonolingualDataset, NestedDictionaryDataset, NumelDataset, PadDataset, PrependTokenDataset, StripTokenDataset, TokenBlockDataset, TruncatedDictionary, data_utils, ) from fairseq.data.indexed_dataset import get_available_dataset_impl from fairseq.data.shorten_dataset import maybe_shorten_dataset from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.tasks import LegacyFairseqTask, register_task from omegaconf import II SAMPLE_BREAK_MODE_CHOICES = ChoiceEnum(["none", "complete", "complete_doc", "eos"]) SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"]) logger = logging.getLogger(__name__) @dataclass class LanguageModelingConfig(FairseqDataclass): data: Optional[str] = field( default=None, metadata={"help": "path to data directory"} ) sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field( default="none", metadata={ "help": 'If omitted or "none", fills each sample with tokens-per-sample ' 'tokens. If set to "complete", splits samples only at the end ' "of sentence, but may include multiple sentences per sample. " '"complete_doc" is similar but respects doc boundaries. ' 'If set to "eos", includes only one sentence per sample.' }, ) tokens_per_sample: int = field( default=1024, metadata={"help": "max number of tokens per sample for LM dataset"}, ) output_dictionary_size: int = field( default=-1, metadata={"help": "limit the size of output dictionary"} ) self_target: bool = field(default=False, metadata={"help": "include self target"}) future_target: bool = field( default=False, metadata={"help": "include future target"} ) past_target: bool = field(default=False, metadata={"help": "include past target"}) add_bos_token: bool = field( default=False, metadata={"help": "prepend beginning of sentence token (<s>)"} ) max_target_positions: Optional[int] = field( default=None, metadata={"help": "max number of tokens in the target sequence"} ) shorten_method: SHORTEN_METHOD_CHOICES = field( default="none", metadata={ "help": "if not none, shorten sequences that exceed --tokens-per-sample" }, ) shorten_data_split_list: str = field( default="", metadata={ "help": "comma-separated list of dataset splits to apply shortening to, " 'e.g., "train,valid" (default: all dataset splits)' }, ) pad_to_fixed_length: Optional[bool] = field( default=False, metadata={"help": "pad to fixed length"}, ) pad_to_fixed_bsz: Optional[bool] = field( default=False, metadata={"help": "boolean to pad to fixed batch size"}, ) # TODO common vars below add to parent seed: int = II("common.seed") batch_size: Optional[int] = II("dataset.batch_size") batch_size_valid: Optional[int] = II("dataset.batch_size_valid") dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II( "dataset.dataset_impl" ) data_buffer_size: int = II("dataset.data_buffer_size") tpu: bool = II("common.tpu") use_plasma_view: bool = II("common.use_plasma_view") plasma_path: str = II("common.plasma_path") @register_task("language_modeling", dataclass=LanguageModelingConfig) class LanguageModelingTask(LegacyFairseqTask): """ Train a language model. 
Args: dictionary (~fairseq.data.Dictionary): the dictionary for the input of the language model output_dictionary (~fairseq.data.Dictionary): the dictionary for the output of the language model. In most cases it will be the same as *dictionary*, but could possibly be a more limited version of the dictionary (if ``--output-dictionary-size`` is used). targets (List[str]): list of the target types that the language model should predict. Can be one of "self", "future", and "past". Defaults to "future". .. note:: The language modeling task is compatible with :mod:`fairseq-train`, :mod:`fairseq-generate`, :mod:`fairseq-interactive` and :mod:`fairseq-eval-lm`. The language modeling task provides the following additional command-line arguments: .. argparse:: :ref: fairseq.tasks.language_modeling_parser :prog: """ def __init__(self, args, dictionary, output_dictionary=None, targets=None): super().__init__(args) self.dictionary = dictionary self.output_dictionary = output_dictionary or dictionary if targets is None: targets = ["future"] self.targets = targets @classmethod def setup_dictionary(cls, args, **kwargs): dictionary = None output_dictionary = None if args.data: paths = utils.split_paths(args.data) assert len(paths) > 0 dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt")) logger.info("dictionary: {} types".format(len(dictionary))) output_dictionary = dictionary if args.output_dictionary_size >= 0: output_dictionary = TruncatedDictionary( dictionary, args.output_dictionary_size ) return (dictionary, output_dictionary) @classmethod def setup_task(cls, args, **kwargs): """Setup the task (e.g., load dictionaries). Args: args (argparse.Namespace): parsed command-line arguments """ dictionary, output_dictionary = cls.setup_dictionary(args, **kwargs) # upgrade old checkpoints if getattr(args, "exclude_self_target", False): args.self_target = False targets = [] if getattr(args, "self_target", False): targets.append("self") if getattr(args, "future_target", False): targets.append("future") if getattr(args, "past_target", False): targets.append("past") if len(targets) == 0: # standard language modeling targets = ["future"] return cls(args, dictionary, output_dictionary, targets=targets) def build_model(self, args, from_checkpoint=False): model = super().build_model(args, from_checkpoint) for target in self.targets: if target not in model.supported_targets: raise ValueError( "Unsupported language modeling target: {}".format(target) ) return model def load_dataset( self, split: str, epoch=1, combine=False, **kwargs ) -> MonolingualDataset: """Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, valid1, test) """ paths = utils.split_paths(self.args.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] split_path = os.path.join(data_path, split) # each process has its own copy of the raw data (likely to be an np.memmap) dataset = data_utils.load_indexed_dataset( split_path, self.dictionary, self.args.dataset_impl, combine=combine ) if dataset is None: raise FileNotFoundError(f"Dataset not found: {split} ({split_path})") dataset = maybe_shorten_dataset( dataset, split, self.args.shorten_data_split_list, self.args.shorten_method, self.args.tokens_per_sample, self.args.seed, ) dataset = TokenBlockDataset( dataset, dataset.sizes, self.args.tokens_per_sample, pad=self.dictionary.pad(), eos=self.dictionary.eos(), break_mode=self.args.sample_break_mode, include_targets=True, use_plasma_view=self.args.use_plasma_view, split_path=split_path, plasma_path=self.args.plasma_path, ) add_eos_for_other_targets = ( self.args.sample_break_mode is not None and self.args.sample_break_mode != "none" ) fixed_pad_length = None if self.args.pad_to_fixed_length: fixed_pad_length = self.args.tokens_per_sample pad_to_bsz = None if self.args.pad_to_fixed_bsz: pad_to_bsz = ( self.args.batch_size_valid if "valid" in split else self.args.batch_size ) self.datasets[split] = MonolingualDataset( dataset=dataset, sizes=dataset.sizes, src_vocab=self.dictionary, tgt_vocab=self.output_dictionary, add_eos_for_other_targets=add_eos_for_other_targets, shuffle=True, targets=self.targets, add_bos_token=self.args.add_bos_token, fixed_pad_length=fixed_pad_length, pad_to_bsz=pad_to_bsz, ) def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs): """ Generate batches for inference. We prepend an eos token to src_tokens (or bos if `--add-bos-token` is set) and we append a <pad> to target. This is convenient both for generation with a prefix and LM scoring. 
""" dataset = StripTokenDataset( TokenBlockDataset( src_tokens, src_lengths, block_size=None, # ignored for "eos" break mode pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode="eos", ), # remove eos from (end of) target sequence self.source_dictionary.eos(), ) src_dataset = PrependTokenDataset( dataset, token=( self.source_dictionary.bos() if getattr(self.args, "add_bos_token", False) else self.source_dictionary.eos() ), ) tgt_dataset = AppendTokenDataset(dataset, token=self.source_dictionary.pad()) return NestedDictionaryDataset( { "id": IdDataset(), "net_input": { "src_tokens": PadDataset( src_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False, ), "src_lengths": NumelDataset(src_dataset, reduce=False), }, "target": PadDataset( tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False ), }, sizes=[np.array(src_lengths)], ) def inference_step( self, generator, models, sample, prefix_tokens=None, constraints=None ): with torch.no_grad(): # Generation will always be conditioned on bos_token if getattr(self.args, "add_bos_token", False): bos_token = self.source_dictionary.bos() else: bos_token = self.source_dictionary.eos() if constraints is not None: raise NotImplementedError( "Constrained decoding with the language_modeling task is not supported" ) # SequenceGenerator doesn't use src_tokens directly, we need to # pass the `prefix_tokens` argument instead if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement(): prefix_tokens = sample["net_input"]["src_tokens"] if prefix_tokens[:, 0].eq(bos_token).all(): prefix_tokens = prefix_tokens[:, 1:] return generator.generate( models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token ) def eval_lm_dataloader( self, dataset, max_tokens: Optional[int] = 36000, batch_size: Optional[int] = None, max_positions: Optional[int] = None, num_shards: int = 1, shard_id: int = 0, num_workers: int = 1, data_buffer_size: int = 10, # ensures that every evaluated token has access to a context of at least # this size, if possible context_window: int = 0, ): if context_window > 0: dataset = LMContextWindowDataset( dataset=dataset, tokens_per_sample=self.args.tokens_per_sample, context_window=context_window, pad_idx=self.source_dictionary.pad(), ) return self.get_batch_iterator( dataset=dataset, max_tokens=max_tokens, max_sentences=batch_size, max_positions=max_positions, ignore_invalid_inputs=True, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, data_buffer_size=data_buffer_size, ).next_epoch_itr(shuffle=False) @property def source_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self.dictionary @property def target_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self.output_dictionary
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/language_modeling.py
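Rough intuition for --sample-break-mode none with --tokens-per-sample, which the task above realizes lazily via TokenBlockDataset over binarized data: the corpus is treated as one long token stream and cut into fixed-size blocks. A toy sketch (the stream values are placeholders):

stream = list(range(10))  # stand-in for the concatenated token stream
tokens_per_sample = 4
blocks = [stream[i:i + tokens_per_sample] for i in range(0, len(stream), tokens_per_sample)]
print(blocks)  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]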
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from pathlib import Path from argparse import Namespace from fairseq.data import Dictionary, encoders from fairseq.data.audio.speech_to_text_dataset import ( S2TDataConfig, SpeechToTextDataset, SpeechToTextDatasetCreator, get_features_or_waveform, ) from fairseq.tasks import LegacyFairseqTask, register_task logger = logging.getLogger(__name__) @register_task("speech_to_text") class SpeechToTextTask(LegacyFairseqTask): @classmethod def add_args(cls, parser): parser.add_argument("data", help="manifest root path") parser.add_argument( "--config-yaml", type=str, default="config.yaml", help="Configuration YAML filename (under manifest root)", ) parser.add_argument( "--max-source-positions", default=6000, type=int, metavar="N", help="max number of tokens in the source sequence", ) parser.add_argument( "--max-target-positions", default=1024, type=int, metavar="N", help="max number of tokens in the target sequence", ) def __init__(self, args, tgt_dict): super().__init__(args) self.tgt_dict = tgt_dict self.data_cfg = S2TDataConfig(Path(args.data) / args.config_yaml) self.speaker_to_id = self._get_speaker_to_id() def _get_speaker_to_id(self): speaker_to_id = None speaker_set_filename = self.data_cfg.config.get("speaker_set_filename") if speaker_set_filename is not None: speaker_set_path = Path(self.args.data) / speaker_set_filename with open(speaker_set_path) as f: speaker_to_id = {r.strip(): i for i, r in enumerate(f)} return speaker_to_id @classmethod def setup_task(cls, args, **kwargs): data_cfg = S2TDataConfig(Path(args.data) / args.config_yaml) dict_path = Path(args.data) / data_cfg.vocab_filename if not dict_path.is_file(): raise FileNotFoundError(f"Dict not found: {dict_path.as_posix()}") tgt_dict = Dictionary.load(dict_path.as_posix()) logger.info( f"dictionary size ({data_cfg.vocab_filename}): " f"{len(tgt_dict):,}" ) if getattr(args, "train_subset", None) is not None: if not all(s.startswith("train") for s in args.train_subset.split(",")): raise ValueError('Train splits should be named like "train*".') return cls(args, tgt_dict) def build_criterion(self, args): from fairseq import criterions if self.data_cfg.prepend_tgt_lang_tag and args.ignore_prefix_size != 1: raise ValueError( 'Please set "--ignore-prefix-size 1" since ' "target language ID token is prepended as BOS." 
) return criterions.build_criterion(args, self) def load_dataset(self, split, epoch=1, combine=False, **kwargs): is_train_split = split.startswith("train") pre_tokenizer = self.build_tokenizer(self.args) bpe_tokenizer = self.build_bpe(self.args) self.datasets[split] = SpeechToTextDatasetCreator.from_tsv( self.args.data, self.data_cfg, split, self.tgt_dict, pre_tokenizer, bpe_tokenizer, is_train_split=is_train_split, epoch=epoch, seed=self.args.seed, speaker_to_id=self.speaker_to_id, ) @property def target_dictionary(self): return self.tgt_dict @property def source_dictionary(self): return None def max_positions(self): return self.args.max_source_positions, self.args.max_target_positions def build_model(self, args, from_checkpoint=False): args.input_feat_per_channel = self.data_cfg.input_feat_per_channel args.input_channels = self.data_cfg.input_channels args.speaker_to_id = self.speaker_to_id return super(SpeechToTextTask, self).build_model(args, from_checkpoint) def build_generator( self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None, ): if self.data_cfg.prepend_tgt_lang_tag and args.prefix_size != 1: raise ValueError( 'Please set "--prefix-size 1" since ' "target language ID token is prepended as BOS." ) lang_token_ids = { i for s, i in self.tgt_dict.indices.items() if SpeechToTextDataset.is_lang_tag(s) } if extra_gen_cls_kwargs is None: extra_gen_cls_kwargs = {} extra_gen_cls_kwargs["symbols_to_strip_from_output"] = lang_token_ids return super().build_generator( models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs ) def build_tokenizer(self, args): logger.info(f"pre-tokenizer: {self.data_cfg.pre_tokenizer}") return encoders.build_tokenizer(Namespace(**self.data_cfg.pre_tokenizer)) def build_bpe(self, args): logger.info(f"tokenizer: {self.data_cfg.bpe_tokenizer}") return encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer)) def get_interactive_tokens_and_lengths(self, lines, encode_fn): n_frames = [get_features_or_waveform(p).shape[0] for p in lines] return lines, n_frames def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs): return SpeechToTextDataset( "interactive", False, self.data_cfg, src_tokens, src_lengths )
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/speech_to_text.py
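What _get_speaker_to_id() above builds when the data config names a speaker set file: one integer id per line. The file contents below are invented for illustration.

import io

speaker_set_file = io.StringIO("spk_alice\nspk_bob\nspk_carol\n")  # hypothetical file
speaker_to_id = {r.strip(): i for i, r in enumerate(speaker_set_file)}
print(speaker_to_id)  # {'spk_alice': 0, 'spk_bob': 1, 'spk_carol': 2}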
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools import logging import os import numpy as np from fairseq import tokenizer, utils from fairseq.data import ConcatDataset, Dictionary, data_utils, indexed_dataset from fairseq.data.legacy.block_pair_dataset import BlockPairDataset from fairseq.data.legacy.masked_lm_dataset import MaskedLMDataset from fairseq.data.legacy.masked_lm_dictionary import BertDictionary from fairseq.tasks import LegacyFairseqTask, register_task logger = logging.getLogger(__name__) @register_task("legacy_masked_lm") class LegacyMaskedLMTask(LegacyFairseqTask): """ Task for training Masked LM (BERT) model. Args: dictionary (Dictionary): the dictionary for the input of the task """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument( "data", help="colon separated path to data directories list, \ will be iterated upon during epochs in round-robin manner", ) parser.add_argument( "--tokens-per-sample", default=512, type=int, help="max number of total tokens over all segments" " per sample for BERT dataset", ) parser.add_argument( "--break-mode", default="doc", type=str, help="mode for breaking sentence" ) parser.add_argument("--shuffle-dataset", action="store_true", default=False) def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed @classmethod def load_dictionary(cls, filename): return BertDictionary.load(filename) @classmethod def build_dictionary( cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8 ): d = BertDictionary() for filename in filenames: Dictionary.add_file_to_dictionary( filename, d, tokenizer.tokenize_line, workers ) d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor) return d @property def target_dictionary(self): return self.dictionary @classmethod def setup_task(cls, args, **kwargs): """Setup the task.""" paths = utils.split_paths(args.data) assert len(paths) > 0 dictionary = BertDictionary.load(os.path.join(paths[0], "dict.txt")) logger.info("dictionary: {} types".format(len(dictionary))) return cls(args, dictionary) def load_dataset(self, split, epoch=1, combine=False): """Load a given dataset split. 
Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        loaded_datasets = []

        paths = utils.split_paths(self.args.data)
        assert len(paths) > 0
        data_path = paths[(epoch - 1) % len(paths)]
        logger.info("data_path %s", data_path)

        for k in itertools.count():
            split_k = split + (str(k) if k > 0 else "")
            path = os.path.join(data_path, split_k)
            ds = indexed_dataset.make_dataset(
                path,
                impl=self.args.dataset_impl,
                fix_lua_indexing=True,
                dictionary=self.dictionary,
            )

            if ds is None:
                if k > 0:
                    break
                else:
                    raise FileNotFoundError(
                        "Dataset not found: {} ({})".format(split, data_path)
                    )

            with data_utils.numpy_seed(self.seed + k):
                loaded_datasets.append(
                    BlockPairDataset(
                        ds,
                        self.dictionary,
                        ds.sizes,
                        self.args.tokens_per_sample,
                        break_mode=self.args.break_mode,
                        doc_break_size=1,
                    )
                )

            logger.info(
                "{} {} {} examples".format(data_path, split_k, len(loaded_datasets[-1]))
            )

            if not combine:
                break

        if len(loaded_datasets) == 1:
            dataset = loaded_datasets[0]
            sizes = dataset.sizes
        else:
            dataset = ConcatDataset(loaded_datasets)
            sizes = np.concatenate([ds.sizes for ds in loaded_datasets])

        self.datasets[split] = MaskedLMDataset(
            dataset=dataset,
            sizes=sizes,
            vocab=self.dictionary,
            pad_idx=self.dictionary.pad(),
            mask_idx=self.dictionary.mask(),
            classif_token_idx=self.dictionary.cls(),
            sep_token_idx=self.dictionary.sep(),
            shuffle=self.args.shuffle_dataset,
            seed=self.seed,
        )
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/legacy_masked_lm.py
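The data argument above is a colon-separated list of directories iterated round-robin over epochs via paths[(epoch - 1) % len(paths)]. A worked example of that selection (the shard names are invented):

paths = "shard0:shard1:shard2".split(":")  # what utils.split_paths would produce
for epoch in range(1, 7):
    print(epoch, paths[(epoch - 1) % len(paths)])
# 1 shard0 / 2 shard1 / 3 shard2 / 4 shard0 / 5 shard1 / 6 shard2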
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" import argparse import importlib import os from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import merge_with_parent from hydra.core.config_store import ConfigStore from .fairseq_task import FairseqTask, LegacyFairseqTask # noqa # register dataclass TASK_DATACLASS_REGISTRY = {} TASK_REGISTRY = {} TASK_CLASS_NAMES = set() def setup_task(cfg: FairseqDataclass, **kwargs): task = None task_name = getattr(cfg, "task", None) if isinstance(task_name, str): # legacy tasks task = TASK_REGISTRY[task_name] if task_name in TASK_DATACLASS_REGISTRY: dc = TASK_DATACLASS_REGISTRY[task_name] cfg = dc.from_namespace(cfg) else: task_name = getattr(cfg, "_name", None) if task_name and task_name in TASK_DATACLASS_REGISTRY: dc = TASK_DATACLASS_REGISTRY[task_name] cfg = merge_with_parent(dc(), cfg) task = TASK_REGISTRY[task_name] assert ( task is not None ), f"Could not infer task type from {cfg}. Available argparse tasks: {TASK_REGISTRY.keys()}. Available hydra tasks: {TASK_DATACLASS_REGISTRY.keys()}" return task.setup_task(cfg, **kwargs) def register_task(name, dataclass=None): """ New tasks can be added to fairseq with the :func:`~fairseq.tasks.register_task` function decorator. For example:: @register_task('classification') class ClassificationTask(FairseqTask): (...) .. note:: All Tasks must implement the :class:`~fairseq.tasks.FairseqTask` interface. Args: name (str): the name of the task """ def register_task_cls(cls): if name in TASK_REGISTRY: raise ValueError("Cannot register duplicate task ({})".format(name)) if not issubclass(cls, FairseqTask): raise ValueError( "Task ({}: {}) must extend FairseqTask".format(name, cls.__name__) ) if cls.__name__ in TASK_CLASS_NAMES: raise ValueError( "Cannot register task with duplicate class name ({})".format( cls.__name__ ) ) TASK_REGISTRY[name] = cls TASK_CLASS_NAMES.add(cls.__name__) if dataclass is not None and not issubclass(dataclass, FairseqDataclass): raise ValueError( "Dataclass {} must extend FairseqDataclass".format(dataclass) ) cls.__dataclass = dataclass if dataclass is not None: TASK_DATACLASS_REGISTRY[name] = dataclass cs = ConfigStore.instance() node = dataclass() node._name = name cs.store(name=name, group="task", node=node, provider="fairseq") return cls return register_task_cls def get_task(name): return TASK_REGISTRY[name] def import_tasks(tasks_dir, namespace): for file in os.listdir(tasks_dir): path = os.path.join(tasks_dir, file) if ( not file.startswith("_") and not file.startswith(".") and (file.endswith(".py") or os.path.isdir(path)) ): task_name = file[: file.find(".py")] if file.endswith(".py") else file importlib.import_module(namespace + "." + task_name) # expose `task_parser` for sphinx if task_name in TASK_REGISTRY: parser = argparse.ArgumentParser(add_help=False) group_task = parser.add_argument_group("Task name") # fmt: off group_task.add_argument('--task', metavar=task_name, help='Enable this task with: ``--task=' + task_name + '``') # fmt: on group_args = parser.add_argument_group( "Additional command-line arguments" ) TASK_REGISTRY[task_name].add_args(group_args) globals()[task_name + "_parser"] = parser # automatically import any Python files in the tasks/ directory tasks_dir = os.path.dirname(__file__) import_tasks(tasks_dir, "fairseq.tasks")
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/__init__.py
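A hedged sketch of the registration flow wired up in this module: a hypothetical task with a dataclass config registered via @register_task. The names (my_classification, MyClassificationConfig) are invented, and a real task would also implement load_dataset and the dictionary properties.

from dataclasses import dataclass, field

from fairseq.dataclass import FairseqDataclass
from fairseq.tasks import FairseqTask, register_task


@dataclass
class MyClassificationConfig(FairseqDataclass):
    data: str = field(default="", metadata={"help": "path to data directory"})


@register_task("my_classification", dataclass=MyClassificationConfig)
class MyClassificationTask(FairseqTask):
    @classmethod
    def setup_task(cls, cfg: MyClassificationConfig, **kwargs):
        return cls(cfg)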
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from argparse import Namespace import json import logging import math from pathlib import Path import torch import torch.nn as nn from fairseq import utils from fairseq.data import Dictionary from fairseq.data.audio.data_cfg import S2SDataConfig, MultitaskConfig from fairseq.data.audio.speech_to_speech_dataset import SpeechToSpeechDatasetCreator from fairseq.tasks import LegacyFairseqTask, register_task from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion logger = logging.getLogger(__name__) class StackUnitSequenceGenerator(nn.Module): def __init__(self, tgt_dict, vocab_size): super().__init__() self.pad = tgt_dict.pad() self.eos = tgt_dict.eos() self.unk = tgt_dict.unk() self.offset = len(tgt_dict) - vocab_size self.vocab_size = vocab_size def pack_units(self, input: torch.Tensor, n_frames_per_step) -> torch.Tensor: if n_frames_per_step <= 1: return input bsz, _, n = input.shape assert n == n_frames_per_step scale = [ pow(self.vocab_size, n_frames_per_step - 1 - i) for i in range(n_frames_per_step) ] scale = torch.LongTensor(scale).squeeze(0).to(input.device) mask = input >= self.offset res = ((input - self.offset) * scale * mask).sum(dim=2) + self.offset return res @torch.no_grad() def generate(self, models, sample, **kwargs): # currently only support viterbi search for stacked units model = models[0] model.eval() max_len = model.max_decoder_positions() # TODO: incorporate max_len_a and max_len_b src_tokens = sample["net_input"]["src_tokens"] src_lengths = sample["net_input"]["src_lengths"] bsz, src_len, _ = src_tokens.size() n_frames_per_step = model.decoder.n_frames_per_step # initialize encoder_out = model.forward_encoder( src_tokens, src_lengths, speaker=sample["speaker"] ) incremental_state = {} pred_out, attn, scores = [], [], [] finished = src_tokens.new_zeros((bsz,)).bool() prev_output_tokens = src_lengths.new_zeros((bsz, 1)).long().fill_(self.eos) for _ in range(max_len): cur_out, cur_extra = model.forward_decoder( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, ) lprobs = model.get_normalized_probs([cur_out], log_probs=True) # never select pad, unk lprobs[:, :, self.pad] = -math.inf lprobs[:, :, self.unk] = -math.inf cur_pred_lprob, cur_pred_out = torch.max(lprobs, dim=2) scores.append(cur_pred_lprob) pred_out.append(cur_pred_out) prev_output_tokens = torch.cat( ( prev_output_tokens, self.pack_units( cur_pred_out.view(bsz, 1, n_frames_per_step), n_frames_per_step ), ), dim=1, ) attn.append(cur_extra["attn"][0]) cur_finished = torch.any(cur_pred_out.squeeze(1) == self.eos, dim=1) finished = finished | cur_finished if finished.sum().item() == bsz: break pred_out = torch.cat(pred_out, dim=1).view(bsz, -1) attn = torch.cat(attn, dim=2) alignment = attn.max(dim=1)[1] attn = attn.repeat_interleave(n_frames_per_step, dim=2) alignment = alignment.repeat_interleave(n_frames_per_step, dim=1) scores = torch.cat(scores, dim=1) eos_idx = (pred_out == self.eos).nonzero(as_tuple=True) out_lens = src_lengths.new_zeros((bsz,)).long().fill_(max_len) for b, l in zip(eos_idx[0], eos_idx[1]): out_lens[b] = min(l, out_lens[b]) hypos = [ [ { "tokens": pred_out[b, :out_len], "attn": attn[b, :, :out_len], "alignment": alignment[b, :out_len], "positional_scores": scores[b, :out_len], "score": utils.item(scores[b, :out_len].sum().data), } ] for b, out_len in zip(range(bsz), 
out_lens) ] return hypos @register_task("speech_to_speech") class SpeechToSpeechTask(LegacyFairseqTask): @classmethod def add_args(cls, parser): parser.add_argument("data", help="manifest root path") parser.add_argument( "--config-yaml", type=str, default="config.yaml", help="Configuration YAML filename (under manifest root)", ) parser.add_argument( "--max-source-positions", default=6000, type=int, metavar="N", help="max number of tokens in the source sequence", ) parser.add_argument( "--max-target-positions", default=1024, type=int, metavar="N", help="max number of tokens in the target sequence", ) parser.add_argument( "--target-is-code", action="store_true", help="set if target is discrete unit instead of spectrogram", ) parser.add_argument( "--target-code-size", type=int, default=None, help="# discrete units" ) parser.add_argument( "--n-frames-per-step", type=int, default=1, help="# stacked frames, use 0 for reduced discrete unit sequence", ) parser.add_argument( "--multitask-config-yaml", type=str, default=None, help="Configuration YAML filename for the multitasks (under manifest root)", ) parser.add_argument("--eval-inference", action="store_true") parser.add_argument( "--eval-args", type=str, default="{}", help='generation args for speech-to-unit model , e.g., \'{"beam": 5, "max_len_a": 1}\', as JSON string', ) parser.add_argument("--eos-prob-threshold", type=float, default=0.5) parser.add_argument( "--mcd-normalize-type", type=str, default="targ", choices=["targ", "pred", "path"], ) parser.add_argument( "--vocoder", type=str, default="griffin_lim", choices=["griffin_lim", "hifigan", "code_hifigan"], ) parser.add_argument("--spec-bwd-max-iter", type=int, default=8) def __init__(self, args, tgt_dict): super().__init__(args) self.tgt_dict = tgt_dict self.data_cfg = S2SDataConfig(Path(args.data) / args.config_yaml) self.multitask_tasks = {} if getattr(args, "multitask_config_yaml", None) is not None: multitask_cfg = MultitaskConfig( Path(args.data) / args.multitask_config_yaml ) for task_name, task_config in multitask_cfg.get_all_tasks().items(): self.multitask_tasks[task_name] = DummyMultiTask( task_config, task_config.tgt_dict ) @classmethod def setup_task(cls, args, **kwargs): tgt_dict = None if args.target_is_code: assert args.target_code_size is not None tgt_dict = Dictionary() for i in range(args.target_code_size): tgt_dict.add_symbol(str(i)) logger.info(f"dictionary size: " f"{len(tgt_dict):,}") if getattr(args, "train_subset", None) is not None: if not all(s.startswith("train") for s in args.train_subset.split(",")): raise ValueError('Train splits should be named like "train*".') assert args.n_frames_per_step >= 1 assert ( not args.eval_inference or (args.target_is_code and args.vocoder == "code_hifigan") or (not args.target_is_code and args.vocoder != "code_hifigan") ) return cls(args, tgt_dict) def build_criterion(self, args): from fairseq import criterions if len(self.multitask_tasks) > 0: if self.args.target_is_code and args._name != "speech_to_unit": raise ValueError( "set --criterion speech_to_unit for speech-to-unit loss with multitask" ) elif not self.args.target_is_code and args._name != "speech_to_spectrogram": raise ValueError( "set --criterion speech_to_spectrogram for speech-to-spectrogram loss with multitask" ) return criterions.build_criterion(args, self) def load_dataset(self, split, epoch=1, combine=False, **kwargs): self.datasets[split] = SpeechToSpeechDatasetCreator.from_tsv( self.args.data, self.data_cfg, split, is_train_split=split.startswith("train"), 
epoch=epoch, seed=self.args.seed, target_is_code=self.args.target_is_code, target_dictionary=self.target_dictionary, n_frames_per_step=self.args.n_frames_per_step, multitask=self.multitask_tasks, ) @property def target_dictionary(self): return self.tgt_dict @property def source_dictionary(self): return None def max_positions(self): return self.args.max_source_positions, self.args.max_target_positions def build_model(self, args, from_checkpoint=False): args.input_feat_per_channel = self.data_cfg.input_feat_per_channel args.input_channels = self.data_cfg.input_transformed_channels args.target_speaker_embed = self.data_cfg.target_speaker_embed is not None args.n_frames_per_step = self.args.n_frames_per_step model = super().build_model(args, from_checkpoint) if len(self.multitask_tasks) > 0: from fairseq.models.speech_to_speech.s2s_transformer import ( S2STransformerMultitaskModelBase, ) assert isinstance(model, S2STransformerMultitaskModelBase) if self.args.eval_inference: self.eval_gen_args = json.loads(self.args.eval_args) self.generator = self.build_generator( [model], Namespace(**self.eval_gen_args) ) return model def build_generator( self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None, ): if not self.args.target_is_code or self.args.eval_inference: from fairseq.models.text_to_speech.vocoder import get_vocoder self.vocoder = get_vocoder(self.args, self.data_cfg) self.vocoder = ( self.vocoder.cuda() if torch.cuda.is_available() and not self.args.cpu else self.vocoder.cpu() ) if self.args.target_is_code: if self.args.n_frames_per_step == 1: seq_generator = super().build_generator( models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs, ) else: assert ( getattr(args, "beam", 1) == 1 and getattr(args, "nbest", 1) == 1 ), "only support viterbi search for stacked units" seq_generator = StackUnitSequenceGenerator( self.tgt_dict, self.args.target_code_size, ) else: if getattr(args, "teacher_forcing", False): from fairseq.speech_generator import ( TeacherForcingAutoRegressiveSpeechGenerator, ) generator = TeacherForcingAutoRegressiveSpeechGenerator logger.info("Teacher forcing mode for generation") else: from fairseq.speech_generator import AutoRegressiveSpeechGenerator generator = AutoRegressiveSpeechGenerator seq_generator = generator( models[0], self.vocoder, self.data_cfg, max_iter=self.args.max_target_positions, eos_prob_threshold=self.args.eos_prob_threshold, ) return seq_generator def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): for task_name, task_obj in self.multitask_tasks.items(): criterion.set_multitask_loss_weight( task_name, task_obj.args.get_loss_weight(update_num) ) loss, sample_size, logging_output = super().train_step( sample, model, criterion, optimizer, update_num, ignore_grad ) return loss, sample_size, logging_output def valid_step(self, sample, model, criterion): loss, sample_size, logging_output = super().valid_step(sample, model, criterion) if self.args.eval_inference: hypos, inference_losses = self.valid_step_with_inference( sample, model, self.generator ) for k, v in inference_losses.items(): assert k not in logging_output logging_output[k] = v return loss, sample_size, logging_output def valid_step_with_inference(self, sample, model, generator): if self.args.target_is_code: hypos = generator.generate([model], sample) tgt_lens = ( sample["target_lengths"] - 1 ) * self.args.n_frames_per_step # strip <eos> for b, (f, l) in enumerate(zip(sample["target"], tgt_lens)): hypos[b][0]["targ_waveform"] = 
self.vocoder( {"code": f[:l] - 4}, # remove <bos>, <pad>, <eos>, <unk> dur_prediction=self.eval_gen_args.get("dur_prediction", False), ) if len(hypos[b][0]["tokens"]) > 0: hypos[b][0]["waveform"] = self.vocoder( {"code": hypos[b][0]["tokens"] - 4}, dur_prediction=self.eval_gen_args.get("dur_prediction", False), ) else: hypos[b][0]["waveform"] = torch.flip( hypos[b][0]["targ_waveform"], dims=[0] ) else: hypos = [ [hypo] for hypo in generator.generate(model, sample, has_targ=True) ] losses = { "mcd_loss": 0.0, "targ_frames": 0.0, "pred_frames": 0.0, "path_frames": 0.0, "nins": 0.0, "ndel": 0.0, } rets = batch_mel_cepstral_distortion( [hypo[0]["targ_waveform"] for hypo in hypos], [hypo[0]["waveform"] for hypo in hypos], self.data_cfg.output_sample_rate, normalize_type=None, ) for d, extra in rets: pathmap = extra[-1] losses["mcd_loss"] += d.item() losses["targ_frames"] += pathmap.size(0) losses["pred_frames"] += pathmap.size(1) losses["path_frames"] += pathmap.sum().item() losses["nins"] += (pathmap.sum(dim=1) - 1).sum().item() losses["ndel"] += (pathmap.sum(dim=0) - 1).sum().item() losses["norm_frames"] = losses[ f"{getattr(self.args, 'mcd_normalize_type', 'targ')}_frames" ] return hypos, losses class DummyMultiTask(LegacyFairseqTask): def __init__(self, args, tgt_dict): super().__init__(args) self.tgt_dict = tgt_dict @property def target_dictionary(self): return self.tgt_dict def inference_step( self, generator, models, sample, prefix_tokens=None, constraints=None ): if self.args.decoder_type == "ctc": model = models[0] # only support single model encoder_out = model(**sample) if hasattr(model, "get_logits"): emissions = model.get_logits( encoder_out ) # no need to normalize emissions else: emissions = model.get_normalized_probs(encoder_out, log_probs=True) return generator.decode( emissions.transpose(0, 1).float().cpu().contiguous() ) else: raise NotImplementedError("only ctc decoder is supported at the moment") def build_generator( self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None ): if self.args.decoder_type == "ctc": from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder return W2lViterbiDecoder(args, self.tgt_dict) else: raise NotImplementedError("only ctc decoder is supported at the moment")
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/speech_to_speech.py
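The arithmetic behind StackUnitSequenceGenerator.pack_units above, shown standalone: the stacked unit ids at each step are folded into one index by base-vocab_size weighting, after subtracting the offset that skips the dictionary's special symbols (the padding mask handled by the real code is omitted here). Toy sizes below.

vocab_size, n_frames_per_step, offset = 100, 2, 4  # toy values

def pack(units):
    # units: the stacked ids for one step, already offset into the dictionary
    scale = [vocab_size ** (n_frames_per_step - 1 - i) for i in range(n_frames_per_step)]
    return sum((u - offset) * s for u, s in zip(units, scale)) + offset

print(pack([offset + 7, offset + 42]))  # 7 * 100 + 42 + 4 = 746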
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import json import logging import math import os from argparse import Namespace from collections import OrderedDict, defaultdict from pathlib import Path from typing import Dict, Sequence, Tuple from argparse import ArgumentError import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import fairseq from fairseq import metrics, options, utils from fairseq.data import ( FairseqDataset, LanguagePairDataset, NoisingDataset, PrependTokenDataset, RoundRobinZipDatasets, TransformEosLangPairDataset, data_utils, encoders, ) from fairseq.sequence_generator import SequenceGenerator from fairseq.tasks import register_task from fairseq.tasks.translation import TranslationTask, load_langpair_dataset logger = logging.getLogger(__name__) class PiecewiseLinearFn: """Piecewise linear function. Can be configured with a string.""" def __init__(self, pieces: Sequence[Tuple[int, float]]): assert pieces == sorted( pieces ), f"PiecewiseLinearFn configuration should be sorted, received: {pieces}" self.pieces = pieces def __call__(self, x: int) -> float: for i, (x_a, y_a) in enumerate(self.pieces[:-1]): x_b, y_b = self.pieces[i + 1] if x_a <= x <= x_b: return y_a + (x - x_a) * (y_b - y_a) / (x_b - x_a) return self.pieces[-1][1] @staticmethod def from_string(configuration: str) -> "PiecewiseLinearFn": """ Parse the configuration of lambda coefficient (for scheduling). x = "3" # lambda will be a constant equal to x x = "0:1,1000:0" # lambda will start from 1 and linearly decrease # to 0 during the first 1000 iterations x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000 # iterations, then will linearly increase to 1 until iteration 2000 """ if isinstance(configuration, float): return PiecewiseLinearFn([(0, configuration)]) try: parts = configuration.split(",") if len(parts) == 1: v = float(configuration) return PiecewiseLinearFn([(0, v)]) split = [s.split(":") for s in parts] pieces = [(int(t), float(v)) for t, v in split] return PiecewiseLinearFn(pieces) except Exception: raise ValueError( f"Invalid PiecewiseLinearFn configuration: {configuration!r}" ) @staticmethod def one() -> "PiecewiseLinearFn": return PiecewiseLinearFn([(0, 1.0)]) @register_task("online_backtranslation") class OnlineBackTranslationTask(TranslationTask): @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" # fmt: off # Generic translation args parser.add_argument('data', help='colon separated path to data directories list, \ will be iterated upon during epochs in round-robin manner; \ however, valid and test data are always in the first directory to \ avoid the need for repeating them in all directories') parser.add_argument('--mono-langs', metavar='MONO_LANGS', help='monolingual languages for training') parser.add_argument('--valid-lang-pairs', default=None, metavar='VALID_LANG_PAIRS', help='language pairs for validation') parser.add_argument('--load-alignments', action='store_true', help='load the binarized alignments') parser.add_argument('--left-pad-source', default='False', type=str, metavar='BOOL', help='pad the source on the left') parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left') parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset') try: 
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
                                help='max number of tokens in the source sequence')
            parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
                                help='max number of tokens in the target sequence')
        except ArgumentError:
            # this might have already been defined. Once we transition this to hydra it should be fine to add it here.
            pass
        parser.add_argument('--truncate-source', action='store_true', default=False,
                            help='truncate source to max-source-positions')
        parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
                            help='if >0, then bucket source and target lengths into N '
                                 'buckets and pad accordingly; this is useful on TPUs '
                                 'to minimize the number of compilations')

        # Denoising args
        parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N',
                            help='maximum word shuffle distance for denoising autoencoding data generation')
        parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N',
                            help='word dropout probability for denoising autoencoding data generation')
        parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N',
                            help='word blanking probability for denoising autoencoding data generation')

        # Backtranslation args
        parser.add_argument('--lambda-bt', default="1.0", type=str, metavar='N',
                            help='back-translation weight')
        parser.add_argument('--lambda-dae', default="1.0", type=str, metavar='N',
                            help='denoising auto-encoder weight')

        # Evaluation args
        parser.add_argument('--generate-one-by-one', action='store_true',
                            help='generate one sentence at a time for backtranslation')

        parser.add_argument('--eval-bleu', action='store_true',
                            help='evaluation with BLEU scores')
        parser.add_argument('--eval-bleu-detok', type=str, default="space",
                            help='detokenize before computing BLEU (e.g., "moses"); '
                                 'required if using --eval-bleu; use "space" to '
                                 'disable detokenization; see fairseq.data.encoders '
                                 'for other options')
        parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
                            help='args for building the tokenizer, if needed')
        parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
                            help='compute tokenized BLEU instead of sacrebleu')
        parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
                            help='remove BPE before computing BLEU')
        parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
                            help='generation args for BLEU scoring, '
                                 'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
        parser.add_argument('--eval-bleu-print-samples', action='store_true',
                            help='print sample generations during validation')
        # fmt: on

    def __init__(self, args, common_dict, mono_langs, valid_lang_pairs):
        super().__init__(args, common_dict, common_dict)
        self.common_dict = common_dict
        self.mono_langs = mono_langs
        self.valid_lang_pairs = valid_lang_pairs

        self.SHOW_SAMPLES_INTERVAL = 1000
        # Start by showing samples
        self._show_samples_ctr = self.SHOW_SAMPLES_INTERVAL
        self.SHOW_SAMPLES_NUMBER = 5
        self.lambda_bt = PiecewiseLinearFn.from_string(args.lambda_bt)
        self.lambda_dae = PiecewiseLinearFn.from_string(args.lambda_dae)

        self.args = args
        self.data = utils.split_paths(self.args.data)
        if len(self.data) == 1:
            shards = list(Path(self.data[0]).glob("shard*"))
            if len(shards) > 0:
                # keep this as strings, since it can also be a manifold path
                old_data = self.data
                self.data = [str(shard) for shard in shards]
                logging.warning(f"Expanded data directory {old_data} to {self.data}")

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup
the task (e.g., load dictionaries). Args: args (argparse.Namespace): parsed command-line arguments """ args.left_pad_source = options.eval_bool(args.left_pad_source) args.left_pad_target = options.eval_bool(args.left_pad_target) paths = utils.split_paths(args.data) assert len(paths) > 0 assert args.mono_langs is not None mono_langs = args.mono_langs.split(",") valid_lang_pairs = args.valid_lang_pairs.split(",") # load dictionary dict_path = os.path.join(paths[0], "dict.txt") common_dict = cls.load_dictionary(dict_path) return cls(args, common_dict, mono_langs, valid_lang_pairs) def load_dataset(self, split, epoch=1, combine=False, **kwargs) -> FairseqDataset: """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ if split == "train": data_path = self.data[(epoch - 1) % len(self.data)] dataset = self.load_train_dataset(data_path) else: # valid/test should always be the same. dataset = self.load_translation_dataset(split, self.data[0]) self.datasets[split] = dataset return dataset def load_train_dataset(self, data_path: str) -> FairseqDataset: """The training dataset is made of backtranslation dataset and denoising dataset.""" data = [] for lang in self.mono_langs: train_path = os.path.join(data_path, lang, "train") # TODO: could we do the BT using denoise sample ? # this would half the data loading work data.append((f"{lang}-BT", self.load_bt_dataset(train_path, lang))) data.append( (f"{lang}-DENOISE", self.load_denoise_dataset(train_path, lang)) ) return RoundRobinZipDatasets(OrderedDict(data)) def _langpair_dataset( self, src: FairseqDataset, tgt: FairseqDataset ) -> LanguagePairDataset: return LanguagePairDataset( src, src.sizes, self.dictionary, tgt=tgt, tgt_sizes=tgt.sizes, tgt_dict=self.dictionary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, # TODO: should we shuffle ? we are already sorting batch by sizes so ? # shuffle=True, ) def _prepend_lang_bos_to_target( self, dataset: LanguagePairDataset, lang: str ) -> LanguagePairDataset: bos = _lang_token_index(self.dictionary, lang) return TransformEosLangPairDataset( dataset, src_eos=self.dictionary.eos(), new_src_eos=self.dictionary.eos(), tgt_bos=self.dictionary.eos(), new_tgt_bos=bos, ) def load_bt_dataset(self, data_path: str, lang: str) -> FairseqDataset: """The BT dataset is generated with (tgt, tgt) pairs. The actual translation to a (generated_src, tgt) pair is done on the fly during training. 
""" mono_dataset = data_utils.load_indexed_dataset( data_path, self.common_dict, self.args.dataset_impl ) assert mono_dataset is not None, f"No dataset found for {lang}" mono_dataset_src = PrependTokenDataset( mono_dataset, _lang_token_index(self.dictionary, lang) ) mono_dataset_bt = self._langpair_dataset(mono_dataset_src, mono_dataset) logger.info( f"mono_lang = {lang} " f"lang token index = {_lang_token_index(self.dictionary, lang)} " f"lang token = {_lang_token(lang)}" ) mono_dataset_bt = self._prepend_lang_bos_to_target(mono_dataset_bt, lang) return mono_dataset_bt def load_denoise_dataset(self, data_path: str, lang: str) -> FairseqDataset: """Classic denoising dataset""" dataset = data_utils.load_indexed_dataset( data_path, self.common_dict, self.args.dataset_impl ) noisy_dataset = NoisingDataset( dataset, self.dictionary, seed=1, max_word_shuffle_distance=self.args.max_word_shuffle_distance, word_dropout_prob=self.args.word_dropout_prob, word_blanking_prob=self.args.word_blanking_prob, ) noisy_dataset = PrependTokenDataset( noisy_dataset, _lang_token_index(self.dictionary, lang) ) clean_dataset = data_utils.load_indexed_dataset( data_path, self.common_dict, self.args.dataset_impl ) denoising_dataset = self._langpair_dataset(noisy_dataset, clean_dataset) denoising_dataset = self._prepend_lang_bos_to_target(denoising_dataset, lang) return denoising_dataset def load_translation_dataset( self, split: str, data_path: str, combine: bool = False ): # only judging with one language pair for the moment, # since ConcatDataset doesn't work as expected assert len(self.valid_lang_pairs) == 1, "For now..." valid_lang_pair = self.valid_lang_pairs[0] src, tgt = valid_lang_pair.split("-") # use the same function than TranslationTask src_tgt_dt = load_langpair_dataset( data_path, split, src, self.common_dict, tgt, self.common_dict, combine=combine, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions, load_alignments=self.args.load_alignments, truncate_source=self.args.truncate_source, num_buckets=self.args.num_batch_buckets, shuffle=(split != "test"), prepend_bos_src=_lang_token_index(self.dictionary, src), ) src_tgt_eos_dt = self._prepend_lang_bos_to_target(src_tgt_dt, tgt) src_tgt_eos_dt.args = self.args return src_tgt_eos_dt def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): raise NotImplementedError def build_model(self, args, from_checkpoint=False): # torch.autograd.set_detect_anomaly(True) model = super().build_model(args, from_checkpoint) add_secial_tokens_to_dict_and_model(self.common_dict, model, self.mono_langs) self.sequence_generators = {} for mono_lang in self.mono_langs: self.sequence_generators[mono_lang] = SequenceGenerator( [model], tgt_dict=self.dictionary, beam_size=1, max_len_a=1.3, max_len_b=5, min_len=5, # keep 1 to be able to prepend bos max_len=model.max_decoder_positions() - 1, ) if getattr(args, "eval_bleu", False): assert getattr(args, "eval_bleu_detok", None) is not None, ( "--eval-bleu-detok is required if using --eval-bleu; " "try --eval-bleu-detok=moses (or --eval-bleu-detok=space " "to disable detokenization, e.g., when using sentencepiece)" ) detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}") self.tokenizer = encoders.build_tokenizer( Namespace( tokenizer=getattr(args, "eval_bleu_detok", None), 
**detok_args ) ) gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}") self.bleu_sequence_generator = self.build_generator( [model], Namespace(**gen_args) ) return model def max_positions(self): """Return the max sentence length allowed by the task.""" return (self.args.max_source_positions, self.args.max_target_positions) @property def dictionary(self): """Return the source :class:`~fairseq.data.Dictionary`.""" return self.common_dict def display_samples_once_in_a_while(self, smp, mono_lang, other_lang): self._show_samples_ctr += 1 if self._show_samples_ctr < self.SHOW_SAMPLES_INTERVAL: return self._show_samples_ctr = 0 ln = smp["net_input"]["src_tokens"].shape[0] logger.info( f"(r:{self.args.distributed_rank}) : " f"{other_lang} ---> {mono_lang} " f"({other_lang} was generated by back-translation.) {ln} samples" ) for i in range(min(ln, self.SHOW_SAMPLES_NUMBER)): src_tokens = smp["net_input"]["src_tokens"][i] tgt_tokens = smp["target"][i] src_str = self.dictionary.string(src_tokens, "sentencepiece") tgt_str = self.dictionary.string(tgt_tokens, "sentencepiece") logger.info( f"\n{i}\t\t[{other_lang} generated] {src_str}\n" f"\t\t[{mono_lang} original ] {tgt_str}\n" f"\t\t[ src tokens] {src_tokens}\n" ) def backtranslate_sample(self, smp, orig_lang, other_lang) -> None: """ * WARNING: smp is modified in place. * At the start of this function, `smp` has the same input and target: |--------------------------------------------------------| | smp['net_input']['src_tokens'] | smp['target'] | | (from data) __en__ hello world | __en__ hello world | |--------------------------------------------------------| * We call generator.generate(smp, bos_token = token("ro")), and copy the result as input * At the end, `smp` has the translation to other language. 
|--------------------------------------------------------| | smp['net_input']['src_tokens'] | smp['target'] | | (generated) __ro__ salut lume | __en__ hello world | |--------------------------------------------------------| """ bos_token = _lang_token_index(self.dictionary, other_lang) generated = self.sequence_generators[orig_lang].generate( models=[], sample=smp, bos_token=bos_token ) max_lngth = max([gn[0]["tokens"].size(0) for gn in generated]) net_input = smp["net_input"] n_src_tokens = torch.empty( size=(len(generated), max_lngth + 1), dtype=net_input["src_tokens"].dtype ) n_src_lengths = torch.empty( len(generated), dtype=net_input["src_lengths"].dtype ) for i, gn in enumerate(generated): tokens = gn[0]["tokens"] tokens_size = tokens.size(0) padding_needed = max_lngth - tokens_size tokens = torch.cat([tokens.new([bos_token]), tokens]) tokens = F.pad(tokens, (0, padding_needed), value=self.dictionary.pad()) n_src_tokens[i] = tokens n_src_lengths[i] = tokens_size + 1 device = net_input["src_tokens"].device # This seems to be important del net_input["src_tokens"] del net_input["src_lengths"] net_input["src_tokens"] = n_src_tokens.to(device) net_input["src_lengths"] = n_src_lengths.to(device) def generate(self, smp, model): model.eval() orig_lang = ( self.dictionary[smp["net_input"]["src_tokens"][0][0]] .replace(" ", "") .replace("_", "") ) bos_token = smp["net_input"]["prev_output_tokens"][0][0] with torch.no_grad(): generated = self.sequence_generators[orig_lang].generate( models=[model], sample=smp, bos_token=bos_token ) return generated def get_other_lang(self, lang): # TODO: allow more complex mapping if lang != self.mono_langs[0]: return self.mono_langs[0] if len(self.mono_langs) == 2: return self.mono_langs[1] return self.mono_langs[np.random.randint(1, len(self.mono_langs))] def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): model.train() model.set_num_updates(update_num) agg_loss, agg_sample_size = 0.0, 0.0 agg_logging_output: Dict[str, float] = defaultdict(float) dataset_keys = self.datasets["train"].datasets.keys() weights = { "BT": self.lambda_bt(update_num), "DENOISE": self.lambda_dae(update_num), } log_keys = {"BT": "bt_", "DENOISE": "dae_"} for dataset_key in dataset_keys: smp = sample[dataset_key] mono_lang, task_subtype = dataset_key.split("-") if weights[task_subtype] == 0: continue if task_subtype == "BT": with torch.autograd.profiler.record_function("backtranslation"): model.eval() # TODO: Could we translate to several language at once ? # this would allow to share encoder_out and maximize GPU usage. 
other_lang = self.get_other_lang(mono_lang) self.backtranslate_sample(smp, mono_lang, other_lang) self.display_samples_once_in_a_while(smp, mono_lang, other_lang) model.train() # Like in FairseqTask.train_step with torch.autograd.profiler.record_function("forward"): loss, sample_size, logging_output = criterion(model, smp) loss *= weights[task_subtype] if ignore_grad: loss *= 0 with torch.autograd.profiler.record_function("backward"): optimizer.backward(loss) agg_loss += loss.item() agg_sample_size += sample_size for k in logging_output: agg_logging_output[log_keys[task_subtype] + k] += logging_output[k] agg_logging_output[k] += logging_output[k] return agg_loss, agg_sample_size, agg_logging_output def get_bos_token_from_sample(self, sample): net_input = sample["net_input"] source_lang_token_id = torch.unique(net_input["src_tokens"][:, 0]).item() source_lang_token = self.dictionary[source_lang_token_id].replace("_", "") target_lang_token_id = _lang_token_index( self.dictionary, self.get_other_lang(source_lang_token) ) return target_lang_token_id def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) bt_sample_size = sum(x.get("bt_sample_size", 0) for x in logging_outputs) if bt_sample_size: bt_loss_sum = sum(x.get("bt_loss", 0) for x in logging_outputs) bt_loss_sum *= 1 / bt_sample_size / math.log(2) metrics.log_scalar("bt_loss", bt_loss_sum, bt_sample_size, round=3) bt_nll_loss_sum = sum(x.get("bt_nll_loss", 0) for x in logging_outputs) bt_ntokens = sum(x.get("bt_ntokens", 0) for x in logging_outputs) bt_nll_loss_sum *= 1 / bt_ntokens / math.log(2) metrics.log_scalar("bt_nll_loss", bt_nll_loss_sum, bt_ntokens, round=3) metrics.log_derived( "bt_ppl", lambda meters: utils.get_perplexity(meters["bt_nll_loss"].avg) ) dae_sample_size = sum(x.get("dae_sample_size", 0) for x in logging_outputs) if dae_sample_size: dae_loss_sum = sum(x.get("dae_loss", 0) for x in logging_outputs) dae_loss_sum *= 1 / dae_sample_size / math.log(2) metrics.log_scalar("dae_loss", dae_loss_sum, dae_sample_size, round=3) dae_nll_loss_sum = sum(x.get("dae_nll_loss", 0) for x in logging_outputs) dae_ntokens = sum(x.get("dae_ntokens", 0) for x in logging_outputs) dae_nll_loss_sum *= 1 / dae_ntokens / math.log(2) metrics.log_scalar("dae_nll_loss", dae_nll_loss_sum, dae_ntokens, round=3) metrics.log_derived( "dae_ppl", lambda meters: utils.get_perplexity(meters["dae_nll_loss"].avg), ) @torch.no_grad() def extend_embedding( emb: nn.Module, new_vocab_size: int, copy_from_token_id: int ) -> None: old_emb_data = emb.weight.data (old_vocab_size, dim) = old_emb_data.shape assert new_vocab_size >= old_vocab_size if new_vocab_size > old_vocab_size: emb.weight.data = torch.zeros((new_vocab_size, dim)) emb.weight.data[:old_vocab_size, :] = old_emb_data # initialize new embeddings emb.weight.data[old_vocab_size:, :] = old_emb_data[copy_from_token_id] if hasattr(emb, "num_embeddings"): emb.num_embeddings = new_vocab_size if hasattr(emb, "out_features"): emb.out_features = new_vocab_size if getattr(emb, "bias", None) is None: return # Fix the bias. # Bias shape can be different from the previous vocab size # if the weight matrix was shared and alread extended but not the bias. 
(old_vocab_size,) = emb.bias.shape assert new_vocab_size >= old_vocab_size if new_vocab_size > old_vocab_size: old_bias = emb.bias.data new_bias = torch.zeros( (new_vocab_size,), dtype=old_bias.dtype, device=old_bias.device ) new_bias[:old_vocab_size] = old_bias emb.bias.data = new_bias def add_secial_tokens_to_dict_and_model( dictionary: "fairseq.data.Dictionary", model: nn.Module, mono_langs: Sequence[str], ) -> None: embs = model.encoder.embed_tokens vocab_size, embedding_dim = embs.weight.shape # The model may or may not have a '<mask>' embedding yet assert ( len(dictionary) <= vocab_size <= len(dictionary) + 1 ), f"Dictionary len ({len(dictionary)}) doesn't match embs shape ({embs.weight.shape})" # TODO: we should reuse the pretrained model dict which already has <mask> dictionary.add_symbol("<mask>") for lang in mono_langs: lang_token = _lang_token(lang) dictionary.add_symbol(lang_token) logger.info( f"dictionary: {len(dictionary)} -> {vocab_size} tokens " f"after adding {len(mono_langs)} lang tokens." ) if len(dictionary) <= vocab_size: return extend_embedding(embs, len(dictionary), dictionary.bos()) dec_embs = model.decoder.embed_tokens extend_embedding(dec_embs, len(dictionary), dictionary.bos()) lm_head = model.decoder.output_projection extend_embedding(lm_head, len(dictionary), dictionary.bos()) assert lm_head.weight.shape == (len(dictionary), embedding_dim) def _lang_token(lang: str) -> str: return f"__{lang}__" def _lang_token_index(dictionary, lang: str) -> int: return dictionary.index(_lang_token(lang)) @contextlib.contextmanager def assert_weights_have_changed(model: nn.Module): def checksum(model: nn.Module) -> float: return sum(p.sum().item() for p in model.parameters()) initial_checksum = checksum(model) yield model final_checksum = checksum(model) logger.info( f"initial_checksum={initial_checksum} -> final_checksum={final_checksum}" ) assert initial_checksum != final_checksum, "Model hasn't changed !"
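# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original fairseq file), assuming
# it is executed in the same module as the helpers above. It shows how
# `_lang_token` names the language markers and how `extend_embedding` grows an
# embedding table by copying the BOS row into the newly added slots. The
# vocabulary sizes and token ids below are made up for illustration.
import torch
import torch.nn as nn


def _demo_extend_embedding() -> None:
    assert _lang_token("ro") == "__ro__"

    old_vocab, dim = 8, 4
    emb = nn.Embedding(num_embeddings=old_vocab, embedding_dim=dim)
    bos_id = 0

    # Pretend two language tokens were appended to the dictionary.
    extend_embedding(emb, new_vocab_size=old_vocab + 2, copy_from_token_id=bos_id)

    assert emb.weight.shape == (old_vocab + 2, dim)
    assert emb.num_embeddings == old_vocab + 2
    # The new rows start out as copies of the BOS embedding.
    assert torch.equal(emb.weight[old_vocab], emb.weight[bos_id])
    assert torch.equal(emb.weight[old_vocab + 1], emb.weight[bos_id])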
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/online_backtranslation.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging

from fairseq.tasks import register_task
from fairseq.tasks.speech_to_text import SpeechToTextTask
from fairseq.tasks.translation import TranslationTask, TranslationConfig

try:
    import examples.simultaneous_translation  # noqa

    import_successful = True
except BaseException:
    import_successful = False


logger = logging.getLogger(__name__)


def check_import(flag):
    if not flag:
        raise ImportError(
            "'examples.simultaneous_translation' is not correctly imported. "
            "Please consider running `pip install -e $FAIRSEQ_DIR`."
        )


@register_task("simul_speech_to_text")
class SimulSpeechToTextTask(SpeechToTextTask):
    def __init__(self, args, tgt_dict):
        check_import(import_successful)

        super().__init__(args, tgt_dict)


@register_task("simul_text_to_text", dataclass=TranslationConfig)
class SimulTextToTextTask(TranslationTask):
    def __init__(self, cfg, src_dict, tgt_dict):
        check_import(import_successful)

        super().__init__(cfg, src_dict, tgt_dict)
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/simultaneous_translation.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import os.path as op import torch import torch.nn.functional as F import numpy as np from fairseq.data.audio.text_to_speech_dataset import TextToSpeechDatasetCreator from fairseq.tasks import register_task from fairseq.tasks.speech_to_text import SpeechToTextTask from fairseq.speech_generator import ( AutoRegressiveSpeechGenerator, NonAutoregressiveSpeechGenerator, TeacherForcingAutoRegressiveSpeechGenerator, ) logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, ) logger = logging.getLogger(__name__) try: from tensorboardX import SummaryWriter except ImportError: logger.info("Please install tensorboardX: pip install tensorboardX") SummaryWriter = None @register_task("text_to_speech") class TextToSpeechTask(SpeechToTextTask): @staticmethod def add_args(parser): parser.add_argument("data", help="manifest root path") parser.add_argument( "--config-yaml", type=str, default="config.yaml", help="Configuration YAML filename (under manifest root)", ) parser.add_argument( "--max-source-positions", default=1024, type=int, metavar="N", help="max number of tokens in the source sequence", ) parser.add_argument( "--max-target-positions", default=1200, type=int, metavar="N", help="max number of tokens in the target sequence", ) parser.add_argument("--n-frames-per-step", type=int, default=1) parser.add_argument("--eos-prob-threshold", type=float, default=0.5) parser.add_argument("--eval-inference", action="store_true") parser.add_argument("--eval-tb-nsample", type=int, default=8) parser.add_argument("--vocoder", type=str, default="griffin_lim") parser.add_argument("--spec-bwd-max-iter", type=int, default=8) def __init__(self, args, src_dict): super().__init__(args, src_dict) self.src_dict = src_dict self.sr = self.data_cfg.config.get("features").get("sample_rate") self.tensorboard_writer = None self.tensorboard_dir = "" if args.tensorboard_logdir and SummaryWriter is not None: self.tensorboard_dir = os.path.join(args.tensorboard_logdir, "valid_extra") def load_dataset(self, split, epoch=1, combine=False, **kwargs): is_train_split = split.startswith("train") pre_tokenizer = self.build_tokenizer(self.args) bpe_tokenizer = self.build_bpe(self.args) self.datasets[split] = TextToSpeechDatasetCreator.from_tsv( self.args.data, self.data_cfg, split, self.src_dict, pre_tokenizer, bpe_tokenizer, is_train_split=is_train_split, epoch=epoch, seed=self.args.seed, n_frames_per_step=self.args.n_frames_per_step, speaker_to_id=self.speaker_to_id, ) @property def target_dictionary(self): return None @property def source_dictionary(self): return self.src_dict def get_speaker_embeddings_path(self): speaker_emb_path = None if self.data_cfg.config.get("speaker_emb_filename") is not None: speaker_emb_path = op.join( self.args.data, self.data_cfg.config.get("speaker_emb_filename") ) return speaker_emb_path @classmethod def get_speaker_embeddings(cls, args): embed_speaker = None if args.speaker_to_id is not None: if args.speaker_emb_path is None: embed_speaker = torch.nn.Embedding( len(args.speaker_to_id), args.speaker_embed_dim ) else: speaker_emb_mat = np.load(args.speaker_emb_path) assert speaker_emb_mat.shape[1] == args.speaker_embed_dim embed_speaker = torch.nn.Embedding.from_pretrained( torch.from_numpy(speaker_emb_mat), freeze=True, ) 
logger.info( f"load speaker embeddings from {args.speaker_emb_path}. " f"train embedding? {embed_speaker.weight.requires_grad}\n" f"embeddings:\n{speaker_emb_mat}" ) return embed_speaker def build_model(self, cfg, from_checkpoint=False): cfg.pitch_min = self.data_cfg.config["features"].get("pitch_min", None) cfg.pitch_max = self.data_cfg.config["features"].get("pitch_max", None) cfg.energy_min = self.data_cfg.config["features"].get("energy_min", None) cfg.energy_max = self.data_cfg.config["features"].get("energy_max", None) cfg.speaker_emb_path = self.get_speaker_embeddings_path() model = super().build_model(cfg, from_checkpoint) self.generator = None if getattr(cfg, "eval_inference", False): self.generator = self.build_generator([model], cfg) return model def build_generator(self, models, cfg, vocoder=None, **unused): if vocoder is None: vocoder = self.build_default_vocoder() model = models[0] if getattr(model, "NON_AUTOREGRESSIVE", False): return NonAutoregressiveSpeechGenerator(model, vocoder, self.data_cfg) else: generator = AutoRegressiveSpeechGenerator if getattr(cfg, "teacher_forcing", False): generator = TeacherForcingAutoRegressiveSpeechGenerator logger.info("Teacher forcing mode for generation") return generator( model, vocoder, self.data_cfg, max_iter=self.args.max_target_positions, eos_prob_threshold=self.args.eos_prob_threshold, ) def build_default_vocoder(self): from fairseq.models.text_to_speech.vocoder import get_vocoder vocoder = get_vocoder(self.args, self.data_cfg) if torch.cuda.is_available() and not self.args.cpu: vocoder = vocoder.cuda() else: vocoder = vocoder.cpu() return vocoder def valid_step(self, sample, model, criterion): loss, sample_size, logging_output = super().valid_step(sample, model, criterion) if getattr(self.args, "eval_inference", False): hypos, inference_losses = self.valid_step_with_inference( sample, model, self.generator ) for k, v in inference_losses.items(): assert k not in logging_output logging_output[k] = v picked_id = 0 if self.tensorboard_dir and (sample["id"] == picked_id).any(): self.log_tensorboard( sample, hypos[: self.args.eval_tb_nsample], model._num_updates, is_na_model=getattr(model, "NON_AUTOREGRESSIVE", False), ) return loss, sample_size, logging_output def valid_step_with_inference(self, sample, model, generator): hypos = generator.generate(model, sample, has_targ=True) losses = { "mcd_loss": 0.0, "targ_frames": 0.0, "pred_frames": 0.0, "nins": 0.0, "ndel": 0.0, } rets = batch_mel_cepstral_distortion( [hypo["targ_waveform"] for hypo in hypos], [hypo["waveform"] for hypo in hypos], self.sr, normalize_type=None, ) for d, extra in rets: pathmap = extra[-1] losses["mcd_loss"] += d.item() losses["targ_frames"] += pathmap.size(0) losses["pred_frames"] += pathmap.size(1) losses["nins"] += (pathmap.sum(dim=1) - 1).sum().item() losses["ndel"] += (pathmap.sum(dim=0) - 1).sum().item() return hypos, losses def log_tensorboard(self, sample, hypos, num_updates, is_na_model=False): if self.tensorboard_writer is None: self.tensorboard_writer = SummaryWriter(self.tensorboard_dir) tb_writer = self.tensorboard_writer for b in range(len(hypos)): idx = sample["id"][b] text = sample["src_texts"][b] targ = hypos[b]["targ_feature"] pred = hypos[b]["feature"] attn = hypos[b]["attn"] if is_na_model: data = plot_tts_output( [targ.transpose(0, 1), pred.transpose(0, 1)], [f"target (idx={idx})", "output"], attn, "alignment", ret_np=True, suptitle=text, ) else: eos_prob = hypos[b]["eos_prob"] data = plot_tts_output( [targ.transpose(0, 1), pred.transpose(0, 1), 
attn], [f"target (idx={idx})", "output", "alignment"], eos_prob, "eos prob", ret_np=True, suptitle=text, ) tb_writer.add_image( f"inference_sample_{b}", data, num_updates, dataformats="HWC" ) if hypos[b]["waveform"] is not None: targ_wave = hypos[b]["targ_waveform"].detach().cpu().float() pred_wave = hypos[b]["waveform"].detach().cpu().float() tb_writer.add_audio( f"inference_targ_{b}", targ_wave, num_updates, sample_rate=self.sr ) tb_writer.add_audio( f"inference_pred_{b}", pred_wave, num_updates, sample_rate=self.sr ) def save_figure_to_numpy(fig): data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) return data DEFAULT_V_MIN = np.log(1e-5) def plot_tts_output( data_2d, title_2d, data_1d, title_1d, figsize=(24, 4), v_min=DEFAULT_V_MIN, v_max=3, ret_np=False, suptitle="", ): try: import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable except ImportError: raise ImportError("Please install Matplotlib: pip install matplotlib") data_2d = [ x.detach().cpu().float().numpy() if isinstance(x, torch.Tensor) else x for x in data_2d ] fig, axes = plt.subplots(1, len(data_2d) + 1, figsize=figsize) if suptitle: fig.suptitle(suptitle[:400]) # capped at 400 chars axes = [axes] if len(data_2d) == 0 else axes for ax, x, name in zip(axes, data_2d, title_2d): ax.set_title(name) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) im = ax.imshow( x, origin="lower", aspect="auto", vmin=max(x.min(), v_min), vmax=min(x.max(), v_max), ) fig.colorbar(im, cax=cax, orientation="vertical") if isinstance(data_1d, torch.Tensor): data_1d = data_1d.detach().cpu().numpy() axes[-1].plot(data_1d) axes[-1].set_title(title_1d) plt.tight_layout() if ret_np: fig.canvas.draw() data = save_figure_to_numpy(fig) plt.close(fig) return data def antidiag_indices(offset, min_i=0, max_i=None, min_j=0, max_j=None): """ for a (3, 4) matrix with min_i=1, max_i=3, min_j=1, max_j=4, outputs offset=2 (1, 1), offset=3 (2, 1), (1, 2) offset=4 (2, 2), (1, 3) offset=5 (2, 3) constraints: i + j = offset min_j <= j < max_j min_i <= offset - j < max_i """ if max_i is None: max_i = offset + 1 if max_j is None: max_j = offset + 1 min_j = max(min_j, offset - max_i + 1, 0) max_j = min(max_j, offset - min_i + 1, offset + 1) j = torch.arange(min_j, max_j) i = offset - j return torch.stack([i, j]) def batch_dynamic_time_warping(distance, shapes=None): """full batched DTW without any constraints distance: (batchsize, max_M, max_N) matrix shapes: (batchsize,) vector specifying (M, N) for each entry """ # ptr: 0=left, 1=up-left, 2=up ptr2dij = {0: (0, -1), 1: (-1, -1), 2: (-1, 0)} bsz, m, n = distance.size() cumdist = torch.zeros_like(distance) backptr = torch.zeros_like(distance).type(torch.int32) - 1 # initialize cumdist[:, 0, :] = distance[:, 0, :].cumsum(dim=-1) cumdist[:, :, 0] = distance[:, :, 0].cumsum(dim=-1) backptr[:, 0, :] = 0 backptr[:, :, 0] = 2 # DP with optimized anti-diagonal parallelization, O(M+N) steps for offset in range(2, m + n - 1): ind = antidiag_indices(offset, 1, m, 1, n) c = torch.stack( [ cumdist[:, ind[0], ind[1] - 1], cumdist[:, ind[0] - 1, ind[1] - 1], cumdist[:, ind[0] - 1, ind[1]], ], dim=2, ) v, b = c.min(axis=-1) backptr[:, ind[0], ind[1]] = b.int() cumdist[:, ind[0], ind[1]] = v + distance[:, ind[0], ind[1]] # backtrace pathmap = torch.zeros_like(backptr) for b in range(bsz): i = m - 1 if shapes is None else (shapes[b][0] - 1).item() j = n - 1 if shapes is None else 
(shapes[b][1] - 1).item() dtwpath = [(i, j)] while (i != 0 or j != 0) and len(dtwpath) < 10000: assert i >= 0 and j >= 0 di, dj = ptr2dij[backptr[b, i, j].item()] i, j = i + di, j + dj dtwpath.append((i, j)) dtwpath = dtwpath[::-1] indices = torch.from_numpy(np.array(dtwpath)) pathmap[b, indices[:, 0], indices[:, 1]] = 1 return cumdist, backptr, pathmap def compute_l2_dist(x1, x2): """compute an (m, n) L2 distance matrix from (m, d) and (n, d) matrices""" return torch.cdist(x1.unsqueeze(0), x2.unsqueeze(0), p=2).squeeze(0).pow(2) def compute_rms_dist(x1, x2): l2_dist = compute_l2_dist(x1, x2) return (l2_dist / x1.size(1)).pow(0.5) def get_divisor(pathmap, normalize_type): if normalize_type is None: return 1 elif normalize_type == "len1": return pathmap.size(0) elif normalize_type == "len2": return pathmap.size(1) elif normalize_type == "path": return pathmap.sum().item() else: raise ValueError(f"normalize_type {normalize_type} not supported") def batch_compute_distortion(y1, y2, sr, feat_fn, dist_fn, normalize_type): d, s, x1, x2 = [], [], [], [] for cur_y1, cur_y2 in zip(y1, y2): assert cur_y1.ndim == 1 and cur_y2.ndim == 1 cur_x1 = feat_fn(cur_y1) cur_x2 = feat_fn(cur_y2) x1.append(cur_x1) x2.append(cur_x2) cur_d = dist_fn(cur_x1, cur_x2) d.append(cur_d) s.append(d[-1].size()) max_m = max(ss[0] for ss in s) max_n = max(ss[1] for ss in s) d = torch.stack( [F.pad(dd, (0, max_n - dd.size(1), 0, max_m - dd.size(0))) for dd in d] ) s = torch.LongTensor(s).to(d.device) cumdists, backptrs, pathmaps = batch_dynamic_time_warping(d, s) rets = [] itr = zip(s, x1, x2, d, cumdists, backptrs, pathmaps) for (m, n), cur_x1, cur_x2, dist, cumdist, backptr, pathmap in itr: cumdist = cumdist[:m, :n] backptr = backptr[:m, :n] pathmap = pathmap[:m, :n] divisor = get_divisor(pathmap, normalize_type) distortion = cumdist[-1, -1] / divisor ret = distortion, (cur_x1, cur_x2, dist, cumdist, backptr, pathmap) rets.append(ret) return rets def batch_mel_cepstral_distortion(y1, y2, sr, normalize_type="path", mfcc_fn=None): """ https://arxiv.org/pdf/2011.03568.pdf The root mean squared error computed on 13-dimensional MFCC using DTW for alignment. MFCC features are computed from an 80-channel log-mel spectrogram using a 50ms Hann window and hop of 12.5ms. y1: list of waveforms y2: list of waveforms sr: sampling rate """ try: import torchaudio except ImportError: raise ImportError("Please install torchaudio: pip install torchaudio") if mfcc_fn is None or mfcc_fn.sample_rate != sr: melkwargs = { "n_fft": int(0.05 * sr), "win_length": int(0.05 * sr), "hop_length": int(0.0125 * sr), "f_min": 20, "n_mels": 80, "window_fn": torch.hann_window, } mfcc_fn = torchaudio.transforms.MFCC( sr, n_mfcc=13, log_mels=True, melkwargs=melkwargs ).to(y1[0].device) return batch_compute_distortion( y1, y2, sr, lambda y: mfcc_fn(y).transpose(-1, -2), compute_rms_dist, normalize_type, )
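# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original fairseq file), assuming it is
# executed in the same module as the helpers above. It checks the small case
# given in the `antidiag_indices` docstring and runs the batched DTW on a toy
# distance matrix to show the shape and corner behaviour of the warping path.
import torch


def _demo_dtw_helpers() -> None:
    # offset=3 with the docstring's bounds yields the cells (2, 1) and (1, 2).
    idx = antidiag_indices(3, min_i=1, max_i=3, min_j=1, max_j=4)
    assert idx.tolist() == [[2, 1], [1, 2]]

    # A toy (batch=1, M=3, N=4) distance matrix.
    dist = torch.rand(1, 3, 4)
    cumdist, backptr, pathmap = batch_dynamic_time_warping(dist)
    assert cumdist.shape == dist.shape and pathmap.shape == dist.shape
    # The warping path always connects the two corners of the matrix.
    assert pathmap[0, 0, 0] == 1
    assert pathmap[0, -1, -1] == 1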
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/text_to_speech.py
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import logging import os import sys from argparse import Namespace from dataclasses import dataclass, field from typing import Optional from omegaconf import MISSING, II, OmegaConf from fairseq.data import BinarizedAudioDataset, FileAudioDataset from fairseq.dataclass import FairseqDataclass, ChoiceEnum from fairseq.data.text_compressor import TextCompressionLevel from . import FairseqTask, register_task logger = logging.getLogger(__name__) @dataclass class InferredW2vConfig: # The following are needed to precompute mask and mask channel indices # before model's forward. mask_length: Optional[int] = II("model.mask_length") mask_prob: Optional[float] = II("model.mask_prob") mask_selection: Optional[str] = II("model.mask_selection") mask_other: Optional[float] = II("model.mask_other") no_mask_overlap: Optional[bool] = II("model.no_mask_overlap") mask_min_space: Optional[int] = II("model.mask_min_space") mask_channel_length: Optional[int] = II("model.mask_channel_length") mask_channel_prob: Optional[float] = II("model.mask_channel_prob") mask_channel_selection: Optional[str] = II("model.mask_channel_selection") mask_channel_other: Optional[float] = II("model.mask_channel_other") no_mask_channel_overlap: Optional[bool] = II("model.no_mask_channel_overlap") mask_channel_min_space: Optional[int] = II("model.mask_channel_min_space") conv_feature_layers: Optional[str] = II("model.conv_feature_layers") encoder_embed_dim: Optional[int] = II("model.encoder_embed_dim") @dataclass class AudioPretrainingConfig(FairseqDataclass): data: str = field(default=MISSING, metadata={"help": "path to data directory"}) labels: Optional[str] = field( default=None, metadata={"help": "extension of the label file to load, used for fine-tuning"}, ) binarized_dataset: bool = field( default=False, metadata={ "help": "if true, loads binarized dataset (useful for very large datasets). " "See examples/wav2vec/scripts/binarize_manifest.sh" }, ) sample_rate: int = field( default=16_000, metadata={ "help": "target sample rate. audio files will be up/down sampled to this rate" }, ) normalize: bool = field( default=False, metadata={"help": "if set, normalizes input to have 0 mean and unit variance"}, ) enable_padding: bool = field( default=False, metadata={"help": "pad shorter samples instead of cropping"} ) max_sample_size: Optional[int] = field( default=None, metadata={"help": "max sample size to crop to for batching"} ) min_sample_size: Optional[int] = field( default=None, metadata={"help": "min sample size to skip small examples"} ) num_batch_buckets: int = field( default=0, metadata={"help": "number of buckets"}, ) precompute_mask_indices: bool = field( default=False, metadata={ "help": "flag to compute mask indices in data preparation.", }, ) inferred_w2v_config: Optional[InferredW2vConfig] = field( default=None, metadata={ "help": "wav2vec 2.0 masking arguments used to pre-compute masks (required for TPU)", }, ) tpu: bool = II("common.tpu") text_compression_level: ChoiceEnum([x.name for x in TextCompressionLevel]) = field( default="none", metadata={ "help": "compression level for texts (e.g. audio filenames, " "target texts): none/low/high (default: none). 
" }, ) @register_task("audio_pretraining", dataclass=AudioPretrainingConfig) class AudioPretrainingTask(FairseqTask): """ """ cfg: AudioPretrainingConfig @classmethod def setup_task(cls, cfg: AudioPretrainingConfig, **kwargs): """Setup the task (e.g., load dictionaries). Args: cfg (AudioPretrainingConfig): configuration of this task """ return cls(cfg) def _get_mask_precompute_kwargs(self, cfg): if self.cfg.precompute_mask_indices or self.cfg.tpu: assert ( cfg.inferred_w2v_config is not None ), "inferred_w2v_config must be set" return OmegaConf.to_container( cfg.inferred_w2v_config, resolve=True, enum_to_str=True ) else: return {} def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs): data_path = self.cfg.data task_cfg = task_cfg or self.cfg # upgrade old task if isinstance(task_cfg, Namespace): if not hasattr(task_cfg, "autoregressive"): task_cfg.autoregressive = not task_cfg.criterion == "ctc" text_compression_level = getattr( TextCompressionLevel, str(self.cfg.text_compression_level) ) if getattr(task_cfg, "binarized_dataset", False): self.datasets[split] = BinarizedAudioDataset( data_path, split=split, sample_rate=task_cfg.get("sample_rate", self.cfg.sample_rate), max_sample_size=self.cfg.max_sample_size, min_sample_size=self.cfg.min_sample_size, pad=task_cfg.labels is not None or task_cfg.enable_padding, normalize=task_cfg.normalize, num_buckets=self.cfg.num_batch_buckets or int(self.cfg.tpu), compute_mask_indices=(self.cfg.precompute_mask_indices or self.cfg.tpu), **self._get_mask_precompute_kwargs(task_cfg), ) else: manifest_path = os.path.join(data_path, "{}.tsv".format(split)) self.datasets[split] = FileAudioDataset( manifest_path=manifest_path, sample_rate=task_cfg.get("sample_rate", self.cfg.sample_rate), max_sample_size=self.cfg.max_sample_size, min_sample_size=self.cfg.min_sample_size, pad=task_cfg.labels is not None or task_cfg.enable_padding, normalize=task_cfg.normalize, num_buckets=self.cfg.num_batch_buckets or int(self.cfg.tpu), compute_mask_indices=(self.cfg.precompute_mask_indices or self.cfg.tpu), text_compression_level=text_compression_level, **self._get_mask_precompute_kwargs(task_cfg), ) if self.cfg.tpu and task_cfg.inferred_w2v_config.mask_channel_prob == 0.0: logger.info( "Pretraining on TPUs may suffer convergence " "issues when training with `mask_channel_prob` value of " "0. You may want to set this to a low value close to 0." ) @property def source_dictionary(self): return None @property def target_dictionary(self): return None def max_positions(self): """Maximum input length supported by the encoder.""" return sys.maxsize, sys.maxsize def build_model(self, model_cfg: FairseqDataclass, from_checkpoint=False): model = super().build_model(model_cfg, from_checkpoint) actualized_cfg = getattr(model, "cfg", None) if actualized_cfg is not None: # if "w2v_args" in actualized_cfg: if hasattr(actualized_cfg, "w2v_args"): model_cfg.w2v_args = actualized_cfg.w2v_args return model
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/audio_pretraining.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field import itertools import json import logging import os from typing import Optional from argparse import Namespace from omegaconf import II import numpy as np from fairseq import metrics, utils from fairseq.data import ( AppendTokenDataset, ConcatDataset, LanguagePairDataset, PrependTokenDataset, StripTokenDataset, TruncateDataset, data_utils, encoders, indexed_dataset, ) from fairseq.data.indexed_dataset import get_available_dataset_impl from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.tasks import FairseqTask, register_task EVAL_BLEU_ORDER = 4 logger = logging.getLogger(__name__) def load_langpair_dataset( data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, max_source_positions, max_target_positions, prepend_bos=False, load_alignments=False, truncate_source=False, append_source_id=False, num_buckets=0, shuffle=True, pad_to_multiple=1, prepend_bos_src=None, ): def split_exists(split, src, tgt, lang, data_path): filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang)) return indexed_dataset.dataset_exists(filename, impl=dataset_impl) src_datasets = [] tgt_datasets = [] for k in itertools.count(): split_k = split + (str(k) if k > 0 else "") # infer langcode if split_exists(split_k, src, tgt, src, data_path): prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt)) elif split_exists(split_k, tgt, src, src, data_path): prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src)) else: if k > 0: break else: raise FileNotFoundError( "Dataset not found: {} ({})".format(split, data_path) ) src_dataset = data_utils.load_indexed_dataset( prefix + src, src_dict, dataset_impl ) if truncate_source: src_dataset = AppendTokenDataset( TruncateDataset( StripTokenDataset(src_dataset, src_dict.eos()), max_source_positions - 1, ), src_dict.eos(), ) src_datasets.append(src_dataset) tgt_dataset = data_utils.load_indexed_dataset( prefix + tgt, tgt_dict, dataset_impl ) if tgt_dataset is not None: tgt_datasets.append(tgt_dataset) logger.info( "{} {} {}-{} {} examples".format( data_path, split_k, src, tgt, len(src_datasets[-1]) ) ) if not combine: break assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0 if len(src_datasets) == 1: src_dataset = src_datasets[0] tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None else: sample_ratios = [1] * len(src_datasets) sample_ratios[0] = upsample_primary src_dataset = ConcatDataset(src_datasets, sample_ratios) if len(tgt_datasets) > 0: tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios) else: tgt_dataset = None if prepend_bos: assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index") src_dataset = PrependTokenDataset(src_dataset, src_dict.bos()) if tgt_dataset is not None: tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos()) elif prepend_bos_src is not None: logger.info(f"prepending src bos: {prepend_bos_src}") src_dataset = PrependTokenDataset(src_dataset, prepend_bos_src) eos = None if append_source_id: src_dataset = AppendTokenDataset( src_dataset, src_dict.index("[{}]".format(src)) ) if tgt_dataset is not None: tgt_dataset = AppendTokenDataset( tgt_dataset, tgt_dict.index("[{}]".format(tgt)) ) eos = tgt_dict.index("[{}]".format(tgt)) align_dataset = None if 
load_alignments: align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt)) if indexed_dataset.dataset_exists(align_path, impl=dataset_impl): align_dataset = data_utils.load_indexed_dataset( align_path, None, dataset_impl ) tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None return LanguagePairDataset( src_dataset, src_dataset.sizes, src_dict, tgt_dataset, tgt_dataset_sizes, tgt_dict, left_pad_source=left_pad_source, left_pad_target=left_pad_target, align_dataset=align_dataset, eos=eos, num_buckets=num_buckets, shuffle=shuffle, pad_to_multiple=pad_to_multiple, ) @dataclass class TranslationConfig(FairseqDataclass): data: Optional[str] = field( default=None, metadata={ "help": "colon separated path to data directories list, will be iterated upon during epochs " "in round-robin manner; however, valid and test data are always in the first directory " "to avoid the need for repeating them in all directories" }, ) source_lang: Optional[str] = field( default=None, metadata={ "help": "source language", "argparse_alias": "-s", }, ) target_lang: Optional[str] = field( default=None, metadata={ "help": "target language", "argparse_alias": "-t", }, ) load_alignments: bool = field( default=False, metadata={"help": "load the binarized alignments"} ) left_pad_source: bool = field( default=True, metadata={"help": "pad the source on the left"} ) left_pad_target: bool = field( default=False, metadata={"help": "pad the target on the left"} ) max_source_positions: int = field( default=1024, metadata={"help": "max number of tokens in the source sequence"} ) max_target_positions: int = field( default=1024, metadata={"help": "max number of tokens in the target sequence"} ) upsample_primary: int = field( default=-1, metadata={"help": "the amount of upsample primary dataset"} ) truncate_source: bool = field( default=False, metadata={"help": "truncate source to max-source-positions"} ) num_batch_buckets: int = field( default=0, metadata={ "help": "if >0, then bucket source and target lengths into " "N buckets and pad accordingly; this is useful on TPUs to minimize the number of compilations" }, ) train_subset: str = II("dataset.train_subset") dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II( "dataset.dataset_impl" ) required_seq_len_multiple: int = II("dataset.required_seq_len_multiple") # options for reporting BLEU during validation eval_bleu: bool = field( default=False, metadata={"help": "evaluation with BLEU scores"} ) eval_bleu_args: Optional[str] = field( default="{}", metadata={ "help": 'generation args for BLUE scoring, e.g., \'{"beam": 4, "lenpen": 0.6}\', as JSON string' }, ) eval_bleu_detok: str = field( default="space", metadata={ "help": "detokenize before computing BLEU (e.g., 'moses'); required if using --eval-bleu; " "use 'space' to disable detokenization; see fairseq.data.encoders for other options" }, ) eval_bleu_detok_args: Optional[str] = field( default="{}", metadata={"help": "args for building the tokenizer, if needed, as JSON string"}, ) eval_tokenized_bleu: bool = field( default=False, metadata={"help": "compute tokenized BLEU instead of sacrebleu"} ) eval_bleu_remove_bpe: Optional[str] = field( default=None, metadata={ "help": "remove BPE before computing BLEU", "argparse_const": "@@ ", }, ) eval_bleu_print_samples: bool = field( default=False, metadata={"help": "print sample generations during validation"} ) @register_task("translation", dataclass=TranslationConfig) class TranslationTask(FairseqTask): """ Translate from one 
(source) language to another (target) language. Args: src_dict (~fairseq.data.Dictionary): dictionary for the source language tgt_dict (~fairseq.data.Dictionary): dictionary for the target language .. note:: The translation task is compatible with :mod:`fairseq-train`, :mod:`fairseq-generate` and :mod:`fairseq-interactive`. """ cfg: TranslationConfig def __init__(self, cfg: TranslationConfig, src_dict, tgt_dict): super().__init__(cfg) self.src_dict = src_dict self.tgt_dict = tgt_dict @classmethod def setup_task(cls, cfg: TranslationConfig, **kwargs): """Setup the task (e.g., load dictionaries). Args: args (argparse.Namespace): parsed command-line arguments """ paths = utils.split_paths(cfg.data) assert len(paths) > 0 # find language pair automatically if cfg.source_lang is None or cfg.target_lang is None: cfg.source_lang, cfg.target_lang = data_utils.infer_language_pair(paths[0]) if cfg.source_lang is None or cfg.target_lang is None: raise Exception( "Could not infer language pair, please provide it explicitly" ) # load dictionaries src_dict = cls.load_dictionary( os.path.join(paths[0], "dict.{}.txt".format(cfg.source_lang)) ) tgt_dict = cls.load_dictionary( os.path.join(paths[0], "dict.{}.txt".format(cfg.target_lang)) ) assert src_dict.pad() == tgt_dict.pad() assert src_dict.eos() == tgt_dict.eos() assert src_dict.unk() == tgt_dict.unk() logger.info("[{}] dictionary: {} types".format(cfg.source_lang, len(src_dict))) logger.info("[{}] dictionary: {} types".format(cfg.target_lang, len(tgt_dict))) return cls(cfg, src_dict, tgt_dict) def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ paths = utils.split_paths(self.cfg.data) assert len(paths) > 0 if split != self.cfg.train_subset: # if not training data set, use the first shard for valid and test paths = paths[:1] data_path = paths[(epoch - 1) % len(paths)] # infer langcode src, tgt = self.cfg.source_lang, self.cfg.target_lang self.datasets[split] = load_langpair_dataset( data_path, split, src, self.src_dict, tgt, self.tgt_dict, combine=combine, dataset_impl=self.cfg.dataset_impl, upsample_primary=self.cfg.upsample_primary, left_pad_source=self.cfg.left_pad_source, left_pad_target=self.cfg.left_pad_target, max_source_positions=self.cfg.max_source_positions, max_target_positions=self.cfg.max_target_positions, load_alignments=self.cfg.load_alignments, truncate_source=self.cfg.truncate_source, num_buckets=self.cfg.num_batch_buckets, shuffle=(split != "test"), pad_to_multiple=self.cfg.required_seq_len_multiple, ) def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): return LanguagePairDataset( src_tokens, src_lengths, self.source_dictionary, tgt_dict=self.target_dictionary, constraints=constraints, ) def build_model(self, cfg, from_checkpoint=False): model = super().build_model(cfg, from_checkpoint) if self.cfg.eval_bleu: detok_args = json.loads(self.cfg.eval_bleu_detok_args) self.tokenizer = encoders.build_tokenizer( Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args) ) gen_args = json.loads(self.cfg.eval_bleu_args) self.sequence_generator = self.build_generator( [model], Namespace(**gen_args) ) return model def valid_step(self, sample, model, criterion): loss, sample_size, logging_output = super().valid_step(sample, model, criterion) if self.cfg.eval_bleu: bleu = self._inference_with_bleu(self.sequence_generator, sample, model) logging_output["_bleu_sys_len"] = bleu.sys_len 
logging_output["_bleu_ref_len"] = bleu.ref_len # we split counts into separate entries so that they can be # summed efficiently across workers using fast-stat-sync assert len(bleu.counts) == EVAL_BLEU_ORDER for i in range(EVAL_BLEU_ORDER): logging_output["_bleu_counts_" + str(i)] = bleu.counts[i] logging_output["_bleu_totals_" + str(i)] = bleu.totals[i] return loss, sample_size, logging_output def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) if self.cfg.eval_bleu: def sum_logs(key): import torch result = sum(log.get(key, 0) for log in logging_outputs) if torch.is_tensor(result): result = result.cpu() return result counts, totals = [], [] for i in range(EVAL_BLEU_ORDER): counts.append(sum_logs("_bleu_counts_" + str(i))) totals.append(sum_logs("_bleu_totals_" + str(i))) if max(totals) > 0: # log counts as numpy arrays -- log_scalar will sum them correctly metrics.log_scalar("_bleu_counts", np.array(counts)) metrics.log_scalar("_bleu_totals", np.array(totals)) metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len")) metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len")) def compute_bleu(meters): import inspect try: from sacrebleu.metrics import BLEU comp_bleu = BLEU.compute_bleu except ImportError: # compatibility API for sacrebleu 1.x import sacrebleu comp_bleu = sacrebleu.compute_bleu fn_sig = inspect.getfullargspec(comp_bleu)[0] if "smooth_method" in fn_sig: smooth = {"smooth_method": "exp"} else: smooth = {"smooth": "exp"} bleu = comp_bleu( correct=meters["_bleu_counts"].sum, total=meters["_bleu_totals"].sum, sys_len=meters["_bleu_sys_len"].sum, ref_len=meters["_bleu_ref_len"].sum, **smooth, ) return round(bleu.score, 2) metrics.log_derived("bleu", compute_bleu) def max_positions(self): """Return the max sentence length allowed by the task.""" return (self.cfg.max_source_positions, self.cfg.max_target_positions) @property def source_dictionary(self): """Return the source :class:`~fairseq.data.Dictionary`.""" return self.src_dict @property def target_dictionary(self): """Return the target :class:`~fairseq.data.Dictionary`.""" return self.tgt_dict def _inference_with_bleu(self, generator, sample, model): import sacrebleu def decode(toks, escape_unk=False): s = self.tgt_dict.string( toks.int().cpu(), self.cfg.eval_bleu_remove_bpe, # The default unknown string in fairseq is `<unk>`, but # this is tokenized by sacrebleu as `< unk >`, inflating # BLEU scores. Instead, we use a somewhat more verbose # alternative that is unlikely to appear in the real # reference, but doesn't get split into multiple tokens. unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"), ) if self.tokenizer: s = self.tokenizer.decode(s) return s gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None) hyps, refs = [], [] for i in range(len(gen_out)): hyps.append(decode(gen_out[i][0]["tokens"])) refs.append( decode( utils.strip_pad(sample["target"][i], self.tgt_dict.pad()), escape_unk=True, # don't count <unk> as matches to the hypo ) ) if self.cfg.eval_bleu_print_samples: logger.info("example hypothesis: " + hyps[0]) logger.info("example reference: " + refs[0]) if self.cfg.eval_tokenized_bleu: return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none") else: return sacrebleu.corpus_bleu(hyps, [refs])
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/translation.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os from collections import OrderedDict from fairseq import utils from fairseq.data import ( BacktranslationDataset, LanguagePairDataset, NoisingDataset, RoundRobinZipDatasets, data_utils, indexed_dataset, ) from fairseq.models import FairseqMultiModel from fairseq.sequence_generator import SequenceGenerator from . import register_task from .multilingual_translation import MultilingualTranslationTask logger = logging.getLogger(__name__) def _get_bt_dataset_key(lang_pair): return "bt:" + lang_pair def _get_denoising_dataset_key(lang_pair): return "denoising:" + lang_pair # ported from UnsupervisedMT def parse_lambda_config(x): """ Parse the configuration of lambda coefficient (for scheduling). x = "3" # lambda will be a constant equal to x x = "0:1,1000:0" # lambda will start from 1 and linearly decrease # to 0 during the first 1000 iterations x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000 # iterations, then will linearly increase to 1 until iteration 2000 """ split = x.split(",") if len(split) == 1: return float(x), None else: split = [s.split(os.pathsep) for s in split] assert all(len(s) == 2 for s in split) assert all(k.isdigit() for k, _ in split) assert all( int(split[i][0]) < int(split[i + 1][0]) for i in range(len(split) - 1) ) return float(split[0][1]), [(int(k), float(v)) for k, v in split] @register_task("semisupervised_translation") class SemisupervisedTranslationTask(MultilingualTranslationTask): """A task for training multiple translation models simultaneously. We iterate round-robin over batches from multiple language pairs, ordered according to the `--lang-pairs` argument. The training loop is roughly: for i in range(len(epoch)): for lang_pair in args.lang_pairs: batch = next_batch_for_lang_pair(lang_pair) loss = criterion(model_for_lang_pair(lang_pair), batch) loss.backward() optimizer.step() In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset (e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that implements the `FairseqMultiModel` interface. During inference it is required to specify a single `--source-lang` and `--target-lang`, instead of `--lang-pairs`. """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" # fmt: off MultilingualTranslationTask.add_args(parser) parser.add_argument('--lambda-parallel-config', default="1.0", type=str, metavar='CONFIG', help='cross-entropy reconstruction coefficient (parallel data). ' 'use fixed weight during training if set to floating point number. ' 'use piecewise linear function over number of updates to schedule the ' 'weight with the format: w0:step0,w1:step1,...') parser.add_argument('--lambda-denoising-config', default="0.0", type=str, metavar='CONFIG', help='Cross-entropy reconstruction coefficient (denoising autoencoding)' 'use fixed weight during training if set to floating point number. ' 'use piecewise linear function over number of updates to schedule the ' 'weight with the format: w0:step0,w1:step1,...') parser.add_argument('--lambda-otf-bt-config', default="0.0", type=str, metavar='CONFIG', help='cross-entropy reconstruction coefficient (on-the-fly back-translation parallel data)' 'use fixed weight during training if set to floating point number. 
' 'use piecewise linear function over number of updates to schedule the ' 'weight with the format: w0:step0,w1:step1,...') parser.add_argument('--bt-max-len-a', default=1.1, type=float, metavar='N', help='generate back-translated sequences of maximum length ax + b, where x is the ' 'source length') parser.add_argument('--bt-max-len-b', default=10.0, type=float, metavar='N', help='generate back-translated sequences of maximum length ax + b, where x is the ' 'source length') parser.add_argument('--bt-beam-size', default=1, type=int, metavar='N', help='beam size used in beam search of online back-translation') parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N', help='maximum word shuffle distance for denoising autoencoding data generation') parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N', help='word dropout probability for denoising autoencoding data generation') parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N', help='word blanking probability for denoising autoencoding data generation') # fmt: on def __init__(self, args, dicts, training): super().__init__(args, dicts, training) self.lambda_parallel, self.lambda_parallel_steps = parse_lambda_config( args.lambda_parallel_config ) self.lambda_otf_bt, self.lambda_otf_bt_steps = parse_lambda_config( args.lambda_otf_bt_config ) self.lambda_denoising, self.lambda_denoising_steps = parse_lambda_config( args.lambda_denoising_config ) if self.lambda_denoising > 0.0 or self.lambda_denoising_steps is not None: denoising_lang_pairs = [ "%s-%s" % (tgt, tgt) for tgt in {lang_pair.split("-")[1] for lang_pair in args.lang_pairs} ] self.model_lang_pairs = self.model_lang_pairs + denoising_lang_pairs self.backtranslate_datasets = {} self.backtranslators = {} @classmethod def setup_task(cls, args, **kwargs): dicts, training = MultilingualTranslationTask.prepare(args, **kwargs) return cls(args, dicts, training) def load_dataset(self, split, epoch=1, **kwargs): """Load a dataset split.""" paths = utils.split_paths(self.args.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] def split_exists(split, src, tgt, lang): if src is not None: filename = os.path.join( data_path, "{}.{}-{}.{}".format(split, src, tgt, lang) ) else: filename = os.path.join( data_path, "{}.{}-None.{}".format(split, src, tgt) ) return indexed_dataset.dataset_exists(filename, impl=self.args.dataset_impl) def load_indexed_dataset(path, dictionary): return data_utils.load_indexed_dataset( path, dictionary, self.args.dataset_impl ) # load parallel datasets src_datasets, tgt_datasets = {}, {} if ( self.lambda_parallel > 0.0 or self.lambda_parallel_steps is not None or not split.startswith("train") ): for lang_pair in self.lang_pairs: src, tgt = lang_pair.split("-") if split_exists(split, src, tgt, src): prefix = os.path.join( data_path, "{}.{}-{}.".format(split, src, tgt) ) elif split_exists(split, tgt, src, src): prefix = os.path.join( data_path, "{}.{}-{}.".format(split, tgt, src) ) else: continue src_datasets[lang_pair] = load_indexed_dataset( prefix + src, self.dicts[src] ) tgt_datasets[lang_pair] = load_indexed_dataset( prefix + tgt, self.dicts[tgt] ) logger.info( "parallel-{} {} {} examples".format( data_path, split, len(src_datasets[lang_pair]) ) ) if len(src_datasets) == 0: raise FileNotFoundError( "Dataset not found: {} ({})".format(split, data_path) ) # back translation datasets backtranslate_datasets = {} if ( self.lambda_otf_bt > 0.0 or self.lambda_otf_bt_steps is not 
None ) and split.startswith("train"): for lang_pair in self.lang_pairs: src, tgt = lang_pair.split("-") if not split_exists(split, tgt, None, tgt): raise FileNotFoundError( "Dataset not found: backtranslation {} ({})".format( split, data_path ) ) filename = os.path.join( data_path, "{}.{}-None.{}".format(split, tgt, tgt) ) dataset = load_indexed_dataset(filename, self.dicts[tgt]) lang_pair_dataset_tgt = LanguagePairDataset( dataset, dataset.sizes, self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, ) lang_pair_dataset = LanguagePairDataset( dataset, dataset.sizes, src_dict=self.dicts[src], tgt=dataset, tgt_sizes=dataset.sizes, tgt_dict=self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, ) backtranslate_datasets[lang_pair] = BacktranslationDataset( tgt_dataset=self.alter_dataset_langtok( lang_pair_dataset_tgt, src_eos=self.dicts[tgt].eos(), src_lang=tgt, tgt_lang=src, ), backtranslation_fn=self.backtranslators[lang_pair], src_dict=self.dicts[src], tgt_dict=self.dicts[tgt], output_collater=self.alter_dataset_langtok( lang_pair_dataset=lang_pair_dataset, src_eos=self.dicts[src].eos(), src_lang=src, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt, ).collater, ) logger.info( "backtranslate-{}: {} {} {} examples".format( tgt, data_path, split, len(backtranslate_datasets[lang_pair]), ) ) self.backtranslate_datasets[lang_pair] = backtranslate_datasets[ lang_pair ] # denoising autoencoder noising_datasets = {} if ( self.lambda_denoising > 0.0 or self.lambda_denoising_steps is not None ) and split.startswith("train"): for lang_pair in self.lang_pairs: _, tgt = lang_pair.split("-") if not split_exists(split, tgt, None, tgt): continue filename = os.path.join( data_path, "{}.{}-None.{}".format(split, tgt, tgt) ) tgt_dataset1 = load_indexed_dataset(filename, self.dicts[tgt]) tgt_dataset2 = load_indexed_dataset(filename, self.dicts[tgt]) noising_dataset = NoisingDataset( tgt_dataset1, self.dicts[tgt], seed=1, max_word_shuffle_distance=self.args.max_word_shuffle_distance, word_dropout_prob=self.args.word_dropout_prob, word_blanking_prob=self.args.word_blanking_prob, ) noising_datasets[lang_pair] = self.alter_dataset_langtok( LanguagePairDataset( noising_dataset, tgt_dataset1.sizes, self.dicts[tgt], tgt_dataset2, tgt_dataset2.sizes, self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, ), src_eos=self.dicts[tgt].eos(), src_lang=tgt, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt, ) logger.info( "denoising-{}: {} {} {} examples".format( tgt, data_path, split, len(noising_datasets[lang_pair]), ) ) def language_pair_dataset(lang_pair): src, tgt = lang_pair.split("-") src_dataset, tgt_dataset = src_datasets[lang_pair], tgt_datasets[lang_pair] return self.alter_dataset_langtok( LanguagePairDataset( src_dataset, src_dataset.sizes, self.dicts[src], tgt_dataset, tgt_dataset.sizes, self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, ), self.dicts[src].eos(), src, self.dicts[tgt].eos(), tgt, ) self.datasets[split] = RoundRobinZipDatasets( OrderedDict( [ (lang_pair, language_pair_dataset(lang_pair)) for lang_pair in src_datasets.keys() ] + [ (_get_bt_dataset_key(lang_pair), dataset) for lang_pair, dataset in backtranslate_datasets.items() ] + [ (_get_denoising_dataset_key(lang_pair), dataset) for lang_pair, dataset in noising_datasets.items() ] ), eval_key=None if self.training else "%s-%s" % (self.args.source_lang, 
self.args.target_lang), ) def build_model(self, args, from_checkpoint=False): from fairseq import models model = models.build_model(args, self, from_checkpoint) if not isinstance(model, FairseqMultiModel): raise ValueError( "SemisupervisedTranslationTask requires a FairseqMultiModel architecture" ) # create SequenceGenerator for each model that has backtranslation dependency on it self.sequence_generators = {} if ( self.lambda_otf_bt > 0.0 or self.lambda_otf_bt_steps is not None ) and self.training: for lang_pair in self.lang_pairs: src, tgt = lang_pair.split("-") key = "{}-{}".format(tgt, src) self.sequence_generators[key] = SequenceGenerator( [model.models[key]], tgt_dict=self.dicts[src], beam_size=args.bt_beam_size, max_len_a=args.bt_max_len_a, max_len_b=args.bt_max_len_b, ) decoder_lang_tok_idx = self.get_decoder_langtok(src) def backtranslate_fn( sample, model=model.models[key], bos_token=decoder_lang_tok_idx, sequence_generator=self.sequence_generators[key], ): return sequence_generator.generate( [model], sample, bos_token=bos_token, ) self.backtranslators[lang_pair] = backtranslate_fn return model def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): model.train() if update_num > 0: self.update_step(update_num) agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, {} def forward_backward(model, samples, logging_output_key, weight): nonlocal agg_loss, agg_sample_size, agg_logging_output if samples is None or len(samples) == 0: return loss, sample_size, logging_output = criterion(model, samples) if ignore_grad: loss *= 0 else: loss *= weight optimizer.backward(loss) agg_loss += loss.detach().item() # TODO make summing of the sample sizes configurable agg_sample_size += sample_size for k in logging_output: agg_logging_output[k] += logging_output[k] agg_logging_output[logging_output_key] += logging_output[k] if self.lambda_parallel > 0.0: for lang_pair in self.lang_pairs: forward_backward( model.models[lang_pair], sample[lang_pair], lang_pair, self.lambda_parallel, ) if self.lambda_otf_bt > 0.0: for lang_pair in self.lang_pairs: sample_key = _get_bt_dataset_key(lang_pair) forward_backward( model.models[lang_pair], sample[sample_key], sample_key, self.lambda_otf_bt, ) if self.lambda_denoising > 0.0: for lang_pair in self.lang_pairs: _, tgt = lang_pair.split("-") sample_key = _get_denoising_dataset_key(lang_pair) forward_backward( model.models["{0}-{0}".format(tgt)], sample[sample_key], sample_key, self.lambda_denoising, ) return agg_loss, agg_sample_size, agg_logging_output def update_step(self, num_updates): def lambda_step_func(config, n_iter): """ Update a lambda value according to its schedule configuration. """ ranges = [ i for i in range(len(config) - 1) if config[i][0] <= n_iter < config[i + 1][0] ] if len(ranges) == 0: assert n_iter >= config[-1][0] return config[-1][1] assert len(ranges) == 1 i = ranges[0] x_a, y_a = config[i] x_b, y_b = config[i + 1] return y_a + (n_iter - x_a) * float(y_b - y_a) / float(x_b - x_a) if self.lambda_parallel_steps is not None: self.lambda_parallel = lambda_step_func( self.lambda_parallel_steps, num_updates ) if self.lambda_denoising_steps is not None: self.lambda_denoising = lambda_step_func( self.lambda_denoising_steps, num_updates ) if self.lambda_otf_bt_steps is not None: self.lambda_otf_bt = lambda_step_func(self.lambda_otf_bt_steps, num_updates)
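# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original fairseq file), assuming it is
# executed in the same module as `parse_lambda_config` above and on a POSIX
# system (schedule entries are split on `os.pathsep`, i.e. ":").
def _demo_parse_lambda_config() -> None:
    # A constant coefficient: no schedule is attached.
    assert parse_lambda_config("3") == (3.0, None)

    # A piecewise-linear schedule: start at 1.0 and decay to 0.0 by step 1000.
    value, schedule = parse_lambda_config("0:1,1000:0")
    assert value == 1.0
    assert schedule == [(0, 1.0), (1000, 0.0)]
    # With this schedule, update_step() would interpolate the coefficient to
    # 0.5 at update 500 before it reaches 0.0 at update 1000.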
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/semisupervised_translation.py
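The lambda schedules consumed by update_step above are lists of (step, value) breakpoints that lambda_step_func interpolates between linearly. A minimal standalone sketch of that interpolation, using an invented back-translation schedule (the breakpoints are illustrative, not taken from any real config):

# Piecewise-linear schedule lookup, mirroring lambda_step_func above.
def lambda_at_step(schedule, n_iter):
    # schedule: sorted list of (step, value) breakpoints
    if n_iter >= schedule[-1][0]:
        return schedule[-1][1]
    for (x_a, y_a), (x_b, y_b) in zip(schedule, schedule[1:]):
        if x_a <= n_iter < x_b:
            return y_a + (n_iter - x_a) * float(y_b - y_a) / float(x_b - x_a)
    return schedule[0][1]  # before the first breakpoint


# e.g. ramp the on-the-fly back-translation weight from 0 to 1 over the first 100k updates
otf_bt_schedule = [(0, 0.0), (100_000, 1.0)]
print(lambda_at_step(otf_bt_schedule, 50_000))  # 0.5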
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from fairseq.data.audio.frm_text_to_speech_dataset import FrmTextToSpeechDatasetCreator from fairseq.tasks import register_task from fairseq.tasks.text_to_speech import TextToSpeechTask logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, ) logger = logging.getLogger(__name__) @register_task("frm_text_to_speech") class FrmTextToSpeechTask(TextToSpeechTask): @staticmethod def add_args(parser): TextToSpeechTask.add_args(parser) parser.add_argument("--do_chunk", action="store_true", help="train on chunks") parser.add_argument("--chunk_bound", default=-1, type=int) parser.add_argument("--chunk_init", default=50, type=int) parser.add_argument("--chunk_incr", default=5, type=int) parser.add_argument("--add_eos", action="store_true") parser.add_argument("--dedup", action="store_true") parser.add_argument("--ref_fpu", default=-1, type=float) def load_dataset(self, split, **unused_kwargs): is_train_split = split.startswith("train") pre_tokenizer = self.build_tokenizer(self.args) bpe_tokenizer = self.build_bpe(self.args) self.datasets[split] = FrmTextToSpeechDatasetCreator.from_tsv( self.args.data, self.data_cfg, split, self.src_dict, pre_tokenizer, bpe_tokenizer, is_train_split=is_train_split, n_frames_per_step=self.args.n_frames_per_step, speaker_to_id=self.speaker_to_id, do_chunk=self.args.do_chunk, chunk_bound=self.args.chunk_bound, chunk_init=self.args.chunk_init, chunk_incr=self.args.chunk_incr, add_eos=self.args.add_eos, dedup=self.args.dedup, ref_fpu=self.args.ref_fpu, )
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/frm_text_to_speech.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import contextlib from dataclasses import dataclass, field from typing import Optional from omegaconf import MISSING, II, open_dict, OmegaConf import numpy as np from fairseq.data import ( ConcatSentencesDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, OffsetTokensDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, RollDataset, SortDataset, StripTokenDataset, data_utils, ) from fairseq.data.shorten_dataset import maybe_shorten_dataset from fairseq.tasks import FairseqDataclass, FairseqTask, register_task from fairseq.dataclass import ChoiceEnum logger = logging.getLogger(__name__) SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"]) @dataclass class SentencePredictionConfig(FairseqDataclass): data: str = field(default=MISSING, metadata={"help": "path to data directory"}) num_classes: int = field( default=-1, metadata={"help": "number of classes or regression targets"}, ) init_token: Optional[int] = field( default=None, metadata={"help": "add token at the beginning of each batch item"}, ) separator_token: Optional[int] = field( default=None, metadata={"help": "add separator token between inputs"}, ) no_shuffle: bool = field( default=False, ) shorten_method: SHORTEN_METHOD_CHOICES = field( default="none", metadata={ "help": "if not none, shorten sequences that exceed tokens_per_sample" }, ) shorten_data_split_list: str = field( default="", metadata={ "help": "comma-separated list of dataset splits to apply shortening to, " 'e.g., "train,valid" (default: all dataset splits)' }, ) add_prev_output_tokens: bool = field( default=False, metadata={ "help": "add prev_output_tokens to sample, used for encoder-decoder arch" }, ) max_positions: int = field( default=512, metadata={"help": "max tokens per example"}, ) regression_target: bool = II("criterion.regression_target") classification_head_name: str = II("criterion.classification_head_name") seed: int = II("common.seed") @register_task("sentence_prediction", dataclass=SentencePredictionConfig) class SentencePredictionTask(FairseqTask): """ Sentence (or sentence pair) prediction (classification or regression) task. 
Args: dictionary (Dictionary): the dictionary for the input of the task """ def __init__(self, cfg, data_dictionary, label_dictionary): super().__init__(cfg) self.dictionary = data_dictionary self._label_dictionary = label_dictionary @classmethod def load_dictionary(cls, filename): """Load the dictionary from the filename Args: filename (str): the filename """ dictionary = Dictionary.load(filename) dictionary.add_symbol("<mask>") return dictionary @classmethod def setup_task(cls, cfg, **kwargs): assert cfg.num_classes > 0, "Must set task.num_classes" # load data dictionary data_dict = cls.load_dictionary( os.path.join(cfg.data, "input0", "dict.txt"), ) logger.info("[input] dictionary: {} types".format(len(data_dict))) # load label dictionary if not cfg.regression_target: label_dict = cls.load_dictionary( os.path.join(cfg.data, "label", "dict.txt"), ) logger.info("[label] dictionary: {} types".format(len(label_dict))) else: label_dict = data_dict return cls(cfg, data_dict, label_dict) def load_dataset(self, split, combine=False, **kwargs): """Load a given dataset split (e.g., train, valid, test).""" def get_path(key, split): return os.path.join(self.cfg.data, key, split) def make_dataset(key, dictionary): split_path = get_path(key, split) try: dataset = data_utils.load_indexed_dataset( split_path, dictionary, combine=combine, ) except Exception as e: if "StorageException: [404] Path not found" in str(e): logger.warning(f"dataset {e} not found") dataset = None else: raise e return dataset input0 = make_dataset("input0", self.source_dictionary) assert input0 is not None, "could not find dataset: {}".format( get_path("input0", split) ) input1 = make_dataset("input1", self.source_dictionary) if self.cfg.init_token is not None: input0 = PrependTokenDataset(input0, self.cfg.init_token) if input1 is None: src_tokens = input0 else: if self.cfg.separator_token is not None: input1 = PrependTokenDataset(input1, self.cfg.separator_token) src_tokens = ConcatSentencesDataset(input0, input1) with data_utils.numpy_seed(self.cfg.seed): shuffle = np.random.permutation(len(src_tokens)) src_tokens = maybe_shorten_dataset( src_tokens, split, self.cfg.shorten_data_split_list, self.cfg.shorten_method, self.max_positions(), self.cfg.seed, ) dataset = { "id": IdDataset(), "net_input": { "src_tokens": RightPadDataset( src_tokens, pad_idx=self.source_dictionary.pad(), ), "src_lengths": NumelDataset(src_tokens, reduce=False), }, "nsentences": NumSamplesDataset(), "ntokens": NumelDataset(src_tokens, reduce=True), } if self.cfg.add_prev_output_tokens: prev_tokens_dataset = RightPadDataset( RollDataset(src_tokens, 1), pad_idx=self.dictionary.pad(), ) dataset["net_input"].update( prev_output_tokens=prev_tokens_dataset, ) if not self.cfg.regression_target: label_dataset = make_dataset("label", self.label_dictionary) if label_dataset is not None: dataset.update( target=OffsetTokensDataset( StripTokenDataset( label_dataset, id_to_strip=self.label_dictionary.eos(), ), offset=-self.label_dictionary.nspecial, ) ) else: label_path = "{0}.label".format(get_path("label", split)) if os.path.exists(label_path): def parse_regression_target(i, line): values = line.split() assert ( len(values) == self.cfg.num_classes ), f'expected num_classes={self.cfg.num_classes} regression target values on line {i}, found: "{line}"' return [float(x) for x in values] with open(label_path) as h: dataset.update( target=RawLabelDataset( [ parse_regression_target(i, line.strip()) for i, line in enumerate(h.readlines()) ] ) ) nested_dataset = 
NestedDictionaryDataset( dataset, sizes=[src_tokens.sizes], ) if self.cfg.no_shuffle: dataset = nested_dataset else: dataset = SortDataset( nested_dataset, # shuffle sort_order=[shuffle], ) logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset))) self.datasets[split] = dataset return self.datasets[split] def build_model(self, cfg, from_checkpoint=False): from fairseq import models with open_dict(cfg) if OmegaConf.is_config(cfg) else contextlib.ExitStack(): cfg.max_positions = self.cfg.max_positions model = models.build_model(cfg, self, from_checkpoint) model.register_classification_head( self.cfg.classification_head_name, num_classes=self.cfg.num_classes, ) return model def max_positions(self): return self.cfg.max_positions @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary @property def label_dictionary(self): return self._label_dictionary
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/sentence_prediction.py
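For classification, the labels above are binarized with a Dictionary, so the "label" dataset holds dictionary ids rather than 0-based class indices; OffsetTokensDataset(offset=-nspecial) converts them back after the eos is stripped. A toy illustration of that offset, assuming the default four special symbols and that the labels appear in dict.txt in the order "0", "1", "2" (the ids below are made up):

nspecial = 4                        # <s>, <pad>, </s>, <unk> occupy ids 0..3
binarized_labels = [4, 5, 6, 5, 4]  # dictionary ids produced for labels "0", "1", "2", ...
class_ids = [i - nspecial for i in binarized_labels]
print(class_ids)                    # [0, 1, 2, 1, 0] -- what the criterion actually sees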
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools import logging import os from collections import OrderedDict import numpy as np from fairseq import tokenizer, utils from fairseq.data import ConcatDataset, Dictionary, TokenBlockDataset, data_utils from fairseq.data.legacy.masked_lm_dataset import MaskedLMDataset from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary from fairseq.data.multi_corpus_sampled_dataset import MultiCorpusSampledDataset from fairseq.tasks import LegacyFairseqTask, register_task logger = logging.getLogger(__name__) @register_task("cross_lingual_lm") class CrossLingualLMTask(LegacyFairseqTask): """ Task for training cross-lingual language models. For more details look at: https://arxiv.org/pdf/1901.07291.pdf Args: dictionary (Dictionary): the dictionary for the input of the task """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument( "data", help="colon separated path to data directories list, \ will be iterated upon during epochs in round-robin manner", ) parser.add_argument( "--tokens-per-sample", default=512, type=int, help="max number of total tokens over all segments" " per sample", ) parser.add_argument( "--monolingual-langs", default="en", type=str, help="comma separated list of languages for which we" " want to train XLM on", ) parser.add_argument( "--shuffle", action="store_true", help="shuffle each monolingual dataset while" " training", ) def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed self.distributed_world_size = args.distributed_world_size self.langs2id = self._lang_to_id(args.monolingual_langs) def _lang_to_id(self, languages: str): """ Build a map from languages to ids. These ids are used as segment labels for cross-lingual LM training. 
""" lang2id = {} langs = [l.strip() for l in languages.split(",")] for id, lang in enumerate(langs): lang2id[lang] = id return lang2id @classmethod def load_dictionary(cls, filename): return MaskedLMDictionary.load(filename) @classmethod def build_dictionary( cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8 ): d = MaskedLMDictionary() for filename in filenames: Dictionary.add_file_to_dictionary( filename, d, tokenizer.tokenize_line, workers ) d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor) return d @property def target_dictionary(self): return self.dictionary @classmethod def setup_task(cls, args, **kwargs): """Setup the task.""" dictionary = MaskedLMDictionary.load(os.path.join(args.data, "dict.txt")) logger.info("dictionary: {} types".format(len(dictionary))) return cls(args, dictionary) def _load_single_lang_dataset(self, split, epoch): loaded_datasets = [] paths = utils.split_paths(self.args.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] for k in itertools.count(): split_k = split + (str(k) if k > 0 else "") path = os.path.join(data_path, split_k) ds = data_utils.load_indexed_dataset( path, self.dictionary, self.args.dataset_impl ) if ds is None: if k > 0: break else: raise FileNotFoundError( "Dataset not found: {} ({})".format(split, data_path) ) # Since we append each block with the classification_token, # we need to effectively create blocks of length # tokens_per_sample-1 loaded_datasets.append( TokenBlockDataset( ds, ds.sizes, self.args.tokens_per_sample - 1, pad=self.dictionary.pad(), eos=self.dictionary.eos(), ) ) logger.info( "{} {} {} examples".format(data_path, split_k, len(loaded_datasets[-1])) ) if len(loaded_datasets) == 1: dataset = loaded_datasets[0] sizes = dataset.sizes else: dataset = ConcatDataset(loaded_datasets) sizes = np.concatenate([ds.sizes for ds in loaded_datasets]) return dataset, sizes def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ dataset_map = OrderedDict() for lang in self.langs2id.keys(): # Datasets are expected to be in "split.lang" format (Eg: train.en) language_split = "{}.{}".format(split, lang) block_dataset, sizes = self._load_single_lang_dataset( split=language_split, epoch=epoch ) dataset_map[lang] = MaskedLMDataset( dataset=block_dataset, sizes=sizes, vocab=self.dictionary, pad_idx=self.dictionary.pad(), mask_idx=self.dictionary.mask(), classif_token_idx=self.dictionary.eos(), sep_token_idx=self.dictionary.eos(), shuffle=getattr(self.args, "shuffle", False), has_pairs=False, segment_id=self.langs2id[lang], seed=self.seed, ) self.datasets[split] = MultiCorpusSampledDataset(dataset_map) logger.info( "{} {} {} examples".format( utils.split_paths(self.args.data)[epoch - 1], split, len(self.datasets[split]), ) )
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/cross_lingual_lm.py
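The language bookkeeping above is small but easy to misread: --monolingual-langs is split on commas, each language gets an integer segment id, and every split is expected on disk under a "<split>.<lang>" prefix (e.g. train.en). A short sketch of both conventions (the language list and paths are illustrative only):

languages = "en,fr,de"                                             # --monolingual-langs
langs2id = {lang.strip(): i for i, lang in enumerate(languages.split(","))}
print(langs2id)                                                    # {'en': 0, 'fr': 1, 'de': 2}

split = "train"
expected_prefixes = [f"{split}.{lang}" for lang in langs2id]
print(expected_prefixes)                                           # ['train.en', 'train.fr', 'train.de']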
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary from fairseq.tasks.translation import TranslationConfig, TranslationTask from . import register_task @dataclass class TranslationFromPretrainedXLMConfig(TranslationConfig): pass @register_task( "translation_from_pretrained_xlm", dataclass=TranslationFromPretrainedXLMConfig ) class TranslationFromPretrainedXLMTask(TranslationTask): """ Same as TranslationTask except use the MaskedLMDictionary class so that we can load data that was binarized with the MaskedLMDictionary class. This task should be used for the entire training pipeline when we want to train an NMT model from a pretrained XLM checkpoint: binarizing NMT data, training NMT with the pretrained XLM checkpoint, and subsequent evaluation of that trained model. """ @classmethod def load_dictionary(cls, filename): """Load the masked LM dictionary from the filename Args: filename (str): the filename """ return MaskedLMDictionary.load(filename)
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/translation_from_pretrained_xlm.py
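The only behavioural change in this task is the dictionary class: by overriding load_dictionary, binarization and training both go through MaskedLMDictionary, so the vocabulary (including its mask symbol) stays aligned with the pretrained XLM checkpoint. A minimal sketch of loading such a dictionary directly; the path is a placeholder, not a real file:

from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary

d = MaskedLMDictionary.load("/path/to/xlm/dict.txt")  # hypothetical path
print(type(d).__name__, len(d))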
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from omegaconf import II from fairseq import utils from fairseq.data import ( AppendTokenDataset, ConcatDataset, Dictionary, IdDataset, LMContextWindowDataset, MonolingualDataset, NestedDictionaryDataset, NumelDataset, PadDataset, PrependTokenDataset, ResamplingDataset, SortDataset, StripTokenDataset, TokenBlockDataset, TruncatedDictionary, data_utils, ) from fairseq.data.indexed_dataset import get_available_dataset_impl from fairseq.data.shorten_dataset import maybe_shorten_dataset from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.tasks import LegacyFairseqTask, register_task SAMPLE_BREAK_MODE_CHOICES = ChoiceEnum(["none", "complete", "complete_doc", "eos"]) SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"]) logger = logging.getLogger(__name__) def lang_token(lang): return f"<{lang}>" @dataclass class MultilingualLanguageModelingConfig(FairseqDataclass): # TODO common var add to parent data: Optional[str] = field( default=None, metadata={"help": "path to data directory"} ) sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field( default="none", metadata={ "help": 'If omitted or "none", fills each sample with tokens-per-sample ' 'tokens. If set to "complete", splits samples only at the end ' "of sentence, but may include multiple sentences per sample. " '"complete_doc" is similar but respects doc boundaries. ' 'If set to "eos", includes only one sentence per sample.' }, ) tokens_per_sample: int = field( default=1024, metadata={"help": "max number of tokens per sample for LM dataset"}, ) output_dictionary_size: int = field( default=-1, metadata={"help": "limit the size of output dictionary"} ) self_target: bool = field(default=False, metadata={"help": "include self target"}) future_target: bool = field( default=False, metadata={"help": "include future target"} ) past_target: bool = field(default=False, metadata={"help": "include past target"}) add_bos_token: bool = field( default=False, metadata={"help": "prepend lang id token <dialect>"} ) max_source_positions: Optional[int] = field( default=None, metadata={"help": "max number of tokens in the source sequence"} ) max_target_positions: Optional[int] = field( default=None, metadata={"help": "max number of tokens in the target sequence"} ) pad_to_fixed_length: Optional[bool] = field( default=False, metadata={"help": "pad to fixed length"} ) pad_to_fixed_bsz: Optional[bool] = field( default=False, metadata={"help": "boolean to pad to fixed batch size"} ) multilang_sampling_alpha: Optional[float] = field( default=1.0, metadata={ "help": "smoothing alpha for sample rations across multiple datasets" }, ) shorten_method: SHORTEN_METHOD_CHOICES = field( default="none", metadata={ "help": "if not none, shorten sequences that exceed --tokens-per-sample" }, ) shorten_data_split_list: str = field( default="", metadata={ "help": "comma-separated list of dataset splits to apply shortening to, " 'e.g., "train,valid" (default: all dataset splits)' }, ) langs: str = field( default="", metadata={ "help": "comma-separated list of languages (default: all directories in data path)" }, ) baseline_model_langs: str = field( default="", metadata={ "help": "comma-separated list of languages in the baseline model (default: none)" 
}, ) # TODO: legacy parameter kept for compatibility baseline_model: str = field( default="", metadata={"help": "path to the baseline model (default: none)"}, ) lang_to_offline_shard_ratio: str = field( default="", metadata={ "help": "absolute path of tsv file location to indicate lang to offline shard ratio.", }, ) # TODO common vars below add to parent seed: int = II("common.seed") dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II( "dataset.dataset_impl" ) data_buffer_size: int = II("dataset.data_buffer_size") tpu: bool = II("common.tpu") batch_size: Optional[int] = II("dataset.batch_size") batch_size_valid: Optional[int] = II("dataset.batch_size_valid") train_subset: str = II("common.train_subset") valid_subset: str = II("common.valid_subset") @register_task( "multilingual_language_modeling", dataclass=MultilingualLanguageModelingConfig ) class MultilingualLanguageModelingTask(LegacyFairseqTask): """ Train a language model. Args: dictionary (~fairseq.data.Dictionary): the dictionary for the input of the language model output_dictionary (~fairseq.data.Dictionary): the dictionary for the output of the language model. In most cases it will be the same as *dictionary*, but could possibly be a more limited version of the dictionary (if ``--output-dictionary-size`` is used). targets (List[str]): list of the target types that the language model should predict. Can be one of "self", "future", and "past". Defaults to "future". .. note:: The language modeling task is compatible with :mod:`fairseq-train`, :mod:`fairseq-generate`, :mod:`fairseq-interactive` and :mod:`fairseq-eval-lm`. The language modeling task provides the following additional command-line arguments: .. argparse:: :ref: fairseq.tasks.language_modeling_parser :prog: """ def __init__(self, args, dictionary, output_dictionary=None, targets=None): super().__init__(args) self.dictionary = dictionary self.output_dictionary = output_dictionary or dictionary if targets is None: targets = ["future"] self.targets = targets @staticmethod def _get_langs(args, epoch=1): paths = utils.split_paths(args.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] languages = sorted( name for name in os.listdir(data_path) if os.path.isdir(os.path.join(data_path, name)) ) if args.langs: keep_langs = set(args.langs.split(",")) languages = [lang for lang in languages if lang in keep_langs] assert len(languages) == len(keep_langs) return languages, data_path @classmethod def setup_dictionary(cls, args, **kwargs): dictionary = None output_dictionary = None if args.data: paths = utils.split_paths(args.data) assert len(paths) > 0 dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt")) if args.add_bos_token: languages, _ = cls._get_langs(args) logger.info("----------------") for lang in languages: dictionary.add_symbol(lang_token(lang)) logger.info(f"add language token: {lang_token(lang)}") logger.info("----------------") logger.info("dictionary: {} types".format(len(dictionary))) output_dictionary = dictionary if args.output_dictionary_size >= 0: output_dictionary = TruncatedDictionary( dictionary, args.output_dictionary_size ) return (dictionary, output_dictionary) @classmethod def setup_task(cls, args, **kwargs): """Setup the task (e.g., load dictionaries). 
Args: args (argparse.Namespace): parsed command-line arguments """ dictionary, output_dictionary = cls.setup_dictionary(args, **kwargs) # upgrade old checkpoints if hasattr(args, "exclude_self_target"): args.self_target = not args.exclude_self_target targets = [] if getattr(args, "self_target", False): targets.append("self") if getattr(args, "future_target", False): targets.append("future") if getattr(args, "past_target", False): targets.append("past") if len(targets) == 0: # standard language modeling targets = ["future"] return cls(args, dictionary, output_dictionary, targets=targets) def build_model(self, args, from_checkpoint=False): model = super().build_model(args, from_checkpoint) for target in self.targets: if target not in model.supported_targets: raise ValueError( f"Unsupported language modeling target: {target} not in {model.supported_targets}" ) return model def _get_sample_prob(self, dataset_lens): """ Get smoothed sampling porbability by languages. This helps low resource languages by upsampling them. """ prob = dataset_lens / dataset_lens.sum() smoothed_prob = prob ** self.args.multilang_sampling_alpha smoothed_prob = smoothed_prob / smoothed_prob.sum() return smoothed_prob def load_dataset(self, split: str, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ languages, data_path = MultilingualLanguageModelingTask._get_langs( self.args, epoch ) lang_to_offline_shard_ratio = None if self.args.lang_to_offline_shard_ratio != "": lang_to_offline_shard_ratio = {} assert os.path.exists( self.args.lang_to_offline_shard_ratio ), "provided offline shard ratio file doesn't exist: {0}".format( self.args.lang_to_offline_shard_ratio ) with open(self.args.lang_to_offline_shard_ratio) as fin: for line in fin: lang, ratio = line.strip().split("\t") ratio = float(ratio) lang_to_offline_shard_ratio[lang] = ratio logger.info( "Found offline sharded ratio: %s", lang_to_offline_shard_ratio, ) if split == self.args.train_subset: logger.info( "Training on {0} languages: {1}".format(len(languages), languages) ) else: logger.info( "Evaluating on {0} languages: {1}".format(len(languages), languages) ) tokens_per_sample = self.args.tokens_per_sample - int(self.args.add_bos_token) fixed_pad_length = None if self.args.pad_to_fixed_length: fixed_pad_length = self.args.tokens_per_sample pad_to_bsz = None if self.args.pad_to_fixed_bsz: pad_to_bsz = ( self.args.batch_size_valid if "valid" in split else self.args.batch_size ) lang_datasets = [] for lang_id, language in enumerate(languages): split_path = os.path.join(data_path, language, split) dataset = data_utils.load_indexed_dataset( split_path, self.dictionary, self.args.dataset_impl, combine=combine ) # print('len(dataset) =', len(dataset)) if dataset is None: raise FileNotFoundError( "Dataset not found: {} ({})".format(split, split_path) ) dataset = maybe_shorten_dataset( dataset, split, self.args.shorten_data_split_list, self.args.shorten_method, tokens_per_sample, self.args.seed, ) dataset = TokenBlockDataset( dataset, dataset.sizes, tokens_per_sample, pad=self.dictionary.pad(), eos=self.dictionary.eos(), break_mode=self.args.sample_break_mode, include_targets=True, ) add_eos_for_other_targets = ( self.args.sample_break_mode is not None and self.args.sample_break_mode != "none" ) src_lang_idx, tgt_lang_idx = None, None if self.args.add_bos_token: src_lang_idx = self.dictionary.index(lang_token(language)) tgt_lang_idx = self.output_dictionary.index(lang_token(language)) 
lang_datasets.append( MonolingualDataset( dataset=dataset, sizes=dataset.sizes, src_vocab=self.dictionary, tgt_vocab=self.output_dictionary, add_eos_for_other_targets=add_eos_for_other_targets, shuffle=True, targets=self.targets, fixed_pad_length=fixed_pad_length, pad_to_bsz=pad_to_bsz, add_bos_token=self.args.add_bos_token, src_lang_idx=src_lang_idx, tgt_lang_idx=tgt_lang_idx, ) ) dataset_lengths = np.array( [len(d) for d in lang_datasets], dtype=float, ) logger.info( "loaded total {} blocks for all languages".format( dataset_lengths.sum(), ) ) if split == self.args.train_subset: dataset_lengths_ratio_multiplier = np.ones(len(dataset_lengths)) if lang_to_offline_shard_ratio is not None: dataset_lengths_ratio_multiplier = [] for lang in languages: assert ( lang in lang_to_offline_shard_ratio ), "Lang: {0} missing in offline shard ratio file: {1}".format( lang, self.args.lang_to_offline_shard_ratio, ) dataset_lengths_ratio_multiplier.append( lang_to_offline_shard_ratio[lang] ) dataset_lengths_ratio_multiplier = np.array( dataset_lengths_ratio_multiplier ) true_dataset_lengths = ( dataset_lengths * dataset_lengths_ratio_multiplier ) else: true_dataset_lengths = dataset_lengths # For train subset, additionally up or down sample languages. sample_probs = self._get_sample_prob(true_dataset_lengths) logger.info( "Sample probability by language: %s", { lang: "{0:.4f}".format(sample_probs[id]) for id, lang in enumerate(languages) }, ) size_ratio = (sample_probs * true_dataset_lengths.sum()) / dataset_lengths # TODO: add an option for shrinking all size ratios to below 1 # if self.args.multilang_sampling_alpha != 1: # size_ratio /= size_ratio.max() # Fix numeric errors in size ratio computation # 0.999999999999999999 -> 1 # 1.000000000000000002 -> 1 for i in range(len(size_ratio)): size_ratio[i] = round(size_ratio[i], 8) logger.info( "Up/Down Sampling ratio by language: %s", { lang: "{0:.2f}".format(size_ratio[id]) for id, lang in enumerate(languages) }, ) logger.info( "Actual dataset size by language: %s", { lang: "{0:.2f}".format(len(lang_datasets[id])) for id, lang in enumerate(languages) }, ) resampled_lang_datasets = [ ResamplingDataset( lang_datasets[i], size_ratio=size_ratio[i], seed=self.args.seed, epoch=epoch, replace=size_ratio[i] > 1.0, ) for i, d in enumerate(lang_datasets) ] logger.info( "Resampled dataset size by language: %s", { lang: "{0:.2f}".format(len(resampled_lang_datasets[id])) for id, lang in enumerate(languages) }, ) dataset = ConcatDataset(resampled_lang_datasets) else: dataset = ConcatDataset(lang_datasets) lang_splits = [split] for lang_id, lang_dataset in enumerate(lang_datasets): split_name = split + "_" + languages[lang_id] lang_splits.append(split_name) self.datasets[split_name] = lang_dataset # [TODO]: This is hacky for now to print validation ppl for each # language individually. Maybe need task API changes to allow it # in more generic ways. if split in self.args.valid_subset: self.args.valid_subset = self.args.valid_subset.replace( split, ",".join(lang_splits) ) with data_utils.numpy_seed(self.args.seed + epoch): shuffle = np.random.permutation(len(dataset)) self.datasets[split] = SortDataset( dataset, sort_order=[ shuffle, dataset.sizes, ], ) def build_dataset_for_inference( self, src_tokens, src_lengths, language="en_XX", **kwargs ): """ Generate batches for inference. We prepend an eos token to src_tokens (or bos if `--add-bos-token` is set) and we append a <pad> to target. This is convenient both for generation with a prefix and LM scoring. 
""" dataset = StripTokenDataset( TokenBlockDataset( src_tokens, src_lengths, block_size=None, # ignored for "eos" break mode pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode="eos", ), # remove eos from (end of) target sequence self.source_dictionary.eos(), ) src_lang_idx = self.dictionary.index(lang_token(language)) src_dataset = PrependTokenDataset( dataset, token=( (src_lang_idx or self.source_dictionary.bos()) if getattr(self.args, "add_bos_token", False) else self.source_dictionary.eos() ), ) max_seq_len = max(src_lengths) + 1 tgt_dataset = AppendTokenDataset(dataset, token=self.source_dictionary.pad()) return NestedDictionaryDataset( { "id": IdDataset(), "net_input": { "src_tokens": PadDataset( src_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False, pad_length=max_seq_len, ), "src_lengths": NumelDataset(src_dataset, reduce=False), }, "target": PadDataset( tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False, pad_length=max_seq_len, ), }, sizes=[np.array(src_lengths)], ) @torch.no_grad() def inference_step( self, generator, models, sample, language="en_XX", prefix_tokens=None, constraints=None, ): # Generation will always be conditioned on bos_token if getattr(self.args, "add_bos_token", False): src_lang_idx = self.dictionary.index(lang_token(language)) bos_token = src_lang_idx or self.source_dictionary.bos() else: bos_token = self.source_dictionary.eos() if constraints is not None: raise NotImplementedError( "Constrained decoding with the language_modeling task is not supported" ) # SequenceGenerator doesn't use src_tokens directly, we need to # pass the `prefix_tokens` argument instead if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement(): prefix_tokens = sample["net_input"]["src_tokens"] if prefix_tokens[:, 0].eq(bos_token).all(): prefix_tokens = prefix_tokens[:, 1:] return generator.generate( models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token ) def eval_lm_dataloader( self, dataset, max_tokens: Optional[int] = 36000, batch_size: Optional[int] = None, max_positions: Optional[int] = None, num_shards: int = 1, shard_id: int = 0, num_workers: int = 1, data_buffer_size: int = 10, # ensures that every evaluated token has access to a context of at least # this size, if possible context_window: int = 0, ): if context_window > 0: dataset = LMContextWindowDataset( dataset=dataset, tokens_per_sample=self.args.tokens_per_sample, context_window=context_window, pad_idx=self.source_dictionary.pad(), ) return self.get_batch_iterator( dataset=dataset, max_tokens=max_tokens, max_sentences=batch_size, max_positions=max_positions, ignore_invalid_inputs=True, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, data_buffer_size=data_buffer_size, ) @property def source_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self.dictionary @property def target_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self.output_dictionary
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/multilingual_language_modeling.py
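The up/down-sampling above follows the usual temperature-based scheme: _get_sample_prob raises the empirical language distribution to the power multilang_sampling_alpha and renormalizes, and those probabilities are then turned into per-language size ratios for ResamplingDataset. A standalone numpy sketch with invented corpus sizes:

import numpy as np

dataset_lens = np.array([1_000_000.0, 100_000.0, 10_000.0])  # e.g. high-, mid-, low-resource
alpha = 0.3                                                   # multilang_sampling_alpha

prob = dataset_lens / dataset_lens.sum()
smoothed = prob ** alpha
smoothed = smoothed / smoothed.sum()

size_ratio = (smoothed * dataset_lens.sum()) / dataset_lens
print(np.round(smoothed, 3))    # sampling probability per language
print(np.round(size_ratio, 2))  # ratios > 1 mean the language is upsampled (with replacement)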
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import logging import os import torch import json from argparse import Namespace from dataclasses import dataclass, field from typing import Optional, Any from fairseq.data import AddTargetDataset, Dictionary, encoders from fairseq.tasks.audio_pretraining import AudioPretrainingTask, AudioPretrainingConfig from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.configs import GenerationConfig from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel from . import register_task from .. import utils from ..logging import metrics logger = logging.getLogger(__name__) class LabelEncoder(object): def __init__(self, dictionary): self.dictionary = dictionary def __call__(self, label): return self.dictionary.encode_line( label, append_eos=False, add_if_not_exist=False ) def label_len_fn(label): return len(label.split(" ")) @dataclass class AudioFinetuningConfig(AudioPretrainingConfig): # Options for reporting WER metrics during validation. Only applicable to # Seq2Seq models during fine-tuning eval_wer: bool = field( default=False, metadata={"help": "compute WER for Seq2Seq models"} ) eval_wer_config: GenerationConfig = field( default_factory=lambda: GenerationConfig(), metadata={"help": "beam search config for evaluating wer during training"}, ) eval_wer_tokenizer: Any = field( default=None, metadata={"help": "tokenizer config for evaluating wer during training"}, ) eval_wer_post_process: str = field( default="letter", metadata={ "help": "remove BPE tokens before scoring (can be sentencepiece, letter, and more)" }, ) eval_bleu: bool = field( default=False, metadata={"help": "evaluation with BLEU scores"} ) eval_bleu_detok: Optional[str] = field( default=None, metadata={ "help": "detokenize before computing BLEU (e.g., 'moses'); " "required if using --eval-bleu; use 'space' to disable " "detokenization; see fairseq.data.encoders for other options" }, ) eval_bleu_detok_args: str = field( default="{}", metadata={"help": "args for building the tokenizer, if needed"} ) eval_tokenized_bleu: bool = field( default=False, metadata={"help": "compute tokenized BLEU instead of sacrebleu"} ) eval_bleu_remove_bpe: Optional[str] = field( default=None, metadata={"help": "remove BPE before computing BLEU"} ) eval_bleu_args: str = field( default="{}", metadata={ "help": "generation args for BLUE scoring, e.g., " '\'{"beam": 4, "lenpen": 0.6}\'' }, ) eval_bleu_print_samples: bool = field( default=False, metadata={"help": "print sample generations during validation"} ) autoregressive: bool = field( default=False, metadata={ "help": "required for autoregressive decoders (like seq2seq models); " "adds 'prev_output_tokens' to input and appends eos to target" }, ) @register_task("audio_finetuning", dataclass=AudioFinetuningConfig) class AudioFinetuningTask(AudioPretrainingTask): """ """ cfg: AudioFinetuningConfig def __init__( self, cfg: AudioFinetuningConfig, ): super().__init__(cfg) self.blank_symbol = "<s>" self.state.add_factory("target_dictionary", self.load_target_dictionary) def load_target_dictionary(self): if self.cfg.labels: dict_path = os.path.join(self.cfg.data, f"dict.{self.cfg.labels}.txt") return Dictionary.load(dict_path) return None def load_dataset( self, split: str, task_cfg: 
AudioFinetuningConfig = None, **kwargs ): super().load_dataset(split, task_cfg, **kwargs) task_cfg = task_cfg or self.cfg assert task_cfg.labels is not None text_compression_level = getattr( TextCompressionLevel, str(self.cfg.text_compression_level) ) data_path = self.cfg.data label_path = os.path.join(data_path, f"{split}.{task_cfg.labels}") skipped_indices = getattr(self.datasets[split], "skipped_indices", set()) text_compressor = TextCompressor(level=text_compression_level) with open(label_path, "r") as f: labels = [ text_compressor.compress(l) for i, l in enumerate(f) if i not in skipped_indices ] assert len(labels) == len(self.datasets[split]), ( f"labels length ({len(labels)}) and dataset length " f"({len(self.datasets[split])}) do not match" ) process_label = LabelEncoder(self.target_dictionary) self.datasets[split] = AddTargetDataset( self.datasets[split], labels, pad=self.target_dictionary.pad(), eos=self.target_dictionary.eos(), batch_targets=True, process_label=process_label, label_len_fn=label_len_fn, add_to_input=task_cfg.get("autoregressive", False), text_compression_level=text_compression_level, ) @property def target_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self.state.target_dictionary def valid_step(self, sample, model, criterion): loss, sample_size, logging_output = super().valid_step(sample, model, criterion) if self.cfg.eval_wer and self.cfg.autoregressive: metrics = self._inference_with_wer(self.sequence_generator, sample, model) logging_output["_num_char_errors"] = metrics["num_char_errors"] logging_output["_num_chars"] = metrics["num_chars"] logging_output["_num_word_errors"] = metrics["num_word_errors"] logging_output["_num_words"] = metrics["num_words"] if self.cfg.eval_bleu and self.cfg.autoregressive: metrics = self._inference_with_bleu(self.sequence_generator, sample, model) logging_output["_bleu_sys_len"] = metrics.sys_len logging_output["_bleu_ref_len"] = metrics.ref_len # we split counts into separate entries so that they can be # summed efficiently across workers using fast-stat-sync assert len(metrics.counts) == 4 for i in range(4): logging_output[f"_bleu_counts_{i}"] = metrics.counts[i] logging_output[f"_bleu_totals_{i}"] = metrics.totals[i] return loss, sample_size, logging_output def build_model(self, model_cfg: FairseqDataclass, from_checkpoint=False): model = super().build_model(model_cfg, from_checkpoint) if self.cfg.eval_wer and self.cfg.autoregressive: self.sequence_generator = self.build_generator( [model], self.cfg.eval_wer_config, ) if self.cfg.eval_wer_tokenizer: self.tokenizer = encoders.build_tokenizer(self.cfg.eval_wer_tokenizer) else: self.tokenizer = None if self.cfg.eval_bleu and self.cfg.autoregressive: assert self.cfg.eval_bleu_detok is not None, ( "--eval-bleu-detok is required if using --eval-bleu; " "try --eval-bleu-detok=moses (or --eval-bleu-detok=space " "to disable detokenization, e.g., when using sentencepiece)" ) detok_args = json.loads(self.cfg.eval_bleu_detok_args) self.tokenizer = encoders.build_tokenizer( Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args) ) gen_args = json.loads(self.cfg.eval_bleu_args) gen_args = Namespace(**gen_args) self.sequence_generator = self.build_generator([model], gen_args) return model def _inference_with_wer(self, generator, sample, model): import editdistance def decode(toks): s = self.target_dictionary.string( toks.int().cpu(), self.cfg.eval_wer_post_process, escape_unk=True, ) if self.tokenizer: s = self.tokenizer.decode(s) 
return s num_word_errors, num_char_errors = 0, 0 num_chars, num_words = 0, 0 gen_out = self.inference_step(generator, [model], sample, None) for i in range(len(gen_out)): hyp = decode(gen_out[i][0]["tokens"]) ref = decode( utils.strip_pad(sample["target"][i], self.target_dictionary.pad()), ) num_char_errors += editdistance.eval(hyp, ref) num_chars += len(ref) hyp_words = hyp.split() ref_words = ref.split() num_word_errors += editdistance.eval(hyp_words, ref_words) num_words += len(ref_words) return { "num_char_errors": num_char_errors, "num_chars": num_chars, "num_word_errors": num_word_errors, "num_words": num_words, } def _inference_with_bleu(self, generator, sample, model): import sacrebleu def decode(toks, is_ref): s = self.target_dictionary.string( toks.int().cpu(), self.cfg.eval_bleu_remove_bpe, # The default unknown string in fairseq is `<unk>`, but # this is tokenized by sacrebleu as `< unk >`, inflating # BLEU scores. Instead, we use a somewhat more verbose # alternative that is unlikely to appear in the real # reference, but doesn't get split into multiple tokens. unk_string=("UNKNOWNTOKENINREF" if is_ref else "UNKNOWNTOKENINHYP"), ) if self.tokenizer: s = self.tokenizer.decode(s) return s gen_out = self.inference_step(generator, [model], sample) hyps, refs = [], [] for i in range(len(gen_out)): hyps.append(decode(gen_out[i][0]["tokens"], is_ref=False)) refs.append( decode( utils.strip_pad(sample["target"][i], self.target_dictionary.pad()), is_ref=True, # don't count <unk> as matches to the hypo ) ) if self.cfg.eval_bleu_print_samples: logger.info("H-{} {}".format(sample["id"][0], hyps[0])) logger.info("T-{} {}".format(sample["id"][0], refs[0])) eval_tokenization = "none" if self.cfg.eval_tokenized_bleu else "13a" return sacrebleu.corpus_bleu(hyps, [refs], tokenize=eval_tokenization) def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) if self.cfg.eval_wer: zero = torch.scalar_tensor(0.0) num_char_errors = sum( log.get("_num_char_errors", zero) for log in logging_outputs ) num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs) num_word_errors = sum( log.get("_num_word_errors", zero) for log in logging_outputs ) num_words = sum(log.get("_num_words", zero) for log in logging_outputs) metrics.log_scalar("_num_char_errors", num_char_errors) metrics.log_scalar("_num_chars", num_chars) metrics.log_scalar("_num_word_errors", num_word_errors) metrics.log_scalar("_num_words", num_words) if num_chars > 0: metrics.log_derived( "uer", lambda meters: meters["_num_char_errors"].sum * 100.0 / meters["_num_chars"].sum if meters["_num_chars"].sum > 0 else float("nan"), ) if num_words > 0: metrics.log_derived( "wer", lambda meters: meters["_num_word_errors"].sum * 100.0 / meters["_num_words"].sum if meters["_num_words"].sum > 0 else float("nan"), ) if self.cfg.eval_bleu: len_keys = ["_bleu_sys_len", "_bleu_ref_len"] count_keys = [f"_bleu_counts_{i}" for i in range(4)] total_keys = [f"_bleu_totals_{i}" for i in range(4)] for k in len_keys + count_keys + total_keys: metrics.log_scalar(k, sum(log.get(k, 0) for log in logging_outputs)) import sacrebleu metrics.log_derived( "bleu", lambda meters: sacrebleu.compute_bleu( correct=[meters[k].sum for k in count_keys], total=[meters[k].sum for k in total_keys], sys_len=meters["_bleu_sys_len"].sum, ref_len=meters["_bleu_ref_len"].sum, smooth_method="exp", ).score, )
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/audio_finetuning.py
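The WER/UER numbers logged above come from plain edit distance over characters and over whitespace-split words, aggregated across workers before the percentages are derived. A toy sketch of the same bookkeeping on made-up strings; editdistance is the same third-party package the task imports:

import editdistance

hyp = "a cat sat on the mat"
ref = "the cat sat on a mat"

num_char_errors = editdistance.eval(hyp, ref)
num_word_errors = editdistance.eval(hyp.split(), ref.split())

uer = 100.0 * num_char_errors / len(ref)
wer = 100.0 * num_word_errors / len(ref.split())
print(f"UER {uer:.1f}%  WER {wer:.1f}%")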
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field import logging import os from omegaconf import MISSING, II import numpy as np from fairseq import utils from fairseq.data import ( Dictionary, IdDataset, MaskTokensDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, PrependTokenDataset, RightPadDataset, SortDataset, TokenBlockDataset, data_utils, ) from fairseq.data.encoders.utils import get_whole_word_mask from fairseq.data.shorten_dataset import maybe_shorten_dataset from fairseq.dataclass import FairseqDataclass from fairseq.tasks import FairseqTask, register_task from .language_modeling import SAMPLE_BREAK_MODE_CHOICES, SHORTEN_METHOD_CHOICES logger = logging.getLogger(__name__) @dataclass class MaskedLMConfig(FairseqDataclass): data: str = field( default=MISSING, metadata={ "help": "colon separated path to data directories list, \ will be iterated upon during epochs in round-robin manner" }, ) sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field( default="none", metadata={ "help": 'If omitted or "none", fills each sample with tokens-per-sample ' 'tokens. If set to "complete", splits samples only at the end ' "of sentence, but may include multiple sentences per sample. " '"complete_doc" is similar but respects doc boundaries. ' 'If set to "eos", includes only one sentence per sample.' }, ) tokens_per_sample: int = field( default=1024, metadata={"help": "max number of tokens per sample for LM dataset"}, ) mask_prob: float = field( default=0.15, metadata={"help": "probability of replacing a token with mask"}, ) leave_unmasked_prob: float = field( default=0.1, metadata={"help": "probability that a masked token is unmasked"}, ) random_token_prob: float = field( default=0.1, metadata={"help": "probability of replacing a token with a random token"}, ) freq_weighted_replacement: bool = field( default=False, metadata={"help": "sample random replacement words based on word frequencies"}, ) mask_whole_words: bool = field( default=False, metadata={"help": "mask whole words; you may also want to set --bpe"}, ) mask_multiple_length: int = field( default=1, metadata={"help": "repeat the mask indices multiple times"}, ) mask_stdev: float = field( default=0.0, metadata={"help": "stdev of the mask length"}, ) shorten_method: SHORTEN_METHOD_CHOICES = field( default="none", metadata={ "help": "if not none, shorten sequences that exceed --tokens-per-sample" }, ) shorten_data_split_list: str = field( default="", metadata={ "help": "comma-separated list of dataset splits to apply shortening to, " 'e.g., "train,valid" (default: all dataset splits)' }, ) seed: int = II("common.seed") @register_task("masked_lm", dataclass=MaskedLMConfig) class MaskedLMTask(FairseqTask): cfg: MaskedLMConfig """Task for training masked language models (e.g., BERT, RoBERTa).""" def __init__(self, cfg: MaskedLMConfig, dictionary): super().__init__(cfg) self.dictionary = dictionary # add mask token self.mask_idx = dictionary.add_symbol("<mask>") @classmethod def setup_task(cls, cfg: MaskedLMConfig, **kwargs): paths = utils.split_paths(cfg.data) assert len(paths) > 0 dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt")) logger.info("dictionary: {} types".format(len(dictionary))) return cls(cfg, dictionary) def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, test) """ paths = utils.split_paths(self.cfg.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] split_path = os.path.join(data_path, split) dataset = data_utils.load_indexed_dataset( split_path, self.source_dictionary, combine=combine, ) if dataset is None: raise FileNotFoundError( "Dataset not found: {} ({})".format(split, split_path) ) dataset = maybe_shorten_dataset( dataset, split, self.cfg.shorten_data_split_list, self.cfg.shorten_method, self.cfg.tokens_per_sample, self.cfg.seed, ) # create continuous blocks of tokens dataset = TokenBlockDataset( dataset, dataset.sizes, self.cfg.tokens_per_sample - 1, # one less for <s> pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode=self.cfg.sample_break_mode, ) logger.info("loaded {} blocks from: {}".format(len(dataset), split_path)) # prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT) dataset = PrependTokenDataset(dataset, self.source_dictionary.bos()) # create masked input and targets mask_whole_words = ( get_whole_word_mask(self.args, self.source_dictionary) if self.cfg.mask_whole_words else None ) src_dataset, tgt_dataset = MaskTokensDataset.apply_mask( dataset, self.source_dictionary, pad_idx=self.source_dictionary.pad(), mask_idx=self.mask_idx, seed=self.cfg.seed, mask_prob=self.cfg.mask_prob, leave_unmasked_prob=self.cfg.leave_unmasked_prob, random_token_prob=self.cfg.random_token_prob, freq_weighted_replacement=self.cfg.freq_weighted_replacement, mask_whole_words=mask_whole_words, mask_multiple_length=self.cfg.mask_multiple_length, mask_stdev=self.cfg.mask_stdev, ) with data_utils.numpy_seed(self.cfg.seed): shuffle = np.random.permutation(len(src_dataset)) self.datasets[split] = SortDataset( NestedDictionaryDataset( { "id": IdDataset(), "net_input": { "src_tokens": RightPadDataset( src_dataset, pad_idx=self.source_dictionary.pad(), ), "src_lengths": NumelDataset(src_dataset, reduce=False), }, "target": RightPadDataset( tgt_dataset, pad_idx=self.source_dictionary.pad(), ), "nsentences": NumSamplesDataset(), "ntokens": NumelDataset(src_dataset, reduce=True), }, sizes=[src_dataset.sizes], ), sort_order=[ shuffle, src_dataset.sizes, ], ) def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True): src_dataset = RightPadDataset( TokenBlockDataset( src_tokens, src_lengths, self.cfg.tokens_per_sample - 1, # one less for <s> pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode="eos", ), pad_idx=self.source_dictionary.pad(), ) src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos()) src_dataset = NestedDictionaryDataset( { "id": IdDataset(), "net_input": { "src_tokens": src_dataset, "src_lengths": NumelDataset(src_dataset, reduce=False), }, }, sizes=src_lengths, ) if sort: src_dataset = SortDataset(src_dataset, sort_order=[src_lengths]) return src_dataset @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/masked_lm.py
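With the defaults above (mask_prob=0.15, leave_unmasked_prob=0.1, random_token_prob=0.1), MaskTokensDataset corrupts roughly 15% of positions, of which about 80% become <mask>, 10% become a random token, and 10% keep the original token. A simplified standalone sketch of that corruption rule; this is not the fairseq implementation, and it ignores whole-word masking, mask_multiple_length and padding:

import numpy as np

rng = np.random.default_rng(0)
tokens = np.arange(100, 120)                # pretend token ids for one block
mask_idx, vocab_size = 5, 1000              # illustrative values

selected = rng.random(len(tokens)) < 0.15   # positions that become prediction targets
u = rng.random(len(tokens))

corrupted = tokens.copy()
corrupted[selected & (u < 0.8)] = mask_idx                         # ~80% of selected -> <mask>
rand_pos = selected & (u >= 0.8) & (u < 0.9)                       # ~10% -> random token
corrupted[rand_pos] = rng.integers(0, vocab_size, rand_pos.sum())
# the remaining ~10% of selected positions keep their original token
print(tokens)
print(corrupted)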
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import numpy as np from fairseq.data import ( ConcatSentencesDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, SortDataset, TruncateDataset, data_utils, ) from fairseq.data.shorten_dataset import maybe_shorten_dataset from fairseq.tasks import LegacyFairseqTask, register_task logger = logging.getLogger(__name__) @register_task("sentence_ranking") class SentenceRankingTask(LegacyFairseqTask): """ Ranking task on multiple sentences. Args: dictionary (Dictionary): the dictionary for the input of the task """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument("data", metavar="FILE", help="file prefix for data") parser.add_argument( "--num-classes", type=int, help="number of sentences to be ranked" ) parser.add_argument( "--init-token", type=int, help="add token at the beginning of each batch item", ) parser.add_argument( "--separator-token", type=int, help="add separator token between inputs" ) parser.add_argument("--no-shuffle", action="store_true") parser.add_argument( "--shorten-method", default="none", choices=["none", "truncate", "random_crop"], help="if not none, shorten sequences that exceed --tokens-per-sample", ) parser.add_argument( "--shorten-data-split-list", default="", help="comma-separated list of dataset splits to apply shortening to, " 'e.g., "train,valid" (default: all dataset splits)', ) parser.add_argument( "--max-option-length", type=int, help="max length for each option" ) def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary @classmethod def load_dictionary(cls, args, filename, source=True): """Load the dictionary from the filename Args: filename (str): the filename """ dictionary = Dictionary.load(filename) dictionary.add_symbol("<mask>") return dictionary @classmethod def setup_task(cls, args, **kwargs): assert ( args.criterion == "sentence_ranking" ), "Must set --criterion=sentence_ranking" # load data dictionary data_dict = cls.load_dictionary( args, os.path.join(args.data, "input0", "dict.txt"), source=True, ) logger.info("[input] dictionary: {} types".format(len(data_dict))) return SentenceRankingTask(args, data_dict) def load_dataset(self, split, combine=False, **kwargs): """Load a given dataset split (e.g., train, valid, test).""" def get_path(type, split): return os.path.join(self.args.data, type, split) def make_dataset(type, dictionary): split_path = get_path(type, split) dataset = data_utils.load_indexed_dataset( split_path, self.source_dictionary, self.args.dataset_impl, combine=combine, ) return dataset input0 = make_dataset("input0", self.source_dictionary) input_options = [ make_dataset("input{idx}".format(idx=idx + 1), self.source_dictionary) for idx in range(self.args.num_classes) ] if self.args.separator_token is not None: input0 = PrependTokenDataset(input0, self.args.separator_token) src_tokens = [] for input_option in input_options: if self.args.init_token is not None: input_option = PrependTokenDataset(input_option, self.args.init_token) if self.args.max_option_length is not None: input_option = TruncateDataset( input_option, self.args.max_option_length ) src_token = ConcatSentencesDataset(input_option, input0) src_token = maybe_shorten_dataset( src_token, split, 
self.args.shorten_data_split_list, self.args.shorten_method, self.args.max_positions, self.args.seed, ) src_tokens.append(src_token) with data_utils.numpy_seed(self.args.seed): shuffle = np.random.permutation(len(src_tokens[0])) dataset = { "id": IdDataset(), "nsentences": NumSamplesDataset(), "ntokens": NumelDataset(src_tokens[0], reduce=True), } for src_token_idx in range(len(src_tokens)): dataset.update( { "net_input{idx}".format(idx=src_token_idx + 1): { "src_tokens": RightPadDataset( src_tokens[src_token_idx], pad_idx=self.source_dictionary.pad(), ), "src_lengths": NumelDataset( src_tokens[src_token_idx], reduce=False ), } } ) label_path = "{}.label".format(get_path("label", split)) if os.path.exists(label_path): with open(label_path) as h: dataset.update( target=RawLabelDataset([int(x.strip()) for x in h.readlines()]) ) nested_dataset = NestedDictionaryDataset( dataset, sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])], ) if self.args.no_shuffle: dataset = nested_dataset else: dataset = SortDataset( nested_dataset, # shuffle sort_order=[shuffle], ) logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset))) self.datasets[split] = dataset return self.datasets[split] def build_model(self, args, from_checkpoint=False): from fairseq import models model = models.build_model(args, self, from_checkpoint) model.register_classification_head( getattr(args, "ranking_head_name", "sentence_classification_head"), num_classes=1, ) return model def max_positions(self): return self.args.max_positions @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/sentence_ranking.py
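Each ranking example above is expanded into num_classes parallel inputs: every candidate option gets the init token prepended, is optionally truncated, and is concatenated with the shared context input0 (which already carries the separator token); the single-output ranking head then scores net_input1..net_inputN independently. A toy, token-level-simplified sketch of that layout, with strings standing in for the integer init/separator ids:

init_token, sep_token = "<s>", "</s>"
context = [sep_token, "Why", "did", "the", "chicken", "cross", "the", "road", "?"]  # input0
options = [["To", "get", "to", "the", "other", "side"], ["Because", "it", "could"]]

net_inputs = [[init_token] + opt + context for opt in options]
for i, inp in enumerate(net_inputs, start=1):
    print(f"net_input{i}:", " ".join(inp))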
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import datetime import logging import time import torch from fairseq.data import ( FairseqDataset, LanguagePairDataset, ListDataset, data_utils, iterators, ) from fairseq.data.multilingual.multilingual_data_manager import ( MultilingualDatasetManager, ) from fairseq.data.multilingual.sampling_method import SamplingMethod from fairseq.tasks import LegacyFairseqTask, register_task from fairseq.utils import FileContentsAction ### def get_time_gap(s, e): return ( datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s) ).__str__() ### logger = logging.getLogger(__name__) @register_task("translation_multi_simple_epoch") class TranslationMultiSimpleEpochTask(LegacyFairseqTask): """ Translate from one (source) language to another (target) language. Args: langs (List[str]): a list of languages that are being supported dicts (Dict[str, fairseq.data.Dictionary]): mapping from supported languages to their dictionaries training (bool): whether the task should be configured for training or not .. note:: The translation task is compatible with :mod:`fairseq-train`, :mod:`fairseq-generate` and :mod:`fairseq-interactive`. The translation task provides the following additional command-line arguments: .. argparse:: :ref: fairseq.tasks.translation_parser :prog: """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" # fmt: off parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='inference source language') parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='inference target language') parser.add_argument('--lang-pairs', default=None, metavar='PAIRS', help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr', action=FileContentsAction) parser.add_argument('--keep-inference-langtok', action='store_true', help='keep language tokens in inference output (e.g. for analysis or debugging)') SamplingMethod.add_arguments(parser) MultilingualDatasetManager.add_args(parser) # fmt: on def __init__(self, args, langs, dicts, training): super().__init__(args) self.langs = langs self.dicts = dicts self.training = training if training: self.lang_pairs = args.lang_pairs else: self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)] # eval_lang_pairs for multilingual translation is usually all of the # lang_pairs. However for other multitask settings or when we want to # optimize for certain languages we want to use a different subset. Thus # the eval_lang_pairs class variable is provided for classes that extend # this class. self.eval_lang_pairs = self.lang_pairs # model_lang_pairs will be used to build encoder-decoder model pairs in # models.build_model(). 
This allows a multitask sub-class to # build models other than the input lang_pairs self.model_lang_pairs = self.lang_pairs self.source_langs = [d.split("-")[0] for d in self.lang_pairs] self.target_langs = [d.split("-")[1] for d in self.lang_pairs] self.check_dicts(self.dicts, self.source_langs, self.target_langs) self.sampling_method = SamplingMethod.build_sampler(args, self) self.data_manager = MultilingualDatasetManager.setup_data_manager( args, self.lang_pairs, langs, dicts, self.sampling_method ) def check_dicts(self, dicts, source_langs, target_langs): if self.args.source_dict is not None or self.args.target_dict is not None: # no need to check whether the source side and target side are sharing dictionaries return src_dict = dicts[source_langs[0]] tgt_dict = dicts[target_langs[0]] for src_lang in source_langs: assert ( src_dict == dicts[src_lang] ), "Different dictionaries are specified for different source languages; " "TranslationMultiSimpleEpochTask only supports one shared dictionary across all source languages" for tgt_lang in target_langs: assert ( tgt_dict == dicts[tgt_lang] ), "Different dictionaries are specified for different target languages; " "TranslationMultiSimpleEpochTask only supports one shared dictionary across all target languages" @classmethod def setup_task(cls, args, **kwargs): langs, dicts, training = MultilingualDatasetManager.prepare( cls.load_dictionary, args, **kwargs ) return cls(args, langs, dicts, training) def has_sharded_data(self, split): return self.data_manager.has_sharded_data(split) def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ if split in self.datasets: dataset = self.datasets[split] if self.has_sharded_data(split): if self.args.virtual_epoch_size is not None: if dataset.load_next_shard: shard_epoch = dataset.shard_epoch else: # no need to load next shard so skip loading # also this avoids always loading from the beginning of the data return else: shard_epoch = epoch else: # estimate the shard epoch from virtual data size and virtual epoch size shard_epoch = self.data_manager.estimate_global_pass_epoch(epoch) logger.info(f"loading data for {split} epoch={epoch}/{shard_epoch}") logger.info(f"mem usage: {data_utils.get_mem_usage()}") if split in self.datasets: del self.datasets[split] logger.info("old dataset deleted manually") logger.info(f"mem usage: {data_utils.get_mem_usage()}") self.datasets[split] = self.data_manager.load_dataset( split, self.training, epoch=epoch, combine=combine, shard_epoch=shard_epoch, **kwargs, ) def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): if constraints is not None: raise NotImplementedError( "Constrained decoding with the multilingual_translation task is not supported" ) src_data = ListDataset(src_tokens, src_lengths) dataset = LanguagePairDataset(src_data, src_lengths, self.source_dictionary) src_langtok_spec, tgt_langtok_spec = self.args.langtoks["main"] if self.args.lang_tok_replacing_bos_eos: dataset = self.data_manager.alter_dataset_langtok( dataset, src_eos=self.source_dictionary.eos(), src_lang=self.args.source_lang, tgt_eos=self.target_dictionary.eos(), tgt_lang=self.args.target_lang, src_langtok_spec=src_langtok_spec, tgt_langtok_spec=tgt_langtok_spec, ) else: dataset.src = self.data_manager.src_dataset_tranform_func( self.args.source_lang, self.args.target_lang, dataset=dataset.src, spec=src_langtok_spec, ) return dataset def build_generator( self, models,
args, seq_gen_cls=None, extra_gen_cls_kwargs=None, ): if not getattr(args, "keep_inference_langtok", False): _, tgt_langtok_spec = self.args.langtoks["main"] if tgt_langtok_spec: tgt_lang_tok = self.data_manager.get_decoder_langtok( self.args.target_lang, tgt_langtok_spec ) extra_gen_cls_kwargs = extra_gen_cls_kwargs or {} extra_gen_cls_kwargs["symbols_to_strip_from_output"] = {tgt_lang_tok} return super().build_generator( models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs ) def build_model(self, args, from_checkpoint=False): return super().build_model(args, from_checkpoint) def valid_step(self, sample, model, criterion): loss, sample_size, logging_output = super().valid_step(sample, model, criterion) return loss, sample_size, logging_output def inference_step( self, generator, models, sample, prefix_tokens=None, constraints=None ): with torch.no_grad(): _, tgt_langtok_spec = self.args.langtoks["main"] if not self.args.lang_tok_replacing_bos_eos: if prefix_tokens is None and tgt_langtok_spec: tgt_lang_tok = self.data_manager.get_decoder_langtok( self.args.target_lang, tgt_langtok_spec ) src_tokens = sample["net_input"]["src_tokens"] bsz = src_tokens.size(0) prefix_tokens = ( torch.LongTensor([[tgt_lang_tok]]).expand(bsz, 1).to(src_tokens) ) return generator.generate( models, sample, prefix_tokens=prefix_tokens, constraints=constraints, ) else: return generator.generate( models, sample, prefix_tokens=prefix_tokens, bos_token=self.data_manager.get_decoder_langtok( self.args.target_lang, tgt_langtok_spec ) if tgt_langtok_spec else self.target_dictionary.eos(), ) def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) def max_positions(self): """Return the max sentence length allowed by the task.""" return (self.args.max_source_positions, self.args.max_target_positions) @property def source_dictionary(self): return self.data_manager.get_source_dictionary(self.source_langs[0]) @property def target_dictionary(self): return self.data_manager.get_target_dictionary(self.target_langs[0]) def create_batch_sampler_func( self, max_positions, ignore_invalid_inputs, max_tokens, max_sentences, required_batch_size_multiple=1, seed=1, ): def construct_batch_sampler(dataset, epoch): splits = [ s for s, _ in self.datasets.items() if self.datasets[s] == dataset ] split = splits[0] if len(splits) > 0 else None # NEW implementation if epoch is not None: # initialize the dataset with the correct starting epoch dataset.set_epoch(epoch) # get indices ordered by example size start_time = time.time() logger.info(f"start batch sampler: mem usage: {data_utils.get_mem_usage()}") with data_utils.numpy_seed(seed): indices = dataset.ordered_indices() logger.info( f"[{split}] @batch_sampler order indices time: {get_time_gap(start_time, time.time())}" ) logger.info(f"mem usage: {data_utils.get_mem_usage()}") # filter examples that are too large if max_positions is not None: my_time = time.time() indices = self.filter_indices_by_size( indices, dataset, max_positions, ignore_invalid_inputs ) logger.info( f"[{split}] @batch_sampler filter_by_size time: {get_time_gap(my_time, time.time())}" ) logger.info(f"mem usage: {data_utils.get_mem_usage()}") # create mini-batches with given size constraints my_time = time.time() batch_sampler = dataset.batch_by_size( indices, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple, ) logger.info( f"[{split}] @batch_sampler batch_by_size time: {get_time_gap(my_time, 
time.time())}" ) logger.info( f"[{split}] per epoch batch_sampler set-up time: {get_time_gap(start_time, time.time())}" ) logger.info(f"mem usage: {data_utils.get_mem_usage()}") return batch_sampler return construct_batch_sampler # we need to override get_batch_iterator because we want to reset the epoch iterator each time def get_batch_iterator( self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1, data_buffer_size=0, disable_iterator_cache=False, skip_remainder_batch=False, grouped_shuffling=False, update_epoch_batch_itr=False, ): """ Get an iterator that yields batches of data from the given dataset. Args: dataset (~fairseq.data.FairseqDataset): dataset to batch max_tokens (int, optional): max number of tokens in each batch (default: None). max_sentences (int, optional): max number of sentences in each batch (default: None). max_positions (optional): max sentence length supported by the model (default: None). ignore_invalid_inputs (bool, optional): don't raise Exception for sentences that are too long (default: False). required_batch_size_multiple (int, optional): require batch size to be a multiple of N (default: 1). seed (int, optional): seed for random number generator for reproducibility (default: 1). num_shards (int, optional): shard the data iterator into N shards (default: 1). shard_id (int, optional): which shard of the data iterator to return (default: 0). num_workers (int, optional): how many subprocesses to use for data loading. 0 means the data will be loaded in the main process (default: 0). epoch (int, optional): the epoch to start the iterator from (default: 0). data_buffer_size (int, optional): number of batches to preload (default: 0). disable_iterator_cache (bool, optional): don't cache the EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`) (default: False). grouped_shuffling (bool, optional): group batches with each groups containing num_shards batches and shuffle groups. Reduces difference between sequence lengths among workers for batches sorted by length. 
update_epoch_batch_itr (bool, optional): if true, do not reuse the cached batch iterator for the epoch Returns: ~fairseq.iterators.EpochBatchIterator: a batched iterator over the given dataset split """ # initialize the dataset with the correct starting epoch assert isinstance(dataset, FairseqDataset) if dataset in self.dataset_to_epoch_iter: return self.dataset_to_epoch_iter[dataset] if self.args.sampling_method == "RoundRobin": batch_iter = super().get_batch_iterator( dataset, max_tokens=max_tokens, max_sentences=max_sentences, max_positions=max_positions, ignore_invalid_inputs=ignore_invalid_inputs, required_batch_size_multiple=required_batch_size_multiple, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch, data_buffer_size=data_buffer_size, disable_iterator_cache=disable_iterator_cache, skip_remainder_batch=skip_remainder_batch, update_epoch_batch_itr=update_epoch_batch_itr, ) self.dataset_to_epoch_iter[dataset] = batch_iter return batch_iter construct_batch_sampler = self.create_batch_sampler_func( max_positions, ignore_invalid_inputs, max_tokens, max_sentences, required_batch_size_multiple=required_batch_size_multiple, seed=seed, ) epoch_iter = iterators.EpochBatchIterator( dataset=dataset, collate_fn=dataset.collater, batch_sampler=construct_batch_sampler, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch, ) return epoch_iter
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/translation_multi_simple_epoch.py
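Not part of the original file: a minimal sketch of the prefix-token trick used in inference_step() above. When the main langtok spec is active, generation is forced to start with the target-language token by passing a [batch, 1] prefix tensor to the generator. The token id below is an arbitrary placeholder; in the task it comes from data_manager.get_decoder_langtok(...).

# Illustrative sketch only; the token id 250004 is a made-up placeholder.
import torch

def make_lang_prefix(src_tokens, tgt_lang_tok_id):
    bsz = src_tokens.size(0)
    # one prefix token per sentence, on the same device/dtype as the source tokens
    return torch.LongTensor([[tgt_lang_tok_id]]).expand(bsz, 1).to(src_tokens)

if __name__ == "__main__":
    src = torch.randint(4, 100, (3, 7))   # toy batch of token ids
    print(make_lang_prefix(src, tgt_lang_tok_id=250004))  # shape [3, 1]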
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
import os

import numpy as np
import torch

from fairseq import utils
from fairseq.data import (
    ConcatDataset,
    Dictionary,
    IdDataset,
    MaskTokensDataset,
    NestedDictionaryDataset,
    NumelDataset,
    NumSamplesDataset,
    PadDataset,
    PrependTokenDataset,
    RawLabelDataset,
    ResamplingDataset,
    SortDataset,
    TokenBlockDataset,
    data_utils,
    encoders,
)
from fairseq.tasks import LegacyFairseqTask, register_task


logger = logging.getLogger(__name__)


@register_task("multilingual_masked_lm")
class MultiLingualMaskedLMTask(LegacyFairseqTask):
    """Task for training masked language models (e.g., BERT, RoBERTa)."""

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        parser.add_argument(
            "data",
            help="colon separated path to data directories list, \
                            will be iterated upon during epochs in round-robin manner",
        )
        parser.add_argument(
            "--sample-break-mode",
            default="complete",
            choices=["none", "complete", "complete_doc", "eos"],
            help='If omitted or "none", fills each sample with tokens-per-sample '
            'tokens. If set to "complete", splits samples only at the end '
            "of sentence, but may include multiple sentences per sample. "
            '"complete_doc" is similar but respects doc boundaries. '
            'If set to "eos", includes only one sentence per sample.',
        )
        parser.add_argument(
            "--tokens-per-sample",
            default=512,
            type=int,
            help="max number of total tokens over all segments "
            "per sample for BERT dataset",
        )
        parser.add_argument(
            "--mask-prob",
            default=0.15,
            type=float,
            help="probability of replacing a token with mask",
        )
        parser.add_argument(
            "--leave-unmasked-prob",
            default=0.1,
            type=float,
            help="probability that a masked token is unmasked",
        )
        parser.add_argument(
            "--random-token-prob",
            default=0.1,
            type=float,
            help="probability of replacing a token with a random token",
        )
        parser.add_argument(
            "--freq-weighted-replacement",
            action="store_true",
            help="sample random replacement words based on word frequencies",
        )
        parser.add_argument(
            "--mask-whole-words",
            default=False,
            action="store_true",
            help="mask whole words; you may also want to set --bpe",
        )
        parser.add_argument(
            "--multilang-sampling-alpha",
            type=float,
            default=1.0,
            help="smoothing alpha for sample ratios across multiple datasets",
        )

    def __init__(self, args, dictionary):
        super().__init__(args)
        self.dictionary = dictionary
        self.seed = args.seed

        # add mask token
        self.mask_idx = dictionary.add_symbol("<mask>")

    @classmethod
    def setup_task(cls, args, **kwargs):
        paths = utils.split_paths(args.data)
        assert len(paths) > 0
        dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
        logger.info("dictionary: {} types".format(len(dictionary)))
        return cls(args, dictionary)

    def _get_whole_word_mask(self):
        # create masked input and targets
        if self.args.mask_whole_words:
            bpe = encoders.build_bpe(self.args)
            if bpe is not None:

                def is_beginning_of_word(i):
                    if i < self.source_dictionary.nspecial:
                        # special elements are always considered beginnings
                        return True
                    tok = self.source_dictionary[i]
                    if tok.startswith("madeupword"):
                        return True
                    try:
                        return bpe.is_beginning_of_word(tok)
                    except ValueError:
                        return True

                mask_whole_words = torch.ByteTensor(
                    list(map(is_beginning_of_word, range(len(self.source_dictionary))))
                )
        else:
            mask_whole_words = None
        return mask_whole_words

    def _get_sample_prob(self, dataset_lens):
        """
        Get smoothed sampling probability by languages. This helps low resource
        languages by upsampling them.
        """
        prob = dataset_lens / dataset_lens.sum()
        smoothed_prob = prob ** self.args.multilang_sampling_alpha
        smoothed_prob = smoothed_prob / smoothed_prob.sum()
        return smoothed_prob

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        paths = utils.split_paths(self.args.data)
        assert len(paths) > 0
        data_path = paths[(epoch - 1) % len(paths)]

        languages = sorted(
            name
            for name in os.listdir(data_path)
            if os.path.isdir(os.path.join(data_path, name))
        )

        logger.info("Training on {0} languages: {1}".format(len(languages), languages))
        logger.info(
            "Language to id mapping: {}".format(
                {lang: id for id, lang in enumerate(languages)}
            )
        )

        mask_whole_words = self._get_whole_word_mask()
        lang_datasets = []
        for lang_id, language in enumerate(languages):
            split_path = os.path.join(data_path, language, split)

            dataset = data_utils.load_indexed_dataset(
                split_path,
                self.source_dictionary,
                self.args.dataset_impl,
                combine=combine,
            )
            if dataset is None:
                raise FileNotFoundError(
                    "Dataset not found: {} ({})".format(split, split_path)
                )

            # create continuous blocks of tokens
            dataset = TokenBlockDataset(
                dataset,
                dataset.sizes,
                self.args.tokens_per_sample - 1,  # one less for <s>
                pad=self.source_dictionary.pad(),
                eos=self.source_dictionary.eos(),
                break_mode=self.args.sample_break_mode,
            )
            logger.info("loaded {} blocks from: {}".format(len(dataset), split_path))

            # prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
            dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())

            src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
                dataset,
                self.source_dictionary,
                pad_idx=self.source_dictionary.pad(),
                mask_idx=self.mask_idx,
                seed=self.args.seed,
                mask_prob=self.args.mask_prob,
                leave_unmasked_prob=self.args.leave_unmasked_prob,
                random_token_prob=self.args.random_token_prob,
                freq_weighted_replacement=self.args.freq_weighted_replacement,
                mask_whole_words=mask_whole_words,
            )

            lang_dataset = NestedDictionaryDataset(
                {
                    "net_input": {
                        "src_tokens": PadDataset(
                            src_dataset,
                            pad_idx=self.source_dictionary.pad(),
                            left_pad=False,
                        ),
                        "src_lengths": NumelDataset(src_dataset, reduce=False),
                    },
                    "target": PadDataset(
                        tgt_dataset,
                        pad_idx=self.source_dictionary.pad(),
                        left_pad=False,
                    ),
                    "nsentences": NumSamplesDataset(),
                    "ntokens": NumelDataset(src_dataset, reduce=True),
                    "lang_id": RawLabelDataset([lang_id] * src_dataset.sizes.shape[0]),
                },
                sizes=[src_dataset.sizes],
            )
            lang_datasets.append(lang_dataset)

        dataset_lengths = np.array(
            [len(d) for d in lang_datasets],
            dtype=float,
        )
        logger.info(
            "loaded total {} blocks for all languages".format(
                dataset_lengths.sum(),
            )
        )
        if split == self.args.train_subset:
            # For train subset, additionally up or down sample languages.
            sample_probs = self._get_sample_prob(dataset_lengths)
            logger.info(
                "Sample probability by language: {}".format(
                    {
                        lang: "{0:.4f}".format(sample_probs[id])
                        for id, lang in enumerate(languages)
                    }
                )
            )
            size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
            logger.info(
                "Up/Down Sampling ratio by language: {}".format(
                    {
                        lang: "{0:.2f}".format(size_ratio[id])
                        for id, lang in enumerate(languages)
                    }
                )
            )

            resampled_lang_datasets = [
                ResamplingDataset(
                    lang_datasets[i],
                    size_ratio=size_ratio[i],
                    seed=self.args.seed,
                    epoch=epoch,
                    replace=size_ratio[i] >= 1.0,
                )
                for i, d in enumerate(lang_datasets)
            ]
            dataset = ConcatDataset(resampled_lang_datasets)
        else:
            dataset = ConcatDataset(lang_datasets)
            lang_splits = [split]
            for lang_id, lang_dataset in enumerate(lang_datasets):
                split_name = split + "_" + languages[lang_id]
                lang_splits.append(split_name)
                self.datasets[split_name] = lang_dataset

            # [TODO]: This is hacky for now to print validation ppl for each
            # language individually. Maybe need task API changes to allow it
            # in more generic ways.
            if split in self.args.valid_subset:
                self.args.valid_subset = self.args.valid_subset.replace(
                    split, ",".join(lang_splits)
                )

        with data_utils.numpy_seed(self.args.seed + epoch):
            shuffle = np.random.permutation(len(dataset))

        self.datasets[split] = SortDataset(
            dataset,
            sort_order=[
                shuffle,
                dataset.sizes,
            ],
        )

    def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
        src_dataset = PadDataset(
            TokenBlockDataset(
                src_tokens,
                src_lengths,
                self.args.tokens_per_sample - 1,  # one less for <s>
                pad=self.source_dictionary.pad(),
                eos=self.source_dictionary.eos(),
                break_mode="eos",
            ),
            pad_idx=self.source_dictionary.pad(),
            left_pad=False,
        )
        src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
        src_dataset = NestedDictionaryDataset(
            {
                "id": IdDataset(),
                "net_input": {
                    "src_tokens": src_dataset,
                    "src_lengths": NumelDataset(src_dataset, reduce=False),
                },
            },
            sizes=src_lengths,
        )
        if sort:
            src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
        return src_dataset

    @property
    def source_dictionary(self):
        return self.dictionary

    @property
    def target_dictionary(self):
        return self.dictionary
KosmosX-API-main
kosmosX/fairseq/fairseq/tasks/multilingual_masked_lm.py
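Not part of the original file: a worked example (toy numbers) of the multilingual sampling smoothing implemented in _get_sample_prob() and the size_ratio computation in load_dataset(). With alpha < 1, low-resource languages are upsampled relative to their raw share of the data; the dataset sizes and alpha below are made-up values.

# Worked example only; dataset lengths and alpha are arbitrary toy values.
import numpy as np

def smoothed_sample_probs(dataset_lens, alpha):
    prob = dataset_lens / dataset_lens.sum()
    smoothed = prob ** alpha
    return smoothed / smoothed.sum()

if __name__ == "__main__":
    lens = np.array([1_000_000.0, 10_000.0])        # high- vs low-resource language
    probs = smoothed_sample_probs(lens, alpha=0.3)
    size_ratio = probs * lens.sum() / lens           # how much each dataset is resampled
    print(probs)       # roughly [0.80, 0.20] instead of [0.99, 0.01]
    print(size_ratio)  # >1 means upsampling (ResamplingDataset with replace=True)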
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ A modified version of the legacy DistributedDataParallel module that uses c10d communication primitives. This version is simpler than the latest PyTorch version and is useful for debugging. Notably it does not overlap gradient communication with the backward pass, which makes it slower but more robust than the PyTorch version. This version also supports the *no_sync* context manager, which allows faster training with `--update-freq`. """ from collections import OrderedDict from contextlib import contextmanager import torch from torch import nn from fairseq.distributed import utils class LegacyDistributedDataParallel(nn.Module): """Implements distributed data parallelism at the module level. A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`. This version uses a c10d process group for communication and does not broadcast buffers. Args: module (~torch.nn.Module): module to be parallelized process_group: the c10d process group to be used for distributed data parallel all-reduction. buffer_size (int, optional): number of elements to buffer before performing all-reduce (default: 256M). """ def __init__(self, module, process_group, buffer_size=2 ** 28): super().__init__() self.module = module self.process_group = process_group self.world_size = utils.get_world_size(self.process_group) # Never use a bigger buffer than the number of model params self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters())) self.buffer = None # We can also forcibly accumulate grads locally and only do the # all-reduce at some later time self.accumulate_grads = False # make per-device lists of parameters paramlists = OrderedDict() for param in self.module.parameters(): device = param.device if paramlists.get(device) is None: paramlists[device] = [] paramlists[device] += [param] self.per_device_params = list(paramlists.values()) @contextmanager def no_sync(self): """A context manager to disable gradient synchronization.""" old_accumulate_grads = self.accumulate_grads self.accumulate_grads = True yield self.accumulate_grads = old_accumulate_grads def forward(self, *inputs, **kwargs): return self.module(*inputs, **kwargs) def all_reduce_grads(self): """ This function must be called explicitly after backward to reduce gradients. There is no automatic hook like c10d. 
""" def all_reduce_params(params): buffer = self.buffer nonzero_buffer = False if len(params) > 1: offset = 0 for p in params: sz = p.numel() if p.grad is not None: buffer[offset : offset + sz].copy_(p.grad.data.view(-1)) nonzero_buffer = True else: buffer[offset : offset + sz].zero_() offset += sz else: # we only have a single grad to all-reduce p = params[0] if p.grad is not None: buffer = p.grad.data nonzero_buffer = True elif p.numel() <= self.buffer.numel(): buffer = buffer[: p.numel()] buffer.zero_() else: buffer = torch.zeros_like(p) if nonzero_buffer: buffer.div_(self.world_size) utils.all_reduce(buffer, self.process_group) # copy all-reduced grads back into their original place offset = 0 for p in params: sz = p.numel() if p.grad is not None: p.grad.data.copy_(buffer[offset : offset + sz].view_as(p)) else: p.grad = buffer[offset : offset + sz].view_as(p).clone() offset += sz def reduction_fn(): # This function only needs to be called once if self.accumulate_grads: return if self.buffer is None: self.buffer = next(self.module.parameters()).new(self.buffer_size) for params in self.per_device_params: # All-reduce the gradients in buckets offset = 0 buffered_params = [] for param in params: if not param.requires_grad: continue if param.grad is None: param.grad = torch.zeros_like(param) if hasattr(param, "expert"): # Skip gradient sync for unshared parameters continue if param.grad.requires_grad: raise RuntimeError( "DistributedDataParallel only works " "with gradients that don't require " "grad" ) sz = param.numel() if sz > self.buffer.numel(): # all-reduce big params directly all_reduce_params([param]) else: if offset + sz > self.buffer.numel(): all_reduce_params(buffered_params) offset = 0 buffered_params.clear() buffered_params.append(param) offset += sz if len(buffered_params) > 0: all_reduce_params(buffered_params) reduction_fn()
KosmosX-API-main
kosmosX/fairseq/fairseq/distributed/legacy_distributed_data_parallel.py
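Not part of the original file: a simplified, single-process sketch of the gradient bucketing used above — several parameter gradients are packed into one flat buffer so a single all-reduce can cover many small tensors. The collective is replaced here by a stand-in "reduce" callable so the example runs without torch.distributed; only the packing/unpacking logic is shown.

# Illustrative sketch only; reduce_fn stands in for an all-reduce collective.
import torch

def bucketed_reduce(params, buffer, reduce_fn):
    offset = 0
    for p in params:                       # pack grads into the flat buffer
        sz = p.grad.numel()
        buffer[offset:offset + sz].copy_(p.grad.view(-1))
        offset += sz
    reduce_fn(buffer[:offset])             # one collective over the whole bucket
    offset = 0
    for p in params:                       # unpack reduced values back into p.grad
        sz = p.grad.numel()
        p.grad.copy_(buffer[offset:offset + sz].view_as(p))
        offset += sz

if __name__ == "__main__":
    params = [torch.nn.Parameter(torch.randn(4, 4)) for _ in range(3)]
    for p in params:
        p.grad = torch.ones_like(p)
    buf = torch.zeros(sum(p.numel() for p in params))
    bucketed_reduce(params, buf, reduce_fn=lambda t: t.mul_(0.5))  # stand-in for averaging across 2 workers
    print(params[0].grad[0, 0])  # tensor(0.5000)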
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import signal import threading from torch import nn logger = logging.getLogger(__name__) class DistributedTimeoutWrapper(nn.Module): """ A wrapper that kills the process if no progress is made within a given *timeout*. The timer is reset every time :func:`forward` is called. Usage:: module = DistributedTimeoutWrapper(module, timeout=30) x = module(input) time.sleep(20) # safe x = module(input) time.sleep(45) # job will be killed before this returns Args: module (nn.Module): module to wrap timeout (int): number of seconds before killing the process (set to a value <= 0 to disable the timeout) signal (Optional): signal to send once timeout is triggered """ def __init__(self, module: nn.Module, timeout: int, signal=signal.SIGINT): super().__init__() self.module = module self.timeout = timeout self.signal = signal if timeout > 0: self._heartbeat = threading.Event() self._heartbeat_thread = threading.Thread( target=self._check_heartbeat, args=(os.getpid(),), daemon=True, ) self._heartbeat_thread.start() self._terminated = False else: self._heartbeat = None self._heartbeat_thread = None def __del__(self): self.stop_timeout() def __getattr__(self, name): """Forward missing attributes to wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.module, name) def stop_timeout(self): if self._heartbeat_thread is not None: self._terminated = True self._heartbeat_thread.join() def state_dict(self, *args, **kwargs): return self.module.state_dict(*args, **kwargs) def load_state_dict(self, *args, **kwargs): return self.module.load_state_dict(*args, **kwargs) def forward(self, *args, **kwargs): if self._heartbeat is not None: self._heartbeat.set() return self.module(*args, **kwargs) def _check_heartbeat(self, parent_pid): self._heartbeat.wait() # wait for the first forward pass while True: self._heartbeat.clear() success = self._heartbeat.wait(timeout=self.timeout) if self._terminated: break elif not success: logger.error( ( "Killing job for not making progress in {} seconds. " "Set --heartbeat-timeout=-1 to disable this timeout." ).format(int(self.timeout)) ) os.kill(parent_pid, self.signal) return
KosmosX-API-main
kosmosX/fairseq/fairseq/distributed/distributed_timeout_wrapper.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib from typing import Optional import torch from fairseq.dataclass.configs import DistributedTrainingConfig from fairseq.distributed import utils as dist_utils try: from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP has_FSDP = True except ImportError: FSDP = torch.nn.Module has_FSDP = False class FullyShardedDataParallel(FSDP): """ A small wrapper around fairscale's FullyShardedDataParallel (FSDP) with some fairseq-specific checkpoint saving/loading logic. Args: use_sharded_state (bool): if True, then ``state_dict`` will return ``FSDP.local_state_dict`` and ``load_state_dict`` will call ``FSDP.load_local_state_dict``. Otherwise, ``state_dict`` will return the full model weights on data parallel rank 0 (empty on other ranks) and ``load_state_dict`` will broadcast model weights from rank 0 to other ranks. """ def __init__(self, *args, use_sharded_state: bool = False, **kwargs): if not has_FSDP: raise ImportError( "Cannot find FullyShardedDataParallel. " "Please install fairscale with: pip install fairscale" ) super().__init__(*args, **kwargs) self.use_sharded_state = use_sharded_state @property def unwrapped_module(self) -> torch.nn.Module: if self.flatten_parameters: return self.module.module else: return self.module def state_dict(self, destination=None, prefix="", keep_vars=False): if self.use_sharded_state: return super().local_state_dict( destination=destination, prefix=prefix, keep_vars=keep_vars ) else: if self.rank == 0: return super().state_dict( destination=destination, prefix=prefix, keep_vars=keep_vars ) else: # We must call state_dict() due to use of communication # primitives. But we don't use the result. super().state_dict() return destination or {} def load_state_dict(self, state_dict, strict=True, model_cfg=None): if self.use_sharded_state: return super().load_local_state_dict(state_dict, strict=strict) else: state_dict = dist_utils.broadcast_object( state_dict, src_rank=0, group=self.process_group ) return super().load_state_dict(state_dict, strict=strict) @contextlib.contextmanager def fsdp_enable_wrap(cfg: DistributedTrainingConfig): try: from fairscale.nn import enable_wrap except ImportError: raise ImportError( "Cannot find FullyShardedDataParallel. " "Please install fairscale with: pip install fairscale" ) if cfg.memory_efficient_fp16: assert cfg.fp16 # memory_efficient_fp16 should imply fp16 group = dist_utils.get_data_parallel_group() if group is None and cfg.distributed_world_size == 1: from fairscale.utils.testing import DummyProcessGroup group = DummyProcessGroup(rank=0, size=1) fsdp_config = { "process_group": group, "reshard_after_forward": not cfg.no_reshard_after_forward, "mixed_precision": cfg.fp16 and not cfg.memory_efficient_fp16, "fp32_reduce_scatter": cfg.fp32_reduce_scatter, "flatten_parameters": not cfg.not_fsdp_flatten_parameters, "cpu_offload": cfg.cpu_offload, "compute_dtype": torch.float16 if cfg.fp16 else torch.float32, "bucket_cap_mb": cfg.bucket_cap_mb, "state_dict_device": torch.device("cpu"), # reduce GPU mem usage } with enable_wrap( wrapper_cls=FullyShardedDataParallel, use_sharded_state=cfg.use_sharded_state, **fsdp_config, ): yield def fsdp_wrap(module, min_num_params: Optional[int] = None, **kwargs): """ Helper to wrap layers/modules in FSDP. This falls back to a no-op if fairscale is not available. 
Args: module (nn.Module): module to (maybe) wrap min_num_params (int, Optional): minimum number of layer params to wrap """ try: from fairscale.nn import wrap if min_num_params is not None: num_params = sum(p.numel() for p in module.parameters()) if num_params >= min_num_params: return wrap(module, **kwargs) else: return module else: return wrap(module, **kwargs) except ImportError: return module
KosmosX-API-main
kosmosX/fairseq/fairseq/distributed/fully_sharded_data_parallel.py
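Not part of the original file: a small sketch of the size-threshold policy implemented by fsdp_wrap() above — a layer is only wrapped when its parameter count reaches min_num_params, otherwise it is returned unchanged. The wrapper class below is a stand-in marker, not fairscale's FSDP; it just makes the selection logic visible without requiring fairscale.

# Illustrative sketch only; FakeShardedWrapper is a placeholder, not fairscale FSDP.
import torch.nn as nn

class FakeShardedWrapper(nn.Module):
    def __init__(self, module):
        super().__init__()
        self.module = module

    def forward(self, *args, **kwargs):
        return self.module(*args, **kwargs)

def maybe_wrap(module, min_num_params):
    num_params = sum(p.numel() for p in module.parameters())
    return FakeShardedWrapper(module) if num_params >= min_num_params else module

if __name__ == "__main__":
    small = nn.Linear(8, 8)        # 72 params -> left unwrapped
    big = nn.Linear(512, 512)      # ~262k params -> wrapped
    print(type(maybe_wrap(small, min_num_params=1000)).__name__)
    print(type(maybe_wrap(big, min_num_params=1000)).__name__)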
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .distributed_timeout_wrapper import DistributedTimeoutWrapper from .fully_sharded_data_parallel import ( fsdp_enable_wrap, fsdp_wrap, FullyShardedDataParallel, ) from .legacy_distributed_data_parallel import LegacyDistributedDataParallel from .module_proxy_wrapper import ModuleProxyWrapper from .tpu_distributed_data_parallel import TPUDistributedDataParallel __all__ = [ "DistributedTimeoutWrapper", "fsdp_enable_wrap", "fsdp_wrap", "FullyShardedDataParallel", "LegacyDistributedDataParallel", "ModuleProxyWrapper", "TPUDistributedDataParallel", ]
KosmosX-API-main
kosmosX/fairseq/fairseq/distributed/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import io import logging import os import pickle import random import socket import struct import subprocess import warnings from argparse import Namespace from collections import OrderedDict from dataclasses import dataclass from typing import Any, Dict, List, Mapping, Optional import torch import torch.distributed as dist from fairseq.dataclass.configs import DistributedTrainingConfig, FairseqConfig from omegaconf import open_dict try: import torch_xla.core.xla_model as xm except ImportError: xm = None # Flag to indicate if we're using Megatron # NOTE: this is a temporary hack until we move away from Megatron's model parallel init _USE_MEGATRON = False # Whether to use XLA ops (e.g., on TPUs) instead of CUDA ops. _USE_XLA = False logger = logging.getLogger(__name__) def is_master(cfg: DistributedTrainingConfig): return cfg.distributed_rank == 0 def is_local_master(cfg: DistributedTrainingConfig): # local master rank return cfg.distributed_rank % cfg.nprocs_per_node == 0 def infer_init_method(cfg: DistributedTrainingConfig, force_distributed=False): if cfg.distributed_init_method is not None or cfg.tpu: return num_pipelines_per_node = None if cfg.pipeline_model_parallel: num_pipeline_devices, num_pipelines_per_node = _pipeline_parallel_pre_init(cfg) if all( key in os.environ for key in ["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK"] ): # support torch.distributed.launch _infer_torch_distributed_launch_init(cfg) elif cfg.distributed_port > 0: # we can determine the init method automatically for Slurm _infer_slurm_init(cfg, num_pipelines_per_node) elif cfg.distributed_world_size > 1 or force_distributed: # fallback for single node with multiple GPUs _infer_single_node_init(cfg) if cfg.pipeline_model_parallel: _pipeline_parallel_post_init(cfg, num_pipeline_devices, num_pipelines_per_node) elif not cfg.distributed_no_spawn: with open_dict(cfg): cfg.distributed_num_procs = min( torch.cuda.device_count(), cfg.distributed_world_size ) def _infer_torch_distributed_launch_init(cfg: DistributedTrainingConfig): cfg.distributed_init_method = "env://" cfg.distributed_world_size = int(os.environ["WORLD_SIZE"]) cfg.distributed_rank = int(os.environ["RANK"]) # processes are created by torch.distributed.launch cfg.distributed_no_spawn = True def _infer_slurm_init(cfg: DistributedTrainingConfig, num_pipelines_per_node): node_list = os.environ.get("SLURM_STEP_NODELIST") if node_list is None: node_list = os.environ.get("SLURM_JOB_NODELIST") if node_list is not None: try: hostnames = subprocess.check_output( ["scontrol", "show", "hostnames", node_list] ) cfg.distributed_init_method = "tcp://{host}:{port}".format( host=hostnames.split()[0].decode("utf-8"), port=cfg.distributed_port, ) nnodes = int(os.environ.get("SLURM_NNODES")) ntasks_per_node = os.environ.get("SLURM_NTASKS_PER_NODE") if ntasks_per_node is not None: ntasks_per_node = int(ntasks_per_node) else: ntasks = int(os.environ.get("SLURM_NTASKS")) nnodes = int(os.environ.get("SLURM_NNODES")) assert ntasks % nnodes == 0 ntasks_per_node = int(ntasks / nnodes) if ntasks_per_node == 1: gpus_per_node = torch.cuda.device_count() node_id = int(os.environ.get("SLURM_NODEID")) cfg.distributed_rank = node_id * gpus_per_node cfg.distributed_world_size = nnodes * gpus_per_node elif cfg.pipeline_model_parallel: assert ntasks_per_node == num_pipelines_per_node, ( "SLURM --ntasks-per-node 
must match number of pipelines per " "node (={})".format(num_pipelines_per_node) ) cfg.distributed_no_spawn = True # For 4-way MP on nodes with 8 GPUs, ranks will be [0, 1] on # the first node, [1, 2] on the second node, etc. This # matches torch.distributed.launch. node_id = int(os.environ.get("SLURM_NODEID")) local_id = int(os.environ.get("SLURM_LOCALID")) cfg.distributed_rank = node_id * num_pipelines_per_node + local_id # In the above example, device_id will always be in [0, 1], # which also matches torch.distributed.launch. cfg.device_id = local_id # We also want to set distributed_world_size to be the total # number of pipelines across all nodes. cfg.distributed_world_size = nnodes * num_pipelines_per_node else: assert ntasks_per_node == cfg.distributed_world_size // nnodes cfg.distributed_no_spawn = True cfg.distributed_rank = int(os.environ.get("SLURM_PROCID")) cfg.device_id = int(os.environ.get("SLURM_LOCALID")) except subprocess.CalledProcessError as e: # scontrol failed raise e except FileNotFoundError: # Slurm is not installed pass def _infer_single_node_init(cfg: DistributedTrainingConfig): assert ( cfg.distributed_world_size <= torch.cuda.device_count() ), f"world size is {cfg.distributed_world_size} but have {torch.cuda.device_count()} available devices" port = random.randint(10000, 20000) cfg.distributed_init_method = "tcp://localhost:{port}".format(port=port) def _pipeline_parallel_pre_init(cfg: DistributedTrainingConfig): from fairseq import utils balance_exists = ( cfg.pipeline_balance is not None or cfg.pipeline_encoder_balance is not None or cfg.pipeline_decoder_balance is not None ) devices_exist = ( cfg.pipeline_devices is not None or cfg.pipeline_encoder_devices is not None or cfg.pipeline_decoder_devices is not None ) if not balance_exists: raise ValueError( "--pipeline-balance is currently required for pipeline model parallelism" ) if not devices_exist: raise ValueError( "--pipeline-devices is currently required for pipeline model parallelism" ) cfg.pipeline_balance = utils.eval_str_list(cfg.pipeline_balance, type=int) if cfg.pipeline_devices is not None: cfg.pipeline_devices = utils.eval_str_list(cfg.pipeline_devices, type=int) num_pipeline_devices = len(set(cfg.pipeline_devices)) else: cfg.pipeline_encoder_devices = utils.eval_str_list( cfg.pipeline_encoder_devices, type=int ) cfg.pipeline_decoder_devices = utils.eval_str_list( cfg.pipeline_decoder_devices, type=int ) num_pipeline_devices = len( set(cfg.pipeline_encoder_devices + cfg.pipeline_decoder_devices) ) gpus_per_node = torch.cuda.device_count() assert ( gpus_per_node >= num_pipeline_devices and gpus_per_node % num_pipeline_devices == 0 ), ( "the number of unique device IDs in --pipeline-devices must evenly divide " "the number of GPUs per node (multi-node pipelining is not yet supported)" ) num_pipelines_per_node = gpus_per_node // num_pipeline_devices return num_pipeline_devices, num_pipelines_per_node def _pipeline_parallel_post_init( cfg: DistributedTrainingConfig, num_pipeline_devices, num_pipelines_per_node ): if not cfg.distributed_no_spawn: # When distributed_no_spawn is False, we expect distributed_rank and # distributed_world_size to be based on the total number of GPUs, so # we need to correct them to be based on the number of pipelines. 
assert cfg.distributed_world_size % num_pipeline_devices == 0 cfg.distributed_world_size = cfg.distributed_world_size // num_pipeline_devices # In the case of 4-way MP on nodes with 8 GPUs, we want # distributed_rank to be the starting GPU index for each pipeline # i.e., 0, 2, ... gpus_per_node = torch.cuda.device_count() assert cfg.distributed_rank % gpus_per_node == 0 assert cfg.distributed_rank % num_pipeline_devices == 0 with open_dict(cfg): cfg.distributed_rank = cfg.distributed_rank // num_pipeline_devices # launch one process per pipeline cfg.distributed_num_procs = num_pipelines_per_node # if we have 4-way MP on a node with 8 GPUs, we want device_ids to be 0 # and 4, indicating the starting device IDs for each pipeline cfg.device_id *= num_pipeline_devices if cfg.device_id > 0: # if there's multiple pipelines on a node (e.g., 4-way MP on an 8 # GPU node), we need to adjust pipeline_devices accordingly logger.debug( "setting CUDA device={} on rank {}".format( cfg.device_id, cfg.distributed_rank ) ) torch.cuda.set_device(cfg.device_id) with open_dict(cfg): cfg.pipeline_devices = [cfg.device_id + d for d in cfg.pipeline_devices] logger.info( "setting pipeline_devices={} on rank {}".format( cfg.pipeline_devices, cfg.distributed_rank ) ) def distributed_init(cfg: FairseqConfig): if isinstance(cfg, Namespace): from fairseq.dataclass.utils import convert_namespace_to_omegaconf cfg = convert_namespace_to_omegaconf(cfg) if not cfg.common.tpu: if torch.distributed.is_available() and torch.distributed.is_initialized(): warnings.warn( "Distributed is already initialized, cannot initialize twice!" ) else: logger.info( "distributed init (rank {}): {}".format( cfg.distributed_training.distributed_rank, cfg.distributed_training.distributed_init_method, ) ) dist.init_process_group( backend=cfg.distributed_training.distributed_backend, init_method=cfg.distributed_training.distributed_init_method, world_size=cfg.distributed_training.distributed_world_size, rank=cfg.distributed_training.distributed_rank, ) logger.info( "initialized host {} as rank {}".format( socket.gethostname(), cfg.distributed_training.distributed_rank, ) ) # perform a dummy all-reduce to initialize the NCCL communicator if torch.cuda.is_available(): dist.all_reduce(torch.zeros(1).cuda()) cfg.distributed_training.distributed_rank = torch.distributed.get_rank() else: assert xm.xrt_world_size() == cfg.distributed_training.distributed_world_size global _USE_XLA _USE_XLA = True cfg.distributed_training.device_id = xm.get_local_ordinal() cfg.distributed_training.distributed_rank = xm.get_ordinal() xm.rendezvous("distributed_init") # wait for all workers if is_master(cfg.distributed_training): logging.getLogger().setLevel(logging.INFO) else: logging.getLogger().setLevel(logging.WARNING) if cfg.common.model_parallel_size > 1: try: from fairseq.model_parallel.megatron.mpu import ( initialize_model_parallel, model_parallel_cuda_manual_seed, ) except ImportError: raise ImportError( "\n\nPlease install the megatron submodule:" "\n\n git submodule update --init " "fairseq/model_parallel/megatron" ) global _USE_MEGATRON _USE_MEGATRON = True initialize_model_parallel(cfg.common.model_parallel_size) model_parallel_cuda_manual_seed(cfg.common.seed) model_part_number = get_model_parallel_rank() cfg.checkpoint.checkpoint_suffix += "-model_part-{0}".format(model_part_number) if hasattr(cfg, "model") and getattr(cfg.model, "base_layers", 0) > 0: cfg.checkpoint.checkpoint_suffix = ( f"-rank-{cfg.distributed_training.distributed_rank}" ) return 
cfg.distributed_training.distributed_rank def distributed_main(i, main, cfg: FairseqConfig, kwargs): cfg.distributed_training.device_id = i if torch.cuda.is_available() and not cfg.common.cpu and not cfg.common.tpu: torch.cuda.set_device(cfg.distributed_training.device_id) if cfg.distributed_training.distributed_rank is None: # torch.multiprocessing.spawn cfg.distributed_training.distributed_rank = kwargs.pop("start_rank", 0) + i cfg.distributed_training.distributed_rank = distributed_init(cfg) after_distributed_init_fn = kwargs.pop("after_distributed_init_fn", None) if after_distributed_init_fn: cfg = after_distributed_init_fn(cfg) main(cfg, **kwargs) if torch.distributed.is_initialized(): torch.distributed.barrier(get_global_group()) def call_main(cfg: FairseqConfig, main, **kwargs): if cfg.distributed_training.distributed_init_method is None: infer_init_method(cfg.distributed_training) if cfg.distributed_training.distributed_init_method is not None: # distributed training if not cfg.distributed_training.distributed_no_spawn: start_rank = cfg.distributed_training.distributed_rank cfg.distributed_training.distributed_rank = None # assign automatically kwargs["start_rank"] = start_rank torch.multiprocessing.spawn( fn=distributed_main, args=(main, cfg, kwargs), nprocs=min( torch.cuda.device_count(), cfg.distributed_training.distributed_world_size, ), join=True, ) else: distributed_main(cfg.distributed_training.device_id, main, cfg, kwargs) elif cfg.common.tpu and cfg.distributed_training.distributed_world_size > 1: import torch_xla.distributed.xla_multiprocessing as xmp torch.multiprocessing.set_sharing_strategy("file_system") xmp.spawn( fn=distributed_main, args=(main, cfg, kwargs), # tpu-comment: # 8 devices in one TPU VM, is the max processes to be spawned. 
# The rest is driven by xm.distributed.xla_dist nprocs=min(cfg.distributed_training.distributed_world_size, 8), ) else: # single GPU main main(cfg, **kwargs) def use_xla(): global _USE_XLA return _USE_XLA def new_groups(grouped_ranks: List[List[int]]): if use_xla(): return ("tpu", grouped_ranks) else: groups = [dist.new_group(g) for g in grouped_ranks] my_group_idx = _find_my_group_index(grouped_ranks) return groups[my_group_idx] def _find_my_group_index(grouped_ranks): my_rank = get_global_rank() for i, group in enumerate(grouped_ranks): if my_rank in group: return i raise RuntimeError def _find_my_group(grouped_ranks): index = _find_my_group_index(grouped_ranks) return grouped_ranks[index] def get_rank(group): if use_xla(): assert group[0] == "tpu" my_group = _find_my_group(group[1]) return my_group.index(get_global_rank()) else: return dist.get_rank(group=group) def get_world_size(group): if use_xla(): assert group[0] == "tpu" my_group = _find_my_group(group[1]) return len(my_group) elif torch.distributed.is_initialized(): return dist.get_world_size(group=group) else: return 1 def get_global_group(): if use_xla(): return new_groups([list(range(get_global_world_size()))]) elif torch.distributed.is_initialized(): if not hasattr(get_global_group, "_global_group"): # ideally we could use torch.distributed.group.WORLD, but it seems # to cause random NCCL hangs in some cases get_global_group._global_group = dist.new_group() return get_global_group._global_group else: return None def get_global_rank(): if use_xla(): return xm.get_ordinal() elif torch.distributed.is_initialized(): return torch.distributed.get_rank() else: return 0 def get_global_world_size(): if use_xla(): return xm.xrt_world_size() elif torch.distributed.is_initialized(): return torch.distributed.get_world_size() else: return 1 def get_data_parallel_group(): """Get the data parallel group the caller rank belongs to.""" global _USE_MEGATRON if _USE_MEGATRON: from fairseq.model_parallel.megatron import mpu return mpu.get_data_parallel_group() else: return get_global_group() def get_data_parallel_rank(): """Return my rank for the data parallel group.""" return get_rank(get_data_parallel_group()) def get_data_parallel_world_size(): """Return world size for the data parallel group.""" return get_world_size(get_data_parallel_group()) def get_model_parallel_group(): global _USE_MEGATRON if _USE_MEGATRON: from fairseq.model_parallel.megatron import mpu return mpu.get_model_parallel_group() else: return None def get_model_parallel_rank(): """Return my rank for the model parallel group.""" return get_rank(get_model_parallel_group()) def get_model_parallel_world_size(): """Return world size for the model parallel group.""" return get_world_size(get_model_parallel_group()) def all_reduce(tensor, group, op="sum"): if use_xla(): assert isinstance(group, tuple) and group[0] == "tpu" tensor = [tensor] # wrap in a list to make xm.all_reduce in-place return xm.all_reduce(op, tensor, groups=group[1])[0] else: if op == "sum": op = dist.ReduceOp.SUM elif op == "max": op = dist.ReduceOp.MAX else: raise NotImplementedError dist.all_reduce(tensor, op=op, group=group) return tensor def broadcast(tensor, src, group): if use_xla(): # XLA doesn't support broadcast, hack it with all_reduce if get_rank(group) != src: tensor.zero_() all_reduce(tensor, group) else: dist.broadcast(tensor, src=src, group=group) def all_to_all(tensor, group): """Perform an all-to-all operation on a 1D Tensor.""" assert tensor.dim() == 1 split_count = get_world_size(group=group) 
assert tensor.numel() % split_count == 0 if use_xla(): assert isinstance(group, tuple) and group[0] == "tpu" return xm.all_to_all( tensor, split_dimension=0, concat_dimension=0, split_count=split_count, groups=group[1], ) else: output = torch.zeros_like(tensor) dist.all_to_all_single(output, tensor, group=group) return output def all_gather(tensor, group, return_tensor=False): """Perform an all-gather operation.""" if use_xla(): result = xm.all_gather(tensor, groups=group[1]) world_size = get_world_size(group=group) result = result.view(world_size, *tensor.size()) if return_tensor: return result else: return [result[i] for i in range(world_size)] else: world_size = get_world_size(group=group) rank = get_rank(group=group) tensor_list = [ tensor if i == rank else torch.empty_like(tensor) for i in range(world_size) ] dist.all_gather(tensor_list, tensor, group=group) if return_tensor: return torch.stack(tensor_list, dim=0) else: return tensor_list def all_gather_list(data, group=None, max_size=16384): """Gathers arbitrary data from all nodes into a list. Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python data. Note that *data* must be picklable and any CUDA tensors will be moved to CPU and returned on CPU as well. Args: data (Any): data from the local worker to be gathered on other workers group: group of the collective max_size (int, optional): maximum size of the data to be gathered across workers """ from fairseq import utils if group is None: group = get_global_group() rank = get_rank(group=group) world_size = get_world_size(group=group) buffer_size = max_size * world_size if ( not hasattr(all_gather_list, "_buffer") or all_gather_list._buffer.numel() < buffer_size ): all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size) all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory() buffer = all_gather_list._buffer buffer.zero_() cpu_buffer = all_gather_list._cpu_buffer data = utils.move_to_cpu(data) enc = pickle.dumps(data) enc_size = len(enc) header_size = 4 # size of header that contains the length of the encoded data size = header_size + enc_size if size > max_size: raise ValueError( "encoded data size ({}) exceeds max_size ({})".format(size, max_size) ) header = struct.pack(">I", enc_size) cpu_buffer[:size] = torch.ByteTensor(list(header + enc)) start = rank * max_size buffer[start : start + size].copy_(cpu_buffer[:size]) all_reduce(buffer, group=group) buffer = buffer.cpu() try: result = [] for i in range(world_size): out_buffer = buffer[i * max_size : (i + 1) * max_size] (enc_size,) = struct.unpack(">I", bytes(out_buffer[:header_size].tolist())) if enc_size > 0: result.append( pickle.loads( bytes(out_buffer[header_size : header_size + enc_size].tolist()) ) ) return result except pickle.UnpicklingError: raise Exception( "Unable to unpickle data from other workers. all_gather_list requires all " "workers to enter the function together, so this error usually indicates " "that the workers have fallen out of sync somehow. Workers can fall out of " "sync if one of them runs out of memory, or if there are other conditions " "in your training script that can cause one worker to finish an epoch " "while other workers are still iterating over their portions of the data. " "Try rerunning with --ddp-backend=legacy_ddp and see if that helps." ) def all_reduce_dict(data: Mapping[str, Any], device, group) -> Dict[str, Any]: """ AllReduce a dictionary of values across workers. 
We separately reduce items that are already on the device and items on CPU for better performance. Args: data (Mapping[str, Any]): dictionary of data to all-reduce, but cannot be a nested dictionary device (torch.device): device for the reduction group: group of the collective """ data_keys = list(data.keys()) # We want to separately reduce items that are already on the # device and items on CPU for performance reasons. cpu_data = OrderedDict() device_data = OrderedDict() for k in data_keys: t = data[k] if not torch.is_tensor(t): cpu_data[k] = torch.tensor(t, dtype=torch.double) elif t.device.type != device.type: cpu_data[k] = t.to(dtype=torch.double) else: device_data[k] = t.to(dtype=torch.double) def _all_reduce_dict(data: OrderedDict): if len(data) == 0: return data buf = torch.cat([t.view(-1) for t in data.values()]).to(device=device) all_reduce(buf, group=group) split_buf = torch.split(buf.clone(), [t.numel() for t in data.values()]) reduced_data = [t.view_as(orig) for t, orig in zip(split_buf, data.values())] return OrderedDict(zip(data.keys(), reduced_data)) cpu_data = _all_reduce_dict(cpu_data) device_data = _all_reduce_dict(device_data) def get_from_stack(key): if key in cpu_data: return cpu_data[key] elif key in device_data: return device_data[key] raise KeyError return OrderedDict([(key, get_from_stack(key)) for key in data_keys]) def broadcast_tensors( tensors: Optional[List[torch.Tensor]], src_rank: int, group: object, dist_device: Optional[torch.device] = None, ) -> List[torch.Tensor]: """ Broadcasts a list of tensors without other (non-src) ranks needing to know the dtypes/shapes of the tensors. """ if dist_device is None: if torch.distributed.get_backend(group) == "nccl": dist_device = torch.device("cuda") else: dist_device = torch.device("cpu") # share metadata first to simplify transfer is_src_rank = get_rank(group) == src_rank if is_src_rank: metadata = [ {"size": t.size(), "dtype": t.dtype, "device": t.device} for t in tensors ] metadata = _broadcast_object_slow(metadata, src_rank, group, dist_device) else: metadata = _broadcast_object_slow(None, src_rank, group, dist_device) out_tensors = [] for i, meta in enumerate(metadata): if is_src_rank: tensor = tensors[i] broadcast(tensors[i].to(dist_device), src=src_rank, group=group) else: tensor = torch.zeros( [meta["size"].numel()], dtype=meta["dtype"], device=dist_device ) broadcast(tensor, src=src_rank, group=group) tensor = tensor.view(meta["size"]).to(meta["device"]) out_tensors.append(tensor) return out_tensors def broadcast_object( obj: Any, src_rank: int, group: object, dist_device: Optional[torch.device] = None, ) -> Any: """Broadcast an arbitrary Python object to other workers.""" if dist_device is None: if torch.distributed.get_backend(group) == "nccl": dist_device = torch.device("cuda") else: dist_device = torch.device("cpu") if get_rank(group) == src_rank: # split the tensors from the non-tensors so we can broadcast them # directly, avoiding unnecessary serialization/deserialization tensors = [] obj = _split_tensors_from_obj(obj, tensors) obj = _broadcast_object_slow(obj, src_rank, group, dist_device) tensors = broadcast_tensors(tensors, src_rank, group, dist_device) else: obj = _broadcast_object_slow(None, src_rank, group, dist_device) tensors = broadcast_tensors(None, src_rank, group, dist_device) return _put_tensors_in_obj(obj, tensors) def _broadcast_object_slow( obj: Any, src_rank: int, group: object, dist_device: torch.device, ) -> Any: if get_rank(group) == src_rank: # Emit data buffer = io.BytesIO() 
torch.save(obj, buffer) buffer = torch.ByteTensor(buffer.getbuffer()).to(dist_device) length = torch.LongTensor([len(buffer)]).to(dist_device) broadcast(length, src=src_rank, group=group) broadcast(buffer, src=src_rank, group=group) else: # Fetch from the source length = torch.LongTensor([0]).to(dist_device) broadcast(length, src=src_rank, group=group) buffer = torch.ByteTensor(int(length.item())).to(dist_device) broadcast(buffer, src=src_rank, group=group) buffer = io.BytesIO(buffer.cpu().numpy()) obj = torch.load(buffer, map_location="cpu") return obj @dataclass(frozen=True) class _TensorPlaceholder: index: int def _split_tensors_from_obj(obj: Any, tensors: List[torch.Tensor]) -> Any: if torch.is_tensor(obj): placeholder = _TensorPlaceholder(index=len(tensors)) tensors.append(obj) return placeholder elif isinstance(obj, dict): return {k: _split_tensors_from_obj(v, tensors) for k, v in obj.items()} elif isinstance(obj, list): return [_split_tensors_from_obj(v, tensors) for v in obj] elif isinstance(obj, tuple): return tuple(_split_tensors_from_obj(v, tensors) for v in obj) elif isinstance(obj, set): return {_split_tensors_from_obj(v, tensors) for v in obj} else: return obj def _put_tensors_in_obj(obj: Any, tensors: List[torch.Tensor]) -> Any: if isinstance(obj, _TensorPlaceholder): return tensors[obj.index] elif isinstance(obj, dict): return {k: _put_tensors_in_obj(v, tensors) for k, v in obj.items()} elif isinstance(obj, list): return [_put_tensors_in_obj(v, tensors) for v in obj] elif isinstance(obj, tuple): return tuple(_put_tensors_in_obj(v, tensors) for v in obj) elif isinstance(obj, set): return {_put_tensors_in_obj(v, tensors) for v in obj} else: return obj
KosmosX-API-main
kosmosX/fairseq/fairseq/distributed/utils.py
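Not part of the original file: a single-process sketch of the fixed-size slot format used by all_gather_list() above. Each rank writes a 4-byte big-endian length header followed by the pickled payload into its own max_size slot; after the buffers are combined, every rank decodes all slots. The "gather" is simulated here by concatenating the slots, so it runs without torch.distributed.

# Illustrative sketch only; the distributed gather is simulated by concatenation.
import pickle
import struct

def encode_slot(data, max_size):
    enc = pickle.dumps(data)
    header = struct.pack(">I", len(enc))              # 4-byte length header
    assert len(header) + len(enc) <= max_size, "payload too large for slot"
    return (header + enc).ljust(max_size, b"\x00")    # pad slot to fixed size

def decode_slots(buffer, max_size):
    out = []
    for start in range(0, len(buffer), max_size):
        slot = buffer[start:start + max_size]
        (enc_size,) = struct.unpack(">I", slot[:4])
        if enc_size > 0:                              # empty slots are skipped
            out.append(pickle.loads(slot[4:4 + enc_size]))
    return out

if __name__ == "__main__":
    slots = [encode_slot({"rank": r, "loss": 0.1 * r}, max_size=128) for r in range(3)]
    print(decode_slots(b"".join(slots), max_size=128))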
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from torch import nn class ModuleProxyWrapper(nn.Module): """ Wrap a DistributedDataParallel module and forward requests for missing attributes to the module wrapped by DDP (the twice-wrapped module). Also forward calls to :func:`state_dict` and :func:`load_state_dict`. Usage:: module.xyz = "hello world" wrapped_module = DistributedDataParallel(module, **ddp_args) wrapped_module = ModuleProxyWrapper(wrapped_module) assert wrapped_module.xyz == "hello world" assert wrapped_module.state_dict().keys() == module.state_dict().keys() Args: module (nn.Module): module to wrap """ def __init__(self, module: nn.Module): super().__init__() assert hasattr( module, "module" ), "ModuleProxyWrapper expects input to wrap another module" self.module = module def __getattr__(self, name): """Forward missing attributes to twice-wrapped module.""" try: # defer to nn.Module's logic return super().__getattr__(name) except AttributeError: try: # forward to the once-wrapped module return getattr(self.module, name) except AttributeError: # forward to the twice-wrapped module return getattr(self.module.module, name) def state_dict(self, *args, **kwargs): """Forward to the twice-wrapped module.""" return self.module.module.state_dict(*args, **kwargs) def load_state_dict(self, *args, **kwargs): """Forward to the twice-wrapped module.""" return self.module.module.load_state_dict(*args, **kwargs) def forward(self, *args, **kwargs): return self.module(*args, **kwargs)
KosmosX-API-main
kosmosX/fairseq/fairseq/distributed/module_proxy_wrapper.py
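A real DistributedDataParallel needs an initialized process group, but the attribute-forwarding behaviour of ModuleProxyWrapper can be exercised with any stand-in that exposes the inner module as .module. A minimal sketch, assuming the fairseq package from this repo is importable; FakeDDP is a hypothetical stand-in, not part of fairseq.

from torch import nn

from fairseq.distributed.module_proxy_wrapper import ModuleProxyWrapper


class FakeDDP(nn.Module):
    """Stand-in that, like DDP, exposes the wrapped module as `.module`."""

    def __init__(self, module: nn.Module):
        super().__init__()
        self.module = module

    def forward(self, *args, **kwargs):
        return self.module(*args, **kwargs)


inner = nn.Linear(4, 4)
inner.xyz = "hello world"

wrapped = ModuleProxyWrapper(FakeDDP(inner))
assert wrapped.xyz == "hello world"                              # attribute forwarded twice
assert wrapped.state_dict().keys() == inner.state_dict().keys()  # no "module." prefix in keys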
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch import nn from fairseq.distributed import utils class TPUDistributedDataParallel(nn.Module): def __init__(self, module, process_group): super().__init__() self.module = module self.process_group = process_group self.world_size = utils.get_world_size(self.process_group) def forward(self, *inputs, **kwargs): return self.module(*inputs, **kwargs) def all_reduce_grads(self): gradients = [] for p in self.parameters(): if not p.requires_grad: continue if p.grad is None: p.grad = torch.zeros_like(p) if p.grad.requires_grad: raise RuntimeError( "TPUDistributedDataParallel only works with gradients that don't " "require grad" ) gradients.append(p.grad) import torch_xla.core.xla_model as xm xm.all_reduce( "sum", gradients, scale=1.0 / self.world_size, groups=self.process_group[1], )
KosmosX-API-main
kosmosX/fairseq/fairseq/distributed/tpu_distributed_data_parallel.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
KosmosX-API-main
kosmosX/fairseq/fairseq/config/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging

import torch
from fairseq import optim
from omegaconf import DictConfig

logger = logging.getLogger(__name__)


class AMPOptimizer(optim.FairseqOptimizer):
    """
    Wrap an *optimizer* to support AMP (automatic mixed precision) training.
    """

    def __init__(self, cfg: DictConfig, params, fp32_optimizer, **kwargs):
        super().__init__(cfg.optimizer)
        self.fp32_optimizer = fp32_optimizer
        amp_kwargs = {"init_scale": cfg.common.fp16_init_scale}
        if getattr(cfg.common, "amp_scale_window", None) is not None:
            # the GradScaler growth interval comes from the configured scale
            # window, not from the initial scale
            amp_kwargs["growth_interval"] = cfg.common.amp_scale_window
        self._grad_scaler = torch.cuda.amp.GradScaler(**amp_kwargs)
        self.min_loss_scale = cfg.common.min_loss_scale

    @classmethod
    def build_optimizer(cls, cfg: DictConfig, params, **kwargs):
        """
        Args:
            cfg (omegaconf.DictConfig): fairseq args
            params (iterable): iterable of parameters to optimize
        """
        fp32_optimizer = optim.build_optimizer(cfg.optimizer, params)
        return cls(cfg, params, fp32_optimizer, **kwargs)

    def backward(self, loss):
        """Computes the sum of gradients of the given tensor w.r.t. graph leaves.

        Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
        function additionally dynamically scales the loss to avoid gradient
        underflow.
        """
        self._grad_scaler.scale(loss).backward()

    def step(self):
        self.scaler.step(self.fp32_optimizer)
        self.scaler.update()

    def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
        """Clips gradient norm."""
        self.scaler.unscale_(self.optimizer)
        grad_norm = self.fp32_optimizer.clip_grad_norm(max_norm, aggregate_norm_fn)
        if not torch.isfinite(grad_norm).all():
            new_loss_scale = self.next_loss_scale
            if new_loss_scale <= self.min_loss_scale:
                raise FloatingPointError(
                    (
                        "AMP: Minimum loss scale reached ({}). Your loss is probably exploding. "
                        "Try restarting training or use fp32. {}"
                    ).format(self.min_loss_scale, new_loss_scale)
                )
            else:
                logger.info(f"AMP: overflow detected, setting scale to {new_loss_scale}")
        return grad_norm

    @property
    def scaler(self):
        return self._grad_scaler

    @property
    def next_loss_scale(self):
        return self.scaler.get_scale() * self.scaler.get_backoff_factor()

    @property
    def optimizer(self):
        return self.fp32_optimizer.optimizer

    @optimizer.setter
    def optimizer(self, optimizer):
        self.fp32_optimizer.optimizer = optimizer

    @property
    def lr_scheduler(self):
        return getattr(self.fp32_optimizer, "lr_scheduler", None)

    @property
    def optimizer_config(self):
        return self.fp32_optimizer.optimizer_config

    def get_lr(self):
        return self.fp32_optimizer.get_lr()

    def set_lr(self, lr):
        self.fp32_optimizer.set_lr(lr)

    def all_reduce_grads(self, module):
        self.fp32_optimizer.all_reduce_grads(module)

    @property
    def supports_flat_params(self):
        return self.fp32_optimizer.supports_flat_params
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/amp_optimizer.py
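AMPOptimizer is a thin wrapper around torch.cuda.amp.GradScaler; the underlying PyTorch pattern it automates (scale the loss, backward, unscale before clipping, step, update) looks roughly like the sketch below. This is plain PyTorch, not fairseq's training loop, and it only runs where CUDA is available.

import torch
from torch import nn

if torch.cuda.is_available():
    model = nn.Linear(16, 16).cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scaler = torch.cuda.amp.GradScaler(init_scale=2 ** 7)

    x = torch.randn(8, 16, device="cuda")
    with torch.cuda.amp.autocast():
        loss = model(x).pow(2).mean()

    scaler.scale(loss).backward()        # backward on the scaled loss
    scaler.unscale_(optimizer)           # bring grads back to true scale for clipping
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
    scaler.step(optimizer)               # skips the step if inf/nan grads were found
    scaler.update()                      # grow/shrink the scale for the next iteration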
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections.abc import Collection from dataclasses import dataclass, field from typing import List import torch from fairseq.dataclass import FairseqDataclass from omegaconf import II, DictConfig from torch.optim.optimizer import Optimizer, required from . import FairseqOptimizer, register_optimizer @dataclass class FairseqNAGConfig(FairseqDataclass): momentum: float = field(default=0.99, metadata={"help": "momentum factor"}) weight_decay: float = field(default=0.0, metadata={"help": "weight decay"}) # TODO common vars in parent class lr: List[float] = II("optimization.lr") @register_optimizer("nag", dataclass=FairseqNAGConfig) class FairseqNAG(FairseqOptimizer): def __init__(self, cfg: DictConfig, params): super().__init__(cfg) self._optimizer = NAG(params, **self.optimizer_config) @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.cfg.lr[0] if isinstance(self.cfg.lr, Collection) else self.cfg.lr, "momentum": self.cfg.momentum, "weight_decay": self.cfg.weight_decay, } class NAG(Optimizer): def __init__(self, params, lr=required, momentum=0, weight_decay=0): defaults = dict(lr=lr, lr_old=lr, momentum=momentum, weight_decay=weight_decay) super(NAG, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: weight_decay = group["weight_decay"] momentum = group["momentum"] lr = group["lr"] lr_old = group.get("lr_old", lr) lr_correct = lr / lr_old if lr_old > 0 else lr for p in group["params"]: if p.grad is None: continue p_data_fp32 = p.data if p_data_fp32.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() d_p = p.grad.data.float() param_state = self.state[p] if "momentum_buffer" not in param_state: param_state["momentum_buffer"] = torch.zeros_like(d_p) else: param_state["momentum_buffer"] = param_state["momentum_buffer"].to( d_p ) buf = param_state["momentum_buffer"] if weight_decay != 0: p_data_fp32.mul_(1 - lr * weight_decay) p_data_fp32.add_(buf, alpha=momentum * momentum * lr_correct) p_data_fp32.add_(d_p, alpha=-(1 + momentum) * lr) buf.mul_(momentum * lr_correct).add_(d_p, alpha=-lr) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) group["lr_old"] = lr return loss
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/nag.py
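The parameter and momentum-buffer updates in NAG.step reduce to p += momentum^2 * lr_correct * buf - (1 + momentum) * lr * grad followed by buf = momentum * lr_correct * buf - lr * grad. A worked scalar sketch of those two formulas (weight decay omitted, lr held constant so lr_correct == 1); this mirrors the arithmetic above rather than calling the optimizer.

import torch

lr, momentum = 0.1, 0.9
p = torch.tensor([1.0])          # parameter being optimized (objective: p**2)
buf = torch.zeros_like(p)        # momentum buffer

for step in range(3):
    d_p = 2.0 * p                # gradient of p**2 at the current point
    # parameter update: p += momentum^2 * buf - (1 + momentum) * lr * grad
    p = p + (momentum * momentum) * buf - (1.0 + momentum) * lr * d_p
    # buffer update: buf = momentum * buf - lr * grad
    buf = momentum * buf - lr * d_p
    print(f"step {step}: p={p.item():.4f}  buf={buf.item():.4f}")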
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import types import torch def get_fused_adam_class(): """ Look for the FusedAdam optimizer from apex. We first try to load the "contrib" interface, which is a bit faster than the main interface, but is technically deprecated. """ try: # The "deprecated" interface in recent versions of apex is a bit # faster than the main interface, since we don't use the apex # optimizer. This can be installed by passing the # `--deprecated_fused_adam` option when building apex. global fused_adam_cuda import importlib fused_adam_cuda = importlib.import_module("fused_adam_cuda") return FusedAdamV1 except ImportError: try: # fallback to the newer interface from apex.multi_tensor_apply import multi_tensor_applier from apex.optimizers import FusedAdam as _FusedAdam # noqa if multi_tensor_applier.available: return FusedAdamV2 except ImportError: pass return None class FusedAdamV1(torch.optim.Optimizer): """ Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via ``python setup.py install --cuda_ext --cpp_ext``. It has been proposed in `Adam: A Method for Stochastic Optimization`_. Compared to the original version in Apex, the fairseq version casts grads and params to FP32 internally to support ``--memory-efficient-fp16``. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate. (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square. (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability. (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) NOT SUPPORTED in FusedAdam! eps_inside_sqrt (boolean, optional): in the 'update parameters' step, adds eps to the bias-corrected second moment estimate before evaluating square root instead of adding it to the square root of second moment estimate as in the original paper. (default: False) .. _Adam: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__( self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt=False, weight_decay=0.0, max_grad_norm=0.0, amsgrad=False, use_fp16_stats=False, ): global fused_adam_cuda import importlib fused_adam_cuda = importlib.import_module("fused_adam_cuda") if amsgrad: raise RuntimeError("FusedAdam does not support the AMSGrad variant.") defaults = { "lr": lr, "bias_correction": bias_correction, "betas": betas, "eps": eps, "weight_decay": weight_decay, "max_grad_norm": max_grad_norm, } super().__init__(params, defaults) self.eps_mode = 0 if eps_inside_sqrt else 1 self.use_fp16_stats = use_fp16_stats self.FLOAT16_MAX = 65504.0 @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True @property def supports_step_with_scale(self): return True def step(self, closure=None, grads=None, scale=1.0, grad_norms=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
grads (list of tensors, optional): weight gradient to use for the optimizer update. If gradients have type torch.half, parameters are expected to be in type torch.float. (default: None) output params (list of tensors, optional): A reduced precision copy of the updated weights written out in addition to the regular updated weights. Have to be of same type as gradients. (default: None) scale (float, optional): factor to divide gradient tensor values by before applying to weights. (default: 1) """ loss = None if closure is not None: loss = closure() if grads is None: grads_group = [None] * len(self.param_groups) # backward compatibility # assuming a list/generator of parameter means single group elif isinstance(grads, types.GeneratorType): grads_group = [grads] elif type(grads[0]) != list: grads_group = [grads] else: grads_group = grads if grad_norms is None: grad_norms = [None] * len(self.param_groups) for group, grads_this_group, grad_norm in zip( self.param_groups, grads_group, grad_norms ): if grads_this_group is None: grads_this_group = [None] * len(group["params"]) # compute combined scale factor for this group combined_scale = scale if group.get("max_grad_norm", 0) > 0: # norm is in fact norm*scale clip = ((grad_norm / scale) + 1e-6) / group["max_grad_norm"] if clip > 1: combined_scale = clip * scale bias_correction = 1 if group.get("bias_correction", 1) else 0 for p, grad in zip(group["params"], grads_this_group): # note: p.grad should not ever be set for correct # operation of mixed precision optimizer that sometimes # sends None gradients if p.grad is None and grad is None: continue if grad is None: grad = p.grad.data if grad.is_sparse: raise RuntimeError( "FusedAdam does not support sparse gradients, " "please consider SparseAdam instead" ) if p.device.type == "cpu": p_data_fp32 = p.data.cuda(non_blocking=True).float() out_p = torch.tensor([], dtype=torch.float) else: p_data_fp32 = p.data.float() out_p = p.data state = self.state[p] # State initialization dtype = torch.float16 if self.use_fp16_stats else p_data_fp32.dtype if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p_data_fp32, dtype=dtype) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p_data_fp32, dtype=dtype) if self.use_fp16_stats: state["exp_avg_scale"] = 1.0 state["exp_avg_sq_scale"] = 1.0 else: device = p_data_fp32.device state["exp_avg"] = state["exp_avg"].to(device, dtype) state["exp_avg_sq"] = state["exp_avg_sq"].to(device, dtype) exp_avg = state["exp_avg"] exp_avg_sq = state["exp_avg_sq"] if self.use_fp16_stats: assert exp_avg.dtype == torch.float16 exp_avg = exp_avg.float() * state["exp_avg_scale"] exp_avg_sq = exp_avg_sq.float() * state["exp_avg_sq_scale"] beta1, beta2 = group["betas"] state["step"] += 1 with torch.cuda.device(p_data_fp32.device): fused_adam_cuda.adam( p_data_fp32, out_p, exp_avg, exp_avg_sq, grad, group["lr"], beta1, beta2, group["eps"], combined_scale, state["step"], self.eps_mode, bias_correction, group["weight_decay"], ) if p.device.type == "cpu": p.data.copy_(p_data_fp32, non_blocking=True) if self.use_fp16_stats: def inf_norm(t): return torch.norm(t, float("inf")) # from github.com/openai/jukebox/blob/master/jukebox/utils/fp16.py state["exp_avg_scale"], state["exp_avg_sq_scale"] = ( 1e-8 + inf_norm(exp_avg) / self.FLOAT16_MAX, 1e-8 + inf_norm(exp_avg_sq) / self.FLOAT16_MAX, ) state["exp_avg"], state["exp_avg_sq"] = ( (exp_avg / state["exp_avg_scale"]).half(), (exp_avg_sq / 
state["exp_avg_sq_scale"]).half(), ) return loss try: from apex.multi_tensor_apply import multi_tensor_applier from apex.optimizers import FusedAdam class FusedAdamV2(FusedAdam): """ Compared to the original version in Apex, the fairseq version casts grads and params to FP32 internally to support ``--memory-efficient-fp16``. """ def __init__(self, *args, use_fp16_stats=False, **kwargs): if use_fp16_stats: raise NotImplementedError( "--fp16-adam-stats is only supported with FusedAdamV1" ) super().__init__(*args, **kwargs) if not hasattr(self, "multi_tensor_adam"): raise Exception( "Apex installation is outdated. Please install an updated version of apex." ) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step( self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, ): """Performs a single optimization step.""" loss = None if closure is not None: loss = closure() for group in self.param_groups: bias_correction = 1 if group["bias_correction"] else 0 beta1, beta2 = group["betas"] # assume same step across group now to simplify things # per parameter step can be easily support by making it tensor, or pass list into kernel if "step" in group: group["step"] += 1 else: group["step"] = 1 # create lists for multi-tensor apply g_16, p_16, orig_p_16, m_16, v_16 = [], [], [], [], [] g_32, p_32, m_32, v_32 = [], [], [], [] for p in group["params"]: if p.grad is None: continue if p.grad.data.is_sparse: raise RuntimeError( "FusedAdam does not support sparse gradients, " "please consider SparseAdam instead" ) state = self.state[p] # State initialization if len(state) == 0: # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p.data, dtype=torch.float) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like( p.data, dtype=torch.float ) else: state["exp_avg"] = state["exp_avg"].to( device=p.data.device, dtype=torch.float ) state["exp_avg_sq"] = state["exp_avg_sq"].to( device=p.data.device, dtype=torch.float ) if p.dtype == torch.float16: g_16.append(p.grad.data.float()) p_16.append(p.data.float()) orig_p_16.append(p.data) m_16.append(state["exp_avg"]) v_16.append(state["exp_avg_sq"]) elif p.dtype == torch.float32: g_32.append(p.grad.data) p_32.append(p.data) m_32.append(state["exp_avg"]) v_32.append(state["exp_avg_sq"]) else: raise RuntimeError("FusedAdam only support fp16 and fp32.") with torch.cuda.device(p.device): if len(g_16) > 0: multi_tensor_applier( self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16], group["lr"], beta1, beta2, group["eps"], group["step"], self.adam_w_mode, bias_correction, group["weight_decay"], ) for orig_p, p in zip(orig_p_16, p_16): orig_p.copy_(p.data) if len(g_32) > 0: multi_tensor_applier( self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32], group["lr"], beta1, beta2, group["eps"], group["step"], self.adam_w_mode, bias_correction, group["weight_decay"], ) return loss except ImportError: pass
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/fused_adam.py
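get_fused_adam_class() returns None when neither apex interface can be imported, so callers can fall back to a stock optimizer explicitly instead of catching an ImportError. A small sketch of that pattern, assuming fairseq is importable (the fused classes themselves still require apex and a GPU to actually step).

import torch
from fairseq.optim.fused_adam import get_fused_adam_class

device = "cuda" if torch.cuda.is_available() else "cpu"
params = [torch.nn.Parameter(torch.randn(10, device=device))]

fused_adam_cls = get_fused_adam_class()
if fused_adam_cls is not None and torch.cuda.is_available():
    # apex is installed: use the fused CUDA kernel
    optimizer = fused_adam_cls(params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8)
else:
    # no apex (or no GPU): plain PyTorch Adam
    optimizer = torch.optim.Adam(params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8)
print(type(optimizer).__name__)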
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.distributed as dist from fairseq.dataclass.configs import FairseqBMUFConfig from fairseq.dataclass.utils import gen_parser_from_dataclass from fairseq.optim.fairseq_optimizer import FairseqOptimizer class FairseqBMUF(FairseqOptimizer): """ Implements incremental block distributed data parallelism similar to https://ieeexplore.ieee.org/document/7472805 Paper title: Scalable training of deep learning machines by incremental block training with intra-block parallel optimization and blockwise model-update filtering """ def __init__(self, cfg: FairseqBMUFConfig, optimizer): super().__init__(cfg) self._optimizer = optimizer self._num_updates = 0 self.sync_iter = cfg.global_sync_iter self.block_momentum = cfg.block_momentum self.block_lr = cfg.block_lr self._reset_local_data() self.warmup_iteration = cfg.warmup_iterations self.use_nbm = cfg.use_nbm self.initial_state = self._optimizer.state_dict() self.average_sync = self.cfg.average_sync self.world_size = self.cfg.distributed_world_size @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" gen_parser_from_dataclass(parser, FairseqBMUFConfig()) @property def optimizer(self): return self._optimizer.optimizer @property def optimizer_config(self): return self._optimizer.optimizer_config def get_lr(self): return self._optimizer.get_lr() def set_lr(self, lr): self._optimizer.set_lr(lr) def state_dict(self): return self._optimizer.state_dict() def load_state_dict(self, state_dict, optimizer_overrides=None): self._optimizer.load_state_dict(state_dict, optimizer_overrides) self.initial_state = self._optimizer.state_dict() def multiply_grads(self, c): """Multiplies grads by a constant *c*.""" self._optimizer.multiply_grads(c) def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): """Clips gradient norm.""" return self._optimizer.clip_grad_norm(max_norm, aggregate_norm_fn) def average_params(self): self._optimizer.average_params() def _block_sync(self): if self.world_size <= 1: return # Update the global model using local models from all GPUs # (Step-1) Calculate grad between previously synced model and # currrent local model if self.block_momentum != 0: self._calc_grad() # (Step-2) Average gradient from all GPUs self._avg_grad_from_all_gpus() # (Step-3) Calculate global momentum and update the global model if self.block_momentum != 0: self._update_global_model() # (Step-4) Average local optimizer params if self.average_sync: self.average_params() def _is_warmup_end(self): # Check whether train iterations is equal to warmup iter if self.get_num_updates() == self.warmup_iteration: return True return False def _is_bmuf_iter(self): # Check whether train iterations is equal to bmuf sync iter if (self.get_num_updates() > self.warmup_iteration) and ( self.get_num_updates() % self.sync_iter == 0 ): return True return False def _warmup_sync(self, root_rank=0): if self.world_size <= 1: return # Broadcast the local model to all gpus for param in self.params: dist.broadcast(param.data, src=root_rank) # Update local optimizer state if self.average_sync: self._optimizer.average_params() else: self._optimizer.load_state_dict(self.initial_state) self._reset_local_data() def step(self, closure=None): """Performs a single optimization step.""" self._optimizer.step(closure) self.set_num_updates(self.get_num_updates() + 1) if 
self._is_warmup_end(): self._warmup_sync() elif self._is_bmuf_iter(): self._block_sync() def zero_grad(self): """Clears the gradients of all optimized parameters.""" self._optimizer.zero_grad() def get_num_updates(self): """Get the number of parameters updates.""" return self._num_updates def set_num_updates(self, num_updates): """Set the number of parameters updates.""" self._num_updates = num_updates @torch.no_grad() def _reset_local_data(self): # (Step-0) Initialize global momentum parameters and store global copy on each gpu self.global_params = [torch.zeros_like(p.data) for p in self.params] self.smoothed_grads = [p.data.new_zeros(p.data.size()) for p in self.params] self.grads = [p.data.new_zeros(p.data.size()) for p in self.params] # saving the global model locally for calculating gradient during bmuf sync for param, global_param in zip(self.params, self.global_params): global_param.copy_(param.data) @torch.no_grad() def _calc_grad(self): # global_params is basically the global copy from the previously finished # synchronisation. param.data is local parameter after block_sync_freq # for the local gpu. so grad is difference between previously synced # model and currrent local model. for index, (param, global_param) in enumerate( zip(self.params, self.global_params) ): self.grads[index] = global_param - param.data def _avg_grad_from_all_gpus(self): for index, param in enumerate(self.params): sync_para = param.data if self.block_momentum == 0 else self.grads[index] sync_para /= float(dist.get_world_size()) dist.all_reduce(sync_para, op=dist.ReduceOp.SUM) @torch.no_grad() def _update_global_model(self): for index, (param, global_param, smoothed_grad, grad) in enumerate( zip( self.params, self.global_params, self.smoothed_grads, # all gpus would share the same value of smoothed_grad, since it is # always computed on synchronized gradients. self.grads, ) ): # global_param is basically last syncrhornized parameter. though # smoothed_grad is local, all processes will have same value of # smoothed_grad and hence param is globally synchronized copy. # smoothed_grad(t) = BM * smoothed_grad(t-1) + BM_lr * grad(t) smoothed_grad = self.block_momentum * smoothed_grad + self.block_lr * grad param.data.copy_(global_param - smoothed_grad) # A Nesterov momentum here is to do a partial weight update before # calculating the gradient if self.use_nbm: param.data.copy_(param.data - self.block_momentum * smoothed_grad) # backup for the next synchronization. self.smoothed_grads[index] = smoothed_grad global_param.copy_(param.data)
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/bmuf.py
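The block-synchronisation math in _calc_grad / _update_global_model is easier to see on a toy example: each worker's "gradient" is the difference between the last synchronised model and its local model, those differences are averaged, folded into a momentum buffer, and subtracted from the global model. The sketch below replays those equations on plain tensors, with the all-reduce replaced by a hand-written average over two fake workers (illustrative only; no distributed setup, Nesterov block momentum omitted).

import torch

block_momentum, block_lr = 0.5, 1.0

global_param = torch.tensor([1.0, 1.0])        # last synchronised model
worker_params = [torch.tensor([0.6, 0.8]),     # local models after several local steps
                 torch.tensor([0.8, 0.6])]
smoothed_grad = torch.zeros(2)

# Step 1-2: per-worker "grad" = global - local, then average across workers
grads = [global_param - p for p in worker_params]
avg_grad = torch.stack(grads).mean(dim=0)

# Step 3: global momentum update and new global model
smoothed_grad = block_momentum * smoothed_grad + block_lr * avg_grad
new_global = global_param - smoothed_grad
print(new_global)   # tensor([0.7000, 0.7000]) for this toy example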
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. class DynamicLossScaler(object): def __init__( self, init_scale=2.0 ** 15, scale_factor=2.0, scale_window=2000, tolerance=0.0, threshold=None, min_loss_scale=1e-4, ): self.loss_scale = init_scale self.scale_factor = scale_factor self.scale_window = scale_window self.tolerance = tolerance self.threshold = threshold self._iter = 0 self._last_overflow_iter = -1 self._last_rescale_iter = -1 self._overflows_since_rescale = 0 self.min_loss_scale = min_loss_scale def scale(self, outputs): return self.loss_scale * outputs def update(self): if (self._iter - self._last_overflow_iter) % self.scale_window == 0: self.loss_scale *= self.scale_factor self._last_rescale_iter = self._iter self._iter += 1 def _decrease_loss_scale(self): self.loss_scale /= self.scale_factor if self.threshold is not None: self.loss_scale = max(self.loss_scale, self.threshold) def check_overflow(self, grad_norm=None, overflow=False): # detect inf and nan if overflow or grad_norm == float("inf") or grad_norm != grad_norm: # overflow has occured prev_scale = self.loss_scale iter_since_rescale = self._iter - self._last_rescale_iter self._last_overflow_iter = self._iter self._overflows_since_rescale += 1 pct_overflow = self._overflows_since_rescale / float(iter_since_rescale) if pct_overflow >= self.tolerance: self._decrease_loss_scale() self._last_rescale_iter = self._iter self._overflows_since_rescale = 0 if self.loss_scale <= self.min_loss_scale: # Use FloatingPointError as an uncommon error that parent # functions can safely catch to stop training. self.loss_scale = prev_scale raise FloatingPointError( ( "Minimum loss scale reached ({}). Your loss is probably exploding. " "Try lowering the learning rate, using gradient clipping or " "increasing the batch size." ).format(self.min_loss_scale) ) self._iter += 1 raise OverflowError("setting loss scale to: " + str(self.loss_scale))
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/dynamic_loss_scaler.py
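A usage sketch for DynamicLossScaler, assuming fairseq is importable: the trainer scales the loss before backward, calls check_overflow with the gradient norm (which, with the default tolerance, shrinks the scale and raises OverflowError so the step can be skipped), and calls update() after clean steps so the scale can grow again every scale_window iterations.

import torch
from fairseq.optim.dynamic_loss_scaler import DynamicLossScaler

scaler = DynamicLossScaler(init_scale=2 ** 7, scale_window=4)

for grad_norm in [1.0, 2.0, float("inf"), 1.5]:
    loss = torch.tensor(0.123)
    scaled_loss = scaler.scale(loss)          # this is what .backward() would be called on
    try:
        scaler.check_overflow(grad_norm)      # raises OverflowError on inf/nan grad norms
        scaler.update()                       # count a clean iteration
    except OverflowError:
        pass                                  # skip the optimizer step this iteration
    print(f"grad_norm={grad_norm}  scaled_loss={scaled_loss.item():.2f}  "
          f"loss_scale={scaler.loss_scale}")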
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.optim from . import LegacyFairseqOptimizer, register_optimizer @register_optimizer("adafactor") class FairseqAdafactor(LegacyFairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = Adafactor(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--adafactor-eps', default='(1e-30, 1e-3)', metavar="E", help='epsilons for Adafactor optimizer') parser.add_argument('--clip-threshold', type=float, default=1.0, metavar="C", help='threshold for clipping update root mean square') parser.add_argument('--decay-rate', type=float, default=-0.8, metavar="D", help='decay rate of the second moment estimator') parser.add_argument('--beta1', type=float, default=None, metavar="B", help='beta for first moment estimator. Optional') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') parser.add_argument('--scale-parameter', action='store_true', help='scale learning rate by root mean square of parameter') parser.add_argument('--relative-step', action='store_true', help='set learning rate to inverse square root of timestep,' 'otherwise use external learning rate') parser.add_argument('--warmup-init', action='store_true', help='use relative step for warm-up learning rate schedule') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. Note : Convergence issues empirically observed with fp16 on. Might require search for appropriate configuration. """ return { "lr": self.args.lr[0], "eps": eval(self.args.adafactor_eps), "clip_threshold": self.args.clip_threshold, "decay_rate": self.args.decay_rate, "beta1": self.args.beta1, "weight_decay": self.args.weight_decay, "scale_parameter": self.args.scale_parameter, # defaults to False "relative_step": self.args.relative_step, # defaults to False "warmup_init": self.args.warmup_init, } class Adafactor(torch.optim.Optimizer): """Implements Adafactor algorithm. This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost` (see https://arxiv.org/abs/1804.04235) Note that this optimizer internally adjusts the learning rate depending on the *scale_parameter*, *relative_step* and *warmup_init* options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and `relative_step=False`. 
Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): external learning rate (default: None) eps (tuple[float, float]): regularization constans for square gradient and parameter scale respectively (default: (1e-30, 1e-3)) clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0) decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8) beta1 (float): coefficient used for computing running averages of gradient (default: None) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True) relative_step (bool): if True, time-dependent learning rate is computed instead of external learning rate (default: True) warmup_init (bool): time-dependent learning rate computation depends on whether warm-up initialization is being used (default: False) """ def __init__( self, params, lr=None, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False, ): if lr is not None and relative_step: raise ValueError("Cannot combine manual lr and relative_step options") if warmup_init and not relative_step: raise ValueError("warmup_init requires relative_step=True") defaults = dict( lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init, ) super(Adafactor, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return False def _get_lr(self, param_group, param_state): rel_step_sz = param_group["lr"] if param_group["relative_step"]: min_step = ( 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2 ) rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"])) param_scale = 1.0 if param_group["scale_parameter"]: param_scale = max(param_group["eps"][1], param_state["RMS"]) return param_scale * rel_step_sz def _get_options(self, param_group, param_shape): factored = len(param_shape) >= 2 use_first_moment = param_group["beta1"] is not None return factored, use_first_moment def _rms(self, tensor): return tensor.norm(2) / (tensor.numel() ** 0.5) def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col): r_factor = ( (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)) .rsqrt_() .unsqueeze(-1) ) c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() return torch.mul(r_factor, c_factor) def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError("Adafactor does not support sparse gradients.") state = self.state[p] grad_shape = grad.shape factored, use_first_moment = self._get_options(group, grad_shape) # State Initialization if len(state) == 0: state["step"] = 0 if use_first_moment: # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(grad) if factored: state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad) state["exp_avg_sq_col"] = torch.zeros( grad_shape[:-2] + grad_shape[-1:] ).to(grad) else: state["exp_avg_sq"] = torch.zeros_like(grad) state["RMS"] = 0 else: if use_first_moment: state["exp_avg"] = state["exp_avg"].to(grad) if factored: state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad) state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad) else: state["exp_avg_sq"] = state["exp_avg_sq"].to(grad) p_data_fp32 = p.data if p.data.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state["step"] += 1 state["RMS"] = self._rms(p_data_fp32) group["lr"] = self._get_lr(group, state) beta2t = 1.0 - math.pow(state["step"], group["decay_rate"]) update = (grad ** 2) + group["eps"][0] if factored: exp_avg_sq_row = state["exp_avg_sq_row"] exp_avg_sq_col = state["exp_avg_sq_col"] exp_avg_sq_row.mul_(beta2t).add_( update.mean(dim=-1), alpha=1.0 - beta2t ) exp_avg_sq_col.mul_(beta2t).add_( update.mean(dim=-2), alpha=1.0 - beta2t ) # Approximation of exponential moving average of square of gradient update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) update.mul_(grad) else: exp_avg_sq = state["exp_avg_sq"] exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) update = exp_avg_sq.rsqrt().mul_(grad) update.div_( (self._rms(update) / group["clip_threshold"]).clamp_(min=1.0) ) update.mul_(group["lr"]) if use_first_moment: exp_avg = state["exp_avg"] exp_avg.mul_(group["beta1"]).add_(update, alpha=1 - group["beta1"]) update = exp_avg if group["weight_decay"] != 0: p_data_fp32.add_( p_data_fp32, alpha=-group["weight_decay"] * group["lr"] ) p_data_fp32.add_(-update) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) return loss
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/adafactor.py
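Adafactor's memory saving comes from storing only row and column means of the squared gradients and reconstructing the second-moment statistics as a rank-1 approximation, which is what _approx_sq_grad computes. A standalone numeric sketch of that factorisation on a single gradient (plain torch, not a call into the optimizer).

import torch

grad = torch.randn(4, 6)
update = grad ** 2 + 1e-30                 # squared gradient plus eps[0]

exp_avg_sq_row = update.mean(dim=-1)       # shape [4]: one statistic per row
exp_avg_sq_col = update.mean(dim=-2)       # shape [6]: one statistic per column

# reconstruct 1/sqrt(V_hat) from the two factored statistics, as in _approx_sq_grad
r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt().unsqueeze(-1)
c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
approx_rsqrt_v = r_factor * c_factor       # shape [4, 6], rank-1 approximation

full_rsqrt_v = update.rsqrt()              # what an unfactored optimizer would store per element
print((approx_rsqrt_v - full_rsqrt_v).abs().mean())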
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.optim from . import LegacyFairseqOptimizer, register_optimizer @register_optimizer("sgd") class SGD(LegacyFairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = torch.optim.SGD(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--momentum', default=0.0, type=float, metavar='M', help='momentum factor') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.args.lr[0], "momentum": self.args.momentum, "weight_decay": self.args.weight_decay, } @property def supports_flat_params(self): return True
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/sgd.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq import utils from fairseq.dataclass.utils import gen_parser_from_dataclass class FairseqOptimizer(object): def __init__(self, cfg): super().__init__() self.cfg = cfg @classmethod def add_args(cls, parser): """Add optimizer-specific arguments to the parser.""" dc = getattr(cls, "__dataclass", None) if dc is not None: gen_parser_from_dataclass(parser, dc()) @property def optimizer(self): """Return a torch.optim.optimizer.Optimizer instance.""" if not hasattr(self, "_optimizer"): raise NotImplementedError if not isinstance(self._optimizer, torch.optim.Optimizer): raise ValueError("_optimizer must be an instance of torch.optim.Optimizer") return self._optimizer @optimizer.setter def optimizer(self, optimizer): """Reset optimizer instance.""" if not hasattr(self, "_optimizer"): raise NotImplementedError if not isinstance(self._optimizer, torch.optim.Optimizer): raise ValueError("_optimizer must be an instance of torch.optim.Optimizer") self._optimizer = optimizer @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ raise NotImplementedError @property def params(self): """Return an iterable of the parameters held by the optimizer.""" for param_group in self.param_groups: for p in param_group["params"]: yield p @property def param_groups(self): return self.optimizer.param_groups def __getstate__(self): return self._optimizer.__getstate__() def get_lr(self): """Return the current learning rate.""" return self.param_groups[0]["lr"] def set_lr(self, lr): """Set the learning rate.""" for param_group in self.param_groups: param_group["lr"] = lr def state_dict(self): """Return the optimizer's state dict.""" return self.optimizer.state_dict() def load_state_dict(self, state_dict, optimizer_overrides=None): """Load an optimizer state dict. In general we should prefer the configuration of the existing optimizer instance (e.g., learning rate) over that found in the state_dict. This allows us to resume training from a checkpoint using a new set of optimizer args. """ self.optimizer.load_state_dict(state_dict) if optimizer_overrides is not None and len(optimizer_overrides) > 0: # override learning rate, momentum, etc. with latest values for group in self.param_groups: group.update(optimizer_overrides) def backward(self, loss): """Computes the sum of gradients of the given tensor w.r.t. 
graph leaves.""" loss.backward() def all_reduce_grads(self, module): """Manually all-reduce gradients (if required).""" if hasattr(module, "all_reduce_grads"): module.all_reduce_grads() def multiply_grads(self, c): """Multiplies grads by a constant *c*.""" for p in self.params: if p.grad is not None: if torch.is_tensor(c): c = c.to(p.grad.device) p.grad.data.mul_(c) def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): """Clips gradient norm.""" return utils.clip_grad_norm_(self.params, max_norm, aggregate_norm_fn) def step(self, closure=None, scale=1.0, groups=None): """Performs a single optimization step.""" if self.supports_step_with_scale: if self.supports_groups: self.optimizer.step(closure, scale=scale, groups=groups) else: self.optimizer.step(closure, scale=scale) else: if scale != 1.0: self.multiply_grads(1.0 / scale) if self.supports_groups: self.optimizer.step(closure, groups=groups) else: self.optimizer.step(closure) def zero_grad(self): """Clears the gradients of all optimized parameters.""" for p in self.params: p.grad = None self.optimizer.zero_grad() @property def supports_memory_efficient_fp16(self): if hasattr(self.optimizer, "supports_memory_efficient_fp16"): return self.optimizer.supports_memory_efficient_fp16 return False @property def supports_step_with_scale(self): if hasattr(self.optimizer, "supports_step_with_scale"): return self.optimizer.supports_step_with_scale return False @property def supports_groups(self): if hasattr(self.optimizer, "supports_groups"): return self.optimizer.supports_groups return False @property def supports_flat_params(self): """ Whether the optimizer supports collapsing of the model parameters/gradients into a single contiguous Tensor. """ if hasattr(self.optimizer, "supports_flat_params"): return self.optimizer.supports_flat_params return False def average_params(self): pass def broadcast_global_state_dict(self, state_dict): """ Broadcasts a global state dict to all ranks. Useful for optimizers that shard state between ranks. """ if hasattr(self.optimizer, "broadcast_global_state_dict"): return self.optimizer.broadcast_global_state_dict(state_dict) else: return state_dict class LegacyFairseqOptimizer(FairseqOptimizer): def __init__(self, args): self.args = args
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/fairseq_optimizer.py
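The subclassing contract is small: a wrapper stores a real torch.optim.Optimizer in self._optimizer and reports its construction kwargs through optimizer_config, so a resumed checkpoint can pick up new hyper-parameters. A hypothetical minimal subclass, assuming fairseq is importable (MySGD is illustrative and not part of fairseq).

from argparse import Namespace

import torch
from fairseq.optim.fairseq_optimizer import LegacyFairseqOptimizer


class MySGD(LegacyFairseqOptimizer):
    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = torch.optim.SGD(params, **self.optimizer_config)

    @property
    def optimizer_config(self):
        # values returned here override whatever is stored in a loaded checkpoint
        return {"lr": self.args.lr[0], "momentum": getattr(self.args, "momentum", 0.0)}


params = [torch.nn.Parameter(torch.randn(3))]
opt = MySGD(Namespace(lr=[0.1], momentum=0.9), params)
opt.backward(params[0].sum())   # FairseqOptimizer.backward just calls loss.backward()
opt.clip_grad_norm(1.0)
opt.step()
opt.zero_grad()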
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" import importlib import os from fairseq import registry from fairseq.optim.bmuf import FairseqBMUF # noqa from fairseq.optim.fairseq_optimizer import ( # noqa FairseqOptimizer, LegacyFairseqOptimizer, ) from fairseq.optim.amp_optimizer import AMPOptimizer from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer from fairseq.optim.shard import shard_ from omegaconf import DictConfig __all__ = [ "AMPOptimizer", "FairseqOptimizer", "FP16Optimizer", "MemoryEfficientFP16Optimizer", "shard_", ] ( _build_optimizer, register_optimizer, OPTIMIZER_REGISTRY, OPTIMIZER_DATACLASS_REGISTRY, ) = registry.setup_registry("--optimizer", base_class=FairseqOptimizer, required=True) def build_optimizer(cfg: DictConfig, params, *extra_args, **extra_kwargs): if all(isinstance(p, dict) for p in params): params = [t for p in params for t in p.values()] params = list(filter(lambda p: p.requires_grad, params)) return _build_optimizer(cfg, params, *extra_args, **extra_kwargs) # automatically import any Python files in the optim/ directory for file in sorted(os.listdir(os.path.dirname(__file__))): if file.endswith(".py") and not file.startswith("_"): file_name = file[: file.find(".py")] importlib.import_module("fairseq.optim." + file_name)
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.optim from . import LegacyFairseqOptimizer, register_optimizer @register_optimizer("adamax") class FairseqAdamax(LegacyFairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = Adamax(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--adamax-betas', default='(0.9, 0.999)', metavar='B', help='betas for Adam optimizer') parser.add_argument('--adamax-eps', type=float, default=1e-8, metavar='D', help='epsilon for Adam optimizer') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') parser.add_argument('--no-bias-correction', default=False, action='store_true', help='disable bias correction') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.args.lr[0], "betas": eval(self.args.adamax_betas), "eps": self.args.adamax_eps, "weight_decay": self.args.weight_decay, "bias_correction": not self.args.no_bias_correction, } class Adamax(torch.optim.Optimizer): """Implements Adamax algorithm (a variant of Adam based on infinity norm). It has been proposed in `Adam: A Method for Stochastic Optimization`__. Compared to the version in PyTorch, this version implements a fix for weight decay. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 2e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) bias_correction (bool, optional): enable bias correction (default: True) __ https://arxiv.org/abs/1412.6980 """ def __init__( self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, bias_correction=True, ): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, bias_correction=bias_correction, ) super(Adamax, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data.float() if grad.is_sparse: raise RuntimeError("Adamax does not support sparse gradients") p_data_fp32 = p.data if p.data.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state = self.state[p] # State initialization if len(state) == 0: state["step"] = 0 state["exp_avg"] = torch.zeros_like(p_data_fp32) state["exp_inf"] = torch.zeros_like(p_data_fp32) else: state["exp_avg"] = state["exp_avg"].to(p_data_fp32) state["exp_inf"] = state["exp_inf"].to(p_data_fp32) exp_avg, exp_inf = state["exp_avg"], state["exp_inf"] beta1, beta2 = group["betas"] eps = group["eps"] state["step"] += 1 # Update biased first moment estimate. exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # Update the exponentially weighted infinity norm. torch.max( exp_inf.mul_(beta2), grad.abs_(), out=exp_inf, ) step_size = group["lr"] if group["bias_correction"]: bias_correction = 1 - beta1 ** state["step"] step_size /= bias_correction if group["weight_decay"] != 0: p_data_fp32.add_( p_data_fp32, alpha=-group["weight_decay"] * group["lr"] ) p_data_fp32.addcdiv_(exp_avg, exp_inf.add(eps), value=-step_size) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) return loss
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/adamax.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import defaultdict from itertools import chain import torch from fairseq import optim from omegaconf import DictConfig from .dynamic_loss_scaler import DynamicLossScaler class _FP16OptimizerMixin(object): def __init__(self, *args, **kwargs): # forward __init__ call to the next class in mro(method resolution order) super().__init__(*args, **kwargs) self._multiply_factor = 1.0 @property def has_flat_params(self): return torch.is_tensor(self.fp32_params) or ( isinstance(self.fp32_params, dict) and all(torch.is_tensor(t) for t in self.fp32_params.values()) ) @classmethod def build_fp32_params(cls, args, params, flatten=True): # create FP32 copy of parameters and grads if flatten: is_pipeline_parallel = getattr( args, "pipeline_model_parallel", False ) and getattr(args, "distributed_no_spawn", False) total_param_size = sum(p.data.numel() for p in params) devices = [torch.cuda.current_device()] if is_pipeline_parallel: devices = list(set(args.pipeline_devices)) fp32_params = {} for device in devices: if is_pipeline_parallel: device_param_size = sum( p.data.numel() for p in params if p.device.index == device ) device_params = [p for p in params if p.device.index == device] else: device_param_size = total_param_size device_params = params fp32_params[device] = ( device_params[0].new(0).float().new(device_param_size) ) offset = 0 for p in device_params: numel = p.data.numel() fp32_params[device][offset : offset + numel].copy_(p.data.view(-1)) offset += numel fp32_params[device] = torch.nn.Parameter(fp32_params[device]) fp32_params[device].grad = fp32_params[device].data.new( device_param_size ) return fp32_params else: fp32_params = [] for p in params: p32 = torch.nn.Parameter(p.data.float()) if hasattr(p, "expert"): p32.expert = True elif hasattr(p, "base_expert"): p32.base_expert = True p32.grad = torch.zeros_like(p32.data) if hasattr(p, "param_group"): p32.param_group = p.param_group fp32_params.append(p32) return fp32_params def state_dict(self): """Return the optimizer's state dict.""" state_dict = self.fp32_optimizer.state_dict() if self.scaler is not None: state_dict["loss_scale"] = self.scaler.loss_scale return state_dict def load_state_dict(self, state_dict, optimizer_overrides=None): """Load an optimizer state dict. In general we should prefer the configuration of the existing optimizer instance (e.g., learning rate) over that found in the state_dict. This allows us to resume training from a checkpoint using a new set of optimizer args. """ if "loss_scale" in state_dict and self.scaler is not None: self.scaler.loss_scale = state_dict["loss_scale"] self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides) def backward(self, loss): """Computes the sum of gradients of the given tensor w.r.t. graph leaves. Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this function additionally dynamically scales the loss to avoid gradient underflow. 
""" if self.scaler is not None: loss = self.scaler.scale(loss) loss.backward() self._needs_sync = True def _sync_fp16_grads_to_fp32(self): if self._needs_sync: # copy FP16 grads to FP32 if self.has_flat_params: devices = list(self.fp32_params.keys()) device_params_dict = defaultdict(list) for p in self.fp16_params: if p.requires_grad: device_params_dict[p.device.index].append(p) for device in devices: device_params = device_params_dict[device] offset = 0 for p in device_params: grad_data = ( p.grad.data if p.grad is not None else p.data.new_zeros(p.data.shape) ) numel = grad_data.numel() self.fp32_params[device].grad.data[ offset : offset + numel ].copy_(grad_data.view(-1)) offset += numel else: for p, p32 in zip(self.fp16_params, self.fp32_params): if not p.requires_grad: continue if p.grad is not None: if p32.grad is None: p32.grad = p.grad.data.float() else: p32.grad.data.copy_(p.grad.data) else: p32.grad = torch.zeros_like(p.data, dtype=torch.float) self._needs_sync = False def _sync_fp32_params_to_fp16(self): # copy FP32 params back into FP16 model if self.has_flat_params: devices = list(self.fp32_params.keys()) device_params_dict = defaultdict(list) for p in self.fp16_params: device_params_dict[p.device.index].append(p) for device in devices: device_params = device_params_dict[device] offset = 0 for p in device_params: numel = p.data.numel() p.data.copy_( self.fp32_params[device] .data[offset : offset + numel] .view_as(p.data) ) offset += numel else: for p, p32 in zip(self.fp16_params, self.fp32_params): if not p.requires_grad: continue p.data.copy_(p32.data) def _unscale_grads(self): self._sync_fp16_grads_to_fp32() if ( # Skip the multiplication if it's a no-op (i.e., if _multiply_factor # is 1.0). At the same time, we want to avoid the device-to-host # transfer by comparing it to 1.0. Since _multiply_factor starts as # a Python float, we roughly assume that if it's a tensor then it's # probably not =1.0 anymore and we do the multiplication. Otherwise # we can safely check the value without a D2H transfer. 
torch.is_tensor(self._multiply_factor) or self._multiply_factor != 1.0 ): self.fp32_optimizer.multiply_grads(self._multiply_factor) self._multiply_factor = 1.0 def multiply_grads(self, c): """Multiplies grads by a constant ``c``.""" self._multiply_factor *= c def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): """Clips gradient norm and updates dynamic loss scaler.""" self._sync_fp16_grads_to_fp32() grad_norm = self._multiply_factor * self.fp32_optimizer.clip_grad_norm( 0, aggregate_norm_fn ) if self.scaler is not None: if grad_norm > max_norm > 0.0: self._multiply_factor *= max_norm / grad_norm self.scaler.check_overflow(grad_norm) elif max_norm > 0.0: clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1) self._multiply_factor *= clip_coef return grad_norm def step(self, closure=None, groups=None): """Performs a single optimization step.""" self._sync_fp16_grads_to_fp32() if getattr(self, "supports_step_with_scale", False): self.fp32_optimizer.step( closure, scale=(1.0 / self._multiply_factor), groups=groups ) else: self._unscale_grads() self.fp32_optimizer.step(closure, groups=groups) if self.scaler is not None: self.scaler.update() self._sync_fp32_params_to_fp16() def zero_grad(self): """Clears the gradients of all optimized parameters.""" for p in self.fp16_params: p.grad = None if self.has_flat_params: if torch.is_tensor(self.fp32_params): self.fp32_params.grad.zero_() elif isinstance(self.fp32_params, dict): for fp32_params in self.fp32_params.values(): fp32_params.grad.zero_() else: raise RuntimeError("self.fp32_params must be a tensor or dict") else: for p32 in self.fp32_params: if p32.grad is not None: p32.grad.zero_() self._needs_sync = False if self.scaler is not None: self._multiply_factor = 1.0 / float(self.scaler.loss_scale) class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer): """ Wrap an *optimizer* to support FP16 (mixed precision) training. 
""" def __init__(self, cfg: DictConfig, params, fp32_optimizer, fp32_params, **kwargs): super().__init__(cfg.optimizer) self.fp16_params = params self.fp32_optimizer = fp32_optimizer self.fp32_params = fp32_params if getattr(cfg.common, "fp16_scale_window", None) is None: if len(cfg.optimization.update_freq) > 1: raise ValueError( "--fp16-scale-window must be given explicitly when using a " "custom --update-freq schedule" ) data_parallel_size = int( cfg.distributed_training.distributed_world_size / cfg.common.model_parallel_size ) scale_window = int( 2 ** 14 / data_parallel_size / cfg.optimization.update_freq[0] ) else: scale_window = cfg.common.fp16_scale_window if not getattr(cfg.common, "bf16", False): self.scaler = DynamicLossScaler( init_scale=cfg.common.fp16_init_scale, scale_window=scale_window, tolerance=cfg.common.fp16_scale_tolerance, threshold=cfg.common.threshold_loss_scale, min_loss_scale=cfg.common.min_loss_scale, ) else: # disable loss scaling for bfloat16 self.scaler = None @classmethod def build_optimizer(cls, cfg: DictConfig, params, **kwargs): """ Args: cfg (omegaconf.DictConfig): fairseq args params (iterable): iterable of parameters to optimize """ flatten = not getattr(cfg.common, "fp16_no_flatten_grads", False) if getattr(cfg.common, "bf16", False): flatten = False # mixed precision is faster on TPUs without flat grads fp32_params = cls.build_fp32_params(cfg.optimizer, params, flatten=flatten) if flatten: fp32_optimizer = optim.build_optimizer(cfg.optimizer, [fp32_params]) else: fp32_optimizer = optim.build_optimizer(cfg.optimizer, fp32_params) if flatten and not fp32_optimizer.supports_flat_params: raise RuntimeError( f"chosen optimizer {fp32_optimizer.__class__.__name__} does not support flat params, please set --fp16-no-flatten-grads" ) return cls(cfg, params, fp32_optimizer, fp32_params, **kwargs) @property def optimizer(self): return self.fp32_optimizer.optimizer @optimizer.setter def optimizer(self, optimizer): self.fp32_optimizer.optimizer = optimizer @property def lr_scheduler(self): return getattr(self.fp32_optimizer, "lr_scheduler", None) @property def optimizer_config(self): return self.fp32_optimizer.optimizer_config def get_lr(self): return self.fp32_optimizer.get_lr() def set_lr(self, lr): self.fp32_optimizer.set_lr(lr) def all_reduce_grads(self, module): self.fp32_optimizer.all_reduce_grads(module) @property def supports_flat_params(self): return self.fp32_optimizer.supports_flat_params class _MemoryEfficientFP16OptimizerMixin(object): def __init__(self, *args, **kwargs): # forward __init__ call to the next class in MRO (method resolution order) super().__init__(*args, **kwargs) self._multiply_factor = 1.0 @property def has_flat_params(self): return False def state_dict(self): """Return the optimizer's state dict.""" state_dict = self.wrapped_optimizer.state_dict() if self.scaler is not None: state_dict["loss_scale"] = self.scaler.loss_scale return state_dict def load_state_dict(self, state_dict, optimizer_overrides=None): """Load an optimizer state dict. In general we should prefer the configuration of the existing optimizer instance (e.g., learning rate) over that found in the state_dict. This allows us to resume training from a checkpoint using a new set of optimizer args. 
""" if "loss_scale" in state_dict and self.scaler is not None: self.scaler.loss_scale = state_dict["loss_scale"] self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides) # Hack: PyTorch automatically casts the optimizer state to match the # type of the current parameters. But with --memory-efficient-fp16 the # params are FP16 while the optimizer state is FP32 and we don't want # to cast. A workaround is to manually copy back the original state # after the optimizer has been loaded. if not getattr(self.optimizer, "disable_mem_eff_fp16_loading_hack", False): groups = self.optimizer.param_groups saved_groups = state_dict["param_groups"] id_map = { old_id: p for old_id, p in zip( chain(*(g["params"] for g in saved_groups)), chain(*(g["params"] for g in groups)), ) } for k, v in state_dict["state"].items(): if k in id_map: param = id_map[k] self.optimizer.state[param] = v def backward(self, loss): """Computes the sum of gradients of the given tensor w.r.t. graph leaves. Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this function additionally dynamically scales the loss to avoid gradient underflow. """ if self.scaler is not None: loss = self.scaler.scale(loss) loss.backward() def _unscale_grads(self): if ( # Skip the multiplication if it's a no-op (i.e., if _multiply_factor # is 1.0). At the same time, we want to avoid the device-to-host # transfer by comparing it to 1.0. Since _multiply_factor starts as # a Python float, we roughly assume that if it's a tensor then it's # probably not =1.0 anymore and we do the multiplication. Otherwise # we can safely check the value without a D2H transfer. torch.is_tensor(self._multiply_factor) or self._multiply_factor != 1.0 ): self.wrapped_optimizer.multiply_grads(self._multiply_factor) self._multiply_factor = 1.0 def multiply_grads(self, c): """Multiplies grads by a constant *c*.""" self._multiply_factor *= c def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): """Clips gradient norm and updates dynamic loss scaler.""" max_norm = float(max_norm) grad_norm = self._multiply_factor * self.wrapped_optimizer.clip_grad_norm( 0, aggregate_norm_fn ) if self.scaler is not None: grad_norm_cpu = float(grad_norm) if grad_norm_cpu > max_norm > 0.0: self._multiply_factor *= max_norm / grad_norm_cpu # detect overflow and adjust loss scale self.scaler.check_overflow(grad_norm_cpu) elif max_norm > 0.0: clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1) self._multiply_factor *= clip_coef return grad_norm def step(self, closure=None, groups=None): """Performs a single optimization step.""" if getattr(self, "supports_step_with_scale", False): # NOTE(msb) optimizer divides by scale factor self.wrapped_optimizer.step( closure, scale=(1.0 / self._multiply_factor), groups=groups ) else: self._unscale_grads() self.wrapped_optimizer.step(closure, groups=groups) if self.scaler is not None: self.scaler.update() def zero_grad(self): """Clears the gradients of all optimized parameters.""" self.wrapped_optimizer.zero_grad() if self.scaler is not None: self._multiply_factor = 1.0 / float(self.scaler.loss_scale) else: self._multiply_factor = 1.0 @property def supports_flat_params(self): return self.wrapped_optimizer.supports_flat_params class MemoryEfficientFP16Optimizer( _MemoryEfficientFP16OptimizerMixin, optim.FairseqOptimizer ): """ Wrap an *optimizer* to support FP16 (mixed precision) training. Compared to :class:`fairseq.optim.FP16Optimizer`, this version does not maintain an FP32 copy of the model. 
We instead expect the optimizer to convert the gradients to FP32 internally and sync the results back to the FP16 model params. This significantly reduces memory usage but slightly increases the time spent in the optimizer. Since this wrapper depends on specific functionality in the wrapped optimizer (i.e., on-the-fly conversion of grads to FP32), only certain optimizers can be wrapped. This is determined by the *supports_memory_efficient_fp16* property. """ def __init__( self, cfg: DictConfig, params, optimizer, allow_unsupported=False, **kwargs ): if not allow_unsupported and not optimizer.supports_memory_efficient_fp16: raise ValueError( "Unsupported optimizer: {}".format(optimizer.__class__.__name__) ) super().__init__(getattr(cfg, "optimizer", None)) self.wrapped_optimizer = optimizer if getattr(cfg.common, "fp16_scale_window", None) is None: if len(cfg.optimization.update_freq) > 1: raise ValueError( "--fp16-scale-window must be given explicitly when using a " "custom --update-freq schedule" ) data_parallel_size = int( cfg.distributed_training.distributed_world_size / cfg.common.model_parallel_size ) scale_window = int( 2 ** 14 / data_parallel_size / cfg.optimization.update_freq[0] ) else: scale_window = cfg.common.fp16_scale_window if not getattr(cfg.common, "bf16", False): self.scaler = DynamicLossScaler( init_scale=cfg.common.fp16_init_scale, scale_window=scale_window, tolerance=cfg.common.fp16_scale_tolerance, threshold=cfg.common.threshold_loss_scale, min_loss_scale=cfg.common.min_loss_scale, ) else: # disable loss scaling for bfloat16 self.scaler = None @classmethod def build_optimizer(cls, cfg: DictConfig, params, **kwargs): """ Args: args (argparse.Namespace): fairseq args params (iterable): iterable of parameters to optimize """ fp16_optimizer = optim.build_optimizer(cfg.optimizer, params) return cls(cfg, params, fp16_optimizer, **kwargs) @property def optimizer(self): return self.wrapped_optimizer.optimizer @optimizer.setter def optimizer(self, optimizer): self.wrapped_optimizer.optimizer = optimizer @property def optimizer_config(self): return self.wrapped_optimizer.optimizer_config @property def lr_scheduler(self): return getattr(self.wrapped_optimizer, "lr_scheduler", None) def get_lr(self): return self.wrapped_optimizer.get_lr() def set_lr(self, lr): self.wrapped_optimizer.set_lr(lr) def all_reduce_grads(self, module): self.wrapped_optimizer.all_reduce_grads(module)
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/fp16_optimizer.py
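The FP16 optimizers in the file above fold loss-scale "unscaling", multiply_grads calls, and gradient clipping into a single _multiply_factor that is applied to the gradients only once, right before the parameter update. The following is a minimal standalone sketch of that bookkeeping; the DeferredScaler class and its numbers are illustrative inventions, not fairseq APIs.

# Illustrative sketch (not part of the repository): deferred gradient scaling.
import torch

class DeferredScaler:
    """Accumulate all pending gradient multipliers and apply them once."""

    def __init__(self, params, loss_scale=128.0):
        self.params = list(params)
        # start by undoing the loss scale that backward() baked into the grads
        self._multiply_factor = 1.0 / loss_scale

    def multiply_grads(self, c):
        self._multiply_factor *= c  # no kernel launch yet, just bookkeeping

    def clip_grad_norm(self, max_norm):
        total_norm = torch.norm(
            torch.stack([p.grad.norm() for p in self.params])
        ) * self._multiply_factor
        if max_norm > 0 and float(total_norm) > max_norm:
            self._multiply_factor *= max_norm / float(total_norm)
        return total_norm

    def step(self, optimizer):
        for p in self.params:
            p.grad.mul_(self._multiply_factor)  # single multiply over the grads
        self._multiply_factor = 1.0
        optimizer.step()

p = torch.nn.Parameter(torch.ones(3))
(p.sum() * 128.0).backward()              # pretend the loss was scaled by 128
scaler = DeferredScaler([p])
scaler.multiply_grads(1.0 / 4)            # e.g. divide by the number of workers
scaler.clip_grad_norm(0.1)
scaler.step(torch.optim.SGD([p], lr=0.1))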
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.optim from . import LegacyFairseqOptimizer, register_optimizer @register_optimizer("adagrad") class Adagrad(LegacyFairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.args.lr[0], "weight_decay": self.args.weight_decay, } @property def supports_flat_params(self): return False
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/adagrad.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Dict from fairseq.distributed import utils try: from fairscale.optim import OSS _has_fairscale = True except ImportError: _has_fairscale = False def shard_(optimizer, group): if not _has_fairscale: raise ImportError( "\n\nPlease install the fairscale package:" "\n\n pip install fairscale" ) class FairseqOSS(OSS): @property def disable_mem_eff_fp16_loading_hack(self): return True def __getattr__(self, name): if name.startswith("supports") and hasattr(self.optim, name): return getattr(self.optim, name) raise AttributeError( "'FairseqOSS' object has no attribute {0!r}".format(name) ) def broadcast_global_state_dict( self, state_dict: Dict[str, Any] ) -> Dict[str, Any]: """ Broadcasts the entire state_dict to all other ranks each rank is responsible to load their own partition of data """ return utils.broadcast_object( state_dict, src_rank=0, group=self.group, ) torch_optimizer = optimizer.optimizer optim_cls = type(torch_optimizer) optimizer.optimizer = FairseqOSS( torch_optimizer.param_groups, optim_cls, group=group, **optimizer.optimizer_config )
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/shard.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections.abc import Collection from dataclasses import dataclass, field from typing import List import torch from fairseq.dataclass import FairseqDataclass from fairseq.optim import FairseqOptimizer, register_optimizer from omegaconf import II, DictConfig try: import deepspeed has_deepspeed = True except ImportError: has_deepspeed = False def _get_cpu_adam(): try: from deepspeed.ops.op_builder import CPUAdamBuilder return CPUAdamBuilder().load() except ImportError: # fbcode from deepspeed.ops.adam import DeepSpeedCPUAdam as ds_opt_adam return ds_opt_adam @dataclass class FairseqCPUAdamConfig(FairseqDataclass): adam_betas: str = field( default="(0.9, 0.999)", metadata={"help": "betas for Adam optimizer"} ) adam_eps: float = field( default=1e-8, metadata={"help": "epsilon for Adam optimizer"} ) weight_decay: float = field(default=0.0, metadata={"help": "weight decay"}) fp16_adam_stats: bool = field( default=False, metadata={"help": "use FP16 stats (with automatic scaling)"} ) # TODO common vars below in parent lr: List[float] = II("optimization.lr") @register_optimizer("cpu_adam", dataclass=FairseqCPUAdamConfig) class FairseqCPUAdam(FairseqOptimizer): """Adam optimizer for fairseq, optimized for CPU tensors. Important note: this optimizer corresponds to the "AdamW" variant of Adam in its weight decay behavior. As such, it is most closely analogous to torch.optim.AdamW from PyTorch. """ def __init__(self, cfg: DictConfig, params): super().__init__(cfg) self._optimizer = CPUAdam(params, **self.optimizer_config) @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. 
""" return { "lr": self.cfg.lr[0] if isinstance(self.cfg.lr, Collection) else self.cfg.lr, "betas": eval(self.cfg.adam_betas), "eps": self.cfg.adam_eps, "weight_decay": self.cfg.weight_decay, "use_fp16_stats": self.cfg.fp16_adam_stats, } class CPUAdam(torch.optim.Optimizer): optimizer_id = 0 def __init__( self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, use_fp16_stats=False, ): defaults = { "lr": lr, "bias_correction": bias_correction, "betas": betas, "eps": eps, "weight_decay": weight_decay, } super().__init__(params, defaults) self.use_fp16_stats = use_fp16_stats self.FLOAT16_MAX = 65504.0 if not has_deepspeed: raise ImportError("Please install DeepSpeed: pip install deepspeed") self.opt_id = CPUAdam.optimizer_id CPUAdam.optimizer_id = CPUAdam.optimizer_id + 1 self.ds_opt_adam = _get_cpu_adam() adamw_mode = True self.ds_opt_adam.create_adam( self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode ) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() torch.cuda.synchronize() for group_id, group in enumerate(self.param_groups): for param_id, p in enumerate(group["params"]): if p.grad is None: continue state = self.state[p] if len(state) == 0: state["step"] = 0 dtype = torch.float16 if self.use_fp16_stats else p.data.dtype # gradient momentums state["exp_avg"] = torch.zeros_like( p.data, dtype=dtype, device="cpu" ) # gradient variances state["exp_avg_sq"] = torch.zeros_like( p.data, dtype=dtype, device="cpu" ) if self.use_fp16_stats: assert torch.is_floating_point(p.data) state["exp_avg_scale"] = 1.0 state["exp_avg_sq_scale"] = 1.0 exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] p_data_bak = p.data # backup of the original data pointer p.data = p.data.to(dtype=torch.float32, device="cpu") p.grad.data = p.grad.data.to(dtype=torch.float32, device="cpu") if self.use_fp16_stats: exp_avg = exp_avg.float() * state["exp_avg_scale"] exp_avg_sq = exp_avg_sq.float() * state["exp_avg_sq_scale"] state["step"] += 1 beta1, beta2 = group["betas"] self.ds_opt_adam.adam_update( self.opt_id, state["step"], group["lr"], beta1, beta2, group["eps"], group["weight_decay"], group["bias_correction"], p.data, p.grad.data, exp_avg, exp_avg_sq, ) if p_data_bak.data_ptr() != p.data.data_ptr(): p_data_bak.copy_(p.data) p.data = p_data_bak if self.use_fp16_stats: def inf_norm(t): return torch.norm(t, float("inf")) # from github.com/openai/jukebox/blob/master/jukebox/utils/fp16.py state["exp_avg_scale"], state["exp_avg_sq_scale"] = ( 1e-8 + inf_norm(exp_avg) / self.FLOAT16_MAX, 1e-8 + inf_norm(exp_avg_sq) / self.FLOAT16_MAX, ) state["exp_avg"], state["exp_avg_sq"] = ( (exp_avg / state["exp_avg_scale"]).half(), (exp_avg_sq / state["exp_avg_sq_scale"]).half(), ) return loss
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/cpu_adam.py
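When use_fp16_stats is enabled in the CPUAdam class above, exp_avg and exp_avg_sq are stored as FP16 tensors together with a per-tensor scale derived from the infinity norm, and dequantized before every update. Below is a hedged, self-contained sketch of that round trip; the helper names are made up for illustration.

# Illustrative sketch (not part of the repository): fp16 optimizer-state storage
# with a per-tensor scale, mirroring the inf-norm trick used in CPUAdam above.
import torch

FLOAT16_MAX = 65504.0

def quantize(state_fp32):
    scale = 1e-8 + torch.norm(state_fp32, float("inf")) / FLOAT16_MAX
    return (state_fp32 / scale).half(), scale

def dequantize(state_fp16, scale):
    return state_fp16.float() * scale

exp_avg = torch.randn(1024) * 3.0
packed, scale = quantize(exp_avg)
restored = dequantize(packed, scale)
print(torch.max(torch.abs(restored - exp_avg)))  # small quantization error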
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import math from collections.abc import Collection from dataclasses import dataclass, field from typing import Any, List import torch import torch.distributed as dist import torch.optim from fairseq.dataclass import FairseqDataclass from fairseq.optim import FairseqOptimizer, register_optimizer from fairseq.optim.fused_adam import get_fused_adam_class from omegaconf import II, OmegaConf logger = logging.getLogger(__name__) @dataclass class FairseqAdamConfig(FairseqDataclass): adam_betas: Any = field( default=(0.9, 0.999), metadata={"help": "betas for Adam optimizer"} ) adam_eps: float = field( default=1e-8, metadata={"help": "epsilon for Adam optimizer"} ) weight_decay: float = field(default=0.0, metadata={"help": "weight decay"}) use_old_adam: bool = field( default=False, metadata={"help": "Use fairseq.optim.adam.Adam"} ) fp16_adam_stats: bool = field( default=False, metadata={"help": "use FP16 stats (with automatic scaling)"} ) # TODO common vars below in parent tpu: bool = II("common.tpu") lr: List[float] = II("optimization.lr") @register_optimizer("adam", dataclass=FairseqAdamConfig) class FairseqAdam(FairseqOptimizer): """Adam optimizer for fairseq. Important note: this optimizer corresponds to the "AdamW" variant of Adam in its weight decay behavior. As such, it is most closely analogous to torch.optim.AdamW from PyTorch. """ def __init__(self, cfg: FairseqAdamConfig, params): super().__init__(cfg) fused_adam_cls = get_fused_adam_class() use_fused_adam = ( not getattr(cfg, "use_old_adam", False) and fused_adam_cls is not None and torch.cuda.is_available() ) if getattr(cfg, "tpu", False): if self.cfg.fp16_adam_stats: raise NotImplementedError("--fp16-adam-stats is only supported on GPU") # on TPUs we use the Adam defined here, since it # automatically casts gradients to FP32 self._optimizer = Adam(params, **self.optimizer_config) elif use_fused_adam: logger.info("using FusedAdam") self._optimizer = fused_adam_cls( params, use_fp16_stats=self.cfg.fp16_adam_stats, **self.optimizer_config ) else: if self.cfg.fp16_adam_stats: raise NotImplementedError( "--fp16-adam-stats is only supported with FusedAdamV1" ) self._optimizer = Adam(params, **self.optimizer_config) @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.cfg.lr[0] if isinstance(self.cfg.lr, Collection) else self.cfg.lr, "betas": eval(self.cfg.adam_betas) if isinstance(self.cfg.adam_betas, str) else OmegaConf.to_container(self.cfg.adam_betas), "eps": self.cfg.adam_eps, "weight_decay": self.cfg.weight_decay, } def average_params(self): """Reduce Params is only used during BMUF distributed training.""" state_dict = self.optimizer.state_dict() total_gpus = float(dist.get_world_size()) for _, value in state_dict["state"].items(): value["exp_avg"] /= total_gpus value["exp_avg_sq"] /= total_gpus dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM) dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM) class Adam(torch.optim.Optimizer): r"""Implements Adam algorithm. 
This implementation is modified from torch.optim.Adam based on: `Fixed Weight Decay Regularization in Adam` (see https://arxiv.org/abs/1711.05101) It has been proposed in `Adam: A Method for Stochastic Optimization`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, ): defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad ) super(Adam, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError( "Adam does not support sparse gradients, please consider SparseAdam instead" ) amsgrad = group.get("amsgrad", False) p_data_fp32 = p.data if p.data.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state = self.state[p] # State initialization if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p_data_fp32) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p_data_fp32) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. values state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32) else: state["exp_avg"] = state["exp_avg"].to(p_data_fp32) state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32) if amsgrad: state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to( p_data_fp32 ) exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] if amsgrad: max_exp_avg_sq = state["max_exp_avg_sq"] beta1, beta2 = group["betas"] state["step"] += 1 # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) # Use the max. for normalizing running avg. 
                    # of gradient (continues the comment on the previous line)
                    denom = max_exp_avg_sq.sqrt().add_(group["eps"])
                else:
                    denom = exp_avg_sq.sqrt().add_(group["eps"])

                bias_correction1 = 1 - beta1 ** state["step"]
                bias_correction2 = 1 - beta2 ** state["step"]
                step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1

                if group["weight_decay"] != 0:
                    p_data_fp32.add_(
                        p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
                    )

                p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)

                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p.data.copy_(p_data_fp32)

        return loss
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/adam.py
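The step() method above implements the AdamW-style update (decoupled weight decay applied directly to the parameters). Here is a tiny worked example of a single update using the same formulas; the learning rate, betas, and inputs are made-up values chosen only to make the arithmetic easy to follow.

# Illustrative sketch (not part of the repository): one Adam update by hand.
import math
import torch

lr, beta1, beta2, eps = 1e-3, 0.9, 0.999, 1e-8
p = torch.tensor([1.0, -2.0])
grad = torch.tensor([0.5, 0.25])

exp_avg = torch.zeros_like(p)
exp_avg_sq = torch.zeros_like(p)
step = 1

exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)               # first moment
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)  # second moment
denom = exp_avg_sq.sqrt().add_(eps)
bias_correction1 = 1 - beta1 ** step
bias_correction2 = 1 - beta2 ** step
step_size = lr * math.sqrt(bias_correction2) / bias_correction1
p.addcdiv_(exp_avg, denom, value=-step_size)

print(p)  # both coordinates move by roughly lr (~0.001) against the gradient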
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from collections import defaultdict from dataclasses import dataclass, field from typing import Dict, Any, List, Optional import torch.optim from fairseq.dataclass import FairseqDataclass from fairseq.optim import FairseqOptimizer, register_optimizer, _build_optimizer from fairseq.optim.lr_scheduler import FairseqLRScheduler, build_lr_scheduler from omegaconf import II, open_dict logger = logging.getLogger(__name__) @dataclass class OptimizerAndSchedulerConfig(FairseqDataclass): optimizer: Any = None lr_scheduler: Optional[Any] = None lr: List = II("optimization.lr") lr_float: Optional[ float ] = None # this makes it easier to sweep on learning rate with auto sweepers @dataclass class CompositeOptimizerConfig(FairseqDataclass): groups: Dict[str, Any] = field( default_factory=lambda: {}, metadata={ "help": "optimizer name -> optimizer OptimizerAndSchedulerConfig. " "Configures a different optimizer and (optionally) lr scheduler for each parameter group" }, ) @register_optimizer("composite", dataclass=CompositeOptimizerConfig) class FairseqCompositeOptimizer(FairseqOptimizer): optimizers: Dict[str, FairseqOptimizer] = {} lr_schedulers: Dict[str, FairseqLRScheduler] = {} lr_scheduler: FairseqLRScheduler = None _optimizer: torch.optim.Optimizer def __init__(self, cfg: CompositeOptimizerConfig, params): super().__init__(cfg) assert ( len(params) > 1 ), "Composite optimizer only works when there are multiple parameter groups (try fp16_no_flatten_grads: true)" groupped_params = defaultdict(list) for p in params: group = getattr(p, "param_group", "default") groupped_params[group].append(p) assert groupped_params.keys() == cfg.groups.keys(), ( f"Parameter groups {groupped_params.keys()} and optimizer groups {cfg.groups.keys()} are not the same! " "Try setting 'param_group' on your parameters in the model." ) for group, group_params in groupped_params.items(): group_cfg = cfg.groups[group] with open_dict(group_cfg): if group_cfg.lr_float is not None: group_cfg.optimizer.lr = [group_cfg.lr_float] group_cfg.lr_scheduler.lr = [group_cfg.lr_float] else: group_cfg.optimizer.lr = group_cfg.lr group_cfg.lr_scheduler.lr = group_cfg.lr self.optimizers[group] = _build_optimizer(group_cfg.optimizer, group_params) if group_cfg.lr_scheduler is not None: self.lr_schedulers[group] = build_lr_scheduler( group_cfg.lr_scheduler, self.optimizers[group] ) if len(self.lr_schedulers) > 0: assert len(self.lr_schedulers) == len(self.optimizers), ( f"Please provide an lr scheduler for each optimizer to use pass_through scheduler. 
" f"Optimizers: {self.optimizers}; Lr scheds: {self.lr_schedulers}" ) self.lr_scheduler = CompositeLRScheduler(self.lr_schedulers) self._optimizer = CompositeOptimizer(self.optimizers) @property def supports_groups(self): return True @property def param_groups(self): for opt in self.optimizers.values(): for group in opt.param_groups: yield group def get_lr(self): """Return the current learning rate.""" k = ( "default" if "default" in self.optimizers else next(iter(self.optimizers.keys())) ) return self.optimizers[k].param_groups[0]["lr"] def state_dict(self): """Return the LR scheduler state dict.""" return {k: s.state_dict() for k, s in self.optimizers.items()} def load_state_dict(self, state_dict, optimizer_overrides=None): """Load an LR scheduler state dict.""" for k, state in state_dict.items(): if k not in self.optimizers: # skip extra keys like "loss_scale" added by fp16 optimizer continue overrides = ( optimizer_overrides[k] if isinstance(optimizer_overrides, dict) and k in optimizer_overrides else None ) self.optimizers[k].load_state_dict(state, optimizer_overrides=overrides) class CompositeOptimizer(torch.optim.Optimizer): def __init__(self, optimizers: Dict[str, FairseqOptimizer]): self.optimizers = optimizers @property def supports_memory_efficient_fp16(self): return all(o.supports_memory_efficient_fp16 for o in self.optimizers.values()) @property def supports_flat_params(self): return all(o.supports_flat_params for o in self.optimizers.values()) def step(self, closure=None, groups=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for k, opt in self.optimizers.items(): if groups is None or k in groups: opt.step() return loss def zero_grad(self): for opt in self.optimizers.values(): opt.zero_grad() class CompositeLRScheduler(FairseqLRScheduler): def __init__(self, lr_schedulers): super().__init__(None, None) self.lr_schedulers = lr_schedulers def state_dict(self): """Return the LR scheduler state dict.""" return {k: s.state_dict() for k, s in self.lr_schedulers.items()} def load_state_dict(self, state_dict): """Load an LR scheduler state dict.""" for k, state in state_dict.items(): self.lr_schedulers[k].load_state_dict(state) def step_begin_epoch(self, epoch): """Update the learning rate at the beginning of the given epoch.""" for s in self.lr_schedulers.values(): s.step_begin_epoch(epoch) def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" for s in self.lr_schedulers.values(): s.step(epoch) def step_update(self, num_updates): """Update the learning rate after each update.""" return {k: s.step_update(num_updates) for k, s in self.lr_schedulers.items()}
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/composite.py
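The composite optimizer above routes each parameter by a param_group attribute set on the tensor itself; anything without the attribute falls into the "default" group. A hedged sketch of how a model might tag its parameters before they reach this optimizer follows; the two-layer model and group names are invented for illustration.

# Illustrative sketch (not part of the repository): tagging parameters so the
# composite optimizer can place them in different groups.
import torch.nn as nn

model = nn.Sequential(nn.Linear(16, 16), nn.Linear(16, 4))

for p in model[0].parameters():
    p.param_group = "encoder"   # matched against cfg.groups["encoder"]
for p in model[1].parameters():
    p.param_group = "default"   # matched against cfg.groups["default"]

params = list(model.parameters())
# FairseqCompositeOptimizer(cfg, params) would now build one optimizer
# (and optionally one lr scheduler) per group.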
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.optim import LegacyFairseqOptimizer, register_optimizer @register_optimizer("lamb") class FairseqLAMB(LegacyFairseqOptimizer): """LAMB optimizer.""" def __init__(self, args, params): super().__init__(args) try: from apex.optimizers import FusedLAMB self._optimizer = FusedLAMB(params, **self.optimizer_config) except ImportError: raise ImportError("Please install apex to use LAMB optimizer") @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--lamb-betas', default='(0.9, 0.999)', metavar='B', help='betas for LAMB optimizer') parser.add_argument('--lamb-eps', type=float, default=1e-8, metavar='D', help='epsilon for LAMB optimizer') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.args.lr[0], "betas": eval(self.args.lamb_betas), "eps": self.args.lamb_eps, "weight_decay": self.args.weight_decay, } @property def supports_flat_params(self): return False
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/fused_lamb.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.optim from . import LegacyFairseqOptimizer, register_optimizer @register_optimizer("adadelta") class Adadelta(LegacyFairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO', help='coefficient used for computing a running average of squared gradients') parser.add_argument('--adadelta-eps', type=float, default=1e-6, metavar='EPS', help='term added to the denominator to improve numerical stability') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.args.lr[0], "rho": self.args.adadelta_rho, "eps": self.args.adadelta_eps, "weight_decay": self.args.weight_decay, } @property def supports_flat_params(self): return True
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/adadelta.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class PassThroughScheduleConfig(FairseqDataclass): pass @register_lr_scheduler("pass_through", dataclass=PassThroughScheduleConfig) class PassThroughScheduleSchedule(FairseqLRScheduler): """Delegate lr scheduling to the optimizer.""" def __init__(self, cfg: PassThroughScheduleConfig, optimizer): super().__init__(cfg, optimizer) assert ( hasattr(optimizer, "lr_scheduler") and optimizer.lr_scheduler is not None ), "Pass-through schedule can only be used with optimizers with their own schedulers" def state_dict(self): return self.optimizer.lr_scheduler.state_dict() def load_state_dict(self, state_dict): self.optimizer.lr_scheduler.load_state_dict(state_dict) def step_begin_epoch(self, epoch): """Update the learning rate at the beginning of the given epoch.""" return self.optimizer.lr_scheduler.step_begin_epoch(epoch) def step_update(self, num_updates): """Update the learning rate after each update.""" return self.optimizer.lr_scheduler.step_update(num_updates)
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/lr_scheduler/pass_through.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from . import LegacyFairseqLRScheduler, register_lr_scheduler import logging import ast logger = logging.getLogger(__name__) logger.setLevel(logging.WARNING) @register_lr_scheduler("manual") class ManualSchedule(LegacyFairseqLRScheduler): """Decay the LR on a manual schedule.""" def __init__(self, args, optimizer): super().__init__(args, optimizer) self.epoch2lr = self.parse_manuallr_args(args.epoch2lr) self.update2lr = self.parse_manuallr_args(args.update2lr) logger.info("@@@ ManualSchedule epoch2lr={}".format(self.epoch2lr)) logger.info("@@@ ManualSchedule update2lr={}".format(self.update2lr)) if 1 in self.epoch2lr: self.lr = self.epoch2lr[1] elif 1 in self.update2lr: self.lr = self.update2lr[1] else: self.lr = args.lr[0] self.optimizer.set_lr(self.lr) # Set the beginning of the epoch. def parse_manuallr_args(self, lr_args_str): lr_dict = ast.literal_eval(lr_args_str.replace(" ", "")) if not isinstance(lr_dict, dict): raise ValueError("epoch2lr/update2lr must be abel to evaluated to a dict") lr_args = {} logger.info("@@@ after parsing input dictionary lr_dict = {}".format(lr_dict)) for key, val in lr_dict.items(): if "," in key: for k in key.split(","): lr_args[int(k)] = float(val) elif "-" in key: s = int(key.split("-")[0]) e = int(key.split("-")[1]) for k in range(s, e + 1, 1): lr_args[k] = float(val) else: lr_args[int(key)] = float(val) return lr_args @staticmethod def add_args(parser): """Add arguments to the parser for this LR scheduler.""" # fmt: off parser.add_argument( "--epoch2lr", type=str, metavar="DICT", default="{}", help="a dictionary used to set lr for each epoch manually", ) parser.add_argument( "--update2lr", type=str, metavar="DICT", default="{}", help="a dictionary used to set lr for each update manually", ) # fmt: on def state_dict(self): return {"lr": self.lr} def load_state_dict(self, state_dict): if "lr" in state_dict: self.lr = state_dict["lr"] def get_next_lr(self, epoch): manual_keys = [k for k in self.epoch2lr if k <= epoch] if manual_keys: manual_lr = self.epoch2lr[max(manual_keys)] else: logger.warning( "@@@ epoch={} does not exist in manual lr input. epoch2lr={}...".format( epoch, list(self.epoch2lr.items())[ : min(10, len(self.epoch2lr.keys()) - 1) ], ) ) manual_lr = self.optimizer.get_lr() return manual_lr def step_begin_epoch(self, epoch): """Update the learning rate at the beginning of the given epoch.""" self.lr = self.get_next_lr(epoch) self.optimizer.set_lr(self.lr) return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" manual_keys = [k for k in self.update2lr if k <= num_updates] if manual_keys: manual_lr = self.update2lr[max(manual_keys)] else: logger.warning( "epoch={} does not exist in manual lr input update2lr={}...".format( num_updates, list(self.update2lr.items())[ : min(10, len(self.update2lr.keys()) - 1) ], ) ) manual_lr = self.optimizer.get_lr() self.optimizer.set_lr(manual_lr) return self.optimizer.get_lr()
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/lr_scheduler/manual_lr_scheduler.py
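parse_manuallr_args above accepts a dict literal whose keys may be single epochs, comma-separated lists, or inclusive ranges written as "a-b". The following is a small sketch of the same expansion logic on a made-up input; the expand helper is illustrative, not part of the scheduler.

# Illustrative sketch (not part of the repository): expanding a manual
# epoch->lr specification of the form used by --epoch2lr / --update2lr.
import ast

def expand(spec: str) -> dict:
    raw = ast.literal_eval(spec.replace(" ", ""))
    out = {}
    for key, val in raw.items():
        key = str(key)
        if "," in key:
            for k in key.split(","):
                out[int(k)] = float(val)
        elif "-" in key:
            start, end = (int(x) for x in key.split("-"))
            for k in range(start, end + 1):
                out[k] = float(val)
        else:
            out[int(key)] = float(val)
    return out

print(expand("{'1-3': 1e-4, '4,6': 5e-5, 8: 1e-5}"))
# {1: 0.0001, 2: 0.0001, 3: 0.0001, 4: 5e-05, 6: 5e-05, 8: 1e-05}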
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from typing import Optional, List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class FixedLRScheduleConfig(FairseqDataclass): force_anneal: Optional[int] = field( default=None, metadata={"help": "force annealing at specified epoch"}, ) lr_shrink: float = field( default=0.1, metadata={"help": "shrink factor for annealing, lr_new = (lr * lr_shrink)"}, ) warmup_updates: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) lr: List[float] = II("optimization.lr") @register_lr_scheduler("fixed", dataclass=FixedLRScheduleConfig) class FixedLRSchedule(FairseqLRScheduler): """Decay the LR on a fixed schedule.""" def __init__(self, cfg: FixedLRScheduleConfig, optimizer): super().__init__(cfg, optimizer) self.lr = cfg.lr[0] if cfg.warmup_updates > 0: self.warmup_factor = 1.0 / cfg.warmup_updates else: self.warmup_factor = 1 def state_dict(self): return {"lr": self.lr} def load_state_dict(self, state_dict): if "lr" in state_dict: self.lr = state_dict["lr"] def get_next_lr(self, epoch): lrs = self.cfg.lr if self.cfg.force_anneal is None or epoch < self.cfg.force_anneal: # use fixed LR schedule next_lr = lrs[min(epoch - 1, len(lrs) - 1)] else: # annneal based on lr_shrink next_lr = lrs[-1] * self.cfg.lr_shrink ** ( epoch + 1 - self.cfg.force_anneal ) return next_lr def step_begin_epoch(self, epoch): """Update the learning rate at the beginning of the given epoch.""" self.lr = self.get_next_lr(epoch) self.optimizer.set_lr(self.warmup_factor * self.lr) return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if self.cfg.warmup_updates > 0 and num_updates < self.cfg.warmup_updates: self.warmup_factor = (num_updates + 1) / float(self.cfg.warmup_updates) self.optimizer.set_lr(self.warmup_factor * self.lr) else: self.optimizer.set_lr(self.lr) return self.optimizer.get_lr()
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/lr_scheduler/fixed_schedule.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from typing import List import torch.optim.lr_scheduler from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class ReduceLROnPlateauLRScheduleConfig(FairseqDataclass): lr_shrink: float = field( default=0.1, metadata={"help": "shrink factor for annealing"} ) lr_threshold: float = field( default=1e-4, metadata={ "help": ( "threshold for measuring the new optimum, to only focus on " "significant changes" ) }, ) lr_patience: int = field( default=0, metadata={ "help": ( "number of epochs with no improvement after which learning rate will " "be reduced" ) }, ) warmup_updates: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) warmup_init_lr: float = field( default=-1, metadata={ "help": "initial learning rate during warmup phase; default is cfg.lr" }, ) lr: List[float] = II("optimization.lr") maximize_best_checkpoint_metric: bool = II( "checkpoint.maximize_best_checkpoint_metric" ) @register_lr_scheduler( "reduce_lr_on_plateau", dataclass=ReduceLROnPlateauLRScheduleConfig ) class ReduceLROnPlateauLRSchedule(FairseqLRScheduler): """ Decay the LR by a factor every time the validation loss plateaus. Also comes with optional warmup phase, where we linearly increase the learning rate from some initial learning rate (``--warmup-init-lr``) until the configured learning rate (``--lr``). Thereafter the lr is adjusted according to original reduce_on_plateau scheme. During warmup:: lrs = torch.linspace( cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates ) lr = lrs[update_num] """ def __init__(self, cfg: ReduceLROnPlateauLRScheduleConfig, optimizer): super().__init__(cfg, optimizer) if len(cfg.lr) > 1: raise ValueError( "Cannot use a fixed learning rate schedule with reduce_lr_on_plateau." " Consider --lr-scheduler=fixed instead." 
) self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( self.optimizer.optimizer, patience=cfg.lr_patience, factor=cfg.lr_shrink, mode="max" if cfg.maximize_best_checkpoint_metric else "min", threshold=cfg.lr_threshold, ) warmup_end_lr = cfg.lr[0] # if no warm up, sets initial lr to be cfg.lr[0] if cfg.warmup_init_lr < 0: cfg.warmup_init_lr = 0 if cfg.warmup_updates > 0 else warmup_end_lr # linearly warmup for the first cfg.warmup_updates if cfg.warmup_updates > 0: self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates # this flag is either set from arg when no warm up, or set by # step_update() when warmup finishes self.warmup_end = True if cfg.warmup_updates <= 0 else False # initial learning rate # this self.lr is used only during init and/or warm up period self.lr = warmup_end_lr if self.warmup_end else cfg.warmup_init_lr self.optimizer.set_lr(self.lr) def state_dict(self): """Return the LR scheduler state dict.""" return { "best": self.lr_scheduler.best, "last_epoch": self.lr_scheduler.last_epoch, } def load_state_dict(self, state_dict): """Load an LR scheduler state dict.""" self.lr_scheduler.best = state_dict["best"] if "last_epoch" in state_dict: self.lr_scheduler.last_epoch = state_dict["last_epoch"] def step(self, epoch, val_loss=None): """ Update the learning rate at the end of the given epoch if warmup finishes otherwise no update of lr on epoch boundaries """ if val_loss is not None and self.warmup_end is True: self.lr_scheduler.step(val_loss) else: self.lr_scheduler.last_epoch = epoch return self.optimizer.get_lr() def step_update(self, num_updates): """ Update the learning rate after each update.""" # if there is warmup if self.cfg.warmup_updates > 0: if num_updates <= self.cfg.warmup_updates: self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step self.optimizer.set_lr(self.lr) else: if self.warmup_end is False: self.warmup_end = True # else do nothing return self.optimizer.get_lr()
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py
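Once warmup finishes, the scheduler above simply delegates to PyTorch's ReduceLROnPlateau. A minimal sketch of that post-warmup behaviour using the standard torch scheduler directly is shown below; the loss values and hyperparameters are made up, and the printed values assume torch's default cooldown of 0.

# Illustrative sketch (not part of the repository): the post-warmup behaviour
# delegates to torch.optim.lr_scheduler.ReduceLROnPlateau.
import torch

p = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([p], lr=0.1)
sched = torch.optim.lr_scheduler.ReduceLROnPlateau(
    opt, mode="min", factor=0.1, patience=0, threshold=1e-4
)

for val_loss in [1.0, 0.5, 0.5, 0.5]:   # the loss stops improving after epoch 2
    sched.step(val_loss)
    print(opt.param_groups[0]["lr"])
# 0.1, 0.1, 0.010..., 0.001...  -> the lr shrinks once the metric plateaus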
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" import importlib import os from fairseq import registry from fairseq.optim.lr_scheduler.fairseq_lr_scheduler import ( # noqa FairseqLRScheduler, LegacyFairseqLRScheduler, ) from omegaconf import DictConfig ( build_lr_scheduler_, register_lr_scheduler, LR_SCHEDULER_REGISTRY, LR_SCHEDULER_DATACLASS_REGISTRY, ) = registry.setup_registry( "--lr-scheduler", base_class=FairseqLRScheduler, default="fixed" ) def build_lr_scheduler(cfg: DictConfig, optimizer): return build_lr_scheduler_(cfg, optimizer) # automatically import any Python files in the optim/lr_scheduler/ directory for file in sorted(os.listdir(os.path.dirname(__file__))): if file.endswith(".py") and not file.startswith("_"): file_name = file[: file.find(".py")] importlib.import_module("fairseq.optim.lr_scheduler." + file_name)
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/lr_scheduler/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from typing import Optional, List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class PolynomialDecayLRScheduleConfig(FairseqDataclass): warmup_updates: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) force_anneal: Optional[int] = field( default=None, metadata={"help": "force annealing at specified epoch"}, ) end_learning_rate: float = field( default=0.0, metadata={"help": "learning rate to decay to"}, ) power: float = field( default=1.0, metadata={"help": "decay exponent"}, ) total_num_update: float = field( default=II("optimization.max_update"), metadata={"help": "total number of updates over which to decay learning rate"}, ) lr: List[float] = II("optimization.lr") @register_lr_scheduler("polynomial_decay", dataclass=PolynomialDecayLRScheduleConfig) class PolynomialDecayLRSchedule(FairseqLRScheduler): """Decay the LR on a fixed schedule.""" def __init__(self, cfg: PolynomialDecayLRScheduleConfig, optimizer): super().__init__(cfg, optimizer) assert cfg.total_num_update > 0 self.lr = cfg.lr[0] if cfg.warmup_updates > 0: self.warmup_factor = 1.0 / cfg.warmup_updates else: self.warmup_factor = 1 self.end_learning_rate = cfg.end_learning_rate self.total_num_update = cfg.total_num_update self.power = cfg.power self.optimizer.set_lr(self.warmup_factor * self.lr) def get_next_lr(self, epoch): lrs = self.cfg.lr if self.cfg.force_anneal is None or epoch < self.cfg.force_anneal: # use fixed LR schedule next_lr = lrs[min(epoch, len(lrs) - 1)] else: # annneal based on lr_shrink next_lr = self.optimizer.get_lr() return next_lr def step_begin_epoch(self, epoch): """Update the learning rate at the beginning of the given epoch.""" self.lr = self.get_next_lr(epoch) self.optimizer.set_lr(self.warmup_factor * self.lr) return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if self.cfg.warmup_updates > 0 and num_updates <= self.cfg.warmup_updates: self.warmup_factor = num_updates / float(self.cfg.warmup_updates) lr = self.warmup_factor * self.lr elif num_updates >= self.total_num_update: lr = self.end_learning_rate else: warmup = self.cfg.warmup_updates lr_range = self.lr - self.end_learning_rate pct_remaining = 1 - (num_updates - warmup) / ( self.total_num_update - warmup ) lr = lr_range * pct_remaining ** (self.power) + self.end_learning_rate self.optimizer.set_lr(lr) return self.optimizer.get_lr()
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/lr_scheduler/polynomial_decay_schedule.py
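After warmup, the schedule above interpolates from the peak learning rate down to end_learning_rate with exponent power. A standalone sketch of the same formula follows; the step counts and learning rates are made-up example values.

# Illustrative sketch (not part of the repository): the polynomial decay curve.
def poly_lr(num_updates, lr=5e-4, end_lr=0.0, warmup=1000, total=10000, power=1.0):
    if warmup > 0 and num_updates <= warmup:
        return lr * num_updates / warmup
    if num_updates >= total:
        return end_lr
    pct_remaining = 1 - (num_updates - warmup) / (total - warmup)
    return (lr - end_lr) * pct_remaining ** power + end_lr

for step in (0, 500, 1000, 5500, 10000):
    print(step, poly_lr(step))
# 0 -> 0.0, 500 -> 2.5e-4, 1000 -> 5e-4, 5500 -> 2.5e-4, 10000 -> 0.0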
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections.abc import Collection from dataclasses import dataclass, field from typing import List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class InverseSquareRootLRScheduleConfig(FairseqDataclass): warmup_updates: int = field( default=4000, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) warmup_init_lr: float = field( default=-1, metadata={ "help": "initial learning rate during warmup phase; default is cfg.lr" }, ) lr: List[float] = II("optimization.lr") @register_lr_scheduler("inverse_sqrt", dataclass=InverseSquareRootLRScheduleConfig) class InverseSquareRootSchedule(FairseqLRScheduler): """Decay the LR based on the inverse square root of the update number. We also support a warmup phase where we linearly increase the learning rate from some initial learning rate (``--warmup-init-lr``) until the configured learning rate (``--lr``). Thereafter we decay proportional to the number of updates, with a decay factor set to align with the configured learning rate. During warmup:: lrs = torch.linspace(cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates) lr = lrs[update_num] After warmup:: decay_factor = cfg.lr * sqrt(cfg.warmup_updates) lr = decay_factor / sqrt(update_num) """ def __init__(self, cfg: InverseSquareRootLRScheduleConfig, optimizer): super().__init__(cfg, optimizer) if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1: raise ValueError( "Cannot use a fixed learning rate schedule with inverse_sqrt." " Consider --lr-scheduler=fixed instead." ) warmup_end_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr if cfg.warmup_init_lr < 0: cfg.warmup_init_lr = 0 if cfg.warmup_updates > 0 else warmup_end_lr # linearly warmup for the first cfg.warmup_updates self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates # then, decay prop. to the inverse square root of the update number self.decay_factor = warmup_end_lr * cfg.warmup_updates ** 0.5 # initial learning rate self.lr = cfg.warmup_init_lr self.optimizer.set_lr(self.lr) def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if num_updates < self.cfg.warmup_updates: self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step else: self.lr = self.decay_factor * num_updates ** -0.5 self.optimizer.set_lr(self.lr) return self.lr
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py
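The docstring above gives the closed form for the inverse-sqrt schedule: linear warmup to the peak, then decay proportional to 1/sqrt(update_num). A tiny sketch evaluating it at a couple of points, with made-up hyperparameters:

# Illustrative sketch (not part of the repository): inverse-sqrt decay values.
def inverse_sqrt_lr(num_updates, lr=5e-4, warmup_updates=4000, warmup_init_lr=1e-7):
    if num_updates < warmup_updates:
        step = (lr - warmup_init_lr) / warmup_updates
        return warmup_init_lr + num_updates * step
    decay_factor = lr * warmup_updates ** 0.5
    return decay_factor / num_updates ** 0.5

print(inverse_sqrt_lr(4000))    # 5e-4 (peak, reached at the end of warmup)
print(inverse_sqrt_lr(16000))   # 2.5e-4 (half the peak at 4x the warmup steps)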
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from argparse import Namespace from fairseq.dataclass.utils import gen_parser_from_dataclass from fairseq.optim import FairseqOptimizer class FairseqLRScheduler(object): def __init__(self, cfg, optimizer): super().__init__() # DS: disable check to support wrapping deepspeed optimizer wrapper # if optimizer is not None and not isinstance(optimizer, FairseqOptimizer): # raise ValueError("optimizer must be an instance of FairseqOptimizer") self.cfg = cfg self.optimizer = optimizer self.best = None @classmethod def add_args(cls, parser): """Add arguments to the parser for this LR scheduler.""" dc = getattr(cls, "__dataclass", None) if dc is not None: gen_parser_from_dataclass(parser, dc()) def state_dict(self): """Return the LR scheduler state dict.""" return {"best": self.best} def load_state_dict(self, state_dict): """Load an LR scheduler state dict.""" self.best = state_dict["best"] def step_begin_epoch(self, epoch): """Update the learning rate at the beginning of the given epoch.""" pass def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" if val_loss is not None: if self.best is None: self.best = val_loss else: self.best = min(self.best, val_loss) def step_update(self, num_updates): """Update the learning rate after each update.""" return self.optimizer.get_lr() class LegacyFairseqLRScheduler(FairseqLRScheduler): def __init__(self, args: Namespace, optimizer): if not isinstance(optimizer, FairseqOptimizer): raise ValueError("optimizer must be an instance of FairseqOptimizer") self.args = args self.optimizer = optimizer self.best = None
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass, field from typing import Optional, List, Tuple from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class TriStageLRScheduleConfig(FairseqDataclass): warmup_steps: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) hold_steps: int = field( default=0, metadata={"help": "steps in hold stage"}, ) decay_steps: int = field( default=0, metadata={"help": "steps in decay stages"}, ) phase_ratio: Optional[Tuple[float, float, float]] = field( default=None, metadata={ "help": ( "if set, automatically sets warmup/hold/decay steps to the ratio " "specified here from max_updates. the ratios must add up to 1.0" ) }, ) init_lr_scale: float = field( default=0.01, metadata={"help": "initial learning rate scale during warmup phase"}, ) final_lr_scale: float = field( default=0.01, metadata={"help": "final learning rate scale"}, ) max_update: float = II("optimization.max_update") lr: List[float] = II("optimization.lr") @register_lr_scheduler("tri_stage", dataclass=TriStageLRScheduleConfig) class TriStageLRSchedule(FairseqLRScheduler): """Tristage learning rate schedulr Implement the learning rate scheduler in https://arxiv.org/pdf/1904.08779.pdf Similar to inverse_squre_root scheduler, but tri_stage learning rate employs three stages LR scheduling: - warmup stage, starting from `lr` * `init_lr_scale`, linearly increased to `lr` in `warmup_steps` iterations - hold stage, after `warmup_steps`, keep the LR as `lr` for `hold_steps` iterations - decay stage, after hold stage, decay LR exponetially to `lr` * `final_lr_scale` in `decay_steps`; after that LR is keep as `final_lr_scale` * `lr` During warmup:: init_lr = cfg.init_lr_scale * cfg.lr lrs = torch.linspace(init_lr, cfg.lr, cfg.warmup_steps) lr = lrs[update_num] During hold:: lr = cfg.lr During decay:: decay_factor = - math.log(cfg.final_lr_scale) / cfg.decay_steps lr = cfg.lr * exp(- (update_num - warmup_steps - decay_steps) * decay_factor) After that:: lr = cfg.lr * cfg.final_lr_scale """ def __init__(self, cfg: TriStageLRScheduleConfig, optimizer): super().__init__(cfg, optimizer) if len(cfg.lr) > 1: raise ValueError( "Cannot use a fixed learning rate schedule with tri-stage lr." " Consider --lr-scheduler=fixed instead." 
) # calculate LR at each point self.peak_lr = cfg.lr[0] self.init_lr = cfg.init_lr_scale * cfg.lr[0] self.final_lr = cfg.final_lr_scale * cfg.lr[0] if cfg.phase_ratio is not None: assert cfg.max_update > 0 assert sum(cfg.phase_ratio) == 1, "phase ratios must add up to 1" self.warmup_steps = int(cfg.max_update * cfg.phase_ratio[0]) self.hold_steps = int(cfg.max_update * cfg.phase_ratio[1]) self.decay_steps = int(cfg.max_update * cfg.phase_ratio[2]) else: self.warmup_steps = cfg.warmup_steps self.hold_steps = cfg.hold_steps self.decay_steps = cfg.decay_steps assert ( self.warmup_steps + self.hold_steps + self.decay_steps > 0 ), "please specify steps or phase_ratio" self.warmup_rate = ( (self.peak_lr - self.init_lr) / self.warmup_steps if self.warmup_steps != 0 else 0 ) self.decay_factor = -math.log(cfg.final_lr_scale) / self.decay_steps # initial learning rate self.lr = self.init_lr self.optimizer.set_lr(self.lr) def _decide_stage(self, update_step): """ return stage, and the corresponding steps within the current stage """ if update_step < self.warmup_steps: # warmup state return 0, update_step offset = self.warmup_steps if update_step < offset + self.hold_steps: # hold stage return 1, update_step - offset offset += self.hold_steps if update_step <= offset + self.decay_steps: # decay stage return 2, update_step - offset offset += self.decay_steps # still here ? constant lr stage return 3, update_step - offset def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" stage, steps_in_stage = self._decide_stage(num_updates) if stage == 0: self.lr = self.init_lr + self.warmup_rate * steps_in_stage elif stage == 1: self.lr = self.peak_lr elif stage == 2: self.lr = self.peak_lr * math.exp(-self.decay_factor * steps_in_stage) elif stage == 3: self.lr = self.final_lr else: raise ValueError("Undefined stage") self.optimizer.set_lr(self.lr) return self.lr
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/lr_scheduler/tri_stage_lr_scheduler.py
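The three stages described in the docstring above (linear warmup, hold, exponential decay to final_lr_scale * lr) can be reproduced in a few lines. The sketch below uses made-up step counts and scales and is not part of the scheduler itself.

# Illustrative sketch (not part of the repository): the tri-stage lr curve.
import math

def tri_stage_lr(step, peak=5e-4, init_scale=0.01, final_scale=0.05,
                 warmup=1000, hold=2000, decay=3000):
    init_lr, final_lr = init_scale * peak, final_scale * peak
    if step < warmup:
        return init_lr + (peak - init_lr) * step / warmup
    step -= warmup
    if step < hold:
        return peak
    step -= hold
    if step <= decay:
        decay_factor = -math.log(final_scale) / decay
        return peak * math.exp(-decay_factor * step)
    return final_lr

for s in (0, 1000, 2500, 6000, 10000):
    print(s, tri_stage_lr(s))
# ramps to the peak, holds it, then decays exponentially to 0.05 * peak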
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from collections.abc import Collection from dataclasses import dataclass, field from typing import List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class CosineLRScheduleConfig(FairseqDataclass): warmup_updates: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) warmup_init_lr: float = field( default=-1, metadata={ "help": "initial learning rate during warmup phase; default is cfg.lr" }, ) lr: List[float] = field( default=II("optimization.lr"), metadata={"help": "max learning rate, must be more than cfg.min_lr"}, ) min_lr: float = field(default=0.0, metadata={"help": "min learning rate"}) t_mult: float = field( default=1.0, metadata={"help": "factor to grow the length of each period"} ) lr_period_updates: float = field( default=-1, metadata={"help": "initial number of updates per period"} ) lr_shrink: float = field( default=0.1, metadata={"help": "shrink factor for annealing"} ) # This is not required, but is for convenience in inferring lr_period_updates max_update: int = II("optimization.max_update") @register_lr_scheduler("cosine", dataclass=CosineLRScheduleConfig) class CosineLRSchedule(FairseqLRScheduler): """Assign LR based on a cyclical schedule that follows the cosine function. See https://arxiv.org/pdf/1608.03983.pdf for details. We also support a warmup phase where we linearly increase the learning rate from some initial learning rate (``--warmup-init-lr``) until the configured max learning rate (``--lr``). During warmup:: lrs = torch.linspace(cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates) lr = lrs[update_num] After warmup:: lr = cfg.min_lr + 0.5*(cfg.lr - cfg.min_lr)*(1 + cos(t_curr / t_i)) where ``t_curr`` is current percentage of updates within the current period range and ``t_i`` is the current period range, which is scaled by ``t_mul`` after every iteration. """ def __init__(self, cfg: CosineLRScheduleConfig, fairseq_optimizer): super().__init__(cfg, fairseq_optimizer) if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1: raise ValueError( "Cannot use a fixed learning rate schedule with cosine." f" Consider --lr-scheduler=fixed instead. 
({cfg.lr})" ) self.max_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr assert ( self.max_lr > cfg.min_lr ), f"max_lr (={cfg.lr}) must be more than min_lr (={cfg.min_lr})" warmup_end_lr = self.max_lr if cfg.warmup_init_lr < 0: cfg.warmup_init_lr = cfg.min_lr self.t_mult = cfg.t_mult self.period = cfg.lr_period_updates if self.period <= 0: assert ( cfg.max_update > 0 ), "Either --max_update or --lr-period-updates must be set" self.period = cfg.max_update - cfg.warmup_updates if cfg.warmup_updates > 0: # linearly warmup for the first cfg.warmup_updates self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates else: self.lr_step = 1 self.warmup_updates = cfg.warmup_updates self.lr_shrink = cfg.lr_shrink # initial learning rate self.lr = cfg.warmup_init_lr self.optimizer.set_lr(self.lr) def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if num_updates < self.cfg.warmup_updates: self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step else: curr_updates = num_updates - self.cfg.warmup_updates if self.t_mult != 1: i = math.floor( math.log( 1 - curr_updates / self.period * (1 - self.t_mult), self.t_mult ) ) t_i = self.t_mult ** i * self.period t_curr = ( curr_updates - (1 - self.t_mult ** i) / (1 - self.t_mult) * self.period ) else: i = math.floor(curr_updates / self.period) t_i = self.period t_curr = curr_updates - (self.period * i) lr_shrink = self.lr_shrink ** i min_lr = self.cfg.min_lr * lr_shrink max_lr = self.max_lr * lr_shrink self.lr = min_lr + 0.5 * (max_lr - min_lr) * ( 1 + math.cos(math.pi * t_curr / t_i) ) self.optimizer.set_lr(self.lr) return self.lr
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/lr_scheduler/cosine_lr_scheduler.py
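With t_mult == 1 the schedule above reduces to plain cosine annealing over lr_period_updates, restarting (and shrinking both bounds by lr_shrink) at the start of each period. A sketch of that simple case with made-up values:

# Illustrative sketch (not part of the repository): cosine annealing with
# restarts and per-period shrink, for the simple t_mult == 1 case.
import math

def cosine_lr(step, max_lr=5e-4, min_lr=1e-5, period=5000, lr_shrink=0.1):
    i = step // period                  # which restart we are in
    t_curr = step - i * period
    shrink = lr_shrink ** i
    lo, hi = min_lr * shrink, max_lr * shrink
    return lo + 0.5 * (hi - lo) * (1 + math.cos(math.pi * t_curr / period))

print(cosine_lr(0))      # max_lr
print(cosine_lr(2500))   # halfway through the period: (max_lr + min_lr) / 2
print(cosine_lr(5000))   # first restart, both bounds shrunk by lr_shrink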
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections.abc import Collection from dataclasses import dataclass, field from typing import List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class StepLRScheduleConfig(FairseqDataclass): warmup_updates: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) warmup_init_lr: float = field( default=-1, metadata={ "help": "initial learning rate during warmup phase; default is cfg.lr" }, ) lr: List[float] = field( default=II("optimization.lr"), metadata={"help": "max learning rate, must be more than cfg.min_lr"}, ) min_lr: float = field(default=0.0, metadata={"help": "min learning rate"}) lr_deacy_period: int = field(default=25000, metadata={"help": "decay period"}) lr_decay: float = field(default=0.5, metadata={"help": "decay factor"}) @register_lr_scheduler("step", dataclass=StepLRScheduleConfig) class StepLRSchedule(FairseqLRScheduler): """Decay learning rate every k updates by a fixed factor""" def __init__(self, cfg: StepLRScheduleConfig, fairseq_optimizer): super().__init__(cfg, fairseq_optimizer) self.max_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr self.min_lr = cfg.min_lr self.lr_deacy_period = cfg.lr_deacy_period self.lr_decay = cfg.lr_decay self.warmup_updates = cfg.warmup_updates self.warmup_init_lr = ( cfg.warmup_init_lr if cfg.warmup_init_lr >= 0 else self.min_lr ) assert self.lr_deacy_period > 0 assert self.lr_decay <= 1 assert self.min_lr >= 0 assert self.max_lr > self.min_lr if cfg.warmup_updates > 0: # linearly warmup for the first cfg.warmup_updates self.warmup_lr_step = ( self.max_lr - self.warmup_init_lr ) / self.warmup_updates else: self.warmup_lr_step = 1 # initial learning rate self.lr = self.warmup_init_lr self.optimizer.set_lr(self.lr) def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if num_updates < self.cfg.warmup_updates: self.lr = self.warmup_init_lr + num_updates * self.warmup_lr_step else: curr_updates = num_updates - self.cfg.warmup_updates lr_mult = self.lr_decay ** (curr_updates // self.lr_deacy_period) self.lr = max(self.max_lr * lr_mult, self.min_lr) self.optimizer.set_lr(self.lr) return self.lr
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/lr_scheduler/step_lr_scheduler.py
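For comparison, the step schedule above reduces to a simple piecewise-constant decay after warmup. A minimal standalone sketch of that curve, with made-up parameter values (not the fairseq optimizer API):

def step_lr(num_updates, max_lr=1e-3, min_lr=1e-6, warmup_updates=500,
            warmup_init_lr=1e-6, decay_period=25000, decay_factor=0.5):
    # Illustrative re-implementation: linear warmup, then multiply the peak LR
    # by decay_factor once per decay_period updates, clamped from below at min_lr.
    if num_updates < warmup_updates:
        step = (max_lr - warmup_init_lr) / warmup_updates
        return warmup_init_lr + num_updates * step
    n_decays = (num_updates - warmup_updates) // decay_period
    return max(max_lr * decay_factor ** n_decays, min_lr)

print(step_lr(500))     # end of warmup -> 1e-3
print(step_lr(50500))   # after two decay periods -> 2.5e-4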
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass, field from typing import List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class TriangularLRScheduleConfig(FairseqDataclass): max_lr: float = field( default="???", metadata={"help": "max learning rate, must be more than cfg.lr"} ) lr_period_updates: float = field( default=5000, metadata={"help": "initial number of updates per period (cycle length)"}, ) lr_shrink: float = field( default=0.1, metadata={"help": "shrink factor for annealing"} ) shrink_min: bool = field( default=False, metadata={"help": "if set, also shrinks min lr"} ) lr: List[float] = II("optimization.lr") @register_lr_scheduler("triangular", dataclass=TriangularLRScheduleConfig) class TriangularLRSchedule(FairseqLRScheduler): """Assign LR based on a triangular cyclical schedule. See https://arxiv.org/pdf/1506.01186.pdf for details. """ def __init__(self, cfg: TriangularLRScheduleConfig, optimizer): super().__init__(cfg, optimizer) if len(cfg.lr) > 1: raise ValueError( "Cannot use a fixed learning rate schedule with triangular." " Consider --lr-scheduler=fixed instead." ) lr = cfg.lr[0] assert cfg.max_lr > lr, "max_lr must be more than lr" self.min_lr = lr self.max_lr = cfg.max_lr self.stepsize = cfg.lr_period_updates // 2 self.lr_shrink = cfg.lr_shrink self.shrink_min = cfg.shrink_min # initial learning rate self.lr = self.min_lr self.optimizer.set_lr(self.lr) def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" cycle = math.floor(num_updates / (2 * self.stepsize)) lr_shrink = self.lr_shrink ** cycle max_lr = self.max_lr * lr_shrink if self.shrink_min: min_lr = self.min_lr * lr_shrink else: min_lr = self.min_lr x = abs(num_updates / self.stepsize - 2 * (cycle + 1) + 1) self.lr = min_lr + (max_lr - min_lr) * max(0, (1 - x)) self.optimizer.set_lr(self.lr) return self.lr
KosmosX-API-main
kosmosX/fairseq/fairseq/optim/lr_scheduler/triangular_lr_scheduler.py
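The triangular schedule above ramps linearly between min_lr and max_lr over each cycle, optionally shrinking the bounds per cycle. A standalone sketch of the same arithmetic; the parameter values are illustrative:

import math

def triangular_lr(num_updates, min_lr=1e-5, max_lr=1e-3, period=5000,
                  lr_shrink=1.0, shrink_min=False):
    # Illustrative re-implementation: one cycle spans `period` updates,
    # half ramping up, half ramping down.
    stepsize = period // 2
    cycle = math.floor(num_updates / (2 * stepsize))
    shrink = lr_shrink ** cycle
    hi = max_lr * shrink
    lo = min_lr * shrink if shrink_min else min_lr
    x = abs(num_updates / stepsize - 2 * (cycle + 1) + 1)
    return lo + (hi - lo) * max(0.0, 1 - x)

print(triangular_lr(0))      # start of a cycle -> min_lr
print(triangular_lr(2500))   # middle of the first cycle -> max_lr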
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional import torch from torch import Tensor @torch.jit.script def script_skip_tensor_list(x: List[Tensor], mask): res = [xi[mask] if xi.size(0) == mask.size(0) else xi[:, mask] for xi in x] outputs = [] for i, t in enumerate(res): if t.numel() != 0: outputs.append(t) else: outputs.append(x[i]) return outputs @torch.jit.script def script_skip_tensor(x: Tensor, mask): # None case if x.size(0) == 0: return x res = x[mask] if x.size(0) == mask.size(0) else x[:, mask] if res.numel() == 0: return x else: return res @torch.jit.script def expand_2d_or_3d_tensor(x, trg_dim: int, padding_idx: int): """ Expand 2D/3D tensor on dim=1 """ if x is None: return None assert x.dim() == 2 or x.dim() == 3 assert trg_dim >= x.size(1), (trg_dim, x.size()) if trg_dim == x.size(1): return x dims = [x.size(0), trg_dim - x.size(1)] if x.dim() == 3: dims.append(x.size(2)) x = torch.cat([x, torch.zeros(dims).to(x).fill_(padding_idx)], 1) return x @torch.jit.script def coalesce(x: Optional[Tensor], y: Tensor) -> Tensor: return x if x is not None else y @torch.jit.script def fill_tensors( x: Optional[Tensor], mask, y: Optional[Tensor], padding_idx: int ) -> Optional[Tensor]: """ Filling tensor x with y at masked positions (dim=0). """ if x is None or x.size()[0] == 0 or y is None: return x assert x.dim() == y.dim() and mask.size(0) == x.size(0) assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2)) n_selected = mask.sum() if n_selected == 0: return x assert n_selected == y.size(0) if n_selected == x.size(0): return y if x.size(1) < y.size(1): x = expand_2d_or_3d_tensor(x, y.size(1), padding_idx) x[mask] = y elif x.size(1) > y.size(1): x[mask] = torch.tensor(padding_idx).type_as(x) if x.dim() == 2: x[mask, : y.size(1)] = y else: x[mask, : y.size(1), :] = y else: x[mask] = y return x
KosmosX-API-main
kosmosX/fairseq/fairseq/models/model_utils.py
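A small demonstration of the padding helper defined in model_utils above, assuming fairseq is installed and importable; the tensor values are arbitrary.

import torch
from fairseq.models.model_utils import expand_2d_or_3d_tensor

x = torch.tensor([[5, 6, 7], [8, 9, 10]])        # B x T
y = expand_2d_or_3d_tensor(x, 5, 1)              # trg_dim=5, padding_idx=1
print(y)
# tensor([[ 5,  6,  7,  1,  1],
#         [ 8,  9, 10,  1,  1]])  -- the new columns on dim=1 are filled with padding_idx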
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Base classes for various fairseq models. """ import logging from argparse import Namespace from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.data import Dictionary from fairseq.dataclass.utils import ( convert_namespace_to_omegaconf, gen_parser_from_dataclass, ) from fairseq.models import FairseqDecoder, FairseqEncoder from omegaconf import DictConfig from torch import Tensor logger = logging.getLogger(__name__) def check_type(module, expected_type): if hasattr(module, "unwrapped_module"): assert isinstance( module.unwrapped_module, expected_type ), f"{type(module.unwrapped_module)} != {expected_type}" else: assert isinstance(module, expected_type), f"{type(module)} != {expected_type}" class BaseFairseqModel(nn.Module): """Base class for fairseq models.""" def __init__(self): super().__init__() self._is_generation_fast = False @classmethod def add_args(cls, parser): """Add model-specific arguments to the parser.""" dc = getattr(cls, "__dataclass", None) if dc is not None: # do not set defaults so that settings defaults from various architectures still works gen_parser_from_dataclass(parser, dc(), delete_default=True) @classmethod def build_model(cls, args, task): """Build a new model instance.""" raise NotImplementedError("Model must implement the build_model method") def get_targets(self, sample, net_output): """Get targets from either the sample or the net's output.""" return sample["target"] def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """Get normalized probabilities (or log probs) from a net's output.""" return self.get_normalized_probs_scriptable(net_output, log_probs, sample) # TorchScript doesn't support super() method so that the scriptable Subclass # can't access the base class model in Torchscript. # Current workaround is to add a helper function with different name and # call the helper function from scriptable Subclass. def get_normalized_probs_scriptable( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """Scriptable helper function for get_normalized_probs in ~BaseFairseqModel""" if hasattr(self, "decoder"): return self.decoder.get_normalized_probs(net_output, log_probs, sample) elif torch.is_tensor(net_output): # syntactic sugar for simple models which don't have a decoder # (e.g., the classification tutorial) logits = net_output.float() if log_probs: return F.log_softmax(logits, dim=-1) else: return F.softmax(logits, dim=-1) raise NotImplementedError def extract_features(self, *args, **kwargs): """Similar to *forward* but only return features.""" return self(*args, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return None def load_state_dict( self, state_dict, strict=True, model_cfg: Optional[DictConfig] = None, args: Optional[Namespace] = None, ): """Copies parameters and buffers from *state_dict* into this module and its descendants. Overrides the method in :class:`nn.Module`. Compared with that method this additionally "upgrades" *state_dicts* from old checkpoints. 
""" if model_cfg is None and args is not None: logger.warn( "using 'args' is deprecated, please update your code to use dataclass config" ) model_cfg = convert_namespace_to_omegaconf(args).model self.upgrade_state_dict(state_dict) from fairseq.checkpoint_utils import prune_state_dict new_state_dict = prune_state_dict(state_dict, model_cfg) return super().load_state_dict(new_state_dict, strict) def upgrade_state_dict(self, state_dict): """Upgrade old state dicts to work with newer code.""" self.upgrade_state_dict_named(state_dict, "") def upgrade_state_dict_named(self, state_dict, name): """Upgrade old state dicts to work with newer code. Args: state_dict (dict): state dictionary to upgrade, in place name (str): the state dict key corresponding to the current module """ assert state_dict is not None def do_upgrade(m, prefix): if len(prefix) > 0: prefix += "." for n, c in m.named_children(): name = prefix + n if hasattr(c, "upgrade_state_dict_named"): c.upgrade_state_dict_named(state_dict, name) elif hasattr(c, "upgrade_state_dict"): c.upgrade_state_dict(state_dict) do_upgrade(c, name) do_upgrade(self, name) def set_num_updates(self, num_updates): """State from trainer to pass along to model at every update.""" for m in self.modules(): if hasattr(m, "set_num_updates") and m != self: m.set_num_updates(num_updates) def prepare_for_inference_(self, cfg: DictConfig): """Prepare model for inference.""" kwargs = {} kwargs["beamable_mm_beam_size"] = ( None if getattr(cfg.generation, "no_beamable_mm", False) else getattr(cfg.generation, "beam", 5) ) kwargs["need_attn"] = getattr(cfg.generation, "print_alignment", False) if getattr(cfg.generation, "retain_dropout", False): kwargs["retain_dropout"] = cfg.generation.retain_dropout kwargs["retain_dropout_modules"] = cfg.generation.retain_dropout_modules self.make_generation_fast_(**kwargs) def make_generation_fast_(self, **kwargs): """ Legacy entry point to optimize model for faster generation. Prefer prepare_for_inference_. """ if self._is_generation_fast: return # only apply once self._is_generation_fast = True # remove weight norm from all modules in the network def apply_remove_weight_norm(module): try: nn.utils.remove_weight_norm(module) except (AttributeError, ValueError): # this module didn't have weight norm return self.apply(apply_remove_weight_norm) def apply_make_generation_fast_(module, prefix): if len(prefix) > 0: prefix += "." base_func = BaseFairseqModel.make_generation_fast_ for n, m in module.named_modules(): if ( m != self and hasattr(m, "make_generation_fast_") # don't call this implementation again, e.g., if # children modules also inherit from BaseFairseqModel and m.make_generation_fast_.__func__ is not base_func ): name = prefix + n m.make_generation_fast_(name=name, **kwargs) apply_make_generation_fast_(self, "") def train(mode=True): if mode: raise RuntimeError("cannot train after make_generation_fast") # this model should no longer be used for training self.eval() self.train = train def prepare_for_onnx_export_(self, **kwargs): """Make model exportable via ONNX trace.""" seen = set() def apply_prepare_for_onnx_export_(module): if ( module != self and hasattr(module, "prepare_for_onnx_export_") and module not in seen ): seen.add(module) module.prepare_for_onnx_export_(**kwargs) self.apply(apply_prepare_for_onnx_export_) @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", **kwargs, ): """ Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model file. 
Downloads and caches the pre-trained model file if needed. The base implementation returns a :class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to generate translations or sample from language models. The underlying :class:`~fairseq.models.FairseqModel` can be accessed via the *generator.models* attribute. Other models may override this to implement custom hub interfaces. Args: model_name_or_path (str): either the name of a pre-trained model to load or a path/URL to a pre-trained model state dict checkpoint_file (str, optional): colon-separated list of checkpoint files in the model archive to ensemble (default: 'model.pt') data_name_or_path (str, optional): point args.data to the archive at the given path/URL. Can start with '.' or './' to reuse the model archive path. """ from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), **kwargs, ) logger.info(x["args"]) return hub_utils.GeneratorHubInterface(x["args"], x["task"], x["models"]) @classmethod def hub_models(cls): return {} class FairseqEncoderDecoderModel(BaseFairseqModel): """Base class for encoder-decoder models. Args: encoder (FairseqEncoder): the encoder decoder (FairseqDecoder): the decoder """ def __init__(self, encoder, decoder): super().__init__() self.encoder = encoder self.decoder = decoder check_type(self.encoder, FairseqEncoder) check_type(self.decoder, FairseqDecoder) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. Then, feed the encoder output and previous decoder outputs (i.e., teacher forcing) to the decoder to produce the next outputs:: encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder(prev_output_tokens, encoder_out) Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, **kwargs ) return decoder_out def forward_decoder(self, prev_output_tokens, **kwargs): return self.decoder(prev_output_tokens, **kwargs) def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) features = self.decoder.extract_features( prev_output_tokens, encoder_out=encoder_out, **kwargs ) return features def output_layer(self, features, **kwargs): """Project features to the default output size (typically vocabulary size).""" return self.decoder.output_layer(features, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return (self.encoder.max_positions(), self.decoder.max_positions()) def max_decoder_positions(self): """Maximum length supported by the decoder.""" return self.decoder.max_positions() class FairseqModel(FairseqEncoderDecoderModel): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) utils.deprecation_warning( "FairseqModel is deprecated, please use FairseqEncoderDecoderModel " "or BaseFairseqModel instead", stacklevel=4, ) class FairseqMultiModel(BaseFairseqModel): """Base class for combining multiple encoder-decoder models.""" def __init__(self, encoders, decoders): super().__init__() assert encoders.keys() == decoders.keys() self.keys = list(encoders.keys()) for key in self.keys: check_type(encoders[key], FairseqEncoder) check_type(decoders[key], FairseqDecoder) self.models = nn.ModuleDict( { key: FairseqEncoderDecoderModel(encoders[key], decoders[key]) for key in self.keys } ) @staticmethod def build_shared_embeddings( dicts: Dict[str, Dictionary], langs: List[str], embed_dim: int, build_embedding: callable, pretrained_embed_path: Optional[str] = None, ): """ Helper function to build shared embeddings for a set of languages after checking that all dicts corresponding to those languages are equivalent. Args: dicts: Dict of lang_id to its corresponding Dictionary langs: languages that we want to share embeddings for embed_dim: embedding dimension build_embedding: callable function to actually build the embedding pretrained_embed_path: Optional path to load pretrained embeddings """ shared_dict = dicts[langs[0]] if any(dicts[lang] != shared_dict for lang in langs): raise ValueError( "--share-*-embeddings requires a joined dictionary: " "--share-encoder-embeddings requires a joined source " "dictionary, --share-decoder-embeddings requires a joined " "target dictionary, and --share-all-embeddings requires a " "joint source + target dictionary." ) return build_embedding(shared_dict, embed_dim, pretrained_embed_path) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): raise NotImplementedError def max_positions(self): """Maximum length supported by the model.""" return { key: ( self.models[key].encoder.max_positions(), self.models[key].decoder.max_positions(), ) for key in self.keys } def max_decoder_positions(self): """Maximum length supported by the decoder.""" return min(model.decoder.max_positions() for model in self.models.values()) @property def encoder(self): return self.models[self.keys[0]].encoder @property def decoder(self): return self.models[self.keys[0]].decoder def forward_decoder(self, prev_output_tokens, **kwargs): return self.decoder(prev_output_tokens, **kwargs) def load_state_dict( self, state_dict, strict=True, model_cfg=None, args: Optional[Namespace] = None, ): """Copies parameters and buffers from *state_dict* into this module and its descendants. Overrides the method in :class:`nn.Module`. 
Compared with that method this additionally "upgrades" *state_dicts* from old checkpoints. """ if model_cfg is None and args is not None: logger.warn( "using 'args' is deprecated, please update your code to use dataclass config" ) model_cfg = convert_namespace_to_omegaconf(args).model self.upgrade_state_dict(state_dict) from fairseq.checkpoint_utils import prune_state_dict new_state_dict = prune_state_dict(state_dict, model_cfg) return super().load_state_dict(new_state_dict, strict) class FairseqLanguageModel(BaseFairseqModel): """Base class for decoder-only models. Args: decoder (FairseqDecoder): the decoder """ def __init__(self, decoder): super().__init__() self.decoder = decoder check_type(self.decoder, FairseqDecoder) def forward(self, src_tokens, **kwargs): """ Run the forward pass for a decoder-only model. Feeds a batch of tokens through the decoder to predict the next tokens. Args: src_tokens (LongTensor): tokens on which to condition the decoder, of shape `(batch, tgt_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` Returns: tuple: - the decoder's output of shape `(batch, seq_len, vocab)` - a dictionary with any model-specific outputs """ return self.decoder(src_tokens, **kwargs) def forward_decoder(self, prev_output_tokens, **kwargs): return self.decoder(prev_output_tokens, **kwargs) def extract_features(self, src_tokens, **kwargs): """ Similar to *forward* but only return features. Returns: tuple: - the decoder's features of shape `(batch, seq_len, embed_dim)` - a dictionary with any model-specific outputs """ return self.decoder.extract_features(src_tokens, **kwargs) def output_layer(self, features, **kwargs): """Project features to the default output size (typically vocabulary size).""" return self.decoder.output_layer(features, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return self.decoder.max_positions() def max_decoder_positions(self): """Maximum length supported by the decoder.""" return self.decoder.max_positions() @property def supported_targets(self): return {"future"} class FairseqEncoderModel(BaseFairseqModel): """Base class for encoder-only models. Args: encoder (FairseqEncoder): the encoder """ def __init__(self, encoder): super().__init__() self.encoder = encoder check_type(self.encoder, FairseqEncoder) def forward(self, src_tokens, src_lengths, **kwargs): """ Run the forward pass for a encoder-only model. Feeds a batch of tokens through the encoder to generate features. Args: src_tokens (LongTensor): input tokens of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` Returns: the encoder's output, typically of shape `(batch, src_len, features)` """ return self.encoder(src_tokens, src_lengths, **kwargs) def get_normalized_probs(self, net_output, log_probs, sample=None): """Get normalized probabilities (or log probs) from a net's output.""" encoder_out = net_output["encoder_out"] if torch.is_tensor(encoder_out): logits = encoder_out.float() if log_probs: return F.log_softmax(logits, dim=-1) else: return F.softmax(logits, dim=-1) raise NotImplementedError def max_positions(self): """Maximum length supported by the model.""" return self.encoder.max_positions()
KosmosX-API-main
kosmosX/fairseq/fairseq/models/fairseq_model.py
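BaseFairseqModel.from_pretrained above returns a GeneratorHubInterface wrapper around the loaded models. A hedged usage sketch: the directory and checkpoint names below are placeholders, and a real model archive with its dictionaries is required for this to run.

from fairseq.models.transformer import TransformerModel

# hypothetical paths -- substitute a real model directory containing the
# checkpoint and the dictionaries it was trained with
hub = TransformerModel.from_pretrained(
    "/path/to/model_dir",
    checkpoint_file="model.pt",
    data_name_or_path=".",
)
hub.eval()
print(hub.translate("Hello world!"))  # GeneratorHubInterface helper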
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, NamedTuple, Optional import torch import torch.nn as nn from torch import Tensor EncoderOut = NamedTuple( "EncoderOut", [ ("encoder_out", Tensor), # T x B x C ("encoder_padding_mask", Optional[Tensor]), # B x T ("encoder_embedding", Optional[Tensor]), # B x T x C ("encoder_states", Optional[List[Tensor]]), # List[T x B x C] ("src_tokens", Optional[Tensor]), # B x T ("src_lengths", Optional[Tensor]), # B x 1 ], ) class FairseqEncoder(nn.Module): """Base class for encoders.""" def __init__(self, dictionary): super().__init__() self.dictionary = dictionary def forward(self, src_tokens, src_lengths=None, **kwargs): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` """ raise NotImplementedError def forward_torchscript(self, net_input: Dict[str, Tensor]): """A TorchScript-compatible version of forward. Encoders which use additional arguments may want to override this method for TorchScript compatibility. """ if torch.jit.is_scripting(): return self.forward( src_tokens=net_input["src_tokens"], src_lengths=net_input["src_lengths"], ) else: return self.forward_non_torchscript(net_input) @torch.jit.unused def forward_non_torchscript(self, net_input: Dict[str, Tensor]): encoder_input = { k: v for k, v in net_input.items() if k != "prev_output_tokens" } return self.forward(**encoder_input) def reorder_encoder_out(self, encoder_out, new_order): """ Reorder encoder output according to `new_order`. Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: `encoder_out` rearranged according to `new_order` """ raise NotImplementedError def max_positions(self): """Maximum input length supported by the encoder.""" return 1e6 # an arbitrary large number def upgrade_state_dict_named(self, state_dict, name): """Upgrade old state dicts to work with newer code.""" return state_dict def set_num_updates(self, num_updates): """State from trainer to pass along to model at every update.""" def _apply(m): if hasattr(m, "set_num_updates") and m != self: m.set_num_updates(num_updates) self.apply(_apply)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/fairseq_encoder.py
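A minimal illustrative subclass of the FairseqEncoder contract above; the sizes and the dictionary-style output keys chosen here are a sketch for illustration, not a registered fairseq model.

import torch.nn as nn
from fairseq.models import FairseqEncoder

class TinyEncoder(FairseqEncoder):
    def __init__(self, dictionary, embed_dim=32):
        super().__init__(dictionary)
        self.embed = nn.Embedding(len(dictionary), embed_dim, dictionary.pad())

    def forward(self, src_tokens, src_lengths=None, **kwargs):
        x = self.embed(src_tokens).transpose(0, 1)   # T x B x C
        return {
            "encoder_out": [x],
            "encoder_padding_mask": [src_tokens.eq(self.dictionary.pad())],
        }

    def reorder_encoder_out(self, encoder_out, new_order):
        # beam search reorders the batch dimension (dim=1 of a T x B x C tensor)
        return {
            "encoder_out": [encoder_out["encoder_out"][0].index_select(1, new_order)],
            "encoder_padding_mask": [
                encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
            ],
        }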
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import math import os import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.models import ( CompositeEncoder, FairseqDecoder, FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( DownsampledMultiHeadAttention, FairseqDropout, GradMultiply, LayerNorm, LearnedPositionalEmbedding, LinearizedConvolution, ) logger = logging.getLogger(__name__) @register_model("fconv_self_att") class FConvModelSelfAtt(FairseqEncoderDecoderModel): @classmethod def hub_models(cls): return { "conv.stories.pretrained": { "path": "https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz", "checkpoint_file": "pretrained_checkpoint.pt", "tokenizer": "nltk", }, "conv.stories": { "path": "https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz", "checkpoint_file": "fusion_checkpoint.pt", "tokenizer": "nltk", "pretrained": "True", "pretrained_checkpoint": "./pretrained_checkpoint.pt", }, # Test set containing dictionaries "data.stories": "https://dl.fbaipublicfiles.com/fairseq/data/stories_test.tar.bz2", } def __init__(self, encoder, decoder, pretrained_encoder=None): super().__init__(encoder, decoder) self.encoder.num_attention_layers = sum( layer is not None for layer in decoder.attention ) self.pretrained_encoder = pretrained_encoder if self.pretrained_encoder is None: encoders = {"encoder": encoder} else: encoders = {"encoder": encoder, "pretrained": self.pretrained_encoder} # for fusion model, CompositeEncoder contains both pretrained and training encoders # these are forwarded and then combined in the decoder self.encoder = CompositeEncoder(encoders) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-layers', type=str, metavar='EXPR', help='encoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]') parser.add_argument('--self-attention', type=str, metavar='EXPR', help='decoder self-attention layers, ex: [True] + [False]*5') parser.add_argument('--multihead-attention-nheads', type=int, help='Number of heads to use in attention') parser.add_argument('--multihead-self-attention-nheads', type=int, help='Number of heads to use in self-attention') parser.add_argument('--encoder-attention', type=str, metavar='EXPR', help='encoder attention [True, ...]') parser.add_argument('--encoder-attention-nheads', type=int, help='Number of heads to use in encoder attention') parser.add_argument('--project-input', type=str, metavar='EXPR', help='Use projections in self-attention [True, ...]') parser.add_argument('--gated-attention', type=str, 
metavar='EXPR', help='Use GLU layers in self-attention projections [True, ...]') parser.add_argument('--downsample', type=str, metavar='EXPR', help='Use downsampling in self-attention [True, ...]') parser.add_argument('--pretrained-checkpoint', metavar='DIR', help='path to load checkpoint from pretrained model') parser.add_argument('--pretrained', type=str, metavar='EXPR', help='use pretrained model when training [True, ...]') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" trained_encoder, trained_decoder = None, None pretrained = eval(args.pretrained) if pretrained: logger.info("loading pretrained model") if not os.path.exists(args.pretrained_checkpoint): new_pretrained_checkpoint = os.path.join( args.data, args.pretrained_checkpoint ) if os.path.exists(new_pretrained_checkpoint): args.pretrained_checkpoint = new_pretrained_checkpoint trained_model = checkpoint_utils.load_model_ensemble( filenames=[args.pretrained_checkpoint], task=task, )[0][0] trained_decoder = list(trained_model.children())[1] trained_encoder = list(trained_model.children())[0] # freeze pretrained model for param in trained_decoder.parameters(): param.requires_grad = False for param in trained_encoder.parameters(): param.requires_grad = False encoder = FConvEncoder( task.source_dictionary, embed_dim=args.encoder_embed_dim, convolutions=eval(args.encoder_layers), dropout=args.dropout, max_positions=args.max_source_positions, attention=eval(args.encoder_attention), attention_nheads=args.encoder_attention_nheads, ) decoder = FConvDecoder( task.target_dictionary, embed_dim=args.decoder_embed_dim, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_out_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.max_target_positions, selfattention=eval(args.self_attention), attention_nheads=args.multihead_attention_nheads, selfattention_nheads=args.multihead_self_attention_nheads, project_input=eval(args.project_input), gated_attention=eval(args.gated_attention), downsample=eval(args.downsample), pretrained=pretrained, trained_decoder=trained_decoder, ) model = FConvModelSelfAtt(encoder, decoder, trained_encoder) return model @property def pretrained(self): return self.pretrained_encoder is not None class FConvEncoder(FairseqEncoder): """Convolutional encoder""" def __init__( self, dictionary, embed_dim=512, max_positions=1024, convolutions=((512, 3),) * 20, dropout=0.1, attention=False, attention_nheads=1, ): super().__init__(dictionary) self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.num_attention_layers = None num_embeddings = len(dictionary) self.padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, self.padding_idx, ) def expand_bool_array(val): if isinstance(val, bool): # expand True into [True, True, ...] 
and do the same with False return [val] * len(convolutions) return val attention = expand_bool_array(attention) in_channels = convolutions[0][0] self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.attention = nn.ModuleList() self.attproj = nn.ModuleList() for i, (out_channels, kernel_size) in enumerate(convolutions): self.projections.append( Linear(in_channels, out_channels) if in_channels != out_channels else None ) self.convolutions.append( ConvTBC(in_channels, out_channels * 2, kernel_size, dropout=dropout) ) self.attention.append( SelfAttention(out_channels, embed_dim, attention_nheads) if attention[i] else None ) in_channels = out_channels self.fc2 = Linear(in_channels, embed_dim) def forward(self, src_tokens, src_lengths): # embed tokens and positions x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens) x = self.dropout_module(x) input_embedding = x.transpose(0, 1) # project to size of convolution x = self.fc1(x) encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B if not encoder_padding_mask.any(): encoder_padding_mask = None # B x T x C -> T x B x C x = x.transpose(0, 1) # temporal convolutions for proj, conv, attention in zip( self.projections, self.convolutions, self.attention ): residual = x if proj is None else proj(x) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) x = self.dropout_module(x) padding_l = (conv.kernel_size[0] - 1) // 2 padding_r = conv.kernel_size[0] // 2 x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r)) x = conv(x) x = F.glu(x, dim=2) if attention is not None: x = attention(x) x = (x + residual) * math.sqrt(0.5) # T x B x C -> B x T x C x = x.transpose(1, 0) # project back to size of embedding x = self.fc2(x) if encoder_padding_mask is not None: encoder_padding_mask = encoder_padding_mask.t() # -> B x T x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) # scale gradients (this only affects backward, not forward) x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers)) # add output to input embedding for attention y = (x + input_embedding.transpose(0, 1)) * math.sqrt(0.5) return { "encoder_out": (x, y), "encoder_padding_mask": encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, new_order): encoder_out["encoder_out"] = tuple( eo.index_select(0, new_order) for eo in encoder_out["encoder_out"] ) if encoder_out["encoder_padding_mask"] is not None: encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(0, new_order) if "pretrained" in encoder_out: encoder_out["pretrained"]["encoder_out"] = tuple( eo.index_select(0, new_order) for eo in encoder_out["pretrained"]["encoder_out"] ) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return self.embed_positions.max_positions @with_incremental_state class FConvDecoder(FairseqDecoder): """Convolutional decoder""" def __init__( self, dictionary, embed_dim=512, out_embed_dim=256, max_positions=1024, convolutions=((512, 3),) * 8, attention=True, dropout=0.1, selfattention=False, attention_nheads=1, selfattention_nheads=1, project_input=False, gated_attention=False, downsample=False, pretrained=False, trained_decoder=None, ): super().__init__(dictionary) self.register_buffer("version", torch.Tensor([2])) self.pretrained = pretrained self.pretrained_decoder = trained_decoder self.dropout_module = FairseqDropout( dropout, 
module_name=self.__class__.__name__ ) self.need_attn = True in_channels = convolutions[0][0] def expand_bool_array(val): if isinstance(val, bool): # expand True into [True, True, ...] and do the same with False return [val] * len(convolutions) return val attention = expand_bool_array(attention) selfattention = expand_bool_array(selfattention) if not isinstance(attention, list) or len(attention) != len(convolutions): raise ValueError( "Attention is expected to be a list of booleans of " "length equal to the number of layers." ) num_embeddings = len(dictionary) padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, padding_idx, ) self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.attention = nn.ModuleList() self.selfattention = nn.ModuleList() self.attproj = nn.ModuleList() for i, (out_channels, kernel_size) in enumerate(convolutions): self.projections.append( Linear(in_channels, out_channels) if in_channels != out_channels else None ) self.convolutions.append( LinearizedConv1d( in_channels, out_channels * 2, kernel_size, padding=(kernel_size - 1), dropout=dropout, ) ) self.attention.append( DownsampledMultiHeadAttention( out_channels, embed_dim, attention_nheads, project_input=project_input, gated=False, downsample=False, ) if attention[i] else None ) self.attproj.append( Linear(out_channels, embed_dim, dropout=dropout) if attention[i] else None ) self.selfattention.append( SelfAttention( out_channels, embed_dim, selfattention_nheads, project_input=project_input, gated=gated_attention, downsample=downsample, ) if selfattention[i] else None ) in_channels = out_channels self.fc2 = Linear(in_channels, out_embed_dim) self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout) # model fusion if self.pretrained: # independent gates are learned from the concatenated input self.gate1 = nn.Sequential( Linear(out_embed_dim * 2, out_embed_dim), nn.Sigmoid() ) self.gate2 = nn.Sequential( Linear(out_embed_dim * 2, out_embed_dim), nn.Sigmoid() ) # pretrained and trained models are joined self.joining = nn.Sequential( Linear(out_embed_dim * 2, out_embed_dim * 2), LayerNorm(out_embed_dim * 2), nn.GLU(), Linear(out_embed_dim, out_embed_dim * 2), LayerNorm(out_embed_dim * 2), nn.GLU(), Linear(out_embed_dim, out_embed_dim), LayerNorm(out_embed_dim), ) # pretrained model contains an output layer that is nhid -> vocab size # but the models are combined in their hidden state # the hook stores the output of the pretrained model forward self.pretrained_outputs = {} def save_output(): def hook(a, b, output): self.pretrained_outputs["out"] = output return hook self.pretrained_decoder.fc2.register_forward_hook(save_output()) def forward(self, prev_output_tokens, encoder_out): trained_encoder_out = encoder_out["pretrained"] if self.pretrained else None encoder_out = encoder_out["encoder"]["encoder_out"] encoder_a, encoder_b = self._split_encoder_out(encoder_out) # embed positions positions = self.embed_positions(prev_output_tokens) # embed tokens and positions x = self.embed_tokens(prev_output_tokens) + positions x = self.dropout_module(x) target_embedding = x.transpose(0, 1) # project to size of convolution x = self.fc1(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # temporal convolutions avg_attn_scores = None for proj, conv, attention, selfattention, attproj in zip( self.projections, self.convolutions, 
self.attention, self.selfattention, self.attproj, ): residual = x if proj is None else proj(x) x = self.dropout_module(x) x = conv(x) x = F.glu(x, dim=2) # attention if attention is not None: r = x x, attn_scores = attention( attproj(x) + target_embedding, encoder_a, encoder_b ) x = x + r if not self.training and self.need_attn: if avg_attn_scores is None: avg_attn_scores = attn_scores else: avg_attn_scores.add_(attn_scores) if selfattention is not None: x = selfattention(x) x = (x + residual) * math.sqrt(0.5) # T x B x C -> B x T x C x = x.transpose(0, 1) # project back to size of vocabulary x = self.fc2(x) x = self.dropout_module(x) if not self.pretrained: x = self.fc3(x) # fusion gating if self.pretrained: trained_x, _ = self.pretrained_decoder.forward( prev_output_tokens, trained_encoder_out ) y = torch.cat([x, self.pretrained_outputs["out"]], dim=-1) gate1 = self.gate1(y) gate2 = self.gate2(y) gated_x1 = gate1 * x gated_x2 = gate2 * self.pretrained_outputs["out"] fusion = torch.cat([gated_x1, gated_x2], dim=-1) fusion = self.joining(fusion) fusion_output = self.fc3(fusion) return fusion_output, avg_attn_scores else: return x, avg_attn_scores def max_positions(self): """Maximum output length supported by the decoder.""" return self.embed_positions.max_positions def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def _split_encoder_out(self, encoder_out): """Split and transpose encoder outputs.""" # transpose only once to speed up attention layers encoder_a, encoder_b = encoder_out encoder_a = encoder_a.transpose(0, 1).contiguous() encoder_b = encoder_b.transpose(0, 1).contiguous() result = (encoder_a, encoder_b) return result class SelfAttention(nn.Module): def __init__( self, out_channels, embed_dim, num_heads, project_input=False, gated=False, downsample=False, ): super().__init__() self.attention = DownsampledMultiHeadAttention( out_channels, embed_dim, num_heads, dropout=0, bias=True, project_input=project_input, gated=gated, downsample=downsample, ) self.in_proj_q = Linear(out_channels, embed_dim) self.in_proj_k = Linear(out_channels, embed_dim) self.in_proj_v = Linear(out_channels, embed_dim) self.ln = LayerNorm(out_channels) def forward(self, x): residual = x query = self.in_proj_q(x) key = self.in_proj_k(x) value = self.in_proj_v(x) x, _ = self.attention( query, key, value, mask_future_timesteps=True, use_scalar_bias=True ) return self.ln(x + residual) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) m.weight.data.normal_(0, 0.1) return m def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx): m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) m.weight.data.normal_(0, 0.1) return m def Linear(in_features, out_features, dropout=0.0): """Weight-normalized Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features) m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features)) m.bias.data.zero_() return m def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs): """Weight-normalized Conv1d layer optimized for decoding""" m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) m.weight.data.normal_(mean=0, std=std) m.bias.data.zero_() return m def ConvTBC(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs): """Weight-normalized Conv1d layer""" from fairseq.modules import 
ConvTBC m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) m.weight.data.normal_(mean=0, std=std) m.bias.data.zero_() return m @register_model_architecture("fconv_self_att", "fconv_self_att") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_layers = getattr(args, "encoder_layers", "[(512, 3)] * 3") args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_layers = getattr(args, "decoder_layers", "[(512, 3)] * 8") args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) args.decoder_attention = getattr(args, "decoder_attention", "True") args.self_attention = getattr(args, "self_attention", "False") args.encoder_attention = getattr(args, "encoder_attention", "False") args.multihead_attention_nheads = getattr(args, "multihead_attention_nheads", 1) args.multihead_self_attention_nheads = getattr( args, "multihead_self_attention_nheads", 1 ) args.encoder_attention_nheads = getattr(args, "encoder_attention_nheads", 1) args.project_input = getattr(args, "project_input", "False") args.gated_attention = getattr(args, "gated_attention", "False") args.downsample = getattr(args, "downsample", "False") args.pretrained_checkpoint = getattr(args, "pretrained_checkpoint", "") args.pretrained = getattr(args, "pretrained", "False") @register_model_architecture("fconv_self_att", "fconv_self_att_wp") def fconv_self_att_wp(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_layers = getattr( args, "encoder_layers", "[(128, 3)] * 2 + [(512,3)] * 1" ) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_layers = getattr( args, "decoder_layers", "[(512, 4)] * 4 + [(768, 4)] * 2 + [(1024, 4)] * 1" ) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) args.self_attention = getattr(args, "self_attention", "True") args.multihead_self_attention_nheads = getattr( args, "multihead_self_attention_nheads", 4 ) args.project_input = getattr(args, "project_input", "True") args.gated_attention = getattr(args, "gated_attention", "True") args.downsample = getattr(args, "downsample", "True") base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/fconv_self_att.py
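The --encoder-layers / --decoder-layers options in the convolutional models above are Python expressions that build_model passes through eval() to obtain (out_channels, kernel_size) tuples. A quick illustration of that convention:

# the layer-spec string below is an example value, not a recommended setting
layers = eval("[(128, 3)] * 2 + [(512, 3)] * 1")
print(layers)        # [(128, 3), (128, 3), (512, 3)]
print(len(layers))   # three convolutional blocks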
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq import utils from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.fconv import FConvDecoder from fairseq.utils import safe_hasattr @register_model("fconv_lm") class FConvLanguageModel(FairseqLanguageModel): def __init__(self, decoder): super().__init__(decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-layers", type=str, metavar="EXPR", help="decoder layers [(dim, kernel_size), ...]", ) parser.add_argument( "--decoder-out-embed-dim", type=int, metavar="N", help="decoder output embedding dimension", ) parser.add_argument( "--adaptive-softmax-cutoff", metavar="EXPR", help="comma separated list of adaptive softmax cutoff points. " "Must be used with adaptive_loss criterion", ) parser.add_argument( "--adaptive-softmax-dropout", type=float, metavar="D", help="sets adaptive softmax dropout for the tail projections", ) parser.add_argument( "--decoder-attention", type=str, metavar="EXPR", help="decoder attention [True, ...]", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_lm_architecture(args) if safe_hasattr(args, "max_target_positions") and not safe_hasattr( args, "tokens_per_sample" ): args.tokens_per_sample = args.max_target_positions decoder = FConvDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.tokens_per_sample, share_embed=False, positional_embeddings=False, adaptive_softmax_cutoff=( utils.eval_str_list(args.adaptive_softmax_cutoff, type=int) if args.criterion == "adaptive_loss" else None ), adaptive_softmax_dropout=args.adaptive_softmax_dropout, ) return FConvLanguageModel(decoder) @register_model_architecture("fconv_lm", "fconv_lm") def base_lm_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128) args.decoder_layers = getattr(args, "decoder_layers", "[(1268, 4)] * 13") args.decoder_attention = getattr(args, "decoder_attention", "False") args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) @register_model_architecture("fconv_lm", "fconv_lm_dauphin_wikitext103") def fconv_lm_dauphin_wikitext103(args): layers = "[(850, 6)] * 3" layers += " + [(850, 1)] * 1" layers += " + [(850, 5)] * 4" layers += " + [(850, 1)] * 1" layers += " + [(850, 4)] * 3" layers += " + [(1024, 4)] * 1" layers += " + [(2048, 4)] * 1" args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 280) args.decoder_layers = getattr(args, "decoder_layers", layers) args.decoder_attention = getattr(args, "decoder_attention", "False") args.adaptive_softmax_cutoff = getattr( args, "adaptive_softmax_cutoff", "10000,20000,200000" ) base_lm_architecture(args) @register_model_architecture("fconv_lm", "fconv_lm_dauphin_gbw") def fconv_lm_dauphin_gbw(args): layers = 
"[(512, 5)]" layers += " + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3" layers += " + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3" layers += " + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6" layers += " + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]" args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128) args.decoder_layers = getattr(args, "decoder_layers", layers) args.decoder_attention = getattr(args, "decoder_attention", "False") args.adaptive_softmax_cutoff = getattr( args, "adaptive_softmax_cutoff", "10000,50000,200000" ) base_lm_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/fconv_lm.py
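The adaptive softmax cutoff string used above is parsed with utils.eval_str_list into a list of integer vocabulary boundaries. A small sketch of the expected behaviour, assuming fairseq is importable:

from fairseq import utils

cutoffs = utils.eval_str_list("10000,20000,200000", type=int)
print(cutoffs)   # [10000, 20000, 200000] -- vocabulary bands for the adaptive softmax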
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq import utils from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.lstm import Embedding, LSTMDecoder DEFAULT_MAX_TARGET_POSITIONS = 1e5 @register_model("lstm_lm") class LSTMLanguageModel(FairseqLanguageModel): def __init__(self, decoder): super().__init__(decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-hidden-size', type=int, metavar='N', help='decoder hidden size') parser.add_argument('--decoder-layers', type=int, metavar='N', help='number of decoder layers') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='BOOL', help='decoder attention') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion') parser.add_argument('--residuals', default=False, action='store_true', help='applying residuals between LSTM layers') # Granular dropout settings (if not specified these default to --dropout) parser.add_argument('--decoder-dropout-in', type=float, metavar='D', help='dropout probability for decoder input embedding') parser.add_argument('--decoder-dropout-out', type=float, metavar='D', help='dropout probability for decoder output') parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true', help='share decoder input and output embeddings') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if getattr(args, "max_target_positions", None) is not None: max_target_positions = args.max_target_positions else: max_target_positions = getattr( args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS ) def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) embed_dict = utils.parse_embedding(embed_path) utils.print_embed_overlap(embed_dict, dictionary) return utils.load_embedding(embed_dict, dictionary, embed_tokens) pretrained_decoder_embed = None if args.decoder_embed_path: pretrained_decoder_embed = load_pretrained_embedding_from_file( args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim ) if args.share_decoder_input_output_embed: # double check all parameters combinations are valid if task.source_dictionary != task.target_dictionary: raise ValueError( "--share-decoder-input-output-embeddings requires a joint dictionary" ) if args.decoder_embed_dim != args.decoder_out_embed_dim: raise ValueError( "--share-decoder-input-output-embeddings requires " "--decoder-embed-dim to match --decoder-out-embed-dim" ) decoder = LSTMDecoder( dictionary=task.dictionary, embed_dim=args.decoder_embed_dim, 
hidden_size=args.decoder_hidden_size, out_embed_dim=args.decoder_out_embed_dim, num_layers=args.decoder_layers, dropout_in=args.decoder_dropout_in, dropout_out=args.decoder_dropout_out, attention=False, # decoder-only language model doesn't support attention encoder_output_units=0, pretrained_embed=pretrained_decoder_embed, share_input_output_embed=args.share_decoder_input_output_embed, adaptive_softmax_cutoff=( utils.eval_str_list(args.adaptive_softmax_cutoff, type=int) if args.criterion == "adaptive_loss" else None ), max_target_positions=max_target_positions, residuals=args.residuals, ) return cls(decoder) @register_model_architecture("lstm_lm", "lstm_lm") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_hidden_size = getattr( args, "decoder_hidden_size", args.decoder_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 1) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) args.decoder_attention = getattr(args, "decoder_attention", "0") args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout) args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.adaptive_softmax_cutoff = getattr( args, "adaptive_softmax_cutoff", "10000,50000,200000" ) args.residuals = getattr(args, "residuals", False)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/lstm_lm.py
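The base_architecture hook above uses getattr with a default so that user-supplied values win and everything else falls back, e.g. the granular dropout options default to --dropout. A tiny illustration of that pattern with a plain argparse Namespace:

from argparse import Namespace

args = Namespace(dropout=0.3)   # the user set only --dropout
args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout)
args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout)
print(args.decoder_dropout_in, args.decoder_dropout_out)   # 0.3 0.3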
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import OrderedDict from fairseq import utils from fairseq.models import ( FairseqMultiModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, base_architecture, ) from fairseq.utils import safe_hasattr @register_model("multilingual_transformer") class MultilingualTransformerModel(FairseqMultiModel): """Train Transformer models for multiple language pairs simultaneously. Requires `--task multilingual_translation`. We inherit all arguments from TransformerModel and assume that all language pairs use a single Transformer architecture. In addition, we provide several options that are specific to the multilingual setting. Args: --share-encoder-embeddings: share encoder embeddings across all source languages --share-decoder-embeddings: share decoder embeddings across all target languages --share-encoders: share all encoder params (incl. embeddings) across all source languages --share-decoders: share all decoder params (incl. embeddings) across all target languages """ def __init__(self, encoders, decoders): super().__init__(encoders, decoders) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" TransformerModel.add_args(parser) parser.add_argument( "--share-encoder-embeddings", action="store_true", help="share encoder embeddings across languages", ) parser.add_argument( "--share-decoder-embeddings", action="store_true", help="share decoder embeddings across languages", ) parser.add_argument( "--share-encoders", action="store_true", help="share encoders across languages", ) parser.add_argument( "--share-decoders", action="store_true", help="share decoders across languages", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" from fairseq.tasks.multilingual_translation import MultilingualTranslationTask assert isinstance(task, MultilingualTranslationTask) # make sure all arguments are present in older models base_multilingual_architecture(args) if not safe_hasattr(args, "max_source_positions"): args.max_source_positions = 1024 if not safe_hasattr(args, "max_target_positions"): args.max_target_positions = 1024 src_langs = [lang_pair.split("-")[0] for lang_pair in task.model_lang_pairs] tgt_langs = [lang_pair.split("-")[1] for lang_pair in task.model_lang_pairs] if args.share_encoders: args.share_encoder_embeddings = True if args.share_decoders: args.share_decoder_embeddings = True def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb # build shared embeddings (if applicable) shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None if args.share_all_embeddings: if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise ValueError( "--share-all-embeddings not compatible with --decoder-embed-path" ) shared_encoder_embed_tokens = 
FairseqMultiModel.build_shared_embeddings( dicts=task.dicts, langs=task.langs, embed_dim=args.encoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.encoder_embed_path, ) shared_decoder_embed_tokens = shared_encoder_embed_tokens args.share_decoder_input_output_embed = True else: if args.share_encoder_embeddings: shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings( dicts=task.dicts, langs=src_langs, embed_dim=args.encoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.encoder_embed_path, ) if args.share_decoder_embeddings: shared_decoder_embed_tokens = FairseqMultiModel.build_shared_embeddings( dicts=task.dicts, langs=tgt_langs, embed_dim=args.decoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.decoder_embed_path, ) # encoders/decoders for each language lang_encoders, lang_decoders = {}, {} def get_encoder(lang): if lang not in lang_encoders: if shared_encoder_embed_tokens is not None: encoder_embed_tokens = shared_encoder_embed_tokens else: encoder_embed_tokens = build_embedding( task.dicts[lang], args.encoder_embed_dim, args.encoder_embed_path, ) lang_encoders[lang] = cls._get_module_class( True, args, task.dicts[lang], encoder_embed_tokens, src_langs ) return lang_encoders[lang] def get_decoder(lang): if lang not in lang_decoders: if shared_decoder_embed_tokens is not None: decoder_embed_tokens = shared_decoder_embed_tokens else: decoder_embed_tokens = build_embedding( task.dicts[lang], args.decoder_embed_dim, args.decoder_embed_path, ) lang_decoders[lang] = cls._get_module_class( False, args, task.dicts[lang], decoder_embed_tokens, tgt_langs ) return lang_decoders[lang] # shared encoders/decoders (if applicable) shared_encoder, shared_decoder = None, None if args.share_encoders: shared_encoder = get_encoder(src_langs[0]) if args.share_decoders: shared_decoder = get_decoder(tgt_langs[0]) encoders, decoders = OrderedDict(), OrderedDict() for lang_pair, src, tgt in zip(task.model_lang_pairs, src_langs, tgt_langs): encoders[lang_pair] = ( shared_encoder if shared_encoder is not None else get_encoder(src) ) decoders[lang_pair] = ( shared_decoder if shared_decoder is not None else get_decoder(tgt) ) return MultilingualTransformerModel(encoders, decoders) @classmethod def _get_module_class(cls, is_encoder, args, lang_dict, embed_tokens, langs): module_class = TransformerEncoder if is_encoder else TransformerDecoder return module_class(args, lang_dict, embed_tokens) def load_state_dict(self, state_dict, strict=True, model_cfg=None): state_dict_subset = state_dict.copy() for k, _ in state_dict.items(): assert k.startswith("models.") lang_pair = k.split(".")[1] if lang_pair not in self.models: del state_dict_subset[k] super().load_state_dict(state_dict_subset, strict=strict, model_cfg=model_cfg) @register_model_architecture("multilingual_transformer", "multilingual_transformer") def base_multilingual_architecture(args): base_architecture(args) args.share_encoder_embeddings = getattr(args, "share_encoder_embeddings", False) args.share_decoder_embeddings = getattr(args, "share_decoder_embeddings", False) args.share_encoders = getattr(args, "share_encoders", False) args.share_decoders = getattr(args, "share_decoders", False) @register_model_architecture( "multilingual_transformer", "multilingual_transformer_iwslt_de_en" ) def multilingual_transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) 
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.encoder_layers = getattr(args, "encoder_layers", 6) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.decoder_layers = getattr(args, "decoder_layers", 6) base_multilingual_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/multilingual_transformer.py
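The architecture functions registered above only fill in defaults on an args namespace, so they can be inspected without building a model. A minimal sketch, assuming this fairseq checkout and its dependencies are importable; the printed values follow directly from the defaults set in multilingual_transformer_iwslt_de_en and base_multilingual_architecture.
from argparse import Namespace

from fairseq.models.multilingual_transformer import (
    multilingual_transformer_iwslt_de_en,
)

args = Namespace()
multilingual_transformer_iwslt_de_en(args)  # mutates args in place
print(args.encoder_embed_dim, args.encoder_ffn_embed_dim)   # 512 1024
print(args.share_encoder_embeddings, args.share_decoders)   # False False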
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional, Tuple import torch.nn as nn from fairseq import utils from torch import Tensor class FairseqDecoder(nn.Module): """Base class for decoders.""" def __init__(self, dictionary): super().__init__() self.dictionary = dictionary self.onnx_trace = False self.adaptive_softmax = None def forward(self, prev_output_tokens, encoder_out=None, **kwargs): """ Args: prev_output_tokens (LongTensor): shifted output tokens of shape `(batch, tgt_len)`, for teacher forcing encoder_out (dict, optional): output from the encoder, used for encoder-side attention Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, **kwargs ) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs): """ Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ raise NotImplementedError def output_layer(self, features, **kwargs): """ Project features to the default output size, e.g., vocabulary size. Args: features (Tensor): features returned by *extract_features*. """ raise NotImplementedError def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """Get normalized probabilities (or log probs) from a net's output.""" return self.get_normalized_probs_scriptable(net_output, log_probs, sample) # TorchScript doesn't support super() method so that the scriptable Subclass # can't access the base class model in Torchscript. # Current workaround is to add a helper function with different name and # call the helper function from scriptable Subclass. def get_normalized_probs_scriptable( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """Get normalized probabilities (or log probs) from a net's output.""" if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None: if sample is not None: assert "target" in sample target = sample["target"] else: target = None out = self.adaptive_softmax.get_log_prob(net_output[0], target=target) return out.exp_() if not log_probs else out logits = net_output[0] if log_probs: return utils.log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace) else: return utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace) def max_positions(self): """Maximum input length supported by the decoder.""" return 1e6 # an arbitrary large number def upgrade_state_dict_named(self, state_dict, name): """Upgrade old state dicts to work with newer code.""" return state_dict def prepare_for_onnx_export_(self): self.onnx_trace = True
KosmosX-API-main
kosmosX/fairseq/fairseq/models/fairseq_decoder.py
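A hedged sketch of the smallest useful FairseqDecoder subclass, showing the two hooks the base class leaves abstract (extract_features and output_layer) and how forward and get_normalized_probs then come for free. TinyDecoder and the toy symbols are illustrative only, not part of fairseq.
import torch
import torch.nn as nn

from fairseq.data import Dictionary
from fairseq.models import FairseqDecoder


class TinyDecoder(FairseqDecoder):
    def __init__(self, dictionary, embed_dim=16):
        super().__init__(dictionary)
        self.embed = nn.Embedding(len(dictionary), embed_dim, dictionary.pad())
        self.proj = nn.Linear(embed_dim, len(dictionary))

    def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
        # (batch, tgt_len) -> (batch, tgt_len, embed_dim), plus an extras dict
        return self.embed(prev_output_tokens), {}

    def output_layer(self, features, **kwargs):
        # project features to vocabulary-sized logits
        return self.proj(features)


d = Dictionary()
d.add_symbol("hello")
d.add_symbol("world")
dec = TinyDecoder(d)
logits, _ = dec(torch.tensor([[d.index("hello")]]))
probs = dec.get_normalized_probs((logits, None), log_probs=False)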
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" import argparse import importlib import os from contextlib import ExitStack from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import merge_with_parent from hydra.core.config_store import ConfigStore from omegaconf import open_dict, OmegaConf from .composite_encoder import CompositeEncoder from .distributed_fairseq_model import DistributedFairseqModel from .fairseq_decoder import FairseqDecoder from .fairseq_encoder import FairseqEncoder from .fairseq_incremental_decoder import FairseqIncrementalDecoder from .fairseq_model import ( BaseFairseqModel, FairseqEncoderDecoderModel, FairseqEncoderModel, FairseqLanguageModel, FairseqModel, FairseqMultiModel, ) MODEL_REGISTRY = {} MODEL_DATACLASS_REGISTRY = {} ARCH_MODEL_REGISTRY = {} ARCH_MODEL_NAME_REGISTRY = {} ARCH_MODEL_INV_REGISTRY = {} ARCH_CONFIG_REGISTRY = {} __all__ = [ "BaseFairseqModel", "CompositeEncoder", "DistributedFairseqModel", "FairseqDecoder", "FairseqEncoder", "FairseqEncoderDecoderModel", "FairseqEncoderModel", "FairseqIncrementalDecoder", "FairseqLanguageModel", "FairseqModel", "FairseqMultiModel", ] def build_model(cfg: FairseqDataclass, task, from_checkpoint=False): model = None model_type = getattr(cfg, "_name", None) or getattr(cfg, "arch", None) if not model_type and len(cfg) == 1: # this is hit if config object is nested in directory that is named after model type model_type = next(iter(cfg)) if model_type in MODEL_DATACLASS_REGISTRY: cfg = cfg[model_type] else: raise Exception( "Could not infer model type from directory. Please add _name field to indicate model type. " "Available models: " + str(MODEL_DATACLASS_REGISTRY.keys()) + " Requested model type: " + model_type ) if model_type in ARCH_MODEL_REGISTRY: # case 1: legacy models model = ARCH_MODEL_REGISTRY[model_type] elif model_type in MODEL_DATACLASS_REGISTRY: # case 2: config-driven models model = MODEL_REGISTRY[model_type] if model_type in MODEL_DATACLASS_REGISTRY: # set defaults from dataclass. note that arch name and model name can be the same dc = MODEL_DATACLASS_REGISTRY[model_type] if isinstance(cfg, argparse.Namespace): cfg = dc.from_namespace(cfg) else: cfg = merge_with_parent(dc(), cfg, from_checkpoint) else: if model_type in ARCH_CONFIG_REGISTRY: with open_dict(cfg) if OmegaConf.is_config(cfg) else ExitStack(): # this calls the different "arch" functions (like base_architecture()) that you indicate # if you specify --arch on the command line. this is only applicable to the old argparse based models # hydra models should expose different architectures via different config files # it will modify the cfg object and default parameters according to the arch ARCH_CONFIG_REGISTRY[model_type](cfg) assert model is not None, ( f"Could not infer model type from {cfg}. " "Available models: {}".format(MODEL_DATACLASS_REGISTRY.keys()) + f" Requested model type: {model_type}" ) return model.build_model(cfg, task) def register_model(name, dataclass=None): """ New model types can be added to fairseq with the :func:`register_model` function decorator. For example:: @register_model('lstm') class LSTM(FairseqEncoderDecoderModel): (...) .. note:: All models must implement the :class:`BaseFairseqModel` interface. 
Typically you will extend :class:`FairseqEncoderDecoderModel` for sequence-to-sequence tasks or :class:`FairseqLanguageModel` for language modeling tasks. Args: name (str): the name of the model """ def register_model_cls(cls): if name in MODEL_REGISTRY: raise ValueError("Cannot register duplicate model ({})".format(name)) if not issubclass(cls, BaseFairseqModel): raise ValueError( "Model ({}: {}) must extend BaseFairseqModel".format(name, cls.__name__) ) MODEL_REGISTRY[name] = cls if dataclass is not None and not issubclass(dataclass, FairseqDataclass): raise ValueError( "Dataclass {} must extend FairseqDataclass".format(dataclass) ) cls.__dataclass = dataclass if dataclass is not None: MODEL_DATACLASS_REGISTRY[name] = dataclass cs = ConfigStore.instance() node = dataclass() node._name = name cs.store(name=name, group="model", node=node, provider="fairseq") @register_model_architecture(name, name) def noop(_): pass return cls return register_model_cls def register_model_architecture(model_name, arch_name): """ New model architectures can be added to fairseq with the :func:`register_model_architecture` function decorator. After registration, model architectures can be selected with the ``--arch`` command-line argument. For example:: @register_model_architecture('lstm', 'lstm_luong_wmt_en_de') def lstm_luong_wmt_en_de(cfg): args.encoder_embed_dim = getattr(cfg.model, 'encoder_embed_dim', 1000) (...) The decorated function should take a single argument *cfg*, which is a :class:`omegaconf.DictConfig`. The decorated function should modify these arguments in-place to match the desired architecture. Args: model_name (str): the name of the Model (Model must already be registered) arch_name (str): the name of the model architecture (``--arch``) """ def register_model_arch_fn(fn): if model_name not in MODEL_REGISTRY: raise ValueError( "Cannot register model architecture for unknown model type ({})".format( model_name ) ) if arch_name in ARCH_MODEL_REGISTRY: raise ValueError( "Cannot register duplicate model architecture ({})".format(arch_name) ) if not callable(fn): raise ValueError( "Model architecture must be callable ({})".format(arch_name) ) ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name] ARCH_MODEL_NAME_REGISTRY[arch_name] = model_name ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name) ARCH_CONFIG_REGISTRY[arch_name] = fn return fn return register_model_arch_fn def import_models(models_dir, namespace): for file in os.listdir(models_dir): path = os.path.join(models_dir, file) if ( not file.startswith("_") and not file.startswith(".") and (file.endswith(".py") or os.path.isdir(path)) ): model_name = file[: file.find(".py")] if file.endswith(".py") else file importlib.import_module(namespace + "." + model_name) # extra `model_parser` for sphinx if model_name in MODEL_REGISTRY: parser = argparse.ArgumentParser(add_help=False) group_archs = parser.add_argument_group("Named architectures") group_archs.add_argument( "--arch", choices=ARCH_MODEL_INV_REGISTRY[model_name] ) group_args = parser.add_argument_group( "Additional command-line arguments" ) MODEL_REGISTRY[model_name].add_args(group_args) globals()[model_name + "_parser"] = parser # automatically import any Python files in the models/ directory models_dir = os.path.dirname(__file__) import_models(models_dir, "fairseq.models")
KosmosX-API-main
kosmosX/fairseq/fairseq/models/__init__.py
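For illustration, a sketch of how the registries above are normally used together; "toy_lm" and "toy_lm_tiny" are hypothetical names, not models that ship with fairseq, and the model class is deliberately left unimplemented.
from fairseq.models import (
    FairseqLanguageModel,
    register_model,
    register_model_architecture,
)


@register_model("toy_lm")
class ToyLanguageModel(FairseqLanguageModel):
    @classmethod
    def build_model(cls, args, task):
        raise NotImplementedError("illustrative only")


@register_model_architecture("toy_lm", "toy_lm_tiny")
def toy_lm_tiny(args):
    # architecture functions only set defaults on the args namespace
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 64)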
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from typing import Any, Dict from fairseq import checkpoint_utils from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( TransformerDecoder, TransformerEncoder, TransformerModel, base_architecture as transformer_base_architecture, ) @register_model("transformer_from_pretrained_xlm") class TransformerFromPretrainedXLMModel(TransformerModel): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" TransformerModel.add_args(parser) parser.add_argument( "--pretrained-xlm-checkpoint", type=str, metavar="STR", help="XLM model to use for initializing transformer encoder and/or decoder", ) parser.add_argument( "--init-encoder-only", action="store_true", help="if set, don't load the XLM weights and embeddings into decoder", ) parser.add_argument( "--init-decoder-only", action="store_true", help="if set, don't load the XLM weights and embeddings into encoder", ) @classmethod def build_model(self, args, task, cls_dictionary=MaskedLMDictionary): assert hasattr(args, "pretrained_xlm_checkpoint"), ( "You must specify a path for --pretrained-xlm-checkpoint to use " "--arch transformer_from_pretrained_xlm" ) assert isinstance(task.source_dictionary, cls_dictionary) and isinstance( task.target_dictionary, cls_dictionary ), ( "You should use a MaskedLMDictionary when using --arch " "transformer_from_pretrained_xlm because the pretrained XLM model " "was trained using data binarized with MaskedLMDictionary. " "For translation, you may want to use --task " "translation_from_pretrained_xlm" ) assert not ( getattr(args, "init_encoder_only", False) and getattr(args, "init_decoder_only", False) ), "Only one of --init-encoder-only and --init-decoder-only can be set." return super().build_model(args, task) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoderFromPretrainedXLM(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoderFromPretrainedXLM(args, tgt_dict, embed_tokens) def upgrade_state_dict_with_xlm_weights( state_dict: Dict[str, Any], pretrained_xlm_checkpoint: str ) -> Dict[str, Any]: """ Load XLM weights into a Transformer encoder or decoder model. Args: state_dict: state dict for either TransformerEncoder or TransformerDecoder pretrained_xlm_checkpoint: checkpoint to load XLM weights from Raises: AssertionError: If architecture (num layers, attention heads, etc.) does not match between the current Transformer encoder or decoder and the pretrained_xlm_checkpoint """ if not os.path.exists(pretrained_xlm_checkpoint): raise IOError("Model file not found: {}".format(pretrained_xlm_checkpoint)) state = checkpoint_utils.load_checkpoint_to_cpu(pretrained_xlm_checkpoint) xlm_state_dict = state["model"] for key in xlm_state_dict.keys(): for search_key in ["embed_tokens", "embed_positions", "layers"]: if search_key in key: subkey = key[key.find(search_key) :] assert subkey in state_dict, ( "{} Transformer encoder / decoder " "state_dict does not contain {}. 
Cannot " "load {} from pretrained XLM checkpoint " "{} into Transformer.".format( str(state_dict.keys()), subkey, key, pretrained_xlm_checkpoint ) ) state_dict[subkey] = xlm_state_dict[key] return state_dict class TransformerEncoderFromPretrainedXLM(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) if getattr(args, "init_decoder_only", False): # Don't load XLM weights for encoder if --init-decoder-only return assert hasattr(args, "pretrained_xlm_checkpoint"), ( "--pretrained-xlm-checkpoint must be specified to load Transformer " "encoder from pretrained XLM" ) xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights( state_dict=self.state_dict(), pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint, ) self.load_state_dict(xlm_loaded_state_dict, strict=True) class TransformerDecoderFromPretrainedXLM(TransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(args, dictionary, embed_tokens, no_encoder_attn) if getattr(args, "init_encoder_only", False): # Don't load XLM weights for decoder if --init-encoder-only return assert hasattr(args, "pretrained_xlm_checkpoint"), ( "--pretrained-xlm-checkpoint must be specified to load Transformer " "decoder from pretrained XLM" ) xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights( state_dict=self.state_dict(), pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint, ) self.load_state_dict(xlm_loaded_state_dict, strict=True) @register_model_architecture( "transformer_from_pretrained_xlm", "transformer_from_pretrained_xlm" ) def base_architecture(args): transformer_base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/transformer_from_pretrained_xlm.py
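The key-matching rule in upgrade_state_dict_with_xlm_weights can be illustrated without a real checkpoint: any XLM key containing "embed_tokens", "embed_positions" or "layers" is copied onto the suffix of that key starting at the marker. A small sketch with a made-up key name:
key = "decoder.sentence_encoder.layers.0.self_attn.k_proj.weight"  # hypothetical XLM key
search_key = "layers"
subkey = key[key.find(search_key):]
print(subkey)  # layers.0.self_attn.k_proj.weight -> must already exist in the Transformer state_dict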
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch.nn as nn from torch.nn.parallel import DistributedDataParallel from fairseq.distributed import ( DistributedTimeoutWrapper, LegacyDistributedDataParallel, ModuleProxyWrapper, TPUDistributedDataParallel, ) logger = logging.getLogger(__name__) _SLOWMO_DDP_DISABLED = False try: from fairscale.experimental.nn.data_parallel import ( SlowMoBaseAlgorithm, SlowMoDistributedDataParallel, ) except ImportError: _SLOWMO_DDP_DISABLED = True def DistributedFairseqModel(args, model, process_group, device): """ Wrap a *model* to support distributed data parallel training. This is similar to the built-in DistributedDataParallel, but allows additional configuration of the DistributedDataParallel class to use, and also provides easier access to the wrapped model by forwarding requests for missing attributes to the wrapped model. Args: args (argparse.Namespace): fairseq args model (BaseFairseqModel): model to wrap process_group: the c10d process group to be used for distributed data parallel all-reduction. device: device to move model to """ assert isinstance(model, nn.Module) if args.tpu: wrapped_model = TPUDistributedDataParallel( module=model.to(device), process_group=process_group, ) # forward missing getattr and state_dict/load_state_dict to orig model wrapped_model = ModuleProxyWrapper(wrapped_model) elif args.ddp_backend in {"c10d", "pytorch_ddp"}: wrapped_model = DistributedDataParallel( module=model.to(device), device_ids=[args.device_id], output_device=args.device_id, broadcast_buffers=args.broadcast_buffers, bucket_cap_mb=args.bucket_cap_mb, process_group=process_group, find_unused_parameters=args.find_unused_parameters, gradient_as_bucket_view=args.gradient_as_bucket_view, ) if args.ddp_comm_hook == "fp16": logger.info("enable fp16 communication hook in DDP") try: from torch.distributed.algorithms.ddp_comm_hooks import ( register_ddp_comm_hook, DDPCommHookType, ) except: logger.error( "Could not import from torch.distributed.algorithms.ddp_comm_hooks; you may need to update your pytorch version" ) raise register_ddp_comm_hook(DDPCommHookType.FP16_COMPRESS, wrapped_model) # forward missing getattr and state_dict/load_state_dict to orig model wrapped_model = ModuleProxyWrapper(wrapped_model) elif args.ddp_backend in {"no_c10d", "legacy_ddp"}: wrapped_model = LegacyDistributedDataParallel( module=model.to(device), buffer_size=2 ** 28, process_group=process_group, ) # forward missing getattr and state_dict/load_state_dict to orig model wrapped_model = ModuleProxyWrapper(wrapped_model) elif args.ddp_backend == "slowmo": if _SLOWMO_DDP_DISABLED: raise ImportError( "Cannot find SlowMoDistributedDataParallel. 
" "Please install fairscale with: pip install fairscale" ) # The values of slowmo_momentum below were obtained by tuning on the # En-De 16 dataset by training the transformer_wmt_en_de_large model if args.slowmo_momentum is None: if args.distributed_world_size <= 16: args.slowmo_momentum = 0.0 elif args.distributed_world_size <= 32: args.slowmo_momentum = 0.2 elif args.distributed_world_size <= 64: args.slowmo_momentum = 0.5 else: args.slowmo_momentum = 0.6 slowmo_base_algorithm = SlowMoBaseAlgorithm[args.slowmo_base_algorithm.upper()] wrapped_model = SlowMoDistributedDataParallel( module=model.to(device), broadcast_buffers=args.broadcast_buffers, nprocs_per_node=args.nprocs_per_node, slowmo_momentum=args.slowmo_momentum, slowmo_base_algorithm=slowmo_base_algorithm, localsgd_frequency=args.localsgd_frequency, ) # forward missing getattr and state_dict/load_state_dict to orig model wrapped_model = ModuleProxyWrapper(wrapped_model) elif args.ddp_backend == "fully_sharded": try: from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP except ImportError: raise ImportError( "Cannot find FullyShardedDataParallel. " "Please install fairscale with: pip install fairscale" ) assert isinstance(model, FSDP), "expected model to already be wrapped in FSDP" wrapped_model = model if args.memory_efficient_fp16: wrapped_model = wrapped_model.half() if not args.cpu_offload: wrapped_model = wrapped_model.to(device=device) else: raise ValueError("Unknown --ddp-backend: " + args.ddp_backend) # kill hung distributed jobs after a timeout if getattr(args, "heartbeat_timeout", -1) > 0: wrapped_model = DistributedTimeoutWrapper( wrapped_model, timeout=getattr(args, "heartbeat_timeout", -1) ) return wrapped_model
KosmosX-API-main
kosmosX/fairseq/fairseq/models/distributed_fairseq_model.py
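The SlowMo momentum defaults chosen above depend only on the distributed world size; restated here as a standalone helper (a sketch mirroring the --ddp-backend=slowmo branch, not part of the fairseq API):
def default_slowmo_momentum(world_size: int) -> float:
    # thresholds copied from the slowmo branch of DistributedFairseqModel above
    if world_size <= 16:
        return 0.0
    if world_size <= 32:
        return 0.2
    if world_size <= 64:
        return 0.5
    return 0.6


assert default_slowmo_momentum(8) == 0.0
assert default_slowmo_momentum(48) == 0.5
assert default_slowmo_momentum(128) == 0.6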
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II DEFAULT_MAX_TARGET_POSITIONS = 1024 @dataclass class TransformerLanguageModelConfig(FairseqDataclass): activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( default="relu", metadata={"help": "activation function to use"} ) dropout: float = field(default=0.1, metadata={"help": "dropout probability"}) attention_dropout: float = field( default=0.0, metadata={"help": "dropout probability for attention weights"} ) activation_dropout: float = field( default=0.0, metadata={"help": "dropout probability after activation in FFN."} ) relu_dropout: float = field( default=0.0, metadata={"help": "dropout probability after activation in FFN."} ) decoder_embed_dim: int = field( default=512, metadata={"help": "decoder embedding dimension"} ) decoder_output_dim: int = field( default=512, metadata={"help": "decoder output dimension"} ) decoder_input_dim: int = field( default=512, metadata={"help": "decoder input dimension"} ) decoder_ffn_embed_dim: int = field( default=2048, metadata={"help": "decoder embedding dimension for FFN"} ) decoder_layers: int = field(default=6, metadata={"help": "num decoder layers"}) decoder_attention_heads: int = field( default=8, metadata={"help": "num decoder attention heads"} ) decoder_normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each decoder block"} ) no_decoder_final_norm: bool = field( default=False, metadata={"help": "don't add an extra layernorm after the last decoder block"}, ) adaptive_softmax_cutoff: Optional[str] = field( default=None, metadata={ "help": "comma separated list of adaptive softmax cutoff points. 
" "Must be used with adaptive_loss criterion" }, ) adaptive_softmax_dropout: float = field( default=0, metadata={"help": "sets adaptive softmax dropout for the tail projections"}, ) adaptive_softmax_factor: float = field( default=4, metadata={"help": "adaptive input factor"} ) no_token_positional_embeddings: bool = field( default=False, metadata={ "help": "if set, disables positional embeddings (outside self attention)" }, ) share_decoder_input_output_embed: bool = field( default=False, metadata={"help": "share decoder input and output embeddings"} ) character_embeddings: bool = field( default=False, metadata={ "help": "if set, uses character embedding convolutions to produce token embeddings" }, ) character_filters: str = field( default="[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]", metadata={"help": "size of character embeddings"}, ) character_embedding_dim: int = field( default=4, metadata={"help": "size of character embeddings"} ) char_embedder_highway_layers: int = field( default=2, metadata={"help": "number of highway layers for character token embeddder"}, ) adaptive_input: bool = field( default=False, metadata={"help": "if set, uses adaptive input"} ) adaptive_input_factor: float = field( default=4, metadata={"help": "adaptive input factor"} ) adaptive_input_cutoff: Optional[str] = field( default=None, metadata={"help": "comma separated list of adaptive input cutoff points."}, ) tie_adaptive_weights: bool = field( default=False, metadata={ "help": "if set, ties the weights of adaptive softmax and adaptive input" }, ) tie_adaptive_proj: bool = field( default=False, metadata={ "help": "if set, ties the projection weights of adaptive softmax and adaptive input" }, ) decoder_learned_pos: bool = field( default=False, metadata={"help": "use learned positional embeddings in the decoder"}, ) layernorm_embedding: bool = field( default=False, metadata={"help": "add layernorm to embedding"} ) no_scale_embedding: bool = field( default=False, metadata={"help": "if True, dont scale embeddings"} ) checkpoint_activations: bool = field( default=False, metadata={"help": "checkpoint activations at each layer"} ) offload_activations: bool = field( default=False, metadata={"help": "move checkpointed activations to CPU after they are used."}, ) # config for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) decoder_layerdrop: float = field( default=0.0, metadata={"help": "LayerDrop probability for decoder"} ) decoder_layers_to_keep: Optional[str] = field( default=None, metadata={ "help": "which layers to *keep* when pruning as a comma-separated list" }, ) # config for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020) quant_noise_pq: float = field( default=0.0, metadata={"help": "iterative PQ quantization noise at training time"}, ) quant_noise_pq_block_size: int = field( default=8, metadata={"help": "block size of quantization noise at training time"}, ) quant_noise_scalar: float = field( default=0.0, metadata={ "help": "scalar quantization noise and scalar quantization at training time" }, ) # config for Fully Sharded Data Parallel (FSDP) training min_params_to_wrap: int = field( default=DEFAULT_MIN_PARAMS_TO_WRAP, metadata={ "help": ( "minimum number of params for a layer to be wrapped with FSDP() when " "training with --ddp-backend=fully_sharded. Smaller values will " "improve memory efficiency, but may make torch.distributed " "communication less efficient due to smaller input sizes. 
This option " "is set to 0 (i.e., always wrap) when --checkpoint-activations or " "--offload-activations are passed." ) }, ) # config for "BASE Layers: Simplifying Training of Large, Sparse Models" base_layers: Optional[int] = field( default=0, metadata={"help": "number of BASE layers in total"} ) base_sublayers: Optional[int] = field( default=1, metadata={"help": "number of sublayers in each BASE layer"} ) base_shuffle: Optional[int] = field( default=1, metadata={"help": "shuffle tokens between workers before computing assignment"}, ) # NormFormer scale_fc: Optional[bool] = field( default=False, metadata={"help": "Insert LayerNorm between fully connected layers"}, ) scale_attn: Optional[bool] = field( default=False, metadata={"help": "Insert LayerNorm after attention"} ) scale_heads: Optional[bool] = field( default=False, metadata={"help": "Learn a scale coefficient for each attention head"}, ) scale_resids: Optional[bool] = field( default=False, metadata={"help": "Learn a scale coefficient for each residual connection"}, ) # options from other parts of the config add_bos_token: bool = II("task.add_bos_token") tokens_per_sample: int = II("task.tokens_per_sample") max_target_positions: Optional[int] = II("task.max_target_positions") tpu: bool = II("common.tpu") @register_model("transformer_lm", dataclass=TransformerLanguageModelConfig) class TransformerLanguageModel(FairseqLanguageModel): @classmethod def hub_models(cls): def moses_fastbpe(path): return {"path": path, "tokenizer": "moses", "bpe": "fastbpe"} def spm(path): return {"path": path, "tokenizer": "space", "bpe": "sentencepiece"} return { "transformer_lm.gbw.adaptive_huge": "https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2", "transformer_lm.wiki103.adaptive": "https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.v2.tar.bz2", "transformer_lm.wmt19.en": moses_fastbpe( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.bz2" ), "transformer_lm.wmt19.de": moses_fastbpe( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.de.tar.bz2" ), "transformer_lm.wmt19.ru": moses_fastbpe( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.ru.tar.bz2" ), "transformer_lm.wmt20.en": spm( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.en.tar.gz" ), "transformer_lm.wmt20.ta": spm( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.ta.tar.gz" ), "transformer_lm.wmt20.iu.news": spm( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.iu.news.tar.gz" ), "transformer_lm.wmt20.iu.nh": spm( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.iu.nh.tar.gz" ), } def __init__(self, decoder): super().__init__(decoder) @classmethod def build_model(cls, args, task): """Build a new model instance.""" if args.decoder_layers_to_keep: args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) if safe_getattr(args, "max_target_positions", None) is None: args.max_target_positions = safe_getattr( args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS ) if args.character_embeddings: embed_tokens = CharacterTokenEmbedder( task.source_dictionary, eval(args.character_filters), args.character_embedding_dim, args.decoder_embed_dim, args.char_embedder_highway_layers, ) elif args.adaptive_input: embed_tokens = AdaptiveInput( len(task.source_dictionary), task.source_dictionary.pad(), args.decoder_input_dim, args.adaptive_input_factor, args.decoder_embed_dim, options.eval_str_list(args.adaptive_input_cutoff, type=int), args.quant_noise_pq, args.quant_noise_pq_block_size, ) else: 
embed_tokens = cls.build_embedding( args, task.source_dictionary, args.decoder_input_dim ) if args.tie_adaptive_weights: assert args.adaptive_input assert args.adaptive_input_factor == args.adaptive_softmax_factor assert ( args.adaptive_softmax_cutoff == args.adaptive_input_cutoff ), "{} != {}".format( args.adaptive_softmax_cutoff, args.adaptive_input_cutoff ) assert args.decoder_input_dim == args.decoder_output_dim decoder = TransformerDecoder( args, task.target_dictionary, embed_tokens, no_encoder_attn=True ) return cls(decoder) @classmethod def build_embedding(cls, args, dictionary, embed_dim, path=None): embed_tokens = Embedding(len(dictionary), embed_dim, dictionary.pad()) return embed_tokens def base_lm_architecture(args): # backward compatibility for older model checkpoints if safe_hasattr(args, "no_tie_adaptive_proj"): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if safe_hasattr(args, "decoder_final_norm"): args.no_decoder_final_norm = not args.decoder_final_norm args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = safe_getattr(args, "decoder_layers", 6) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) args.activation_fn = safe_getattr(args, "activation_fn", "relu") args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0) args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None) args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) args.base_layers = safe_getattr(args, "base_layers", 0) args.base_sublayers = safe_getattr(args, "base_sublayers", 1) args.base_shuffle = safe_getattr(args, "base_shuffle", False) args.add_bos_token = safe_getattr(args, "add_bos_token", False) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) args.share_decoder_input_output_embed = safe_getattr( args, "share_decoder_input_output_embed", False ) args.character_embeddings = safe_getattr(args, "character_embeddings", False) args.decoder_output_dim = safe_getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = safe_getattr( args, "decoder_input_dim", args.decoder_embed_dim ) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False) args.adaptive_input = safe_getattr(args, "adaptive_input", False) args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False) 
args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False) args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", False) args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False) args.offload_activations = safe_getattr(args, "offload_activations", False) args.scale_fc = safe_getattr(args, "scale_fc", False) args.scale_attn = safe_getattr(args, "scale_attn", False) args.scale_heads = safe_getattr(args, "scale_heads", False) args.scale_resids = safe_getattr(args, "scale_resids", False) if args.offload_activations: args.checkpoint_activations = True @register_model_architecture("transformer_lm", "transformer_lm_big") def transformer_lm_big(args): args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_wiki103") @register_model_architecture("transformer_lm", "transformer_lm_baevski_wiki103") def transformer_lm_baevski_wiki103(args): args.decoder_layers = safe_getattr(args, "decoder_layers", 16) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.dropout = safe_getattr(args, "dropout", 0.3) args.adaptive_input = safe_getattr(args, "adaptive_input", True) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", True) args.adaptive_input_cutoff = safe_getattr( args, "adaptive_input_cutoff", "20000,60000" ) args.adaptive_softmax_cutoff = safe_getattr( args, "adaptive_softmax_cutoff", "20000,60000" ) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0.2) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_dropout = safe_getattr(args, "activation_dropout", 0.1) args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", True) transformer_lm_big(args) @register_model_architecture("transformer_lm", "transformer_lm_gbw") @register_model_architecture("transformer_lm", "transformer_lm_baevski_gbw") def transformer_lm_baevski_gbw(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) transformer_lm_big(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt") def transformer_lm_gpt(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 3072) args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt2_small") def transformer_lm_gpt2_small(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 
4096) args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt2_tiny") def transformer_lm_gpt2_tiny(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 64) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 64) args.decoder_layers = safe_getattr(args, "decoder_layers", 2) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 1) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt2_medium") def transformer_lm_gpt2_medium(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1280) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 5120) args.decoder_layers = safe_getattr(args, "decoder_layers", 36) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 20) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt2_big") def transformer_lm_gpt2_big(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1600) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 6400) args.decoder_layers = safe_getattr(args, "decoder_layers", 48) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 25) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args) def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr( args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4 ) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_small") def transformer_lm_gpt3_small(args): # 125M params args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_medium") def transformer_lm_gpt3_medium(args): # 350M params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 
16) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_large") def transformer_lm_gpt3_large(args): # 760M params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1536) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_xl") def transformer_lm_gpt3_xl(args): # 1.3B params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2048) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_2_7") def transformer_lm_gpt3_2_7(args): # 2.7B params args.decoder_layers = safe_getattr(args, "decoder_layers", 32) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2560) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_6_7") def transformer_lm_gpt3_6_7(args): # 6.7B params args.decoder_layers = safe_getattr(args, "decoder_layers", 32) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 4096) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_13") def transformer_lm_gpt3_13(args): # 13B params args.decoder_layers = safe_getattr(args, "decoder_layers", 40) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 5120) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 40) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_175") def transformer_lm_gpt3_175(args): # 175B params args.decoder_layers = safe_getattr(args, "decoder_layers", 96) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 12288) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 96) base_gpt3_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/transformer_lm.py
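Like the other architecture functions, the GPT-3 style presets only populate defaults on an args namespace, so the model sizes can be inspected without instantiating anything. A sketch assuming this fairseq checkout is importable; the expected values follow from transformer_lm_gpt3_small and base_gpt3_architecture above.
from argparse import Namespace

from fairseq.models.transformer_lm import transformer_lm_gpt3_small

args = Namespace()
transformer_lm_gpt3_small(args)
print(args.decoder_layers, args.decoder_embed_dim, args.decoder_attention_heads)  # 12 768 12
print(args.decoder_ffn_embed_dim)  # 3072, i.e. 4 * decoder_embed_dim per base_gpt3_architecture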
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, BeamableMM, FairseqDropout, GradMultiply, LearnedPositionalEmbedding, LinearizedConvolution, ) @register_model("fconv") class FConvModel(FairseqEncoderDecoderModel): """ A fully convolutional model, i.e. a convolutional encoder and a convolutional decoder, as described in `"Convolutional Sequence to Sequence Learning" (Gehring et al., 2017) <https://arxiv.org/abs/1705.03122>`_. Args: encoder (FConvEncoder): the encoder decoder (FConvDecoder): the decoder The Convolutional model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.fconv_parser :prog: """ @classmethod def hub_models(cls): def moses_subword(path): return { "path": path, "tokenizer": "moses", "bpe": "subword_nmt", } return { "conv.wmt14.en-fr": moses_subword( "https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2" ), "conv.wmt14.en-de": moses_subword( "https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-de.fconv-py.tar.bz2" ), "conv.wmt17.en-de": moses_subword( "https://dl.fbaipublicfiles.com/fairseq/models/wmt17.v2.en-de.fconv-py.tar.bz2" ), } def __init__(self, encoder, decoder): super().__init__(encoder, decoder) self.encoder.num_attention_layers = sum( layer is not None for layer in decoder.attention ) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-layers', type=str, metavar='EXPR', help='encoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]') parser.add_argument('--share-input-output-embed', action='store_true', help='share input and output embeddings (requires' ' --decoder-out-embed-dim and --decoder-embed-dim' ' to be equal)') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted (in case there are any new ones) base_architecture(args) encoder_embed_dict = None if args.encoder_embed_path: encoder_embed_dict = utils.parse_embedding(args.encoder_embed_path) utils.print_embed_overlap(encoder_embed_dict, task.source_dictionary) decoder_embed_dict = None if args.decoder_embed_path: decoder_embed_dict = utils.parse_embedding(args.decoder_embed_path) 
utils.print_embed_overlap(decoder_embed_dict, task.target_dictionary) encoder = FConvEncoder( dictionary=task.source_dictionary, embed_dim=args.encoder_embed_dim, embed_dict=encoder_embed_dict, convolutions=eval(args.encoder_layers), dropout=args.dropout, max_positions=args.max_source_positions, ) decoder = FConvDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, embed_dict=decoder_embed_dict, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_out_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.max_target_positions, share_embed=args.share_input_output_embed, ) return FConvModel(encoder, decoder) class FConvEncoder(FairseqEncoder): """ Convolutional encoder consisting of `len(convolutions)` layers. Args: dictionary (~fairseq.data.Dictionary): encoding dictionary embed_dim (int, optional): embedding dimension embed_dict (str, optional): filename from which to load pre-trained embeddings max_positions (int, optional): maximum supported input sequence length convolutions (list, optional): the convolutional layer structure. Each list item `i` corresponds to convolutional layer `i`. Layers are given as ``(out_channels, kernel_width, [residual])``. Residual connections are added between layers when ``residual=1`` (which is the default behavior). dropout (float, optional): dropout to be applied before each conv layer """ def __init__( self, dictionary, embed_dim=512, embed_dict=None, max_positions=1024, convolutions=((512, 3),) * 20, dropout=0.1, ): super().__init__(dictionary) self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.num_attention_layers = None num_embeddings = len(dictionary) self.padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) if embed_dict: self.embed_tokens = utils.load_embedding( embed_dict, self.dictionary, self.embed_tokens ) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, self.padding_idx, ) convolutions = extend_conv_spec(convolutions) in_channels = convolutions[0][0] self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.residuals = [] layer_in_channels = [in_channels] for _, (out_channels, kernel_size, residual) in enumerate(convolutions): if residual == 0: residual_dim = out_channels else: residual_dim = layer_in_channels[-residual] self.projections.append( Linear(residual_dim, out_channels) if residual_dim != out_channels else None ) if kernel_size % 2 == 1: padding = kernel_size // 2 else: padding = 0 self.convolutions.append( ConvTBC( in_channels, out_channels * 2, kernel_size, dropout=dropout, padding=padding, ) ) self.residuals.append(residual) in_channels = out_channels layer_in_channels.append(out_channels) self.fc2 = Linear(in_channels, embed_dim) def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (tuple): a tuple with two elements, where the first element is the last encoder layer's output and the second element is the same quantity summed with the input embedding (used for attention). The shape of both tensors is `(batch, src_len, embed_dim)`. 
- **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens) x = self.dropout_module(x) input_embedding = x # project to size of convolution x = self.fc1(x) # used to mask padding in input encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B if not encoder_padding_mask.any(): encoder_padding_mask = None # B x T x C -> T x B x C x = x.transpose(0, 1) residuals = [x] # temporal convolutions for proj, conv, res_layer in zip( self.projections, self.convolutions, self.residuals ): if res_layer > 0: residual = residuals[-res_layer] residual = residual if proj is None else proj(residual) else: residual = None if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) x = self.dropout_module(x) if conv.kernel_size[0] % 2 == 1: # padding is implicit in the conv x = conv(x) else: padding_l = (conv.kernel_size[0] - 1) // 2 padding_r = conv.kernel_size[0] // 2 x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r)) x = conv(x) x = F.glu(x, dim=2) if residual is not None: x = (x + residual) * math.sqrt(0.5) residuals.append(x) # T x B x C -> B x T x C x = x.transpose(1, 0) # project back to size of embedding x = self.fc2(x) if encoder_padding_mask is not None: encoder_padding_mask = encoder_padding_mask.t() # -> B x T x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) # scale gradients (this only affects backward, not forward) x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers)) # add output to input embedding for attention y = (x + input_embedding) * math.sqrt(0.5) return { "encoder_out": (x, y), "encoder_padding_mask": encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, new_order): if encoder_out["encoder_out"] is not None: encoder_out["encoder_out"] = ( encoder_out["encoder_out"][0].index_select(0, new_order), encoder_out["encoder_out"][1].index_select(0, new_order), ) if encoder_out["encoder_padding_mask"] is not None: encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return self.embed_positions.max_positions class AttentionLayer(nn.Module): def __init__(self, conv_channels, embed_dim, bmm=None): super().__init__() # projects from output of convolution to embedding dimension self.in_projection = Linear(conv_channels, embed_dim) # projects from embedding dimension to convolution size self.out_projection = Linear(embed_dim, conv_channels) self.bmm = bmm if bmm is not None else torch.bmm def forward(self, x, target_embedding, encoder_out, encoder_padding_mask): residual = x # attention x = (self.in_projection(x) + target_embedding) * math.sqrt(0.5) x = self.bmm(x, encoder_out[0]) # don't attend over padding if encoder_padding_mask is not None: x = ( x.float() .masked_fill(encoder_padding_mask.unsqueeze(1), float("-inf")) .type_as(x) ) # FP16 support: cast to float and back # softmax over last dim sz = x.size() x = F.softmax(x.view(sz[0] * sz[1], sz[2]), dim=1) x = x.view(sz) attn_scores = x x = self.bmm(x, encoder_out[1]) # scale attention output (respecting potentially different lengths) s = encoder_out[1].size(1) if encoder_padding_mask is None: x = x * (s * math.sqrt(1.0 / s)) else: s = s - encoder_padding_mask.type_as(x).sum( dim=1, keepdim=True ) # exclude padding s = s.unsqueeze(-1) x = x * (s * s.rsqrt()) # project back 
x = (self.out_projection(x) + residual) * math.sqrt(0.5) return x, attn_scores def make_generation_fast_(self, beamable_mm_beam_size=None, **kwargs): """Replace torch.bmm with BeamableMM.""" if beamable_mm_beam_size is not None: del self.bmm self.add_module("bmm", BeamableMM(beamable_mm_beam_size)) class FConvDecoder(FairseqIncrementalDecoder): """Convolutional decoder""" def __init__( self, dictionary, embed_dim=512, embed_dict=None, out_embed_dim=256, max_positions=1024, convolutions=((512, 3),) * 20, attention=True, dropout=0.1, share_embed=False, positional_embeddings=True, adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0.0, ): super().__init__(dictionary) self.register_buffer("version", torch.Tensor([2])) self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.need_attn = True convolutions = extend_conv_spec(convolutions) in_channels = convolutions[0][0] if isinstance(attention, bool): # expand True into [True, True, ...] and do the same with False attention = [attention] * len(convolutions) if not isinstance(attention, list) or len(attention) != len(convolutions): raise ValueError( "Attention is expected to be a list of booleans of " "length equal to the number of layers." ) num_embeddings = len(dictionary) padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) if embed_dict: self.embed_tokens = utils.load_embedding( embed_dict, self.dictionary, self.embed_tokens ) self.embed_positions = ( PositionalEmbedding( max_positions, embed_dim, padding_idx, ) if positional_embeddings else None ) self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.attention = nn.ModuleList() self.residuals = [] layer_in_channels = [in_channels] for i, (out_channels, kernel_size, residual) in enumerate(convolutions): if residual == 0: residual_dim = out_channels else: residual_dim = layer_in_channels[-residual] self.projections.append( Linear(residual_dim, out_channels) if residual_dim != out_channels else None ) self.convolutions.append( LinearizedConv1d( in_channels, out_channels * 2, kernel_size, padding=(kernel_size - 1), dropout=dropout, ) ) self.attention.append( AttentionLayer(out_channels, embed_dim) if attention[i] else None ) self.residuals.append(residual) in_channels = out_channels layer_in_channels.append(out_channels) self.adaptive_softmax = None self.fc2 = self.fc3 = None if adaptive_softmax_cutoff is not None: assert not share_embed self.adaptive_softmax = AdaptiveSoftmax( num_embeddings, in_channels, adaptive_softmax_cutoff, dropout=adaptive_softmax_dropout, ) else: self.fc2 = Linear(in_channels, out_embed_dim) if share_embed: assert out_embed_dim == embed_dim, ( "Shared embed weights implies same dimensions " " out_embed_dim={} vs embed_dim={}".format(out_embed_dim, embed_dim) ) self.fc3 = nn.Linear(out_embed_dim, num_embeddings) self.fc3.weight = self.embed_tokens.weight else: self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout) def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused ): if encoder_out is not None: encoder_padding_mask = encoder_out["encoder_padding_mask"] encoder_out = encoder_out["encoder_out"] # split and transpose encoder outputs encoder_a, encoder_b = self._split_encoder_out( encoder_out, incremental_state ) if self.embed_positions is not None: pos_embed = self.embed_positions(prev_output_tokens, incremental_state) else: pos_embed = 0 if incremental_state is 
not None: prev_output_tokens = prev_output_tokens[:, -1:] x = self._embed_tokens(prev_output_tokens, incremental_state) # embed tokens and combine with positional embeddings x += pos_embed x = self.dropout_module(x) target_embedding = x # project to size of convolution x = self.fc1(x) # B x T x C -> T x B x C x = self._transpose_if_training(x, incremental_state) # temporal convolutions avg_attn_scores = None num_attn_layers = len(self.attention) residuals = [x] for proj, conv, attention, res_layer in zip( self.projections, self.convolutions, self.attention, self.residuals ): if res_layer > 0: residual = residuals[-res_layer] residual = residual if proj is None else proj(residual) else: residual = None x = self.dropout_module(x) x = conv(x, incremental_state) x = F.glu(x, dim=2) # attention if attention is not None: x = self._transpose_if_training(x, incremental_state) x, attn_scores = attention( x, target_embedding, (encoder_a, encoder_b), encoder_padding_mask ) if not self.training and self.need_attn: attn_scores = attn_scores / num_attn_layers if avg_attn_scores is None: avg_attn_scores = attn_scores else: avg_attn_scores.add_(attn_scores) x = self._transpose_if_training(x, incremental_state) # residual if residual is not None: x = (x + residual) * math.sqrt(0.5) residuals.append(x) # T x B x C -> B x T x C x = self._transpose_if_training(x, incremental_state) # project back to size of vocabulary if not using adaptive softmax if self.fc2 is not None and self.fc3 is not None: x = self.fc2(x) x = self.dropout_module(x) x = self.fc3(x) return x, avg_attn_scores def reorder_incremental_state(self, incremental_state, new_order): super().reorder_incremental_state(incremental_state, new_order) encoder_out = utils.get_incremental_state( self, incremental_state, "encoder_out" ) if encoder_out is not None: encoder_out = tuple(eo.index_select(0, new_order) for eo in encoder_out) utils.set_incremental_state( self, incremental_state, "encoder_out", encoder_out ) def max_positions(self): """Maximum output length supported by the decoder.""" return ( self.embed_positions.max_positions if self.embed_positions is not None else float("inf") ) def upgrade_state_dict(self, state_dict): if utils.item(state_dict.get("decoder.version", torch.Tensor([1]))[0]) < 2: # old models use incorrect weight norm dimension for i, conv in enumerate(self.convolutions): # reconfigure weight norm nn.utils.remove_weight_norm(conv) self.convolutions[i] = nn.utils.weight_norm(conv, dim=0) state_dict["decoder.version"] = torch.Tensor([1]) return state_dict def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def _embed_tokens(self, tokens, incremental_state): if incremental_state is not None: # keep only the last token for incremental forward pass tokens = tokens[:, -1:] return self.embed_tokens(tokens) def _split_encoder_out(self, encoder_out, incremental_state): """Split and transpose encoder outputs. This is cached when doing incremental inference. 
""" cached_result = utils.get_incremental_state( self, incremental_state, "encoder_out" ) if cached_result is not None: return cached_result # transpose only once to speed up attention layers encoder_a, encoder_b = encoder_out encoder_a = encoder_a.transpose(1, 2).contiguous() result = (encoder_a, encoder_b) if incremental_state is not None: utils.set_incremental_state(self, incremental_state, "encoder_out", result) return result def _transpose_if_training(self, x, incremental_state): if incremental_state is None: x = x.transpose(0, 1) return x def extend_conv_spec(convolutions): """ Extends convolutional spec that is a list of tuples of 2 or 3 parameters (kernel size, dim size and optionally how many layers behind to look for residual) to default the residual propagation param if it is not specified """ extended = [] for spec in convolutions: if len(spec) == 3: extended.append(spec) elif len(spec) == 2: extended.append(spec + (1,)) else: raise Exception( "invalid number of parameters in convolution spec " + str(spec) + ". expected 2 or 3" ) return tuple(extended) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, 0, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx): m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) nn.init.normal_(m.weight, 0, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, dropout=0.0): """Weight-normalized Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features) nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / in_features)) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m) def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs): """Weight-normalized Conv1d layer optimized for decoding""" m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) nn.init.normal_(m.weight, mean=0, std=std) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m, dim=2) def ConvTBC(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs): """Weight-normalized Conv1d layer""" from fairseq.modules import ConvTBC m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) nn.init.normal_(m.weight, mean=0, std=std) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m, dim=2) @register_model_architecture("fconv", "fconv") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_layers = getattr(args, "encoder_layers", "[(512, 3)] * 20") args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_layers = getattr(args, "decoder_layers", "[(512, 3)] * 20") args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) args.decoder_attention = getattr(args, "decoder_attention", "True") args.share_input_output_embed = getattr(args, "share_input_output_embed", False) @register_model_architecture("fconv", "fconv_iwslt_de_en") def fconv_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_layers = 
getattr(args, "encoder_layers", "[(256, 3)] * 4") args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_layers = getattr(args, "decoder_layers", "[(256, 3)] * 3") args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) base_architecture(args) @register_model_architecture("fconv", "fconv_wmt_en_ro") def fconv_wmt_en_ro(args): args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) base_architecture(args) @register_model_architecture("fconv", "fconv_wmt_en_de") def fconv_wmt_en_de(args): convs = "[(512, 3)] * 9" # first 9 layers have 512 units convs += " + [(1024, 3)] * 4" # next 4 layers have 1024 units convs += " + [(2048, 1)] * 2" # final 2 layers use 1x1 convolutions args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.encoder_layers = getattr(args, "encoder_layers", convs) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768) args.decoder_layers = getattr(args, "decoder_layers", convs) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) base_architecture(args) @register_model_architecture("fconv", "fconv_wmt_en_fr") def fconv_wmt_en_fr(args): convs = "[(512, 3)] * 6" # first 6 layers have 512 units convs += " + [(768, 3)] * 4" # next 4 layers have 768 units convs += " + [(1024, 3)] * 3" # next 3 layers have 1024 units convs += " + [(2048, 1)] * 1" # next 1 layer uses 1x1 convolutions convs += " + [(4096, 1)] * 1" # final 1 layer uses 1x1 convolutions args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.encoder_layers = getattr(args, "encoder_layers", convs) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768) args.decoder_layers = getattr(args, "decoder_layers", convs) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/fconv.py
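Illustration only (not part of the file): a minimal sketch of how the convolution specs used by the architectures above are expanded, assuming the module is importable as fairseq.models.fconv. extend_conv_spec() fills in the default residual distance of 1 for any (out_channels, kernel_size) pair that omits it; the architecture strings such as "[(512, 3)] * 20" describe lists of exactly these tuples.

# Sketch: expanding a convolution spec (import path assumed).
from fairseq.models.fconv import extend_conv_spec

spec = [(512, 3)] * 2 + [(1024, 3, 0)]   # last layer opts out of residuals (0)
print(extend_conv_spec(spec))
# ((512, 3, 1), (512, 3, 1), (1024, 3, 0))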
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from fairseq.modules import AdaptiveSoftmax, FairseqDropout from torch import Tensor DEFAULT_MAX_SOURCE_POSITIONS = 1e5 DEFAULT_MAX_TARGET_POSITIONS = 1e5 @register_model("lstm") class LSTMModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-freeze-embed', action='store_true', help='freeze encoder embeddings') parser.add_argument('--encoder-hidden-size', type=int, metavar='N', help='encoder hidden size') parser.add_argument('--encoder-layers', type=int, metavar='N', help='number of encoder layers') parser.add_argument('--encoder-bidirectional', action='store_true', help='make all layers of encoder bidirectional') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-freeze-embed', action='store_true', help='freeze decoder embeddings') parser.add_argument('--decoder-hidden-size', type=int, metavar='N', help='decoder hidden size') parser.add_argument('--decoder-layers', type=int, metavar='N', help='number of decoder layers') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='BOOL', help='decoder attention') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion') parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', default=False, action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') # Granular dropout settings (if not specified these default to --dropout) parser.add_argument('--encoder-dropout-in', type=float, metavar='D', help='dropout probability for encoder input embedding') parser.add_argument('--encoder-dropout-out', type=float, metavar='D', help='dropout probability for encoder output') parser.add_argument('--decoder-dropout-in', type=float, metavar='D', help='dropout probability for decoder input embedding') parser.add_argument('--decoder-dropout-out', type=float, metavar='D', help='dropout probability for decoder output') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted (in case there are any new ones) base_architecture(args) if args.encoder_layers != args.decoder_layers: raise ValueError("--encoder-layers must match --decoder-layers") max_source_positions = getattr( args, "max_source_positions", DEFAULT_MAX_SOURCE_POSITIONS ) max_target_positions = getattr( args, "max_target_positions", DEFAULT_MAX_TARGET_POSITIONS ) def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) embed_dict = utils.parse_embedding(embed_path) utils.print_embed_overlap(embed_dict, dictionary) return utils.load_embedding(embed_dict, dictionary, embed_tokens) if args.encoder_embed_path: pretrained_encoder_embed = load_pretrained_embedding_from_file( args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim ) else: num_embeddings = len(task.source_dictionary) pretrained_encoder_embed = Embedding( num_embeddings, args.encoder_embed_dim, task.source_dictionary.pad() ) if args.share_all_embeddings: # double check all parameters combinations are valid if task.source_dictionary != task.target_dictionary: raise ValueError("--share-all-embeddings requires a joint dictionary") if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise ValueError( "--share-all-embed not compatible with --decoder-embed-path" ) if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to " "match --decoder-embed-dim" ) pretrained_decoder_embed = pretrained_encoder_embed args.share_decoder_input_output_embed = True else: # separate decoder input embeddings pretrained_decoder_embed = None if args.decoder_embed_path: pretrained_decoder_embed = load_pretrained_embedding_from_file( args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim, ) # one last double check of parameter combinations if args.share_decoder_input_output_embed and ( args.decoder_embed_dim != args.decoder_out_embed_dim ): raise ValueError( "--share-decoder-input-output-embeddings requires " "--decoder-embed-dim to match --decoder-out-embed-dim" ) if args.encoder_freeze_embed: pretrained_encoder_embed.weight.requires_grad = False if args.decoder_freeze_embed: pretrained_decoder_embed.weight.requires_grad = False encoder = LSTMEncoder( dictionary=task.source_dictionary, embed_dim=args.encoder_embed_dim, 
hidden_size=args.encoder_hidden_size, num_layers=args.encoder_layers, dropout_in=args.encoder_dropout_in, dropout_out=args.encoder_dropout_out, bidirectional=args.encoder_bidirectional, pretrained_embed=pretrained_encoder_embed, max_source_positions=max_source_positions, ) decoder = LSTMDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, hidden_size=args.decoder_hidden_size, out_embed_dim=args.decoder_out_embed_dim, num_layers=args.decoder_layers, dropout_in=args.decoder_dropout_in, dropout_out=args.decoder_dropout_out, attention=utils.eval_bool(args.decoder_attention), encoder_output_units=encoder.output_units, pretrained_embed=pretrained_decoder_embed, share_input_output_embed=args.share_decoder_input_output_embed, adaptive_softmax_cutoff=( utils.eval_str_list(args.adaptive_softmax_cutoff, type=int) if args.criterion == "adaptive_loss" else None ), max_target_positions=max_target_positions, residuals=False, ) return cls(encoder, decoder) def forward( self, src_tokens, src_lengths, prev_output_tokens, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): encoder_out = self.encoder(src_tokens, src_lengths=src_lengths) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, ) return decoder_out class LSTMEncoder(FairseqEncoder): """LSTM encoder.""" def __init__( self, dictionary, embed_dim=512, hidden_size=512, num_layers=1, dropout_in=0.1, dropout_out=0.1, bidirectional=False, left_pad=True, pretrained_embed=None, padding_idx=None, max_source_positions=DEFAULT_MAX_SOURCE_POSITIONS, ): super().__init__(dictionary) self.num_layers = num_layers self.dropout_in_module = FairseqDropout( dropout_in * 1.0, module_name=self.__class__.__name__ ) self.dropout_out_module = FairseqDropout( dropout_out * 1.0, module_name=self.__class__.__name__ ) self.bidirectional = bidirectional self.hidden_size = hidden_size self.max_source_positions = max_source_positions num_embeddings = len(dictionary) self.padding_idx = padding_idx if padding_idx is not None else dictionary.pad() if pretrained_embed is None: self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) else: self.embed_tokens = pretrained_embed self.lstm = LSTM( input_size=embed_dim, hidden_size=hidden_size, num_layers=num_layers, dropout=self.dropout_out_module.p if num_layers > 1 else 0.0, bidirectional=bidirectional, ) self.left_pad = left_pad self.output_units = hidden_size if bidirectional: self.output_units *= 2 def forward( self, src_tokens: Tensor, src_lengths: Tensor, enforce_sorted: bool = True, ): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` enforce_sorted (bool, optional): if True, `src_tokens` is expected to contain sequences sorted by length in a decreasing order. If False, this condition is not required. Default: True. 
""" if self.left_pad: # nn.utils.rnn.pack_padded_sequence requires right-padding; # convert left-padding to right-padding src_tokens = utils.convert_padding_direction( src_tokens, torch.zeros_like(src_tokens).fill_(self.padding_idx), left_to_right=True, ) bsz, seqlen = src_tokens.size() # embed tokens x = self.embed_tokens(src_tokens) x = self.dropout_in_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # pack embedded source tokens into a PackedSequence packed_x = nn.utils.rnn.pack_padded_sequence( x, src_lengths.cpu(), enforce_sorted=enforce_sorted ) # apply LSTM if self.bidirectional: state_size = 2 * self.num_layers, bsz, self.hidden_size else: state_size = self.num_layers, bsz, self.hidden_size h0 = x.new_zeros(*state_size) c0 = x.new_zeros(*state_size) packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0)) # unpack outputs and apply dropout x, _ = nn.utils.rnn.pad_packed_sequence( packed_outs, padding_value=self.padding_idx * 1.0 ) x = self.dropout_out_module(x) assert list(x.size()) == [seqlen, bsz, self.output_units] if self.bidirectional: final_hiddens = self.combine_bidir(final_hiddens, bsz) final_cells = self.combine_bidir(final_cells, bsz) encoder_padding_mask = src_tokens.eq(self.padding_idx).t() return tuple( ( x, # seq_len x batch x hidden final_hiddens, # num_layers x batch x num_directions*hidden final_cells, # num_layers x batch x num_directions*hidden encoder_padding_mask, # seq_len x batch ) ) def combine_bidir(self, outs, bsz: int): out = outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous() return out.view(self.num_layers, bsz, -1) def reorder_encoder_out( self, encoder_out: Tuple[Tensor, Tensor, Tensor, Tensor], new_order ): return tuple( ( encoder_out[0].index_select(1, new_order), encoder_out[1].index_select(1, new_order), encoder_out[2].index_select(1, new_order), encoder_out[3].index_select(1, new_order), ) ) def max_positions(self): """Maximum input length supported by the encoder.""" return self.max_source_positions class AttentionLayer(nn.Module): def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim, bias=False): super().__init__() self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias) self.output_proj = Linear( input_embed_dim + source_embed_dim, output_embed_dim, bias=bias ) def forward(self, input, source_hids, encoder_padding_mask): # input: bsz x input_embed_dim # source_hids: srclen x bsz x source_embed_dim # x: bsz x source_embed_dim x = self.input_proj(input) # compute attention attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2) # don't attend over padding if encoder_padding_mask is not None: attn_scores = ( attn_scores.float() .masked_fill_(encoder_padding_mask, float("-inf")) .type_as(attn_scores) ) # FP16 support: cast to float and back attn_scores = F.softmax(attn_scores, dim=0) # srclen x bsz # sum weighted sources x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0) x = torch.tanh(self.output_proj(torch.cat((x, input), dim=1))) return x, attn_scores class LSTMDecoder(FairseqIncrementalDecoder): """LSTM decoder.""" def __init__( self, dictionary, embed_dim=512, hidden_size=512, out_embed_dim=512, num_layers=1, dropout_in=0.1, dropout_out=0.1, attention=True, encoder_output_units=512, pretrained_embed=None, share_input_output_embed=False, adaptive_softmax_cutoff=None, max_target_positions=DEFAULT_MAX_TARGET_POSITIONS, residuals=False, ): super().__init__(dictionary) self.dropout_in_module = FairseqDropout( dropout_in * 1.0, module_name=self.__class__.__name__ ) 
self.dropout_out_module = FairseqDropout( dropout_out * 1.0, module_name=self.__class__.__name__ ) self.hidden_size = hidden_size self.share_input_output_embed = share_input_output_embed self.need_attn = True self.max_target_positions = max_target_positions self.residuals = residuals self.num_layers = num_layers self.adaptive_softmax = None num_embeddings = len(dictionary) padding_idx = dictionary.pad() if pretrained_embed is None: self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) else: self.embed_tokens = pretrained_embed self.encoder_output_units = encoder_output_units if encoder_output_units != hidden_size and encoder_output_units != 0: self.encoder_hidden_proj = Linear(encoder_output_units, hidden_size) self.encoder_cell_proj = Linear(encoder_output_units, hidden_size) else: self.encoder_hidden_proj = self.encoder_cell_proj = None # disable input feeding if there is no encoder # input feeding is described in arxiv.org/abs/1508.04025 input_feed_size = 0 if encoder_output_units == 0 else hidden_size self.layers = nn.ModuleList( [ LSTMCell( input_size=input_feed_size + embed_dim if layer == 0 else hidden_size, hidden_size=hidden_size, ) for layer in range(num_layers) ] ) if attention: # TODO make bias configurable self.attention = AttentionLayer( hidden_size, encoder_output_units, hidden_size, bias=False ) else: self.attention = None if hidden_size != out_embed_dim: self.additional_fc = Linear(hidden_size, out_embed_dim) if adaptive_softmax_cutoff is not None: # setting adaptive_softmax dropout to dropout_out for now but can be redefined self.adaptive_softmax = AdaptiveSoftmax( num_embeddings, hidden_size, adaptive_softmax_cutoff, dropout=dropout_out, ) elif not self.share_input_output_embed: self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out) def forward( self, prev_output_tokens, encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, src_lengths: Optional[Tensor] = None, ): x, attn_scores = self.extract_features( prev_output_tokens, encoder_out, incremental_state ) return self.output_layer(x), attn_scores def extract_features( self, prev_output_tokens, encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): """ Similar to *forward* but only return features. 
""" # get outputs from encoder if encoder_out is not None: encoder_outs = encoder_out[0] encoder_hiddens = encoder_out[1] encoder_cells = encoder_out[2] encoder_padding_mask = encoder_out[3] else: encoder_outs = torch.empty(0) encoder_hiddens = torch.empty(0) encoder_cells = torch.empty(0) encoder_padding_mask = torch.empty(0) srclen = encoder_outs.size(0) if incremental_state is not None and len(incremental_state) > 0: prev_output_tokens = prev_output_tokens[:, -1:] bsz, seqlen = prev_output_tokens.size() # embed tokens x = self.embed_tokens(prev_output_tokens) x = self.dropout_in_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # initialize previous states (or get from cache during incremental generation) if incremental_state is not None and len(incremental_state) > 0: prev_hiddens, prev_cells, input_feed = self.get_cached_state( incremental_state ) elif encoder_out is not None: # setup recurrent cells prev_hiddens = [encoder_hiddens[i] for i in range(self.num_layers)] prev_cells = [encoder_cells[i] for i in range(self.num_layers)] if self.encoder_hidden_proj is not None: prev_hiddens = [self.encoder_hidden_proj(y) for y in prev_hiddens] prev_cells = [self.encoder_cell_proj(y) for y in prev_cells] input_feed = x.new_zeros(bsz, self.hidden_size) else: # setup zero cells, since there is no encoder zero_state = x.new_zeros(bsz, self.hidden_size) prev_hiddens = [zero_state for i in range(self.num_layers)] prev_cells = [zero_state for i in range(self.num_layers)] input_feed = None assert ( srclen > 0 or self.attention is None ), "attention is not supported if there are no encoder outputs" attn_scores: Optional[Tensor] = ( x.new_zeros(srclen, seqlen, bsz) if self.attention is not None else None ) outs = [] for j in range(seqlen): # input feeding: concatenate context vector from previous time step if input_feed is not None: input = torch.cat((x[j, :, :], input_feed), dim=1) else: input = x[j] for i, rnn in enumerate(self.layers): # recurrent cell hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i])) # hidden state becomes the input to the next layer input = self.dropout_out_module(hidden) if self.residuals: input = input + prev_hiddens[i] # save state for next time step prev_hiddens[i] = hidden prev_cells[i] = cell # apply attention using the last layer's hidden state if self.attention is not None: assert attn_scores is not None out, attn_scores[:, j, :] = self.attention( hidden, encoder_outs, encoder_padding_mask ) else: out = hidden out = self.dropout_out_module(out) # input feeding if input_feed is not None: input_feed = out # save final output outs.append(out) # Stack all the necessary tensors together and store prev_hiddens_tensor = torch.stack(prev_hiddens) prev_cells_tensor = torch.stack(prev_cells) cache_state = torch.jit.annotate( Dict[str, Optional[Tensor]], { "prev_hiddens": prev_hiddens_tensor, "prev_cells": prev_cells_tensor, "input_feed": input_feed, }, ) self.set_incremental_state(incremental_state, "cached_state", cache_state) # collect outputs across time steps x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size) # T x B x C -> B x T x C x = x.transpose(1, 0) if hasattr(self, "additional_fc") and self.adaptive_softmax is None: x = self.additional_fc(x) x = self.dropout_out_module(x) # srclen x tgtlen x bsz -> bsz x tgtlen x srclen if not self.training and self.need_attn and self.attention is not None: assert attn_scores is not None attn_scores = attn_scores.transpose(0, 2) else: attn_scores = None return x, attn_scores def output_layer(self, x): 
"""Project features to the vocabulary size.""" if self.adaptive_softmax is None: if self.share_input_output_embed: x = F.linear(x, self.embed_tokens.weight) else: x = self.fc_out(x) return x def get_cached_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], ) -> Tuple[List[Tensor], List[Tensor], Optional[Tensor]]: cached_state = self.get_incremental_state(incremental_state, "cached_state") assert cached_state is not None prev_hiddens_ = cached_state["prev_hiddens"] assert prev_hiddens_ is not None prev_cells_ = cached_state["prev_cells"] assert prev_cells_ is not None prev_hiddens = [prev_hiddens_[i] for i in range(self.num_layers)] prev_cells = [prev_cells_[j] for j in range(self.num_layers)] input_feed = cached_state[ "input_feed" ] # can be None for decoder-only language models return prev_hiddens, prev_cells, input_feed def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): if incremental_state is None or len(incremental_state) == 0: return prev_hiddens, prev_cells, input_feed = self.get_cached_state(incremental_state) prev_hiddens = [p.index_select(0, new_order) for p in prev_hiddens] prev_cells = [p.index_select(0, new_order) for p in prev_cells] if input_feed is not None: input_feed = input_feed.index_select(0, new_order) cached_state_new = torch.jit.annotate( Dict[str, Optional[Tensor]], { "prev_hiddens": torch.stack(prev_hiddens), "prev_cells": torch.stack(prev_cells), "input_feed": input_feed, }, ) self.set_incremental_state(incremental_state, "cached_state", cached_state_new), return def max_positions(self): """Maximum output length supported by the decoder.""" return self.max_target_positions def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.uniform_(m.weight, -0.1, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def LSTM(input_size, hidden_size, **kwargs): m = nn.LSTM(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if "weight" in name or "bias" in name: param.data.uniform_(-0.1, 0.1) return m def LSTMCell(input_size, hidden_size, **kwargs): m = nn.LSTMCell(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if "weight" in name or "bias" in name: param.data.uniform_(-0.1, 0.1) return m def Linear(in_features, out_features, bias=True, dropout=0.0): """Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features, bias=bias) m.weight.data.uniform_(-0.1, 0.1) if bias: m.bias.data.uniform_(-0.1, 0.1) return m @register_model_architecture("lstm", "lstm") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_freeze_embed = getattr(args, "encoder_freeze_embed", False) args.encoder_hidden_size = getattr( args, "encoder_hidden_size", args.encoder_embed_dim ) args.encoder_layers = getattr(args, "encoder_layers", 1) args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False) args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout) args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_freeze_embed 
= getattr(args, "decoder_freeze_embed", False) args.decoder_hidden_size = getattr( args, "decoder_hidden_size", args.decoder_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 1) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) args.decoder_attention = getattr(args, "decoder_attention", "1") args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout) args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.adaptive_softmax_cutoff = getattr( args, "adaptive_softmax_cutoff", "10000,50000,200000" ) @register_model_architecture("lstm", "lstm_wiseman_iwslt_de_en") def lstm_wiseman_iwslt_de_en(args): args.dropout = getattr(args, "dropout", 0.1) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_dropout_in = getattr(args, "encoder_dropout_in", 0) args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) args.decoder_dropout_in = getattr(args, "decoder_dropout_in", 0) args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) base_architecture(args) @register_model_architecture("lstm", "lstm_luong_wmt_en_de") def lstm_luong_wmt_en_de(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1000) args.encoder_layers = getattr(args, "encoder_layers", 4) args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1000) args.decoder_layers = getattr(args, "decoder_layers", 4) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 1000) args.decoder_dropout_out = getattr(args, "decoder_dropout_out", 0) base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/lstm.py
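Illustration only: a small sketch of the Luong-style AttentionLayer defined in the file above, assuming it is importable as fairseq.models.lstm. The layer scores each encoder state against the projected decoder state, softmaxes over the source length, and returns the tanh-combined context together with the raw scores.

# Sketch: dot-product attention from the LSTM decoder (import path assumed).
import torch
from fairseq.models.lstm import AttentionLayer

attn = AttentionLayer(input_embed_dim=8, source_embed_dim=8, output_embed_dim=8)
decoder_state = torch.randn(2, 8)       # bsz x input_embed_dim
encoder_outs = torch.randn(5, 2, 8)     # srclen x bsz x source_embed_dim
context, scores = attn(decoder_state, encoder_outs, encoder_padding_mask=None)
print(context.shape, scores.shape)      # torch.Size([2, 8]) torch.Size([5, 2])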
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( TransformerModel, base_architecture, transformer_wmt_en_de_big, ) @register_model("transformer_align") class TransformerAlignModel(TransformerModel): """ See "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). """ def __init__(self, encoder, decoder, args): super().__init__(args, encoder, decoder) self.alignment_heads = args.alignment_heads self.alignment_layer = args.alignment_layer self.full_context_alignment = args.full_context_alignment @staticmethod def add_args(parser): # fmt: off super(TransformerAlignModel, TransformerAlignModel).add_args(parser) parser.add_argument('--alignment-heads', type=int, metavar='D', help='Number of cross attention heads per layer to supervised with alignments') parser.add_argument('--alignment-layer', type=int, metavar='D', help='Layer number which has to be supervised. 0 corresponding to the bottommost layer.') parser.add_argument('--full-context-alignment', action='store_true', help='Whether or not alignment is supervised conditioned on the full target context.') # fmt: on @classmethod def build_model(cls, args, task): # set any default arguments transformer_align(args) transformer_model = TransformerModel.build_model(args, task) return TransformerAlignModel( transformer_model.encoder, transformer_model.decoder, args ) def forward(self, src_tokens, src_lengths, prev_output_tokens): encoder_out = self.encoder(src_tokens, src_lengths) return self.forward_decoder(prev_output_tokens, encoder_out) def forward_decoder( self, prev_output_tokens, encoder_out=None, incremental_state=None, features_only=False, **extra_args, ): attn_args = { "alignment_layer": self.alignment_layer, "alignment_heads": self.alignment_heads, } decoder_out = self.decoder(prev_output_tokens, encoder_out, **attn_args) if self.full_context_alignment: attn_args["full_context_alignment"] = self.full_context_alignment _, alignment_out = self.decoder( prev_output_tokens, encoder_out, features_only=True, **attn_args, **extra_args, ) decoder_out[1]["attn"] = alignment_out["attn"] return decoder_out @register_model_architecture("transformer_align", "transformer_align") def transformer_align(args): args.alignment_heads = getattr(args, "alignment_heads", 1) args.alignment_layer = getattr(args, "alignment_layer", 4) args.full_context_alignment = getattr(args, "full_context_alignment", False) base_architecture(args) @register_model_architecture("transformer_align", "transformer_wmt_en_de_big_align") def transformer_wmt_en_de_big_align(args): args.alignment_heads = getattr(args, "alignment_heads", 1) args.alignment_layer = getattr(args, "alignment_layer", 4) transformer_wmt_en_de_big(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/transformer_align.py
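Rough illustration (not part of the file): the registered architecture hook only fills in defaults on an argparse.Namespace via getattr, and build_model() then reads alignment_heads / alignment_layer / full_context_alignment from it. Import path is assumed.

# Sketch: inspecting the alignment defaults set by the architecture hook.
from argparse import Namespace
from fairseq.models.transformer_align import transformer_align  # path assumed

args = Namespace()
transformer_align(args)   # fills alignment defaults, then base_architecture
print(args.alignment_heads, args.alignment_layer, args.full_context_alignment)
# expected: 1 4 False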
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .fairseq_encoder import FairseqEncoder class CompositeEncoder(FairseqEncoder): """ A wrapper around a dictionary of :class:`FairseqEncoder` objects. We run forward on each encoder and return a dictionary of outputs. The first encoder's dictionary is used for initialization. Args: encoders (dict): a dictionary of :class:`FairseqEncoder` objects. """ def __init__(self, encoders): super().__init__(next(iter(encoders.values())).dictionary) self.encoders = encoders for key in self.encoders: self.add_module(key, self.encoders[key]) def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: the outputs from each Encoder """ encoder_out = {} for key in self.encoders: encoder_out[key] = self.encoders[key](src_tokens, src_lengths) return encoder_out def reorder_encoder_out(self, encoder_out, new_order): """Reorder encoder output according to new_order.""" for key in self.encoders: encoder_out[key] = self.encoders[key].reorder_encoder_out( encoder_out[key], new_order ) return encoder_out def max_positions(self): return min(self.encoders[key].max_positions() for key in self.encoders) def upgrade_state_dict(self, state_dict): for key in self.encoders: self.encoders[key].upgrade_state_dict(state_dict) return state_dict
KosmosX-API-main
kosmosX/fairseq/fairseq/models/composite_encoder.py
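Hypothetical usage sketch: two encoders sharing one dictionary wrapped into a CompositeEncoder; forward() returns a dict keyed the same way the wrapper was built. LSTMEncoder is used here purely for illustration, and the import paths are assumed.

# Sketch: wrapping two encoders behind one forward() call.
import torch
from fairseq.data import Dictionary
from fairseq.models import CompositeEncoder
from fairseq.models.lstm import LSTMEncoder  # any FairseqEncoder works

d = Dictionary()
d.add_symbol("foo")
enc = CompositeEncoder({
    "small": LSTMEncoder(d, embed_dim=8, hidden_size=8),
    "large": LSTMEncoder(d, embed_dim=16, hidden_size=16),
})
tokens = torch.LongTensor([[d.index("foo")]])
lengths = torch.LongTensor([1])
out = enc(tokens, lengths)   # {"small": (outs, h, c, mask), "large": (...)}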
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( LayerNorm, SinusoidalPositionalEmbedding, TransformerSentenceEncoder, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import safe_hasattr logger = logging.getLogger(__name__) @register_model("masked_lm") class MaskedLMModel(FairseqEncoderModel): """ Class for training a Masked Language Model. It also supports an additional sentence level prediction if the sent-loss argument is set. """ def __init__(self, args, encoder): super().__init__(encoder) self.args = args # if specified then apply bert initialization on the model. We need # to explictly call this to make sure that the output embeddings # and projection layers are also correctly initialized if getattr(args, "apply_bert_init", False): self.apply(init_bert_params) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # Arguments related to dropout parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for" " attention weights", ) parser.add_argument( "--act-dropout", type=float, metavar="D", help="dropout probability after" " activation in FFN", ) # Arguments related to hidden states and self-attention parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="num encoder layers" ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads", ) # Arguments related to input and output embeddings parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--share-encoder-input-output-embed", action="store_true", help="share encoder input" " and output embeddings", ) parser.add_argument( "--encoder-learned-pos", action="store_true", help="use learned positional embeddings in the encoder", ) parser.add_argument( "--no-token-positional-embeddings", action="store_true", help="if set, disables positional embeddings" " (outside self attention)", ) parser.add_argument( "--num-segment", type=int, metavar="N", help="num segment in the input" ) parser.add_argument( "--max-positions", type=int, help="number of positional embeddings to learn" ) # Arguments related to sentence level prediction parser.add_argument( "--sentence-class-num", type=int, metavar="N", help="number of classes for sentence task", ) parser.add_argument( "--sent-loss", action="store_true", help="if set," " calculate sentence level predictions", ) # Arguments related to parameter initialization parser.add_argument( "--apply-bert-init", action="store_true", help="use custom param initialization for BERT", ) # misc params parser.add_argument( "--activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--pooler-activation-fn", choices=utils.get_available_activation_fns(), help="Which activation function to use for pooler layer.", ) parser.add_argument( 
"--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) def forward(self, src_tokens, segment_labels=None, **kwargs): return self.encoder(src_tokens, segment_labels=segment_labels, **kwargs) def max_positions(self): return self.encoder.max_positions @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not safe_hasattr(args, "max_positions"): args.max_positions = args.tokens_per_sample logger.info(args) encoder = MaskedLMEncoder(args, task.dictionary) return cls(args, encoder) class MaskedLMEncoder(FairseqEncoder): """ Encoder for Masked Language Modelling. """ def __init__(self, args, dictionary): super().__init__(dictionary) self.padding_idx = dictionary.pad() self.vocab_size = dictionary.__len__() self.max_positions = args.max_positions self.sentence_encoder = TransformerSentenceEncoder( padding_idx=self.padding_idx, vocab_size=self.vocab_size, num_encoder_layers=args.encoder_layers, embedding_dim=args.encoder_embed_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.act_dropout, max_seq_len=self.max_positions, num_segments=args.num_segment, use_position_embeddings=not args.no_token_positional_embeddings, encoder_normalize_before=args.encoder_normalize_before, apply_bert_init=args.apply_bert_init, activation_fn=args.activation_fn, learned_pos_embedding=args.encoder_learned_pos, ) self.share_input_output_embed = args.share_encoder_input_output_embed self.embed_out = None self.sentence_projection_layer = None self.sentence_out_dim = args.sentence_class_num self.lm_output_learned_bias = None # Remove head is set to true during fine-tuning self.load_softmax = not getattr(args, "remove_head", False) self.masked_lm_pooler = nn.Linear( args.encoder_embed_dim, args.encoder_embed_dim ) self.pooler_activation = utils.get_activation_fn(args.pooler_activation_fn) self.lm_head_transform_weight = nn.Linear( args.encoder_embed_dim, args.encoder_embed_dim ) self.activation_fn = utils.get_activation_fn(args.activation_fn) self.layer_norm = LayerNorm(args.encoder_embed_dim) self.lm_output_learned_bias = None if self.load_softmax: self.lm_output_learned_bias = nn.Parameter(torch.zeros(self.vocab_size)) if not self.share_input_output_embed: self.embed_out = nn.Linear( args.encoder_embed_dim, self.vocab_size, bias=False ) if args.sent_loss: self.sentence_projection_layer = nn.Linear( args.encoder_embed_dim, self.sentence_out_dim, bias=False ) def forward(self, src_tokens, segment_labels=None, masked_tokens=None, **unused): """ Forward pass for Masked LM encoder. This first computes the token embedding using the token embedding matrix, position embeddings (if specified) and segment embeddings (if specified). Here we assume that the sentence representation corresponds to the output of the classification_token (see bert_task or cross_lingual_lm task for more details). Args: - src_tokens: B x T matrix representing sentences - segment_labels: B x T matrix representing segment label for tokens Returns: - a tuple of the following: - logits for predictions in format B x T x C to be used in softmax afterwards - a dictionary of additional data, where 'pooled_output' contains the representation for classification_token and 'inner_states' is a list of internal model states used to compute the predictions (similar in ELMO). 
'sentence_logits' is the prediction logit for NSP task and is only computed if this is specified in the input arguments. """ inner_states, sentence_rep = self.sentence_encoder( src_tokens, segment_labels=segment_labels, ) x = inner_states[-1].transpose(0, 1) # project masked tokens only if masked_tokens is not None: x = x[masked_tokens, :] x = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(x))) pooled_output = self.pooler_activation(self.masked_lm_pooler(sentence_rep)) # project back to size of vocabulary if self.share_input_output_embed and hasattr( self.sentence_encoder.embed_tokens, "weight" ): x = F.linear(x, self.sentence_encoder.embed_tokens.weight) elif self.embed_out is not None: x = self.embed_out(x) if self.lm_output_learned_bias is not None: x = x + self.lm_output_learned_bias sentence_logits = None if self.sentence_projection_layer: sentence_logits = self.sentence_projection_layer(pooled_output) return x, { "inner_states": inner_states, "pooled_output": pooled_output, "sentence_logits": sentence_logits, } def max_positions(self): """Maximum output length supported by the encoder.""" return self.max_positions def upgrade_state_dict_named(self, state_dict, name): if isinstance( self.sentence_encoder.embed_positions, SinusoidalPositionalEmbedding ): state_dict[ name + ".sentence_encoder.embed_positions._float_tensor" ] = torch.FloatTensor(1) if not self.load_softmax: for k in list(state_dict.keys()): if ( "embed_out.weight" in k or "sentence_projection_layer.weight" in k or "lm_output_learned_bias" in k ): del state_dict[k] return state_dict @register_model_architecture("masked_lm", "masked_lm") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.act_dropout = getattr(args, "act_dropout", 0.0) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.share_encoder_input_output_embed = getattr( args, "share_encoder_input_output_embed", False ) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.num_segment = getattr(args, "num_segment", 2) args.sentence_class_num = getattr(args, "sentence_class_num", 2) args.sent_loss = getattr(args, "sent_loss", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.activation_fn = getattr(args, "activation_fn", "relu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) @register_model_architecture("masked_lm", "bert_base") def bert_base_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.share_encoder_input_output_embed = getattr( args, "share_encoder_input_output_embed", True ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.num_segment = getattr(args, "num_segment", 2) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072) args.sentence_class_num = getattr(args, 
"sentence_class_num", 2) args.sent_loss = getattr(args, "sent_loss", True) args.apply_bert_init = getattr(args, "apply_bert_init", True) args.activation_fn = getattr(args, "activation_fn", "gelu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) base_architecture(args) @register_model_architecture("masked_lm", "bert_large") def bert_large_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_layers = getattr(args, "encoder_layers", 24) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) bert_base_architecture(args) @register_model_architecture("masked_lm", "xlm_base") def xlm_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.share_encoder_input_output_embed = getattr( args, "share_encoder_input_output_embed", True ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.num_segment = getattr(args, "num_segment", 1) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.sent_loss = getattr(args, "sent_loss", False) args.activation_fn = getattr(args, "activation_fn", "gelu") args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.apply_bert_init = getattr(args, "apply_bert_init", True) base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/masked_lm.py
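Illustrative sketch (import path assumed): the bert_base architecture hook is just a chain of getattr defaults, so applying it to an empty Namespace shows the configuration MaskedLMEncoder would be built with.

# Sketch: materializing the bert_base defaults.
from argparse import Namespace
from fairseq.models.masked_lm import bert_base_architecture  # path assumed

args = Namespace()
bert_base_architecture(args)
print(args.encoder_layers, args.encoder_embed_dim,
      args.encoder_attention_heads, args.encoder_ffn_embed_dim)
# expected: 12 768 12 3072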
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import Dict, Optional from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.models import FairseqDecoder from torch import Tensor logger = logging.getLogger(__name__) @with_incremental_state class FairseqIncrementalDecoder(FairseqDecoder): """Base class for incremental decoders. Incremental decoding is a special mode at inference time where the Model only receives a single timestep of input corresponding to the previous output token (for teacher forcing) and must produce the next output *incrementally*. Thus the model must cache any long-term state that is needed about the sequence, e.g., hidden states, convolutional states, etc. Compared to the standard :class:`FairseqDecoder` interface, the incremental decoder interface allows :func:`forward` functions to take an extra keyword argument (*incremental_state*) that can be used to cache state across time-steps. The :class:`FairseqIncrementalDecoder` interface also defines the :func:`reorder_incremental_state` method, which is used during beam search to select and reorder the incremental state based on the selection of beams. To learn more about how incremental decoding works, refer to `this blog <http://www.telesens.co/2019/04/21/understanding-incremental-decoding-in-fairseq/>`_. """ def __init__(self, dictionary): super().__init__(dictionary) def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs ): """ Args: prev_output_tokens (LongTensor): shifted output tokens of shape `(batch, tgt_len)`, for teacher forcing encoder_out (dict, optional): output from the encoder, used for encoder-side attention incremental_state (dict, optional): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ raise NotImplementedError def extract_features( self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs ): """ Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ raise NotImplementedError def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): """Reorder incremental state. This will be called when the order of the input has changed from the previous time step. A typical use case is beam search, where the input order changes between time steps based on the selection of beams. """ pass def reorder_incremental_state_scripting( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): """Main entry point for reordering the incremental state. Due to limitations in TorchScript, we call this function in :class:`fairseq.sequence_generator.SequenceGenerator` instead of calling :func:`reorder_incremental_state` directly. 
""" for module in self.modules(): if hasattr(module, "reorder_incremental_state"): result = module.reorder_incremental_state(incremental_state, new_order) if result is not None: incremental_state = result def set_beam_size(self, beam_size): """Sets the beam size in the decoder and all children.""" if getattr(self, "_beam_size", -1) != beam_size: seen = set() def apply_set_beam_size(module): if ( module != self and hasattr(module, "set_beam_size") and module not in seen ): seen.add(module) module.set_beam_size(beam_size) self.apply(apply_set_beam_size) self._beam_size = beam_size
KosmosX-API-main
kosmosX/fairseq/fairseq/models/fairseq_incremental_decoder.py
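A minimal toy subclass, purely illustrative, showing the caching contract described above: state is written with set_incremental_state() during forward and re-indexed in reorder_incremental_state() for beam search. ToyDecoder and its layers are hypothetical and not part of fairseq.

# Sketch: a toy incremental decoder that caches a running sum of embeddings.
import torch
from fairseq.data import Dictionary
from fairseq.models import FairseqIncrementalDecoder


class ToyDecoder(FairseqIncrementalDecoder):
    """Caches a running sum of token embeddings across incremental steps."""

    def __init__(self, dictionary):
        super().__init__(dictionary)
        self.embed = torch.nn.Embedding(len(dictionary), 8, padding_idx=dictionary.pad())
        self.out = torch.nn.Linear(8, len(dictionary))

    def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
        x = self.embed(prev_output_tokens).sum(dim=1)            # bsz x 8
        if incremental_state is not None:
            cached = self.get_incremental_state(incremental_state, "state")
            if cached is not None and cached["sum"] is not None:
                x = x + cached["sum"]                            # reuse cached context
            self.set_incremental_state(incremental_state, "state", {"sum": x})
        return self.out(x).unsqueeze(1), None                    # bsz x 1 x vocab

    def reorder_incremental_state(self, incremental_state, new_order):
        cached = self.get_incremental_state(incremental_state, "state")
        if cached is not None and cached["sum"] is not None:
            self.set_incremental_state(
                incremental_state, "state",
                {"sum": cached["sum"].index_select(0, new_order)},
            )


d = Dictionary()
dec = ToyDecoder(d)
state = {}
logits, _ = dec(torch.LongTensor([[d.eos()]]), incremental_state=state)  # fills "state"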
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, DynamicConv, FairseqDropout, LayerNorm, LightweightConv, MultiheadAttention, PositionalEmbedding, ) from fairseq.utils import safe_hasattr @register_model("lightconv") class LightConvModel(FairseqEncoderDecoderModel): """ LightConv and DynamicConv model from `"Pay Less Attention with Lightweight and Dynamic Convolutions" (Wu, et al, 2019) <https://openreview.net/pdf?id=SkVhlh09tX>`_. To use LightConv please set ``--encoder-conv-type lightweight --decoder-conv-type lightweight`` To use DynamicConv please set ``--encoder-conv-type dynamic --decoder-conv-type dynamic`` Args: encoder (LightConvEncoder): the encoder decoder (LightConvDecoder): the decoder The LightConv model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.lightconv_parser :prog: """ @classmethod def hub_models(cls): # fmt: off def moses_subword(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'subword_nmt', } return { 'lightconv.no_glu.iwslt14.de-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.lightconv.tar.gz'), 'dynamicconv.no_glu.iwslt14.de-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.dynamicconv.tar.gz'), 'lightconv.no_glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv.tar.gz'), 'dynamicconv.no_glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv.tar.gz'), 'lightconv.glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv-glu.tar.gz'), 'lightconv.glu.wmt17.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt17.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv-glu.tar.gz'), 'lightconv.glu.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.dynamicconv-glu.tar.gz'), 'lightconv.glu.wmt17.zh-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt17.zh-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.dynamicconv-glu.tar.gz'), } # fmt: on def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", 
help="dropout probability for attention weights", ) parser.add_argument( "--relu-dropout", type=float, metavar="D", help="dropout probability after ReLU in FFN", ) parser.add_argument( "--input-dropout", type=float, metavar="D", help="dropout probability of the inputs", ) parser.add_argument( "--encoder-embed-path", type=str, metavar="STR", help="path to pre-trained encoder embedding", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-conv-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="num encoder layers" ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads or LightConv/DynamicConv heads", ) parser.add_argument( "--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) parser.add_argument( "--encoder-learned-pos", action="store_true", help="use learned positional embeddings in the encoder", ) parser.add_argument( "--decoder-embed-path", type=str, metavar="STR", help="path to pre-trained decoder embedding", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-conv-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads or LightConv/DynamicConv heads", ) parser.add_argument( "--decoder-learned-pos", action="store_true", help="use learned positional embeddings in the decoder", ) parser.add_argument( "--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--share-decoder-input-output-embed", action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--share-all-embeddings", action="store_true", help="share encoder, decoder and output embeddings" " (requires shared dictionary and embed dim)", ) parser.add_argument( "--adaptive-softmax-cutoff", metavar="EXPR", help="comma separated list of adaptive softmax cutoff points. 
" "Must be used with adaptive_loss criterion", ), parser.add_argument( "--adaptive-softmax-dropout", type=float, metavar="D", help="sets adaptive softmax dropout for the tail projections", ) """LightConv and DynamicConv arguments""" parser.add_argument( "--encoder-kernel-size-list", type=lambda x: utils.eval_str_list(x, int), help='list of kernel size (default: "[3,7,15,31,31,31,31]")', ) parser.add_argument( "--decoder-kernel-size-list", type=lambda x: utils.eval_str_list(x, int), help='list of kernel size (default: "[3,7,15,31,31,31]")', ) parser.add_argument( "--encoder-glu", type=utils.eval_bool, help="glu after in proj" ) parser.add_argument( "--decoder-glu", type=utils.eval_bool, help="glu after in proj" ) parser.add_argument( "--encoder-conv-type", default="dynamic", type=str, choices=["dynamic", "lightweight"], help="type of convolution", ) parser.add_argument( "--decoder-conv-type", default="dynamic", type=str, choices=["dynamic", "lightweight"], help="type of convolution", ) parser.add_argument("--weight-softmax", default=True, type=utils.eval_bool) parser.add_argument( "--weight-dropout", type=float, metavar="D", help="dropout probability for conv weights", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not safe_hasattr(args, "max_source_positions"): args.max_source_positions = 1024 if not safe_hasattr(args, "max_target_positions"): args.max_target_positions = 1024 src_dict, tgt_dict = task.source_dictionary, task.target_dictionary def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise RuntimeError( "--share-all-embeddings requires a joined dictionary" ) if args.encoder_embed_dim != args.decoder_embed_dim: raise RuntimeError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise RuntimeError( "--share-all-embeddings not compatible with --decoder-embed-path" ) encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) encoder = LightConvEncoder(args, src_dict, encoder_embed_tokens) decoder = LightConvDecoder(args, tgt_dict, decoder_embed_tokens) return LightConvModel(encoder, decoder) class LightConvEncoder(FairseqEncoder): """ LightConv encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`LightConvEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = ( PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None ) self.layers = nn.ModuleList([]) self.layers.extend( [ LightConvEncoderLayer( args, kernel_size=args.encoder_kernel_size_list[i] ) for i in range(args.encoder_layers) ] ) self.register_buffer("version", torch.Tensor([2])) self.normalize = args.encoder_normalize_before if self.normalize: self.layer_norm = LayerNorm(embed_dim) def forward(self, src_tokens, **unused): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask) if self.normalize: x = self.layer_norm(x) return { "encoder_out": x, # T x B x C "encoder_padding_mask": encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, new_order): """ Reorder encoder output according to *new_order*. Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out["encoder_out"] is not None: encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( 1, new_order ) if encoder_out["encoder_padding_mask"] is not None: encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions) class LightConvDecoder(FairseqIncrementalDecoder): """ LightConv decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`LightConvDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs. 
Default: ``False`` """ def __init__( self, args, dictionary, embed_tokens, no_encoder_attn=False, final_norm=True ): super().__init__(dictionary) self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = ( Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None ) self.embed_positions = ( PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None ) self.layers = nn.ModuleList([]) self.layers.extend( [ LightConvDecoderLayer( args, no_encoder_attn, kernel_size=args.decoder_kernel_size_list[i] ) for i in range(args.decoder_layers) ] ) self.adaptive_softmax = None self.project_out_dim = ( Linear(embed_dim, output_embed_dim, bias=False) if embed_dim != output_embed_dim and not args.tie_adaptive_weights else None ) if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), output_embed_dim, utils.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter( torch.Tensor(len(dictionary), output_embed_dim) ) nn.init.normal_(self.embed_out, mean=0, std=output_embed_dim ** -0.5) self.register_buffer("version", torch.Tensor([2])) self.normalize = args.decoder_normalize_before and final_norm if self.normalize: self.layer_norm = LayerNorm(embed_dim) def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the last decoder layer's output of shape `(batch, tgt_len, vocab)` - the last decoder layer's attention weights of shape `(batch, tgt_len, src_len)` """ # embed positions positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out["encoder_out"] if encoder_out is not None else None, encoder_out["encoder_padding_mask"] if encoder_out is not None else None, incremental_state, ) inner_states.append(x) if self.normalize: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not 
None: x = self.project_out_dim(x) if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: x = F.linear(x, self.embed_tokens.weight) else: x = F.linear(x, self.embed_out) return x, {"attn": attn, "inner_states": inner_states} def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions) def buffered_future_mask(self, tensor): dim = tensor.size(0) if ( not hasattr(self, "_future_mask") or self._future_mask is None or self._future_mask.device != tensor.device ): self._future_mask = torch.triu( utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 ) if self._future_mask.size(0) < dim: self._future_mask = torch.triu( utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1 ) return self._future_mask[:dim, :dim] class LightConvEncoderLayer(nn.Module): """Encoder layer block. Args: args (argparse.Namespace): parsed command-line arguments kernel_size: kernel size of the convolution """ def __init__(self, args, kernel_size=0): super().__init__() self.embed_dim = args.encoder_embed_dim self.conv_dim = args.encoder_conv_dim padding_l = ( kernel_size // 2 if kernel_size % 2 == 1 else ((kernel_size - 1) // 2, kernel_size // 2) ) if args.encoder_glu: self.linear1 = Linear(self.embed_dim, 2 * self.conv_dim) self.act = nn.GLU() else: self.linear1 = Linear(self.embed_dim, self.conv_dim) self.act = None if args.encoder_conv_type == "lightweight": self.conv = LightweightConv( self.conv_dim, kernel_size, padding_l=padding_l, weight_softmax=args.weight_softmax, num_heads=args.encoder_attention_heads, weight_dropout=args.weight_dropout, ) elif args.encoder_conv_type == "dynamic": self.conv = DynamicConv( self.conv_dim, kernel_size, padding_l=padding_l, weight_softmax=args.weight_softmax, num_heads=args.encoder_attention_heads, weight_dropout=args.weight_dropout, ) else: raise NotImplementedError self.linear2 = Linear(self.conv_dim, self.embed_dim) self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) self.relu_dropout_module = FairseqDropout( args.relu_dropout, module_name=self.__class__.__name__ ) self.input_dropout_module = FairseqDropout( args.input_dropout, module_name=self.__class__.__name__ ) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.layer_norms = nn.ModuleList([LayerNorm(self.embed_dim) for _ in range(2)]) def forward(self, x, encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(0, x, before=True) x = self.input_dropout_module(x) x = self.linear1(x) if self.act is not None: x = self.act(x) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.transpose(0, 1).unsqueeze(2), 0) x = self.conv(x) x = self.linear2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(0, x, after=True) residual = x x = self.maybe_layer_norm(1, x, before=True) x = F.relu(self.fc1(x)) x = self.relu_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(1, x, after=True) return x def maybe_layer_norm(self, i, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return self.layer_norms[i](x) else: return x def extra_repr(self): return ( "dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}".format( self.dropout_module.p, self.relu_dropout_module.p, self.input_dropout_module.p, self.normalize_before, ) ) class LightConvDecoderLayer(nn.Module): """Decoder layer block. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs. Default: ``False`` kernel_size: kernel size of the convolution """ def __init__(self, args, no_encoder_attn=False, kernel_size=0): super().__init__() self.embed_dim = args.decoder_embed_dim self.conv_dim = args.decoder_conv_dim if args.decoder_glu: self.linear1 = Linear(self.embed_dim, 2 * self.conv_dim) self.act = nn.GLU() else: self.linear1 = Linear(self.embed_dim, self.conv_dim) self.act = None if args.decoder_conv_type == "lightweight": self.conv = LightweightConv( self.conv_dim, kernel_size, padding_l=kernel_size - 1, weight_softmax=args.weight_softmax, num_heads=args.decoder_attention_heads, weight_dropout=args.weight_dropout, ) elif args.decoder_conv_type == "dynamic": self.conv = DynamicConv( self.conv_dim, kernel_size, padding_l=kernel_size - 1, weight_softmax=args.weight_softmax, num_heads=args.decoder_attention_heads, weight_dropout=args.weight_dropout, ) else: raise NotImplementedError self.linear2 = Linear(self.conv_dim, self.embed_dim) self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) self.relu_dropout_module = FairseqDropout( args.relu_dropout, module_name=self.__class__.__name__ ) self.input_dropout_module = FairseqDropout( args.input_dropout, module_name=self.__class__.__name__ ) self.normalize_before = args.decoder_normalize_before self.conv_layer_norm = LayerNorm(self.embed_dim) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True, ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.need_attn = True def forward( self, x, encoder_out, encoder_padding_mask, incremental_state, prev_conv_state=None, prev_attn_state=None, conv_mask=None, conv_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.conv_layer_norm, x, before=True) if prev_conv_state is not None: if incremental_state is None: incremental_state = {} self.conv._set_input_buffer(incremental_state, prev_conv_state) x = self.input_dropout_module(x) x = self.linear1(x) if self.act is not None: x = self.act(x) x = self.conv(x, incremental_state=incremental_state) x = self.linear2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(self.conv_layer_norm, x, after=True) attn = None if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = F.relu(self.fc1(x)) x = self.relu_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def extra_repr(self): return ( "dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}".format( self.dropout_module.p, self.relu_dropout_module.p, self.input_dropout_module.p, self.normalize_before, ) ) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m @register_model_architecture("lightconv", "lightconv") def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 7) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", 
False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.relu_dropout = getattr(args, "relu_dropout", 0.0) args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.encoder_conv_dim = getattr(args, "encoder_conv_dim", args.encoder_embed_dim) args.decoder_conv_dim = getattr(args, "decoder_conv_dim", args.decoder_embed_dim) args.encoder_kernel_size_list = getattr( args, "encoder_kernel_size_list", [3, 7, 15, 31, 31, 31, 31] ) args.decoder_kernel_size_list = getattr( args, "decoder_kernel_size_list", [3, 7, 15, 31, 31, 31] ) if len(args.encoder_kernel_size_list) == 1: args.encoder_kernel_size_list = ( args.encoder_kernel_size_list * args.encoder_layers ) if len(args.decoder_kernel_size_list) == 1: args.decoder_kernel_size_list = ( args.decoder_kernel_size_list * args.decoder_layers ) assert ( len(args.encoder_kernel_size_list) == args.encoder_layers ), "encoder_kernel_size_list doesn't match encoder_layers" assert ( len(args.decoder_kernel_size_list) == args.decoder_layers ), "decoder_kernel_size_list doesn't match decoder_layers" args.encoder_glu = getattr(args, "encoder_glu", True) args.decoder_glu = getattr(args, "decoder_glu", True) args.input_dropout = getattr(args, "input_dropout", 0.1) args.weight_dropout = getattr(args, "weight_dropout", args.attention_dropout) @register_model_architecture("lightconv", "lightconv_iwslt_de_en") def lightconv_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.encoder_layers = getattr(args, "encoder_layers", 7) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.decoder_layers = getattr(args, "decoder_layers", 6) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.weight_dropout = getattr(args, "weight_dropout", 0.1) args.encoder_glu = getattr(args, "encoder_glu", False) args.decoder_glu = getattr(args, "decoder_glu", False) args.input_dropout = getattr(args, "input_dropout", 0.0) base_architecture(args) @register_model_architecture("lightconv", "lightconv_wmt_en_de") def lightconv_wmt_en_de(args): base_architecture(args) @register_model_architecture("lightconv", "lightconv_wmt_en_de_big") def lightconv_wmt_en_de_big(args): args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, 
"decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) base_architecture(args) @register_model_architecture("lightconv", "lightconv_wmt_en_fr_big") def lightconv_wmt_en_fr_big(args): args.dropout = getattr(args, "dropout", 0.1) lightconv_wmt_en_de_big(args) @register_model_architecture("lightconv", "lightconv_wmt_zh_en_big") def lightconv_wmt_zh_en_big(args): args.dropout = getattr(args, "dropout", 0.2) args.attention_dropout = getattr(args, "attention_dropout", 0.2) args.weight_dropout = getattr(args, "weight_dropout", 0.2) lightconv_wmt_en_de_big(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/lightconv.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq import utils from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.lightconv import Embedding, LightConvDecoder from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder @register_model("lightconv_lm") class LightConvLanguageModel(FairseqLanguageModel): def __init__(self, decoder): super().__init__(decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--dropout", default=0.1, type=float, metavar="D", help="dropout probability", ) parser.add_argument( "--attention-dropout", default=0.0, type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--relu-dropout", default=0.0, type=float, metavar="D", help="dropout probability after ReLU in FFN", ) parser.add_argument( "--input-dropout", type=float, metavar="D", help="dropout probability of the inputs", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-output-dim", type=int, metavar="N", help="decoder output dimension", ) parser.add_argument( "--decoder-input-dim", type=int, metavar="N", help="decoder input dimension" ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads or LightConv/DynamicConv heads", ) parser.add_argument( "--decoder-normalize-before", default=False, action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--adaptive-softmax-cutoff", metavar="EXPR", help="comma separated list of adaptive softmax cutoff points. 
" "Must be used with adaptive_loss criterion", ) parser.add_argument( "--adaptive-softmax-dropout", type=float, metavar="D", help="sets adaptive softmax dropout for the tail projections", ) parser.add_argument( "--adaptive-softmax-factor", type=float, metavar="N", help="adaptive input factor", ) parser.add_argument( "--no-token-positional-embeddings", default=False, action="store_true", help="if set, disables positional embeddings (outside self attention)", ) parser.add_argument( "--share-decoder-input-output-embed", default=False, action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--character-embeddings", default=False, action="store_true", help="if set, uses character embedding convolutions to produce token embeddings", ) parser.add_argument( "--character-filters", type=str, metavar="LIST", default="[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]", help="size of character embeddings", ) parser.add_argument( "--character-embedding-dim", type=int, metavar="N", default=4, help="size of character embeddings", ) parser.add_argument( "--char-embedder-highway-layers", type=int, metavar="N", default=2, help="number of highway layers for character token embeddder", ) parser.add_argument( "--adaptive-input", default=False, action="store_true", help="if set, uses adaptive input", ) parser.add_argument( "--adaptive-input-factor", type=float, metavar="N", help="adaptive input factor", ) parser.add_argument( "--adaptive-input-cutoff", metavar="EXPR", help="comma separated list of adaptive input cutoff points.", ) parser.add_argument( "--tie-adaptive-weights", action="store_true", help="if set, ties the weights of adaptive softmax and adaptive input", ) parser.add_argument( "--tie-adaptive-proj", action="store_true", help="if set, ties the projection weights of adaptive softmax and adaptive input", ) parser.add_argument( "--decoder-learned-pos", action="store_true", help="use learned positional embeddings in the decoder", ) """LightConv and DynamicConv arguments""" parser.add_argument( "--decoder-kernel-size-list", type=lambda x: utils.eval_str_list(x, int), help='list of kernel size (default: "[3,7,15,31,31,31]")', ) parser.add_argument( "--decoder-glu", type=utils.eval_bool, help="glu after in proj" ) parser.add_argument( "--decoder-conv-type", default="dynamic", type=str, choices=["dynamic", "lightweight"], help="type of convolution", ) parser.add_argument("--weight-softmax", default=True, type=utils.eval_bool) parser.add_argument( "--weight-dropout", type=float, metavar="D", help="dropout probability for conv weights", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_lm_architecture(args) if getattr(args, "max_source_positions", None) is None: args.max_source_positions = args.tokens_per_sample if getattr(args, "max_target_positions", None) is None: args.max_target_positions = args.tokens_per_sample if args.character_embeddings: embed_tokens = CharacterTokenEmbedder( task.dictionary, eval(args.character_filters), args.character_embedding_dim, args.decoder_embed_dim, args.char_embedder_highway_layers, ) elif args.adaptive_input: embed_tokens = AdaptiveInput( len(task.dictionary), task.dictionary.pad(), args.decoder_input_dim, args.adaptive_input_factor, args.decoder_embed_dim, utils.eval_str_list(args.adaptive_input_cutoff, type=int), ) else: embed_tokens = Embedding( len(task.dictionary), args.decoder_input_dim, task.dictionary.pad() ) if 
args.tie_adaptive_weights: assert args.adaptive_input assert args.adaptive_input_factor == args.adaptive_softmax_factor assert ( args.adaptive_softmax_cutoff == args.adaptive_input_cutoff ), "{} != {}".format( args.adaptive_softmax_cutoff, args.adaptive_input_cutoff ) assert args.decoder_input_dim == args.decoder_output_dim decoder = LightConvDecoder( args, task.output_dictionary, embed_tokens, no_encoder_attn=True, final_norm=False, ) return LightConvLanguageModel(decoder) @register_model_architecture("lightconv_lm", "lightconv_lm") def base_lm_architecture(args): args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.character_embeddings = getattr(args, "character_embeddings", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.decoder_conv_dim = getattr(args, "decoder_conv_dim", args.decoder_embed_dim) # The model training is not stable without this args.decoder_normalize_before = True args.adaptive_input = getattr(args, "adaptive_input", False) args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False) args.decoder_kernel_size_list = getattr( args, "decoder_kernel_size_list", [3, 7, 15, 31, 31, 31] ) if len(args.decoder_kernel_size_list) == 1: args.decoder_kernel_size_list = ( args.decoder_kernel_size_list * args.decoder_layers ) assert ( len(args.decoder_kernel_size_list) == args.decoder_layers ), "decoder_kernel_size_list doesn't match decoder_layers" args.decoder_glu = getattr(args, "decoder_glu", True) args.input_dropout = getattr(args, "input_dropout", 0.1) args.weight_dropout = getattr(args, "weight_dropout", args.attention_dropout) @register_model_architecture("lightconv_lm", "lightconv_lm_gbw") def lightconv_lm_gbw(args): args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) base_lm_architecture(args)
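# ---------------------------------------------------------------------------
# Illustrative sketch: the constraints that build_model() above asserts when
# --tie-adaptive-weights is used.  The Namespace values are hypothetical; the
# point is only that the adaptive-input and adaptive-softmax settings must
# line up (matching factors, matching cutoffs, equal input/output dims).
if __name__ == "__main__":
    from argparse import Namespace

    args = Namespace(
        adaptive_input=True,
        adaptive_input_factor=4,
        adaptive_softmax_factor=4,
        adaptive_input_cutoff="20000,60000",
        adaptive_softmax_cutoff="20000,60000",
        decoder_input_dim=512,
        decoder_output_dim=512,
    )
    # These mirror the assertions in build_model() above.
    assert args.adaptive_input
    assert args.adaptive_input_factor == args.adaptive_softmax_factor
    assert args.adaptive_softmax_cutoff == args.adaptive_input_cutoff
    assert args.decoder_input_dim == args.decoder_output_dim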
KosmosX-API-main
kosmosX/fairseq/fairseq/models/lightconv_lm.py
#!/usr/bin/env python3 """ This module has the EMA class used to store a copy of the exponentially decayed model params. Typical usage of EMA class involves initializing an object using an existing model (random or from a seed model) and setting the config like ema_decay, ema_start_update which determine how the EMA model is updated. After every update of the model i.e. at the end of the train_step, the EMA should be updated by passing the new model to the EMA.step function. The EMA model state dict can be stored in the extra state under the key of "ema" and dumped into a checkpoint and loaded. The EMA object can be passed to tasks by setting task.uses_ema property. EMA is a smoothed/ensemble model which might have better performance when used for inference or further fine-tuning. EMA class has a reverse function to load the EMA params into a model and use it like a regular model. """ import copy import logging import torch from fairseq import checkpoint_utils class EMA(object): """Exponential Moving Average of Fairseq Models EMA keeps a copy of the exponentially decayed model params. The set of params should include both gradient-descent and non-gradient descent params, such as batch mean/var and buffers. This is a modified implementation of the open source code in https://github.com/zhawe01/fairseq-gec.git, and internal source code in fbcode/mobile-vision/projects/classification_pytorch/lib/utils/model_ema.py. Similar to TF EMA. https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage. EMA provides a averaged and smoothed set of model weights, and has been shown to improve vision models. EMA class does all necessary functions to update, reload, or init EMA methods. EMA object is initialized from an arbitrary model. By default, it is stored in the same device (unless device specified at initialization) and with the same precision as the model (unless ema_fp32 is True). ema_fp32 is recommended. This stores the EMA parameters in fp32 only for the EMA update step, and is used at the default precision otherwise. EMA is usually enabled using EMAConfig with store_ema=True. Some important parameters to configure EMA are 1) ema_decay - The decay of EMA 2) ema_update_freq - EMA is updated every this many model updates. 3) ema_start_update - Start EMA update after this many model updates [default 0] Key methods: 1) step - One update of EMA using new model 2) restore - Update EMA from a state dict 3) reverse - Load EMA into a model 4) get_decay, _set_decay - Used to get or set the decay. Note _set_decay is called from step. 5) build_fp32_params - Used to initialize or update the fp32 copy of EMA params. Note this is enabled only when ema_fp32=True """ def __init__(self, model, config, device=None, skip_keys=None): """ @param model model to initialize the EMA with @param config EMAConfig object with configuration like ema_decay, ema_update_freq, ema_fp32 @param device If provided, copy EMA to this device (e.g. gpu). Otherwise EMA is in the same device as the model. 
""" self.decay = config.ema_decay self.model = copy.deepcopy(model) self.model.requires_grad_(False) self.config = config self.skip_keys = skip_keys or set() self.fp32_params = {} if self.config.ema_seed_model is not None: state = checkpoint_utils.load_ema_from_checkpoint( self.config.ema_seed_model ) self.model.load_state_dict(state["model"], strict=True) if device is not None: logging.info(f"Copying EMA model to device {device}") self.model = self.model.to(device=device) if self.config.ema_fp32: self.build_fp32_params() self.update_freq_counter = 0 def get_model(self): return self.model def build_fp32_params(self, state_dict=None): """ Store a copy of the EMA params in fp32. If state dict is passed, the EMA params is copied from the provided state dict. Otherwise, it is copied from the current EMA model parameters. """ if not self.config.ema_fp32: raise RuntimeError( "build_fp32_params should not be called if ema_fp32=False. " "Use ema_fp32=True if this is really intended." ) if state_dict is None: state_dict = self.model.state_dict() def _to_float(t): return t.float() if torch.is_floating_point(t) else t for param_key in state_dict: if param_key in self.fp32_params: self.fp32_params[param_key].copy_(state_dict[param_key]) else: self.fp32_params[param_key] = _to_float(state_dict[param_key]) def restore(self, state_dict, build_fp32_params=False): """Load data from a model spec into EMA model""" self.model.load_state_dict(state_dict, strict=False) if build_fp32_params: self.build_fp32_params(state_dict) def _set_decay(self, decay): self.decay = decay def get_decay(self): return self.decay def _step_internal(self, new_model, updates=None): """One update of the EMA model based on new model weights""" decay = self.decay ema_state_dict = {} ema_params = ( self.fp32_params if self.config.ema_fp32 else self.model.state_dict() ) for key, param in new_model.state_dict().items(): if isinstance(param, dict): continue try: ema_param = ema_params[key] except KeyError: ema_param = ( param.float().clone() if param.ndim == 1 else copy.deepcopy(param) ) if param.shape != ema_param.shape: raise ValueError( "incompatible tensor shapes between model param and ema param" + "{} vs. {}".format(param.shape, ema_param.shape) ) if "version" in key: # Do not decay a model.version pytorch param continue if key in self.skip_keys: ema_param = param.to(dtype=ema_param.dtype).clone() else: ema_param.mul_(decay) ema_param.add_(param.to(dtype=ema_param.dtype), alpha=1 - decay) ema_state_dict[key] = ema_param self.restore(ema_state_dict, build_fp32_params=False) def step(self, new_model, updates=None): """ One update of EMA which is done every self.config.ema_update_freq updates of the model. @param updates The current number of model updates done. Decay is set of 0 if model updates < ema_start_update, which means the model will be simply copied over to the EMA. When model updates >= ema_start_updates, then EMA is updated with a decay of self.config.ema_decay. """ if updates is not None: self._set_decay( 0 if updates < self.config.ema_start_update else self.config.ema_decay ) if updates is not None and self.config.ema_update_freq > 1: self.update_freq_counter += 1 if self.update_freq_counter >= self.config.ema_update_freq: self._step_internal(new_model, updates) self.update_freq_counter = 0 else: self._step_internal(new_model, updates) def reverse(self, model): """ Load the model parameters from EMA model. Useful for inference or fine-tuning from the EMA model. 
""" d = self.model.state_dict() if "_ema" in d: del d["_ema"] model.load_state_dict(d, strict=False) return model
KosmosX-API-main
kosmosX/fairseq/fairseq/models/ema/ema.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

from .ema import EMA


def build_ema(model, cfg, device):
    return EMA(model, cfg, device)


# automatically import any Python files in the models/ema/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
    if file.endswith(".py") and not file.startswith("_"):
        file_name = file[: file.find(".py")]
        importlib.import_module("fairseq.models.ema." + file_name)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/ema/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .modules import *  # noqa
from .s2s_transformer import *  # noqa
KosmosX-API-main
kosmosX/fairseq/fairseq/models/speech_to_speech/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from pathlib import Path from typing import Any, Dict, List, Optional import torch from torch import Tensor from fairseq import checkpoint_utils, utils from fairseq.models import ( FairseqEncoderModel, FairseqEncoderDecoderModel, FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.speech_to_text import S2TTransformerEncoder from fairseq.models.speech_to_speech.modules import CTCDecoder, StackedEmbedding from fairseq.models.text_to_speech import TTSTransformerDecoder from fairseq.models.transformer import ( Linear, TransformerDecoder, TransformerModelBase, ) logger = logging.getLogger(__name__) class S2STransformerEncoder(S2TTransformerEncoder): """Based on S2T transformer encoder, with support to incorporate target speaker embedding.""" def __init__(self, args): super().__init__(args) self.spk_emb_proj = None if args.target_speaker_embed: self.spk_emb_proj = Linear( args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim ) def forward( self, src_tokens, src_lengths, tgt_speaker=None, return_all_hiddens=False ): out = super().forward(src_tokens, src_lengths, return_all_hiddens) if self.spk_emb_proj: x = out["encoder_out"][0] seq_len, bsz, _ = x.size() tgt_speaker_emb = tgt_speaker.view(1, bsz, -1).expand(seq_len, bsz, -1) x = self.spk_emb_proj(torch.cat([x, tgt_speaker_emb], dim=2)) out["encoder_out"][0] = x return out class TransformerUnitDecoder(TransformerDecoder): """Based on Transformer decoder, with support to decoding stacked units""" def __init__( self, args, dictionary, embed_tokens, no_encoder_attn=False, output_projection=None, ): super().__init__( args, dictionary, embed_tokens, no_encoder_attn, output_projection ) self.n_frames_per_step = args.n_frames_per_step self.out_proj_n_frames = ( Linear( self.output_embed_dim, self.output_embed_dim * self.n_frames_per_step, bias=False, ) if self.n_frames_per_step > 1 else None ) def forward( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, features_only: bool = False, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, src_lengths: Optional[Any] = None, return_all_hiddens: bool = False, ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (optional): output from the encoder, used for encoder-side attention, should be of size T x B x C incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` features_only (bool, optional): only return features without applying output layer (default: False). full_context_alignment (bool, optional): don't apply auto-regressive mask to self-attention (default: False). 
Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, full_context_alignment=full_context_alignment, alignment_layer=alignment_layer, alignment_heads=alignment_heads, ) if not features_only: bsz, seq_len, d = x.size() if self.out_proj_n_frames: x = self.out_proj_n_frames(x) x = self.output_layer(x.view(bsz, seq_len, self.n_frames_per_step, d)) x = x.view(bsz, seq_len * self.n_frames_per_step, -1) if ( incremental_state is None and self.n_frames_per_step > 1 ): # teacher-forcing mode in training x = x[ :, : -(self.n_frames_per_step - 1), : ] # remove extra frames after <eos> return x, extra def upgrade_state_dict_named(self, state_dict, name): if self.n_frames_per_step > 1: move_keys = [ ( f"{name}.project_in_dim.weight", f"{name}.embed_tokens.project_in_dim.weight", ) ] for from_k, to_k in move_keys: if from_k in state_dict and to_k not in state_dict: state_dict[to_k] = state_dict[from_k] del state_dict[from_k] class S2STransformerMultitaskModelBase(FairseqEncoderDecoderModel): @classmethod def build_encoder(cls, args): encoder = S2STransformerEncoder(args) pretraining_path = getattr(args, "load_pretrained_encoder_from", None) if pretraining_path is not None: if not Path(pretraining_path).exists(): logger.warning( f"skipped pretraining because {pretraining_path} does not exist" ) else: encoder = checkpoint_utils.load_pretrained_component_from_model( component=encoder, checkpoint=pretraining_path ) logger.info(f"loaded pretrained encoder from: {pretraining_path}") return encoder @classmethod def build_multitask_decoder(cls, args, tgt_dict, in_dim): decoder_args = args.decoder_args decoder_args.encoder_embed_dim = in_dim if args.decoder_type == "transformer": base_multitask_text_transformer_decoder_arch(decoder_args) task_decoder = TransformerDecoder( decoder_args, tgt_dict, embed_tokens=TransformerModelBase.build_embedding( decoder_args, tgt_dict, decoder_args.decoder_embed_dim, ), ) elif args.decoder_type == "ctc": task_decoder = CTCDecoder( dictionary=tgt_dict, in_dim=in_dim, ) else: raise NotImplementedError( "currently only support multitask decoder_type 'transformer', 'ctc'" ) return task_decoder @classmethod def build_model(cls, args, task): encoder = cls.build_encoder(args) decoder = ( cls.build_decoder(args, task.target_dictionary) if task.args.target_is_code else cls.build_decoder(args) ) base_model = cls(encoder, decoder) # set up multitask decoders base_model.multitask_decoders = {} for task_name, task_obj in task.multitask_tasks.items(): in_dim = ( args.encoder_embed_dim if task_obj.args.input_from == "encoder" else args.decoder_embed_dim ) task_decoder = cls.build_multitask_decoder( task_obj.args, task_obj.target_dictionary, in_dim ) setattr(base_model, f"{task_name}_decoder", task_decoder) decoder_model_cls = ( FairseqEncoderModel if task_obj.args.decoder_type == "ctc" else FairseqLanguageModel ) base_model.multitask_decoders[task_name] = decoder_model_cls( getattr(base_model, f"{task_name}_decoder") ) return base_model def forward_encoder(self, src_tokens, src_lengths, speaker=None, **kwargs): return self.encoder( src_tokens, src_lengths=src_lengths, tgt_speaker=speaker, **kwargs ) @register_model("s2ut_transformer") class S2UTTransformerModel(S2STransformerMultitaskModelBase): """ Direct speech-to-speech translation model with S2T Transformer encoder + Transformer discrete unit decoder 
https://arxiv.org/abs/2107.05604 """ @staticmethod def add_args(parser): # input parser.add_argument( "--conv-kernel-sizes", type=str, metavar="N", help="kernel sizes of Conv1d subsampling layers", ) parser.add_argument( "--conv-channels", type=int, metavar="N", help="# of channels in Conv1d subsampling layers", ) # Transformer parser.add_argument( "--activation-fn", type=str, default="relu", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--activation-dropout", "--relu-dropout", type=float, metavar="D", help="dropout probability after activation in FFN.", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="num encoder layers" ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads", ) parser.add_argument( "--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads", ) parser.add_argument( "--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--share-decoder-input-output-embed", action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--layernorm-embedding", action="store_true", help="add layernorm to embedding", ) parser.add_argument( "--no-scale-embedding", action="store_true", help="if True, dont scale embeddings", ) parser.add_argument( "--load-pretrained-encoder-from", type=str, metavar="STR", help="model to take encoder weights from (for initialization)", ) parser.add_argument( "--encoder-freezing-updates", type=int, metavar="N", help="freeze encoder for first N updates", ) # speaker parser.add_argument( "--speaker-embed-dim", type=int, metavar="N", help="speaker embedding dimension", ) @classmethod def build_decoder(cls, args, tgt_dict): num_embeddings = len(tgt_dict) padding_idx = tgt_dict.pad() embed_tokens = StackedEmbedding( num_embeddings, args.decoder_embed_dim, padding_idx, num_stacked=args.n_frames_per_step, ) return TransformerUnitDecoder( args, tgt_dict, embed_tokens, ) def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_speaker=None, return_all_hiddens=False, ): encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, tgt_speaker=tgt_speaker, return_all_hiddens=return_all_hiddens, ) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, ) if return_all_hiddens: decoder_out[-1]["encoder_states"] = encoder_out["encoder_states"] decoder_out[-1]["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ] return decoder_out @register_model("s2spect_transformer") class 
S2SpecTTransformerModel(S2STransformerMultitaskModelBase): """ Speech-to-spectrogram model with S2T Transformer encoder + TTS Transformer decoder """ @staticmethod def add_args(parser): # input parser.add_argument( "--conv-kernel-sizes", type=str, metavar="N", help="kernel sizes of Conv1d subsampling layers", ) parser.add_argument( "--conv-channels", type=int, metavar="N", help="# of channels in Conv1d subsampling layers", ) # Transformer parser.add_argument( "--activation-fn", type=str, default="relu", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--activation-dropout", "--relu-dropout", type=float, metavar="D", help="dropout probability after activation in FFN.", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="num encoder layers" ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads", ) parser.add_argument( "--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) parser.add_argument( "--no-scale-embedding", action="store_true", help="if True, dont scale embeddings", ) parser.add_argument( "--load-pretrained-encoder-from", type=str, metavar="STR", help="model to take encoder weights from (for initialization)", ) parser.add_argument( "--encoder-freezing-updates", type=int, metavar="N", help="freeze encoder for first N updates", ) # speaker parser.add_argument( "--speaker-embed-dim", type=int, metavar="N", help="speaker embedding dimension", ) # decoder parser.add_argument("--output-frame-dim", type=int) # decoder prenet parser.add_argument("--prenet-dropout", type=float) parser.add_argument("--prenet-layers", type=int) parser.add_argument("--prenet-dim", type=int) # decoder postnet parser.add_argument("--postnet-dropout", type=float) parser.add_argument("--postnet-layers", type=int) parser.add_argument("--postnet-conv-dim", type=int) parser.add_argument("--postnet-conv-kernel-size", type=int) # decoder transformer layers parser.add_argument("--decoder-transformer-layers", type=int) parser.add_argument("--decoder-embed-dim", type=int) parser.add_argument("--decoder-ffn-embed-dim", type=int) parser.add_argument("--decoder-normalize-before", action="store_true") parser.add_argument("--decoder-attention-heads", type=int) @classmethod def build_decoder(cls, args): return TTSTransformerDecoder(args, None, padding_idx=1) def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_speaker=None, incremental_state=None, target_lengths=None, speaker=None, return_all_hiddens=False, ): encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, tgt_speaker=tgt_speaker, return_all_hiddens=return_all_hiddens, ) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, target_lengths=target_lengths, speaker=speaker, ) if return_all_hiddens: decoder_out[-1]["encoder_states"] = encoder_out["encoder_states"] decoder_out[-1]["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ] return decoder_out def 
base_multitask_text_transformer_decoder_arch(args): args.dropout = getattr(args, "dropout", 0.3) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", True ) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.max_target_positions = getattr(args, "max_target_positions", 1024) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.adaptive_input = getattr(args, "adaptive_input", False) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.decoder_layers = getattr(args, "decoder_layers", 2) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) # decoder layer args.activation_dropout = getattr(args, "activation_dropout", args.dropout) args.activation_fn = getattr(args, "activation_fn", "relu") args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048) args.attention_dropout = getattr(args, "attention_dropout", args.dropout) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) def base_s2st_transformer_encoder_architecture(args): args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0) # Convolutional subsampler args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5") args.conv_channels = getattr(args, "conv_channels", 1024) # Transformer args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", args.dropout) args.activation_dropout = getattr(args, "activation_dropout", args.dropout) args.activation_fn = getattr(args, "activation_fn", "relu") args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 256) @register_model_architecture( model_name="s2ut_transformer", arch_name="s2ut_transformer" ) def s2ut_architecture_base(args): base_s2st_transformer_encoder_architecture(args) # decoder args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) 
args.adaptive_input = getattr(args, "adaptive_input", False) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) @register_model_architecture("s2ut_transformer", "s2ut_transformer_fisher") def s2ut_architecture_fisher(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.dropout = getattr(args, "dropout", 0.1) s2ut_architecture_base(args) @register_model_architecture( model_name="s2spect_transformer", arch_name="s2spect_transformer" ) def s2spect_architecture_base(args): base_s2st_transformer_encoder_architecture(args) # decoder args.output_frame_dim = getattr(args, "output_frame_dim", 80) # decoder prenet args.prenet_dropout = getattr(args, "prenet_dropout", 0.5) args.prenet_layers = getattr(args, "prenet_layers", 2) args.prenet_dim = getattr(args, "prenet_dim", 256) # decoder postnet args.postnet_dropout = getattr(args, "postnet_dropout", 0.5) args.postnet_layers = getattr(args, "postnet_layers", 5) args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512) args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5) # decoder transformer layers args.decoder_transformer_layers = getattr(args, "decoder_transformer_layers", 6) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", 4 * args.decoder_embed_dim ) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) @register_model_architecture("s2spect_transformer", "s2spect_transformer_fisher") def s2spect_architecture_fisher(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.dropout = getattr(args, "dropout", 0.1) # decoder args.prenet_dim = getattr(args, "prenet_dim", 32) s2spect_architecture_base(args)
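# ---------------------------------------------------------------------------
# Illustrative sketch of the shape bookkeeping in TransformerUnitDecoder.forward()
# (defined earlier in this file) when n_frames_per_step > 1: each decoder
# position is projected to n_frames_per_step frames, scored per frame, flattened
# along the time axis, and the trailing frames after <eos> are dropped during
# teacher forcing.  Plain nn.Linear layers stand in for the real projections and
# the sizes below are hypothetical.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    bsz, seq_len, d, n_frames, vocab = 2, 5, 16, 2, 100
    x = torch.randn(bsz, seq_len, d)
    out_proj_n_frames = nn.Linear(d, d * n_frames, bias=False)
    output_layer = nn.Linear(d, vocab)

    x = out_proj_n_frames(x)  # (bsz, seq_len, n_frames * d)
    x = output_layer(x.view(bsz, seq_len, n_frames, d))  # (bsz, seq_len, n_frames, vocab)
    x = x.view(bsz, seq_len * n_frames, -1)  # (bsz, seq_len * n_frames, vocab)
    x = x[:, : -(n_frames - 1), :]  # drop extra frames after <eos> (teacher forcing)
    assert x.shape == (bsz, seq_len * n_frames - (n_frames - 1), vocab)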
KosmosX-API-main
kosmosX/fairseq/fairseq/models/speech_to_speech/s2s_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
from torch import nn

from fairseq.models import FairseqEncoder
from fairseq.models.transformer import Linear


class CTCDecoder(FairseqEncoder):
    def __init__(self, dictionary, in_dim):
        super().__init__(dictionary)
        self.proj = nn.Linear(in_dim, len(dictionary))

    def forward(self, src_tokens, src_lengths=None, **kwargs):
        encoder_out = self.proj(src_tokens)
        return {"encoder_out": encoder_out}


class StackedEmbedding(nn.Embedding):
    """Embedding module that supports stacked units -> single embedding"""

    def __init__(self, num_embeddings, embed_dim, padding_idx, num_stacked=1):
        super().__init__(num_embeddings, embed_dim, padding_idx)
        # follow transformer.Embedding
        nn.init.normal_(self.weight, mean=0, std=embed_dim ** -0.5)
        nn.init.constant_(self.weight[padding_idx], 0)

        self.offset = (
            4  # skip <bos>, <pad>, <eos>, <unk>, specific to fairseq dictionary
        )
        self.vocab_size = num_embeddings - self.offset
        self.num_stacked = num_stacked

        if self.num_stacked > 1:
            self.project_in_dim = Linear(
                embed_dim * num_stacked, embed_dim, bias=False
            )

    def forward(self, input):
        if self.num_stacked == 1:
            return super().forward(input)

        # expand input indices
        mask = input >= self.offset
        stacked_input = []
        cum_input = input.new_zeros(input.shape)
        for i in range(1, self.num_stacked + 1):
            div = pow(self.vocab_size, i)
            next_input = torch.remainder(input - self.offset - cum_input, div)
            cum_input += next_input
            next_input = torch.floor_divide(next_input, div // self.vocab_size)
            stacked_input.append((next_input + self.offset) * mask + input * ~mask)
        stacked_input = torch.stack(stacked_input[::-1], dim=2)

        embed = super().forward(stacked_input).view(input.size(0), input.size(1), -1)
        embed = self.project_in_dim(embed)
        return embed
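# ---------------------------------------------------------------------------
# Illustrative sketch of how StackedEmbedding.forward() above unpacks one
# combined index: a stacked token id is `offset + d0 + d1 * V + d2 * V**2 + ...`
# with V = vocab_size and d0 the least-significant digit; the loop recovers the
# digits (most-significant first after the final reversal) and re-adds the
# offset before embedding.  The numbers below are hypothetical.
if __name__ == "__main__":
    V, offset, num_stacked = 100, 4, 2
    d0, d1 = 7, 42  # zero-based unit indices
    combined = torch.tensor([[offset + d0 + d1 * V]])  # 4 + 7 + 4200 = 4211

    digits = []
    cum = torch.zeros_like(combined)
    for i in range(1, num_stacked + 1):
        div = V ** i
        nxt = torch.remainder(combined - offset - cum, div)
        cum += nxt
        digits.append(torch.div(nxt, div // V, rounding_mode="floor") + offset)
    # Reversed, as in forward(): most-significant digit first, offset restored.
    assert [d.item() for d in digits[::-1]] == [d1 + offset, d0 + offset]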
KosmosX-API-main
kosmosX/fairseq/fairseq/models/speech_to_speech/modules.py
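A hedged numerical check of the index arithmetic in StackedEmbedding above: token ids at or above `offset` are treated as base-`vocab_size` numbers packing `num_stacked` units, and the loop extracts one digit per stacked unit (least-significant first, then reversed). This is a standalone reproduction with made-up sizes, not an import of the module.

import torch

offset, vocab_size, num_stacked = 4, 10, 3
# a "stacked" id encoding units (7, 2, 5), most-significant first: 7*100 + 2*10 + 5
packed = torch.tensor([offset + 7 * 100 + 2 * 10 + 5])

digits, cum = [], torch.zeros_like(packed)
for i in range(1, num_stacked + 1):
    div = vocab_size ** i
    rem = torch.remainder(packed - offset - cum, div)   # strip lower digits
    cum = cum + rem
    digits.append(torch.div(rem, div // vocab_size, rounding_mode="floor"))
print([int(d) for d in reversed(digits)])  # -> [7, 2, 5]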
#!/usr/bin/env python3 import logging import math from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerEncoderLayer from torch import Tensor logger = logging.getLogger(__name__) @register_model("convtransformer") class ConvTransformerModel(FairseqEncoderDecoderModel): """ Transformer-based Speech translation model from ESPNet-ST https://arxiv.org/abs/2004.10234 """ def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--input-feat-per-channel", type=int, metavar="N", help="encoder input dimension per input channel", ) parser.add_argument( "--activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--activation-dropout", "--relu-dropout", type=float, metavar="D", help="dropout probability after activation in FFN.", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="num encoder layers" ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads", ) parser.add_argument( "--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads", ) parser.add_argument( "--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--decoder-output-dim", type=int, metavar="N", help="decoder output dimension (extra linear layer if different from decoder embed dim)", ) parser.add_argument( "--share-decoder-input-output-embed", action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--layernorm-embedding", action="store_true", help="add layernorm to embedding", ) parser.add_argument( "--no-scale-embedding", action="store_true", help="if True, dont scale embeddings", ) parser.add_argument( "--load-pretrained-encoder-from", type=str, metavar="STR", help="model to take encoder weights from (for initialization)", ) parser.add_argument( "--load-pretrained-decoder-from", type=str, metavar="STR", help="model to take decoder weights from (for initialization)", ) parser.add_argument( "--conv-out-channels", type=int, metavar="INT", help="the number of output channels of 
conv layer", ) @classmethod def build_encoder(cls, args): encoder = ConvTransformerEncoder(args) if getattr(args, "load_pretrained_encoder_from", None): encoder = checkpoint_utils.load_pretrained_component_from_model( component=encoder, checkpoint=args.load_pretrained_encoder_from ) return encoder @classmethod def build_decoder(cls, args, task, embed_tokens): decoder = TransformerDecoderNoExtra(args, task.target_dictionary, embed_tokens) if getattr(args, "load_pretrained_decoder_from", None): decoder = checkpoint_utils.load_pretrained_component_from_model( component=decoder, checkpoint=args.load_pretrained_decoder_from ) return decoder @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) def build_embedding(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() return Embedding(num_embeddings, embed_dim, padding_idx) decoder_embed_tokens = build_embedding( task.target_dictionary, args.decoder_embed_dim ) encoder = cls.build_encoder(args) decoder = cls.build_decoder(args, task, decoder_embed_tokens) return cls(encoder, decoder) @staticmethod @torch.jit.unused def set_batch_first(lprobs): lprobs.batch_first = True def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): # net_output['encoder_out'] is a (B, T, D) tensor lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample) if self.training: self.set_batch_first(lprobs) return lprobs def output_layout(self): return "BTD" """ The forward method inherited from the base class has a **kwargs argument in its input, which is not supported in torchscript. This method overrites the forward method definition without **kwargs. 
""" def forward(self, src_tokens, src_lengths, prev_output_tokens): encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths) decoder_out = self.decoder( prev_output_tokens=prev_output_tokens, encoder_out=encoder_out ) return decoder_out class ConvTransformerEncoder(FairseqEncoder): """Conv + Transformer encoder""" def __init__(self, args): """Construct an Encoder object.""" super().__init__(None) self.dropout = args.dropout self.embed_scale = ( 1.0 if args.no_scale_embedding else math.sqrt(args.encoder_embed_dim) ) self.padding_idx = 1 self.in_channels = 1 self.input_dim = args.input_feat_per_channel self.conv = torch.nn.Sequential( torch.nn.Conv2d(1, args.conv_out_channels, 3, stride=2, padding=3 // 2), torch.nn.ReLU(), torch.nn.Conv2d( args.conv_out_channels, args.conv_out_channels, 3, stride=2, padding=3 // 2, ), torch.nn.ReLU(), ) transformer_input_dim = self.infer_conv_output_dim( self.in_channels, self.input_dim, args.conv_out_channels ) self.out = torch.nn.Linear(transformer_input_dim, args.encoder_embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, args.encoder_embed_dim, self.padding_idx, learned=False, ) self.transformer_layers = nn.ModuleList([]) self.transformer_layers.extend( [TransformerEncoderLayer(args) for i in range(args.encoder_layers)] ) if args.encoder_normalize_before: self.layer_norm = LayerNorm(args.encoder_embed_dim) else: self.layer_norm = None def pooling_ratio(self): return 4 def infer_conv_output_dim(self, in_channels, input_dim, out_channels): sample_seq_len = 200 sample_bsz = 10 x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim) x = torch.nn.Conv2d(1, out_channels, 3, stride=2, padding=3 // 2)(x) x = torch.nn.Conv2d(out_channels, out_channels, 3, stride=2, padding=3 // 2)(x) x = x.transpose(1, 2) mb, seq = x.size()[:2] return x.contiguous().view(mb, seq, -1).size(-1) def forward(self, src_tokens, src_lengths): """Encode input sequence. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :return: position embedded tensor and mask :rtype Tuple[torch.Tensor, torch.Tensor]: """ bsz, max_seq_len, _ = src_tokens.size() x = ( src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim) .transpose(1, 2) .contiguous() ) x = self.conv(x) bsz, _, output_seq_len, _ = x.size() x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1) x = self.out(x) x = self.embed_scale * x subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5) input_len_0 = (src_lengths.float() / subsampling_factor).ceil().long() input_len_1 = x.size(0) * torch.ones([src_lengths.size(0)]).long().to( input_len_0.device ) input_lengths = torch.min(input_len_0, input_len_1) encoder_padding_mask = lengths_to_padding_mask(input_lengths) positions = self.embed_positions(encoder_padding_mask).transpose(0, 1) x += positions x = F.dropout(x, p=self.dropout, training=self.training) for layer in self.transformer_layers: x = layer(x, encoder_padding_mask) if not encoder_padding_mask.any(): maybe_encoder_padding_mask = None else: maybe_encoder_padding_mask = encoder_padding_mask return { "encoder_out": [x], "encoder_padding_mask": [maybe_encoder_padding_mask] if maybe_encoder_padding_mask is not None else [], "encoder_embedding": [], "encoder_states": [], "src_tokens": [], "src_lengths": [], } @torch.jit.export def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)] if len(encoder_out["encoder_padding_mask"]) == 0: new_encoder_padding_mask = [] else: new_encoder_padding_mask = [ (encoder_out["encoder_padding_mask"][0]).index_select(0, new_order) ] if len(encoder_out["encoder_embedding"]) == 0: new_encoder_embedding = [] else: new_encoder_embedding = [ (encoder_out["encoder_embedding"][0]).index_select(0, new_order) ] encoder_states = encoder_out["encoder_states"] if len(encoder_states) > 0: for idx, state in enumerate(encoder_states): encoder_states[idx] = state.index_select(1, new_order) return { "encoder_out": new_encoder_out, "encoder_padding_mask": new_encoder_padding_mask, "encoder_embedding": new_encoder_embedding, "encoder_states": encoder_states, "src_tokens": [], "src_lengths": [], } class TransformerDecoderNoExtra(TransformerDecoder): def extract_features( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): # call scriptable method from parent class x, _ = self.extract_features_scriptable( prev_output_tokens, encoder_out, incremental_state, full_context_alignment, alignment_layer, alignment_heads, ) return x, None @register_model_architecture(model_name="convtransformer", arch_name="convtransformer") def base_architecture(args): args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) 
args.max_source_positions = getattr(args, "max_source_positions", 3000) args.max_target_positions = getattr(args, "max_target_positions", 1024) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.conv_out_channels = getattr(args, "conv_out_channels", args.encoder_embed_dim) @register_model_architecture("convtransformer", "convtransformer_espnet") def convtransformer_espnet(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/speech_to_text/convtransformer.py
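A hedged sketch of how ConvTransformerEncoder sizes its projection: it runs a dummy batch through the two stride-2 convolutions to infer the flattened feature dimension, which also shows the roughly 4x temporal subsampling. The shapes below are assumed example values, and the helper is a standalone reproduction rather than the repository method itself.

import torch

def infer_conv_output_dim(in_channels, input_dim, out_channels):
    x = torch.randn(10, in_channels, 200, input_dim)                  # (B, C, T, feat)
    x = torch.nn.Conv2d(in_channels, out_channels, 3, stride=2, padding=1)(x)
    x = torch.nn.Conv2d(out_channels, out_channels, 3, stride=2, padding=1)(x)
    b, c, t, f = x.size()
    return t, c * f                                                   # subsampled T, flattened dim

t, dim = infer_conv_output_dim(1, 80, 256)
print(t, dim)  # 200 frames -> 50 frames; 256 channels * 20 freq bins = 5120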
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import copy import logging from typing import Dict, List, Optional, Tuple import numpy as np import torch import torch.nn as nn from torch import Tensor from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.speech_to_text.hub_interface import S2THubInterface from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.models.wav2vec import Wav2VecEncoder from fairseq.modules.layer_norm import LayerNorm logger = logging.getLogger(__name__) class Conv1dAdaptor(nn.Module): def __init__( self, in_dim, out_dim, n_layers=3, kernel_size=3, stride=2, layerdrop=0.0, layernorm=False, proj=False, ): super().__init__() self.proj, self.proj_ln = None, None self.post_proj, self.post_proj_ln = None, None if proj: self.proj = nn.Sequential( nn.Linear(in_dim, in_dim * 4), nn.ReLU(), nn.Linear(in_dim * 4, in_dim) ) self.proj_ln = LayerNorm(in_dim) self.post_proj = nn.Sequential( nn.Linear(out_dim, out_dim * 4), nn.ReLU(), nn.Linear(out_dim * 4, out_dim), ) self.post_proj_ln = LayerNorm(out_dim) self.layers = nn.ModuleList( nn.Conv1d( in_dim if i == 0 else out_dim, out_dim * 2, kernel_size, stride=stride, padding=kernel_size // 2, ) for i in range(n_layers) ) self.stride = stride self.layerdrop = layerdrop self.layernorm = LayerNorm(in_dim) if layernorm else None @classmethod def add_args(cls, parser): parser.add_argument("--adaptor-n-layers", type=int) parser.add_argument("--adaptor-kernel-size", type=int) parser.add_argument("--adaptor-stride", type=int) parser.add_argument("--adaptor-layerdrop", type=float) parser.add_argument("--adaptor-layernorm", action="store_true") parser.add_argument("--adaptor-proj", action="store_true") def forward(self, x, padding_mask: Optional[torch.Tensor]): if self.layernorm is not None: x = self.layernorm(x) if self.proj is not None: x = x + 0.5 * self.proj(x) x = self.proj_ln(x) # T x B x C -> B x C x T x = x.transpose(0, 1).transpose(1, 2) out_lens = None if padding_mask is not None: out_lens = (~padding_mask).sum(1).float() for layer in self.layers: layerdrop_prob = np.random.random() if not self.training or (layerdrop_prob > self.layerdrop): x = nn.functional.glu(layer(x), dim=1) if padding_mask is not None: out_lens = ((out_lens - 1) / self.stride + 1).floor() # B x C x T -> T x B x C x = x.transpose(1, 2).transpose(0, 1) if self.post_proj is not None: x = x + 0.5 * self.post_proj(x) x = self.post_proj_ln(x) out_padding_mask = None if padding_mask is not None: out_padding_mask = lengths_to_padding_mask(out_lens.long()) return x, out_padding_mask def add_wav2vec_asr_args(parser): parser.add_argument("--w2v-path", help="path to wav2vec 2.0 model") parser.add_argument( "--no-pretrained-weights", action="store_true", help="if true, does not load pretrained weights", ) parser.add_argument( "--dropout-input", type=float, metavar="D", help="dropout to apply to the input (after feat extr)", ) parser.add_argument( "--final-dropout", type=float, metavar="D", help="dropout after transformer and before final projection", ) parser.add_argument( "--apply-mask", action="store_true", help="apply masking during fine-tuning" ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout 
probability inside wav2vec 2.0 model", ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights inside wav2vec 2.0 model", ) parser.add_argument( "--activation-dropout", "--relu-dropout", type=float, metavar="D", help="dropout probability after activation in FFN inside wav2vec 2.0 model", ) parser.add_argument( "--mask-length", type=int, help="repeat the mask indices multiple times" ) parser.add_argument( "--mask-prob", type=float, help="probability of replacing a token with mask" ) parser.add_argument( "--mask-selection", type=str, choices=["static", "uniform", "normal", "poisson"], help="how to choose masks", ) parser.add_argument( "--mask-other", type=float, help="stdev of the mask length in case of 'normal' selection strategy", ) parser.add_argument( "--no-mask-overlap", action="store_true", help="whether to allow masks to overlap", ) parser.add_argument( "--mask-channel-length", type=int, help="repeat the mask indices multiple times" ) parser.add_argument( "--mask-channel-prob", type=float, help="probability of replacing a token with mask", ) parser.add_argument( "--mask-channel-selection", type=str, choices=["static", "uniform", "normal", "poisson"], help="how to choose masks", ) parser.add_argument( "--mask-channel-other", type=float, help="stdev of the mask length in case of 'normal' selection strategy", ) parser.add_argument( "--no-mask-channel-overlap", action="store_true", help="whether to allow masks to overlap", ) parser.add_argument( "--freeze-finetune-updates", default=0, type=int, help="dont finetune wav2vec for this many updates", ) parser.add_argument( "--feature-grad-mult", default=None, type=float, help="reset feature grad mult in wav2vec 2.0 to this", ) parser.add_argument( "--layerdrop", default=0.0, type=float, help="probability of dropping a layer in wav2vec 2.0", ) parser.add_argument("--w2v-args", default=None) def need_finetuning(ft_params, param_name): if ft_params == "all": return True ft_params_list = ft_params.split(",") for ft_param in ft_params_list: if ft_param in param_name: return True return False class Wav2VecEncoderWithAdaptor(FairseqEncoder): def build_adaptor(self, args): adaptor = None if args.adaptor_n_layers > 0: adaptor = Conv1dAdaptor( args.decoder_embed_dim, args.decoder_embed_dim, n_layers=args.adaptor_n_layers, kernel_size=args.adaptor_kernel_size, stride=args.adaptor_stride, layerdrop=args.adaptor_layerdrop, layernorm=args.adaptor_layernorm, proj=args.adaptor_proj, ) return adaptor def __init__(self, args): super().__init__(None) self.w2v_encoder = Wav2VecEncoder(args) self.is_v0_arch = not args.adaptor_proj self.w2v_proj_ln = None if not self.is_v0_arch and self.w2v_encoder.proj is not None: self.w2v_proj_ln = LayerNorm(args.decoder_embed_dim) self.adaptor = self.build_adaptor(args) self.num_updates = 0 self.freezing_updates = args.w2v_freezing_updates self.finetuning_params = args.finetune_w2v_params for k, p in self.w2v_encoder.w2v_model.named_parameters(): p.requires_grad = need_finetuning(self.finetuning_params, k) @classmethod def add_args(cls, parser): add_wav2vec_asr_args(parser) parser.add_argument( "--normalize", action="store_true", help="if set, normalizes input to have 0 mean and unit variance", ) parser.add_argument( "--finetune-w2v-params", type=str, metavar="STR", help="comma-separated param strings to finetune.", ) parser.add_argument("--w2v-freezing-updates", type=int) parser.add_argument("--load-pretrained-encoder-from", type=str, metavar="STR") 
Conv1dAdaptor.add_args(parser) def set_num_updates(self, num_updates): super().set_num_updates(num_updates) self.num_updates = num_updates def forward(self, src_tokens, src_lengths=None, **kwargs): if ( self.freezing_updates is not None and self.num_updates > self.freezing_updates ): for p in self.w2v_encoder.w2v_model.parameters(): p.requires_grad = True padding_mask = lengths_to_padding_mask(src_lengths) out = self.w2v_encoder.forward(src_tokens, padding_mask, tbc=True) x, padding_mask = out["encoder_out"], out["padding_mask"] if self.w2v_proj_ln is not None: x = self.w2v_proj_ln(x) if self.adaptor is not None: x, padding_mask = self.adaptor(x, padding_mask) return { "encoder_out": [x], # T x B x C "encoder_padding_mask": [] if padding_mask is None else [padding_mask], # B x T "encoder_embedding": [], # B x T x C "encoder_states": [], # List[T x B x C] "src_tokens": [], "src_lengths": [], } def reorder_encoder_out(self, encoder_out, new_order): new_encoder_out = ( [] if len(encoder_out["encoder_out"]) == 0 else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]] ) new_encoder_padding_mask = ( [] if len(encoder_out["encoder_padding_mask"]) == 0 else [ x.index_select(0, new_order) for x in encoder_out["encoder_padding_mask"] ] ) new_encoder_embedding = ( [] if len(encoder_out["encoder_embedding"]) == 0 else [ x.index_select(0, new_order) for x in encoder_out["encoder_embedding"] ] ) encoder_states = encoder_out["encoder_states"] if len(encoder_states) > 0: for idx, state in enumerate(encoder_states): encoder_states[idx] = state.index_select(1, new_order) return { "encoder_out": new_encoder_out, # T x B x C "encoder_padding_mask": new_encoder_padding_mask, # B x T "encoder_embedding": new_encoder_embedding, # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], # B x T "src_lengths": [], # B x 1 } def add_decoder_args(parser): parser.add_argument( "--activation-fn", type=str, default="relu", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--decoder-dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--decoder-attention-dropout", type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--decoder-activation-dropout", type=float, metavar="D", help="dropout probability after activation in FFN.", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension" ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads", ) parser.add_argument( "--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--layernorm-embedding", action="store_true", help="add layernorm to embedding" ) parser.add_argument("--decoder-layerdrop", type=float, metavar="D") parser.add_argument("--decoder-learned-pos", action="store_true") parser.add_argument("--share-decoder-input-output-embed", action="store_true") parser.add_argument( "--no-scale-embedding", action="store_true", help="if True, dont scale embeddings", ) parser.add_argument( "--load-pretrained-decoder-from", type=str, metavar="STR", help="model to take decoder weights from (for initialization)", ) parser.add_argument( 
"--finetune-decoder-params", type=str, metavar="STR", help="comma-separated param strings to finetune.", ) @register_model("xm_transformer") class XMTransformerModel(FairseqEncoderDecoderModel): @classmethod def hub_models(cls): base_url = "http://dl.fbaipublicfiles.com/fairseq/s2t" model_ids = [ "xm_transformer_600m-es_en-multi_domain", "xm_transformer_600m-ru_en-multi_domain", "xm_transformer_600m-fr_en-multi_domain", "xm_transformer_600m-en_es-multi_domain", "xm_transformer_600m-en_ru-multi_domain", "xm_transformer_600m-en_fr-multi_domain", "xm_transformer_600m-en_zh-multi_domain", "xm_transformer_600m-en_ar-multi_domain", "xm_transformer_600m-en_tr-multi_domain", "xm_transformer_600m-en_vi-multi_domain", "xm_transformer-21_en-xls_r_300m", "xm_transformer-en_15-xls_r_300m", "xm_transformer-21_en-xls_r_1b", "xm_transformer-en_15-xls_r_1b", "xm_transformer-21_en-xls_r_2b", "xm_transformer-en_15-xls_r_2b", "xm_transformer-22_16-xls_r_2b", ] return {i: f"{base_url}/{i}.tar.gz" for i in model_ids} @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", config_yaml="config.yaml", **kwargs, ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), config_yaml=config_yaml, **kwargs, ) return S2THubInterface(x["args"], x["task"], x["models"][0]) def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def add_args(cls, parser): """Add model-specific arguments to the parser.""" Wav2VecEncoderWithAdaptor.add_args(parser) add_decoder_args(parser) parser.add_argument("--checkpoint-activations", action="store_true") parser.add_argument("--offload-activations", action="store_true") parser.add_argument("--min-params-to-wrap", type=int) @classmethod def maybe_load_pretrained(cls, component, checkpoint: Optional[str] = None): if checkpoint is None: return component _load = checkpoint_utils.load_pretrained_component_from_model try: return _load(component, checkpoint) except RuntimeError as e: logger.warning(e) return _load(component, checkpoint, strict=False) @classmethod def build_encoder(cls, args): _args = copy.deepcopy(args) if not args.adaptor_proj: # V0 arch state = checkpoint_utils.load_checkpoint_to_cpu(args.w2v_path) if state.get("cfg") is not None: encoder_embed_dim = state["cfg"]._content["model"]["encoder_embed_dim"] elif state.get("args") is not None: encoder_embed_dim = state["args"].encoder_embed_dim else: raise ValueError(f"Invalid config in {args.w2v_path}") _args.decoder_embed_dim = encoder_embed_dim del state encoder = Wav2VecEncoderWithAdaptor(_args) return cls.maybe_load_pretrained( encoder, getattr(args, "load_pretrained_encoder_from", None) ) @classmethod def build_decoder(cls, args, task, embed_tokens): _args = copy.deepcopy(args) if args.adaptor_proj: # not V0 arch _args.encoder_embed_dim = _args.decoder_embed_dim _args.dropout = args.decoder_dropout _args.attention_dropout = args.decoder_attention_dropout _args.activation_dropout = args.decoder_activation_dropout _args.max_target_positions = 1024 decoder = TransformerDecoder(_args, task.target_dictionary, embed_tokens) decoder = cls.maybe_load_pretrained( decoder, getattr(args, "load_pretrained_decoder_from", None) ) for k, p in decoder.named_parameters(): p.requires_grad = need_finetuning(args.finetune_decoder_params, k) return decoder @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in 
older models base_architecture(args) def build_embedding(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() return Embedding(num_embeddings, embed_dim, padding_idx) decoder_embed_tokens = build_embedding( task.target_dictionary, args.decoder_embed_dim ) encoder = cls.build_encoder(args) decoder = cls.build_decoder(args, task, decoder_embed_tokens) return cls(encoder, decoder) def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): return self.get_normalized_probs_scriptable(net_output, log_probs, sample) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): """ The forward method inherited from the base class has a **kwargs argument in its input, which is not supported in torchscript. This method overwrites the forward method definition without **kwargs. """ encoder_out = self.encoder( src_tokens=src_tokens, src_lengths=src_lengths, **kwargs ) decoder_out = self.decoder( prev_output_tokens=prev_output_tokens, encoder_out=encoder_out ) return decoder_out def upgrade_state_dict(self, state_dict): for k, _ in state_dict.items(): if "adaptor.layers" in state_dict: print(k) new = k.replace("adaptor.layers", "adaptor_layers") state_dict[new] = state_dict[k] del state_dict[k] def set_default_w2v_encoder_args(args): args.no_pretrained_weights = getattr(args, "no_pretrained_weights", False) args.dropout_input = getattr(args, "dropout_input", 0) args.final_dropout = getattr(args, "final_dropout", 0) args.apply_mask = getattr(args, "apply_mask", False) args.dropout = getattr(args, "dropout", 0) args.attention_dropout = getattr(args, "attention_dropout", 0) args.activation_dropout = getattr(args, "activation_dropout", 0) args.mask_length = getattr(args, "mask_length", 10) args.mask_prob = getattr(args, "mask_prob", 0.5) args.mask_selection = getattr(args, "mask_selection", "static") args.mask_other = getattr(args, "mask_other", 0) args.no_mask_overlap = getattr(args, "no_mask_overlap", False) args.mask_channel_length = getattr(args, "mask_channel_length", 10) args.mask_channel_prob = getattr(args, "mask_channel_prob", 0.5) args.mask_channel_before = getattr(args, "mask_channel_before", False) args.mask_channel_selection = getattr(args, "mask_channel_selection", "static") args.mask_channel_other = getattr(args, "mask_channel_other", 0) args.no_mask_channel_overlap = getattr(args, "no_mask_channel_overlap", False) args.freeze_finetune_updates = getattr(args, "freeze_finetune_updates", 0) args.feature_grad_mult = 0.1 args.layerdrop = getattr(args, "layerdrop", 0.0) args.normalize = getattr(args, "normalize", False) args.finetune_w2v_params = getattr(args, "finetune_w2v_params", "all") args.w2v_freezing_updates = getattr(args, "w2v_freezing_updates", None) def set_default_adaptor_args(args): args.adaptor_n_layers = getattr(args, "adaptor_n_layers", 3) args.adaptor_kernel_size = getattr(args, "adaptor_kernel_size", 3) args.adaptor_stride = getattr(args, "adaptor_stride", 2) args.adaptor_layerdrop = getattr(args, "adaptor_layerdrop", 0.0) args.adaptor_layernorm = getattr(args, "adaptor_layernorm", False) args.adaptor_proj = getattr(args, "adaptor_proj", False) def set_default_transformer_decoder_args(args): args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4 * 1024) args.decoder_layers = 
getattr(args, "decoder_layers", 12) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.adaptive_input = getattr(args, "adaptive_input", False) args.decoder_attention_dropout = getattr(args, "decoder_attention_dropout", 0.0) args.decoder_activation_dropout = getattr(args, "decoder_activation_dropout", 0.0) args.decoder_dropout = getattr(args, "decoder_dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) args.activation_fn = getattr(args, "activation_fn", "gelu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.pooler_dropout = getattr(args, "pooler_dropout", 0.0) args.finetune_decoder_params = getattr(args, "finetune_decoder_params", "all") def set_default_general_args(args): args.checkpoint_activations = getattr(args, "checkpoint_activations", False) args.offload_activations = getattr(args, "offload_activations", False) args.min_params_to_wrap = getattr(args, "min_params_to_wrap", int(1e8)) @register_model_architecture(model_name="xm_transformer", arch_name="xm_transformer") def base_architecture(args): set_default_general_args(args) set_default_w2v_encoder_args(args) set_default_adaptor_args(args) set_default_transformer_decoder_args(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/speech_to_text/xm_transformer.py
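A small standalone copy (for illustration only) of the need_finetuning() logic above, which matches comma-separated substrings against parameter names to decide which wav2vec or decoder weights remain trainable. The parameter names used here are made up.

def need_finetuning(ft_params, param_name):
    if ft_params == "all":
        return True
    return any(p in param_name for p in ft_params.split(","))

names = ["encoder.layers.0.self_attn.k_proj.weight", "layer_norm.weight"]
print([need_finetuning("layer_norm,self_attn", n) for n in names])  # [True, True]
print([need_finetuning("feature_extractor", n) for n in names])     # [False, False]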
import logging import torch from fairseq.models.speech_to_text.s2t_transformer import ( S2TTransformerEncoder, S2TTransformerModel, Conv1dSubsampler, base_architecture as transformer_base_architecture, ) from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.modules.conformer_layer import ConformerEncoderLayer from fairseq.models import FairseqEncoder, register_model_architecture, register_model from fairseq.modules import PositionalEmbedding, RelPositionalEncoding import math logger = logging.getLogger(__name__) class S2TConformerEncoder(FairseqEncoder): """Conformer Encoder for speech translation based on https://arxiv.org/abs/2005.08100""" def __init__(self, args): super().__init__(None) self.embed_scale = math.sqrt(args.encoder_embed_dim) if args.no_scale_embedding: self.embed_scale = 1.0 self.padding_idx = 1 self.subsample = Conv1dSubsampler( args.input_feat_per_channel * args.input_channels, args.conv_channels, args.encoder_embed_dim, [int(k) for k in args.conv_kernel_sizes.split(",")], ) self.pos_enc_type = args.pos_enc_type if self.pos_enc_type == "rel_pos": self.embed_positions = RelPositionalEncoding( args.max_source_positions, args.encoder_embed_dim ) elif self.pos_enc_type == "rope": self.embed_positions = None else: # Use absolute positional embedding self.pos_enc_type = "abs" self.embed_positions = PositionalEmbedding( args.max_source_positions, args.encoder_embed_dim, self.padding_idx ) self.linear = torch.nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim) self.dropout = torch.nn.Dropout(args.dropout) self.conformer_layers = torch.nn.ModuleList( [ ConformerEncoderLayer( embed_dim=args.encoder_embed_dim, ffn_embed_dim=args.encoder_ffn_embed_dim, attention_heads=args.encoder_attention_heads, dropout=args.dropout, depthwise_conv_kernel_size=args.depthwise_conv_kernel_size, attn_type=args.attn_type, pos_enc_type=self.pos_enc_type, use_fp16=args.fp16, ) for _ in range(args.encoder_layers) ] ) def forward(self, src_tokens, src_lengths, return_all_hiddens=False): """ Args: src_tokens: Input source tokens Tensor of shape B X T X C src_lengths: Lengths Tensor corresponding to input source tokens return_all_hiddens: If true will append the self attention states to the encoder states Returns: encoder_out: Tensor of shape B X T X C encoder_padding_mask: Optional Tensor with mask encoder_embedding: Optional Tensor. Always empty here encoder_states: List of Optional Tensors wih self attention states src_tokens: Optional Tensor. Always empty here src_lengths: Optional Tensor. Always empty here """ x, input_lengths = self.subsample(src_tokens, src_lengths) # returns T X B X C encoder_padding_mask = lengths_to_padding_mask(input_lengths) x = self.embed_scale * x if self.pos_enc_type == "rel_pos": positions = self.embed_positions(x) elif self.pos_enc_type == "rope": positions = None else: positions = self.embed_positions(encoder_padding_mask).transpose(0, 1) x += positions positions = None x = self.linear(x) x = self.dropout(x) encoder_states = [] # x is T X B X C for layer in self.conformer_layers: x, _ = layer(x, encoder_padding_mask, positions) if return_all_hiddens: encoder_states.append(x) return { "encoder_out": [x], # T x B x C "encoder_padding_mask": [encoder_padding_mask] if encoder_padding_mask.any() else [], # B x T "encoder_embedding": [], # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], "src_lengths": [], } def reorder_encoder_out(self, encoder_out, new_order): """Required method for a FairseqEncoder. 
Calls the method from the parent class""" return S2TTransformerEncoder.reorder_encoder_out(self, encoder_out, new_order) @register_model("s2t_conformer") class S2TConformerModel(S2TTransformerModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): S2TTransformerModel.add_args(parser) parser.add_argument("--input-feat-per-channel", default=80) parser.add_argument("--depthwise-conv-kernel-size", default=31) parser.add_argument("--input-channels", default=1) parser.add_argument( "--attn-type", default=None, help="If not specified uses fairseq MHA. Other valid option is espnet", ) parser.add_argument( "--pos-enc-type", default="abs", help="Must be specified in addition to attn-type=espnet for rel_pos and rope", ) @classmethod def build_encoder(cls, args): encoder = S2TConformerEncoder(args) return encoder @register_model_architecture("s2t_conformer", "s2t_conformer") def base_architecture(args): args.attn_type = getattr(args, "attn_type", None) args.pos_enc_type = getattr(args, "pos_enc_type", "abs") args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) args.input_channels = getattr(args, "input_channels", 1) args.max_source_positions = getattr(args, "max_source_positions", 6000) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.dropout = getattr(args, "dropout", 0.1) args.encoder_layers = getattr(args, "encoder_layers", 16) args.depthwise_conv_kernel_size = getattr(args, "depthwise_conv_kernel_size", 31) transformer_base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/speech_to_text/s2t_conformer.py
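A hedged re-implementation of lengths_to_padding_mask, which the conformer encoder imports from fairseq.data.data_utils to turn subsampled lengths into a (B, T) boolean mask with True on padding positions. This standalone version only illustrates the expected semantics and is not the library code.

import torch

def lengths_to_padding_mask(lens):
    bsz, max_len = lens.size(0), int(lens.max())
    positions = torch.arange(max_len, device=lens.device).expand(bsz, max_len)
    return positions >= lens.unsqueeze(1)   # True marks padding frames

print(lengths_to_padding_mask(torch.tensor([3, 1])))
# tensor([[False, False, False],
#         [False,  True,  True]])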
#!/usr/bin/env python3 from ast import literal_eval from typing import List, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) @register_model("s2t_berard") class BerardModel(FairseqEncoderDecoderModel): """Implementation of a model similar to https://arxiv.org/abs/1802.04200 Paper title: End-to-End Automatic Speech Translation of Audiobooks An implementation is available in tensorflow at https://github.com/eske/seq2seq Relevant files in this implementation are the config (https://github.com/eske/seq2seq/blob/master/config/LibriSpeech/AST.yaml) and the model code (https://github.com/eske/seq2seq/blob/master/translate/models.py). The encoder and decoder try to be close to the original implementation. The attention is an MLP as in Bahdanau et al. (https://arxiv.org/abs/1409.0473). There is no state initialization by averaging the encoder outputs. """ def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): parser.add_argument( "--input-layers", type=str, metavar="EXPR", help="List of linear layer dimensions. These " "layers are applied to the input features and " "are followed by tanh and possibly dropout.", ) parser.add_argument( "--dropout", type=float, metavar="D", help="Dropout probability to use in the encoder/decoder. " "Note that this parameters control dropout in various places, " "there is no fine-grained control for dropout for embeddings " "vs LSTM layers for example.", ) parser.add_argument( "--in-channels", type=int, metavar="N", help="Number of encoder input channels. " "Typically value is 1.", ) parser.add_argument( "--conv-layers", type=str, metavar="EXPR", help="List of conv layers " "(format: (channels, kernel, stride)).", ) parser.add_argument( "--num-blstm-layers", type=int, metavar="N", help="Number of encoder bi-LSTM layers.", ) parser.add_argument( "--lstm-size", type=int, metavar="N", help="LSTM hidden size." 
) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="Embedding dimension of the decoder target tokens.", ) parser.add_argument( "--decoder-hidden-dim", type=int, metavar="N", help="Decoder LSTM hidden dimension.", ) parser.add_argument( "--decoder-num-layers", type=int, metavar="N", help="Number of decoder LSTM layers.", ) parser.add_argument( "--attention-dim", type=int, metavar="N", help="Hidden layer dimension in MLP attention.", ) parser.add_argument( "--output-layer-dim", type=int, metavar="N", help="Hidden layer dim for linear layer prior to output projection.", ) parser.add_argument( "--load-pretrained-encoder-from", type=str, metavar="STR", help="model to take encoder weights from (for initialization)", ) parser.add_argument( "--load-pretrained-decoder-from", type=str, metavar="STR", help="model to take decoder weights from (for initialization)", ) @classmethod def build_encoder(cls, args, task): encoder = BerardEncoder( input_layers=literal_eval(args.input_layers), conv_layers=literal_eval(args.conv_layers), in_channels=args.input_channels, input_feat_per_channel=args.input_feat_per_channel, num_blstm_layers=args.num_blstm_layers, lstm_size=args.lstm_size, dropout=args.dropout, ) if getattr(args, "load_pretrained_encoder_from", None): encoder = checkpoint_utils.load_pretrained_component_from_model( component=encoder, checkpoint=args.load_pretrained_encoder_from ) return encoder @classmethod def build_decoder(cls, args, task): decoder = LSTMDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, num_layers=args.decoder_num_layers, hidden_size=args.decoder_hidden_dim, dropout=args.dropout, encoder_output_dim=2 * args.lstm_size, # bidirectional attention_dim=args.attention_dim, output_layer_dim=args.output_layer_dim, ) if getattr(args, "load_pretrained_decoder_from", None): decoder = checkpoint_utils.load_pretrained_component_from_model( component=decoder, checkpoint=args.load_pretrained_decoder_from ) return decoder @classmethod def build_model(cls, args, task): """Build a new model instance.""" encoder = cls.build_encoder(args, task) decoder = cls.build_decoder(args, task) return cls(encoder, decoder) def get_normalized_probs(self, net_output, log_probs, sample=None): # net_output['encoder_out'] is a (B, T, D) tensor lprobs = super().get_normalized_probs(net_output, log_probs, sample) # lprobs is a (B, T, D) tensor lprobs.batch_first = True return lprobs class BerardEncoder(FairseqEncoder): def __init__( self, input_layers: List[int], conv_layers: List[Tuple[int]], in_channels: int, input_feat_per_channel: int, num_blstm_layers: int, lstm_size: int, dropout: float, ): """ Args: input_layers: list of linear layer dimensions. These layers are applied to the input features and are followed by tanh and possibly dropout. conv_layers: list of conv2d layer configurations. A configuration is a tuple (out_channels, conv_kernel_size, stride). in_channels: number of input channels. input_feat_per_channel: number of input features per channel. These are speech features, typically 40 or 80. num_blstm_layers: number of bidirectional LSTM layers. lstm_size: size of the LSTM hidden (and cell) size. dropout: dropout probability. Dropout can be applied after the linear layers and LSTM layers but not to the convolutional layers. 
""" super().__init__(None) self.input_layers = nn.ModuleList() in_features = input_feat_per_channel for out_features in input_layers: if dropout > 0: self.input_layers.append( nn.Sequential( nn.Linear(in_features, out_features), nn.Dropout(p=dropout) ) ) else: self.input_layers.append(nn.Linear(in_features, out_features)) in_features = out_features self.in_channels = in_channels self.input_dim = input_feat_per_channel self.conv_kernel_sizes_and_strides = [] self.conv_layers = nn.ModuleList() lstm_input_dim = input_layers[-1] for conv_layer in conv_layers: out_channels, conv_kernel_size, conv_stride = conv_layer self.conv_layers.append( nn.Conv2d( in_channels, out_channels, conv_kernel_size, stride=conv_stride, padding=conv_kernel_size // 2, ) ) self.conv_kernel_sizes_and_strides.append((conv_kernel_size, conv_stride)) in_channels = out_channels lstm_input_dim //= conv_stride lstm_input_dim *= conv_layers[-1][0] self.lstm_size = lstm_size self.num_blstm_layers = num_blstm_layers self.lstm = nn.LSTM( input_size=lstm_input_dim, hidden_size=lstm_size, num_layers=num_blstm_layers, dropout=dropout, bidirectional=True, ) self.output_dim = 2 * lstm_size # bidirectional if dropout > 0: self.dropout = nn.Dropout(p=dropout) else: self.dropout = None def forward(self, src_tokens, src_lengths=None, **kwargs): """ Args src_tokens: padded tensor (B, T, C * feat) src_lengths: tensor of original lengths of input utterances (B,) """ bsz, max_seq_len, _ = src_tokens.size() # (B, C, T, feat) x = ( src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim) .transpose(1, 2) .contiguous() ) for input_layer in self.input_layers: x = input_layer(x) x = torch.tanh(x) for conv_layer in self.conv_layers: x = conv_layer(x) bsz, _, output_seq_len, _ = x.size() # (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> # (T, B, C * feat) x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1) input_lengths = src_lengths.clone() for k, s in self.conv_kernel_sizes_and_strides: p = k // 2 input_lengths = (input_lengths.float() + 2 * p - k) / s + 1 input_lengths = input_lengths.floor().long() packed_x = nn.utils.rnn.pack_padded_sequence(x, input_lengths) h0 = x.new(2 * self.num_blstm_layers, bsz, self.lstm_size).zero_() c0 = x.new(2 * self.num_blstm_layers, bsz, self.lstm_size).zero_() packed_outs, _ = self.lstm(packed_x, (h0, c0)) # unpack outputs and apply dropout x, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_outs) if self.dropout is not None: x = self.dropout(x) encoder_padding_mask = ( lengths_to_padding_mask(output_lengths).to(src_tokens.device).t() ) return { "encoder_out": x, # (T, B, C) "encoder_padding_mask": encoder_padding_mask, # (T, B) } def reorder_encoder_out(self, encoder_out, new_order): encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( 1, new_order ) encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(1, new_order) return encoder_out class MLPAttention(nn.Module): """The original attention from Badhanau et al. (2014) https://arxiv.org/abs/1409.0473, based on a Multi-Layer Perceptron. 
The attention score between position i in the encoder and position j in the decoder is: alpha_ij = V_a * tanh(W_ae * enc_i + W_ad * dec_j + b_a) """ def __init__(self, decoder_hidden_state_dim, context_dim, attention_dim): super().__init__() self.context_dim = context_dim self.attention_dim = attention_dim # W_ae and b_a self.encoder_proj = nn.Linear(context_dim, self.attention_dim, bias=True) # W_ad self.decoder_proj = nn.Linear( decoder_hidden_state_dim, self.attention_dim, bias=False ) # V_a self.to_scores = nn.Linear(self.attention_dim, 1, bias=False) def forward(self, decoder_state, source_hids, encoder_padding_mask): """The expected input dimensions are: decoder_state: bsz x decoder_hidden_state_dim source_hids: src_len x bsz x context_dim encoder_padding_mask: src_len x bsz """ src_len, bsz, _ = source_hids.size() # (src_len*bsz) x context_dim (to feed through linear) flat_source_hids = source_hids.view(-1, self.context_dim) # (src_len*bsz) x attention_dim encoder_component = self.encoder_proj(flat_source_hids) # src_len x bsz x attention_dim encoder_component = encoder_component.view(src_len, bsz, self.attention_dim) # 1 x bsz x attention_dim decoder_component = self.decoder_proj(decoder_state).unsqueeze(0) # Sum with broadcasting and apply the non linearity # src_len x bsz x attention_dim hidden_att = torch.tanh( (decoder_component + encoder_component).view(-1, self.attention_dim) ) # Project onto the reals to get attentions scores (src_len x bsz) attn_scores = self.to_scores(hidden_att).view(src_len, bsz) # Mask + softmax (src_len x bsz) if encoder_padding_mask is not None: attn_scores = ( attn_scores.float() .masked_fill_(encoder_padding_mask, float("-inf")) .type_as(attn_scores) ) # FP16 support: cast to float and back # srclen x bsz normalized_masked_attn_scores = F.softmax(attn_scores, dim=0) # Sum weighted sources (bsz x context_dim) attn_weighted_context = ( source_hids * normalized_masked_attn_scores.unsqueeze(2) ).sum(dim=0) return attn_weighted_context, normalized_masked_attn_scores class LSTMDecoder(FairseqIncrementalDecoder): def __init__( self, dictionary, embed_dim, num_layers, hidden_size, dropout, encoder_output_dim, attention_dim, output_layer_dim, ): """ Args: dictionary: target text dictionary. embed_dim: embedding dimension for target tokens. num_layers: number of LSTM layers. hidden_size: hidden size for LSTM layers. dropout: dropout probability. Dropout can be applied to the embeddings, the LSTM layers, and the context vector. encoder_output_dim: encoder output dimension (hidden size of encoder LSTM). attention_dim: attention dimension for MLP attention. output_layer_dim: size of the linear layer prior to output projection. 
""" super().__init__(dictionary) self.num_layers = num_layers self.hidden_size = hidden_size num_embeddings = len(dictionary) padding_idx = dictionary.pad() self.embed_tokens = nn.Embedding(num_embeddings, embed_dim, padding_idx) if dropout > 0: self.dropout = nn.Dropout(p=dropout) else: self.dropout = None self.layers = nn.ModuleList() for layer_id in range(num_layers): input_size = embed_dim if layer_id == 0 else encoder_output_dim self.layers.append( nn.LSTMCell(input_size=input_size, hidden_size=hidden_size) ) self.context_dim = encoder_output_dim self.attention = MLPAttention( decoder_hidden_state_dim=hidden_size, context_dim=encoder_output_dim, attention_dim=attention_dim, ) self.deep_output_layer = nn.Linear( hidden_size + encoder_output_dim + embed_dim, output_layer_dim ) self.output_projection = nn.Linear(output_layer_dim, num_embeddings) def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs ): encoder_padding_mask = encoder_out["encoder_padding_mask"] encoder_outs = encoder_out["encoder_out"] if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] bsz, seqlen = prev_output_tokens.size() srclen = encoder_outs.size(0) # embed tokens embeddings = self.embed_tokens(prev_output_tokens) x = embeddings if self.dropout is not None: x = self.dropout(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # initialize previous states (or get from cache during incremental # generation) cached_state = utils.get_incremental_state( self, incremental_state, "cached_state" ) if cached_state is not None: prev_hiddens, prev_cells = cached_state else: prev_hiddens = [encoder_out["encoder_out"].mean(dim=0)] * self.num_layers prev_cells = [x.new_zeros(bsz, self.hidden_size)] * self.num_layers attn_scores = x.new_zeros(bsz, srclen) attention_outs = [] outs = [] for j in range(seqlen): input = x[j, :, :] attention_out = None for i, layer in enumerate(self.layers): # the previous state is one layer below except for the bottom # layer where the previous state is the state emitted by the # top layer hidden, cell = layer( input, ( prev_hiddens[(i - 1) % self.num_layers], prev_cells[(i - 1) % self.num_layers], ), ) if self.dropout is not None: hidden = self.dropout(hidden) prev_hiddens[i] = hidden prev_cells[i] = cell if attention_out is None: attention_out, attn_scores = self.attention( hidden, encoder_outs, encoder_padding_mask ) if self.dropout is not None: attention_out = self.dropout(attention_out) attention_outs.append(attention_out) input = attention_out # collect the output of the top layer outs.append(hidden) # cache previous states (no-op except during incremental generation) utils.set_incremental_state( self, incremental_state, "cached_state", (prev_hiddens, prev_cells) ) # collect outputs across time steps x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size) attention_outs_concat = torch.cat(attention_outs, dim=0).view( seqlen, bsz, self.context_dim ) # T x B x C -> B x T x C x = x.transpose(0, 1) attention_outs_concat = attention_outs_concat.transpose(0, 1) # concat LSTM output, attention output and embedding # before output projection x = torch.cat((x, attention_outs_concat, embeddings), dim=2) x = self.deep_output_layer(x) x = torch.tanh(x) if self.dropout is not None: x = self.dropout(x) # project back to size of vocabulary x = self.output_projection(x) # to return the full attn_scores tensor, we need to fix the decoder # to account for subsampling input frames # return x, attn_scores return x, None def 
reorder_incremental_state(self, incremental_state, new_order): super().reorder_incremental_state(incremental_state, new_order) cached_state = utils.get_incremental_state( self, incremental_state, "cached_state" ) if cached_state is None: return def reorder_state(state): if isinstance(state, list): return [reorder_state(state_i) for state_i in state] return state.index_select(0, new_order) new_state = tuple(map(reorder_state, cached_state)) utils.set_incremental_state(self, incremental_state, "cached_state", new_state) @register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard") def berard(args): """The original version: "End-to-End Automatic Speech Translation of Audiobooks" (https://arxiv.org/abs/1802.04200) """ args.input_layers = getattr(args, "input_layers", "[256, 128]") args.conv_layers = getattr(args, "conv_layers", "[(16, 3, 2), (16, 3, 2)]") args.num_blstm_layers = getattr(args, "num_blstm_layers", 3) args.lstm_size = getattr(args, "lstm_size", 256) args.dropout = getattr(args, "dropout", 0.2) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128) args.decoder_num_layers = getattr(args, "decoder_num_layers", 2) args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 512) args.attention_dim = getattr(args, "attention_dim", 512) args.output_layer_dim = getattr(args, "output_layer_dim", 128) args.load_pretrained_encoder_from = getattr( args, "load_pretrained_encoder_from", None ) args.load_pretrained_decoder_from = getattr( args, "load_pretrained_decoder_from", None ) @register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_256_3_3") def berard_256_3_3(args): """Used in * "Harnessing Indirect Training Data for End-to-End Automatic Speech Translation: Tricks of the Trade" (https://arxiv.org/abs/1909.06515) * "CoVoST: A Diverse Multilingual Speech-To-Text Translation Corpus" (https://arxiv.org/pdf/2002.01320.pdf) * "Self-Supervised Representations Improve End-to-End Speech Translation" (https://arxiv.org/abs/2006.12124) """ args.decoder_num_layers = getattr(args, "decoder_num_layers", 3) berard(args) @register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_512_3_2") def berard_512_3_2(args): args.num_blstm_layers = getattr(args, "num_blstm_layers", 3) args.lstm_size = getattr(args, "lstm_size", 512) args.dropout = getattr(args, "dropout", 0.3) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_num_layers = getattr(args, "decoder_num_layers", 2) args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 1024) args.attention_dim = getattr(args, "attention_dim", 512) args.output_layer_dim = getattr(args, "output_layer_dim", 256) berard(args) @register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_512_5_3") def berard_512_5_3(args): args.num_blstm_layers = getattr(args, "num_blstm_layers", 5) args.lstm_size = getattr(args, "lstm_size", 512) args.dropout = getattr(args, "dropout", 0.3) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_num_layers = getattr(args, "decoder_num_layers", 3) args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 1024) args.attention_dim = getattr(args, "attention_dim", 512) args.output_layer_dim = getattr(args, "output_layer_dim", 256) berard(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/speech_to_text/berard.py
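A hedged check of the length bookkeeping in BerardEncoder above: after each Conv2d with kernel k, stride s, and padding k // 2, valid lengths shrink as floor((L + 2*(k//2) - k) / s) + 1. The snippet verifies that formula against an actual convolution; the specific sizes are arbitrary examples.

import torch

def conv_out_len(length, k, s):
    p = k // 2
    return (length + 2 * p - k) // s + 1

k, s, length = 3, 2, 57
x = torch.randn(1, 1, length, 40)                       # (B, C, T, feat)
y = torch.nn.Conv2d(1, 16, k, stride=s, padding=k // 2)(x)
print(y.size(2), conv_out_len(length, k, s))            # both 29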
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .berard import * # noqa from .convtransformer import * # noqa from .s2t_transformer import * # noqa from .xm_transformer import * # noqa from .s2t_conformer import * # noqa
KosmosX-API-main
kosmosX/fairseq/fairseq/models/speech_to_text/__init__.py
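Importing this package is what runs the `@register_model` / `@register_model_architecture` decorators in the modules above, making the architectures visible through fairseq's model registries. A hedged sketch, assuming the usual `ARCH_MODEL_REGISTRY` name exported by `fairseq.models` (exact keys depend on the fairseq version):

# Sketch, assuming fairseq's standard registry names; not code from this repository.
import fairseq.models.speech_to_text  # noqa: F401  # side effect: runs the register_* decorators
from fairseq.models import ARCH_MODEL_REGISTRY

# After the import, a registered architecture such as "s2t_transformer_s" should
# resolve to its model class.
print("s2t_transformer_s" in ARCH_MODEL_REGISTRY)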
#!/usr/bin/env python3 import logging import math from pathlib import Path from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn from torch import Tensor from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.speech_to_text.hub_interface import S2THubInterface from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.modules import ( FairseqDropout, LayerNorm, PositionalEmbedding, TransformerEncoderLayer, ) logger = logging.getLogger(__name__) class Conv1dSubsampler(nn.Module): """Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation via gated linear units (https://arxiv.org/abs/1911.08460) Args: in_channels (int): the number of input channels mid_channels (int): the number of intermediate channels out_channels (int): the number of output channels kernel_sizes (List[int]): the kernel size for each convolutional layer """ def __init__( self, in_channels: int, mid_channels: int, out_channels: int, kernel_sizes: List[int] = (3, 3), ): super(Conv1dSubsampler, self).__init__() self.n_layers = len(kernel_sizes) self.conv_layers = nn.ModuleList( nn.Conv1d( in_channels if i == 0 else mid_channels // 2, mid_channels if i < self.n_layers - 1 else out_channels * 2, k, stride=2, padding=k // 2, ) for i, k in enumerate(kernel_sizes) ) def get_out_seq_lens_tensor(self, in_seq_lens_tensor): out = in_seq_lens_tensor.clone() for _ in range(self.n_layers): out = ((out.float() - 1) / 2 + 1).floor().long() return out def forward(self, src_tokens, src_lengths): bsz, in_seq_len, _ = src_tokens.size() # B x T x (C x D) x = src_tokens.transpose(1, 2).contiguous() # -> B x (C x D) x T for conv in self.conv_layers: x = conv(x) x = nn.functional.glu(x, dim=1) _, _, out_seq_len = x.size() x = x.transpose(1, 2).transpose(0, 1).contiguous() # -> T x B x (C x D) return x, self.get_out_seq_lens_tensor(src_lengths) @register_model("s2t_transformer") class S2TTransformerModel(FairseqEncoderDecoderModel): """Adapted Transformer model (https://arxiv.org/abs/1706.03762) for speech-to-text tasks. The Transformer encoder/decoder remains the same. 
A trainable input subsampler is prepended to the Transformer encoder to project inputs into the encoder dimension as well as downsample input sequence for computational efficiency.""" @classmethod def hub_models(cls): base_url = "http://dl.fbaipublicfiles.com/fairseq/s2t" model_ids = [ "s2t_transformer_s-en-asr-librispeech", "s2t_transformer_m-en-asr-librispeech", "s2t_transformer_l-en-asr-librispeech", ] return {i: f"{base_url}/{i}.tar.gz" for i in model_ids} @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", config_yaml="config.yaml", **kwargs, ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), config_yaml=config_yaml, **kwargs, ) return S2THubInterface(x["args"], x["task"], x["models"][0]) def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # input parser.add_argument( "--conv-kernel-sizes", type=str, metavar="N", help="kernel sizes of Conv1d subsampling layers", ) parser.add_argument( "--conv-channels", type=int, metavar="N", help="# of channels in Conv1d subsampling layers", ) # Transformer parser.add_argument( "--activation-fn", type=str, default="relu", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--activation-dropout", "--relu-dropout", type=float, metavar="D", help="dropout probability after activation in FFN.", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="num encoder layers" ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads", ) parser.add_argument( "--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads", ) parser.add_argument( "--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--share-decoder-input-output-embed", action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--layernorm-embedding", action="store_true", help="add layernorm to embedding", ) parser.add_argument( "--no-scale-embedding", action="store_true", help="if True, dont scale embeddings", ) parser.add_argument( "--load-pretrained-encoder-from", type=str, metavar="STR", help="model to take encoder weights from (for initialization)", ) parser.add_argument( "--encoder-freezing-updates", type=int, metavar="N", help="freeze encoder for first N updates", ) @classmethod def build_encoder(cls, args): encoder = 
S2TTransformerEncoder(args) pretraining_path = getattr(args, "load_pretrained_encoder_from", None) if pretraining_path is not None: if not Path(pretraining_path).exists(): logger.warning( f"skipped pretraining because {pretraining_path} does not exist" ) else: encoder = checkpoint_utils.load_pretrained_component_from_model( component=encoder, checkpoint=pretraining_path ) logger.info(f"loaded pretrained encoder from: {pretraining_path}") return encoder @classmethod def build_decoder(cls, args, task, embed_tokens): return TransformerDecoderScriptable(args, task.target_dictionary, embed_tokens) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) def build_embedding(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() return Embedding(num_embeddings, embed_dim, padding_idx) decoder_embed_tokens = build_embedding( task.target_dictionary, args.decoder_embed_dim ) encoder = cls.build_encoder(args) decoder = cls.build_decoder(args, task, decoder_embed_tokens) return cls(encoder, decoder) def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): # net_output['encoder_out'] is a (B, T, D) tensor lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample) lprobs.batch_first = True return lprobs def forward(self, src_tokens, src_lengths, prev_output_tokens): """ The forward method inherited from the base class has a **kwargs argument in its input, which is not supported in torchscript. This method overwrites the forward method definition without **kwargs. """ encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths) decoder_out = self.decoder( prev_output_tokens=prev_output_tokens, encoder_out=encoder_out ) return decoder_out class S2TTransformerEncoder(FairseqEncoder): """Speech-to-text Transformer encoder that consists of input subsampler and Transformer encoder.""" def __init__(self, args): super().__init__(None) self.encoder_freezing_updates = args.encoder_freezing_updates self.num_updates = 0 self.dropout_module = FairseqDropout( p=args.dropout, module_name=self.__class__.__name__ ) self.embed_scale = math.sqrt(args.encoder_embed_dim) if args.no_scale_embedding: self.embed_scale = 1.0 self.padding_idx = 1 self.subsample = Conv1dSubsampler( args.input_feat_per_channel * args.input_channels, args.conv_channels, args.encoder_embed_dim, [int(k) for k in args.conv_kernel_sizes.split(",")], ) self.embed_positions = PositionalEmbedding( args.max_source_positions, args.encoder_embed_dim, self.padding_idx ) self.transformer_layers = nn.ModuleList( [TransformerEncoderLayer(args) for _ in range(args.encoder_layers)] ) if args.encoder_normalize_before: self.layer_norm = LayerNorm(args.encoder_embed_dim) else: self.layer_norm = None def _forward(self, src_tokens, src_lengths, return_all_hiddens=False): x, input_lengths = self.subsample(src_tokens, src_lengths) x = self.embed_scale * x encoder_padding_mask = lengths_to_padding_mask(input_lengths) positions = self.embed_positions(encoder_padding_mask).transpose(0, 1) x += positions x = self.dropout_module(x) encoder_states = [] for layer in self.transformer_layers: x = layer(x, encoder_padding_mask) if return_all_hiddens: encoder_states.append(x) if self.layer_norm is not None: x = self.layer_norm(x) return { "encoder_out": [x], # T x B x C "encoder_padding_mask": 
[encoder_padding_mask] if encoder_padding_mask.any() else [], # B x T "encoder_embedding": [], # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], "src_lengths": [], } def forward(self, src_tokens, src_lengths, return_all_hiddens=False): if self.num_updates < self.encoder_freezing_updates: with torch.no_grad(): x = self._forward( src_tokens, src_lengths, return_all_hiddens=return_all_hiddens ) else: x = self._forward( src_tokens, src_lengths, return_all_hiddens=return_all_hiddens ) return x def reorder_encoder_out(self, encoder_out, new_order): new_encoder_out = ( [] if len(encoder_out["encoder_out"]) == 0 else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]] ) new_encoder_padding_mask = ( [] if len(encoder_out["encoder_padding_mask"]) == 0 else [ x.index_select(0, new_order) for x in encoder_out["encoder_padding_mask"] ] ) new_encoder_embedding = ( [] if len(encoder_out["encoder_embedding"]) == 0 else [ x.index_select(0, new_order) for x in encoder_out["encoder_embedding"] ] ) encoder_states = encoder_out["encoder_states"] if len(encoder_states) > 0: for idx, state in enumerate(encoder_states): encoder_states[idx] = state.index_select(1, new_order) return { "encoder_out": new_encoder_out, # T x B x C "encoder_padding_mask": new_encoder_padding_mask, # B x T "encoder_embedding": new_encoder_embedding, # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], # B x T "src_lengths": [], # B x 1 } def set_num_updates(self, num_updates): super().set_num_updates(num_updates) self.num_updates = num_updates class TransformerDecoderScriptable(TransformerDecoder): def extract_features( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): # call scriptable method from parent class x, _ = self.extract_features_scriptable( prev_output_tokens, encoder_out, incremental_state, full_context_alignment, alignment_layer, alignment_heads, ) return x, None @register_model_architecture(model_name="s2t_transformer", arch_name="s2t_transformer") def base_architecture(args): args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0) # Convolutional subsampler args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5") args.conv_channels = getattr(args, "conv_channels", 1024) # Transformer args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", args.dropout) args.activation_dropout = getattr(args, "activation_dropout", args.dropout) args.activation_fn 
= getattr(args, "activation_fn", "relu") args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) @register_model_architecture("s2t_transformer", "s2t_transformer_s") def s2t_transformer_s(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.dropout = getattr(args, "dropout", 0.1) base_architecture(args) @register_model_architecture("s2t_transformer", "s2t_transformer_xs") def s2t_transformer_xs(args): args.encoder_layers = getattr(args, "encoder_layers", 6) args.decoder_layers = getattr(args, "decoder_layers", 3) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4) args.dropout = getattr(args, "dropout", 0.3) s2t_transformer_s(args) @register_model_architecture("s2t_transformer", "s2t_transformer_sp") def s2t_transformer_sp(args): args.encoder_layers = getattr(args, "encoder_layers", 16) s2t_transformer_s(args) @register_model_architecture("s2t_transformer", "s2t_transformer_m") def s2t_transformer_m(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.dropout = getattr(args, "dropout", 0.15) base_architecture(args) @register_model_architecture("s2t_transformer", "s2t_transformer_mp") def s2t_transformer_mp(args): args.encoder_layers = getattr(args, "encoder_layers", 16) s2t_transformer_m(args) @register_model_architecture("s2t_transformer", "s2t_transformer_l") def s2t_transformer_l(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.2) base_architecture(args) @register_model_architecture("s2t_transformer", "s2t_transformer_lp") def s2t_transformer_lp(args): args.encoder_layers = getattr(args, "encoder_layers", 16) s2t_transformer_l(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/speech_to_text/s2t_transformer.py
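A small standalone sketch of the length bookkeeping done by `Conv1dSubsampler.get_out_seq_lens_tensor` above: each stride-2 convolution with `padding=k//2` roughly halves the time dimension, so two layers reduce the sequence length to about T/4. The input lengths below are illustrative assumptions, not values from any config.

# Illustrative sketch of the subsampler's length arithmetic; inputs are assumptions.
import torch


def subsampled_lengths(lengths: torch.Tensor, n_layers: int = 2) -> torch.Tensor:
    # Mirrors get_out_seq_lens_tensor: one halving per stride-2 conv layer.
    out = lengths.clone()
    for _ in range(n_layers):
        out = ((out.float() - 1) / 2 + 1).floor().long()
    return out


src_lengths = torch.tensor([100, 57, 8])
print(subsampled_lengths(src_lengths))  # tensor([25, 15, 2]) after two stride-2 layers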
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import logging from collections.abc import Iterable from itertools import repeat from typing import List, Optional, Tuple import torch from torch import Tensor # ------------------------------------------------------------------------------ # assert_equal() # ------------------------------------------------------------------------------ def assert_equal(value1, value2, name1=None, name2=None): """Asserts two values are equal otherwise raise an error.""" str_name1 = "" if name1 is None else "{} ".format(name1) str_name2 = "" if name2 is None else "{} ".format(name2) if value1 != value2: str_value1 = "{}" if name1 is None else "({})" str_value1 = str_value1.format(value1) str_value2 = "{}" if name2 is None else "({})" str_value2 = str_value2.format(value2) raise ValueError( "Expected {}{} == {}{}".format(str_name1, str_value1, str_name2, str_value2) ) def fill_config(config, key, value): if value is not None: if key not in config or config[key] is None: config[key] = value assert_equal(value, config[key], "value", f'config["{key}"]') # ------------------------------------------------------------------------------ # check_and_return_expected() # ------------------------------------------------------------------------------ def check_and_return_expected(value, undefined_value, expected_value, name=None): """ Return the expected value while checking if the given value is undefined or equal to the expected value. """ if (undefined_value is None and value is None) or (undefined_value == value): return expected_value if value != expected_value: str_name = "" if name is None else "{} ".format(name) str_value = "{}" if name is None else "({})" str_value = str_value.format(value) raise ValueError( "Expected {}{} == {}".format(str_name, str_value, expected_value) ) return expected_value # ------------------------------------------------------------------------------ # get_time_axis() # ------------------------------------------------------------------------------ def get_time_axis(layout): """ Extract the time axis from the layout, for example for breaking sequence into segments. """ if layout in ["TB", "TBD"]: return 0 if layout in ["BT", "BTD"]: return 1 if layout in ["BCTD"]: return 2 raise ValueError("Unsupported layout = {}".format(layout)) # ------------------------------------------------------------------------------ # get_batch_axis() # ------------------------------------------------------------------------------ def get_batch_axis(layout): """ Extract the batch axis from the layout """ if layout in ["TB", "TBD"]: return 1 if layout in ["BT", "BTD", "BCTD"]: return 0 raise ValueError("Unsupported layout = {}".format(layout)) # ------------------------------------------------------------------------------ # monotonically_increasing_and_bounded() # ------------------------------------------------------------------------------ def monotonically_increasing_and_bounded(iterable, min=None, max=None): """ Check if the elements in the given iterable are monotonically increasing and bounded by upper/lower bounds. 
""" if not isinstance(iterable, Iterable): raise TypeError( "Expected iterable to be of type Iterable, got ({})".format( iterable.__class__.__name__ ) ) for i in range(len(iterable)): if min is not None and iterable[i] < min: return False if max is not None and iterable[i] > max: return False if i > 0 and iterable[i] <= iterable[i - 1]: return False return True # ------------------------------------------------------------------------------ # to_pair() # ------------------------------------------------------------------------------ def to_pair(value, name): """Make a pair (of type tuple) of given value.""" if isinstance(value, Iterable): if len(value) != 2: raise ValueError( "Expected `{}` to have exactly 2 elements, got: ({})".format( name, value ) ) return value return tuple(repeat(value, 2)) # ------------------------------------------------------------------------------ # infer_conv_output_attrs() # ------------------------------------------------------------------------------ # TODO(cfyeh): figure out if we can get `output_dim` without calling the module. def infer_conv_output_attrs( module, input_channels, input_dim, batch_size=1, max_length=8 ): """Get output attributes of a module with input.""" input = torch.randn(batch_size, input_channels, max_length, input_dim) output = module(input) output_channels = output.shape[1] output_dim = output.shape[-1] return output_channels, output_dim # ------------------------------------------------------------------------------ # NoOp # ------------------------------------------------------------------------------ class NoOp(torch.nn.Module): """ NoOp simply passes the input as the output. """ def __init__(self): super().__init__() def forward(self, input: Tensor) -> Tensor: return input # ------------------------------------------------------------------------------ # Permute: a torch.nn.Module applies permutation on the input tensor. # ------------------------------------------------------------------------------ class Permute(torch.nn.Module): def __init__(self, dims): super().__init__() self.dims = dims def forward(self, input: Tensor) -> Tensor: return input.permute(self.dims).contiguous() # ------------------------------------------------------------------------------ # lengths_to_padding_mask() # ------------------------------------------------------------------------------ def lengths_to_padding_mask(lengths: Tensor) -> Tensor: """Convert lengths of shape (B, ) to padding mask.""" batch_size = lengths.shape[0] max_length = int(torch.max(lengths).item()) padding_mask = torch.arange( # [0, ..., T-1] max_length, device=lengths.device, dtype=lengths.dtype ).expand(batch_size, max_length) >= lengths.unsqueeze(1) return padding_mask # ------------------------------------------------------------------------------ # lengths_to_attention_mask() # ------------------------------------------------------------------------------ def lengths_to_attention_mask( lengths: Tensor, left_context: Optional[int] = None, right_context: Optional[int] = None, ) -> Optional[Tensor]: """ Generate attention mask based on (lengths, left_context, right_context). left_context is None means unlimited left context. right_context is None means unlimited right context. 
""" if left_context is None and right_context is None: return None max_length = int(torch.max(lengths).item()) # For example, with `max_length` == 5, # indices = tensor([ # [ 0, 1, 2, 3, 4, 5], # [-1, 0, 1, 2, 3, 4], # [-2, -1, 0, 1, 2, 3], # [-3, -2, -1, 0, 1, 2], # [-4, -3, -2, -1, 0, 1], # [-5, -4, -3, -2, -1, 0], # ]) # In some cases the second torch.arange is created on cpu which causes a # failure. Adding the device option to guard against it. indices = torch.arange( max_length, device=lengths.device, dtype=lengths.dtype ).expand(max_length, max_length) - torch.arange( max_length, device=lengths.device ).view( max_length, -1 ) # For example, with `max_length` == 5, # bool_mask = tensor([ # [True, True, True, True, True], # [True, True, True, True, True], # [True, True, True, True, True], # [True, True, True, True, True], # [True, True, True, True, True], # ]) bool_mask = ( torch.tensor([True]).to(device=lengths.device).expand(max_length, max_length) ) # For example, with `max_length` == 5, left_context == 2 # left_mask = tensor([ # [ True, True, True, True, True], # [ True, True, True, True, True], # [ True, True, True, True, True], # [False, True, True, True, True], # [False, False, True, True, True], # ]) if left_context is not None: left_mask = indices >= -left_context bool_mask = bool_mask & left_mask # For example, with `max_length` == 5, right_context == 1 # right_mask = tensor([ # [True, True, False, False, False], # [True, True, True, False, False], # [True, True, True, True, False], # [True, True, True, True, True], # [True, True, True, True, True], # ]) if right_context is not None: right_mask = indices <= right_context bool_mask = bool_mask & right_mask bool_mask = (~bool_mask).to(device=lengths.device) return bool_mask # ------------------------------------------------------------------------------ # infer_output_norm() # ------------------------------------------------------------------------------ def infer_output_norm(module, output_norm=None): """ Infer the output norm (string and module) needed on the module gvien desired output normalization. """ if output_norm == module.output_norm(): # output_norm already matches module.output_norm(). 
return (None, NoOp()) if output_norm is None and module.output_norm() is not None: logger = logging.getLogger("infer_output_norm()") logger.warning( "trying to set output_norm ({}) ".format(output_norm) + "but got module.output_norm() ({}), ".format(module.output_norm()) + "the combined output_norm() will be ({})".format(module.output_norm()) ) return (None, NoOp()) if output_norm == "log_softmax": if module.output_norm() is not None: raise ValueError( "incompatible output_norm ({}) ".format(output_norm) + "and module.output_norm() ({})".format(module.output_norm()) ) else: return ("log_softmax", torch.nn.LogSoftmax(dim=-1)) if output_norm == "softmax": if module.output_norm() is not None: raise ValueError( "incompatible output_norm ({}) ".format(output_norm) + "and module.output_norm() ({})".format(module.output_norm()) ) else: return ("softmax", torch.nn.Softmax(dim=-1)) raise ValueError( "output_norm ({}) not in ".format(output_norm) + "supported list = [None, softmax, log_softmax]" ) # ------------------------------------------------------------------------------ # infer_channels_from_layout() # ------------------------------------------------------------------------------ def infer_channels_from_layout(layout, channels): """Extract the number of channels from the layout.""" if layout in ("TBD", "BTD"): if channels is not None and channels != 1: raise ValueError( "Expected channels ({}) to be 1 for layout = {}".format( channels, layout ) ) if channels is None: return 1 return channels # ------------------------------------------------------------------------------ # pad_sequence() # ------------------------------------------------------------------------------ @torch.jit.export def pad_sequence( sequence: Tensor, time_axis: int, extra_left_context: int = 0, extra_right_context: int = 0, ) -> Tensor: """Pad extra left/right contexts to the sequence.""" if extra_left_context == 0 and extra_right_context == 0: return sequence tensors_to_concat = [] if extra_left_context: size = (extra_left_context,) fill_value = 0 indices = torch.full( size=size, fill_value=fill_value, dtype=torch.long, device=sequence.device, ) left_padding = torch.index_select(sequence, time_axis, indices) tensors_to_concat.append(left_padding) tensors_to_concat.append(sequence) # NOTE(cfyeh): for efficiency reason we pad 0 instead of the last frame for # extra right contexts. 
if extra_right_context: size = list(sequence.shape) size[time_axis] = extra_right_context right_padding = torch.zeros(size, dtype=sequence.dtype, device=sequence.device) tensors_to_concat.append(right_padding) padded_sequence = torch.cat(tensors_to_concat, dim=time_axis) return padded_sequence # ------------------------------------------------------------------------------ # sequence_to_segments() # ------------------------------------------------------------------------------ @torch.jit.export def sequence_to_segments( sequence: Tensor, time_axis: int, lengths: Tensor, segment_size: Optional[int] = None, extra_left_context: int = 0, extra_right_context: int = 0, ) -> List[Tuple[Tensor, Tensor]]: """Breaks sequence into segments.""" sequence = pad_sequence( sequence=sequence, time_axis=time_axis, extra_left_context=extra_left_context, extra_right_context=extra_right_context, ) lengths = lengths + extra_left_context + extra_right_context segments: List[Tuple[Tensor, Tensor]] = [] if segment_size is None: segments.append((sequence, lengths)) return segments offset = 0 end = sequence.shape[time_axis] step = segment_size size = extra_left_context + segment_size + extra_right_context while offset + extra_left_context + extra_right_context < end: clamped_size = min(size, end - offset) segment_lengths = torch.clamp(lengths - offset, min=0, max=clamped_size) indices = torch.arange( start=offset, end=(offset + clamped_size), step=1, dtype=torch.long, device=sequence.device, ) segment_tensor = torch.index_select(sequence, time_axis, indices) segments.append((segment_tensor, segment_lengths)) offset = offset + step return segments # ------------------------------------------------------------------------------ # segments_to_sequence() # ------------------------------------------------------------------------------ @torch.jit.export def segments_to_sequence( segments: List[Tuple[Tensor, Tensor]], time_axis: int ) -> Tuple[Tensor, Tensor]: """Concatenate segments into a full sequence.""" if len(segments) == 1: return segments[0] tensors_to_concat: List[Tensor] = [] lengths_to_stack: List[Tensor] = [] for tensor, lengths in segments: tensors_to_concat.append(tensor) lengths_to_stack.append(lengths) sequence = torch.cat(tensors_to_concat, dim=time_axis) lengths = torch.stack(lengths_to_stack, dim=0) lengths = torch.sum(lengths, dim=0) return sequence, lengths def lengths_to_encoder_padding_mask(lengths, batch_first: bool = False): """ convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor Args: lengths: a (B, )-shaped tensor batch_first: whether to return a (B, T) tensor Return: max_length: maximum length of B sequences encoder_padding_mask: a (max_length, B) binary mask, where [t, b] = False for t < lengths[b] and True otherwise TODO: kernelize this function if benchmarking shows this function is slow """ max_lengths = torch.max(lengths).item() bsz = lengths.size(0) encoder_padding_mask = torch.arange( max_lengths ).to( # a (T, ) tensor with [0, ..., T-1] lengths.device ).view( # move to the right device 1, max_lengths ).expand( # reshape to (1, T)-shaped tensor bsz, -1 ) > lengths.view( # expand to (B, T)-shaped tensor bsz, 1 ).expand( -1, max_lengths ) if not batch_first: return encoder_padding_mask.t(), max_lengths else: return encoder_padding_mask, max_lengths # ------------------------------------------------------------------------------ # attention suppression # ------------------------------------------------------------------------------ def attention_suppression(attention_weights: 
Tensor, scale: float): # B, H, qlen, klen -> B, H, qlen, 1 attention_prob = torch.nn.functional.softmax(attention_weights.float(), dim=-1) attention_nozeros = attention_prob.to(torch.bool) nozeros_sum = torch.sum(attention_nozeros.to(torch.float), dim=-1, keepdim=True) # For very sparse situation, we need get round about 0s key_sum = torch.sum(attention_prob, dim=-1, keepdim=True) # nozeros_sum should > 1 key_mean = key_sum / (nozeros_sum + 1e-8) # std calculation dis = (attention_prob - key_mean) * (attention_prob - key_mean) # if attention_prob[i] < threshold, then dis_masked[i] = 0; for all i dis_masked = torch.where( attention_nozeros, dis, attention_prob.new_zeros(attention_prob.size()) ) key_var = torch.sum(dis_masked, dim=-1, keepdim=True) key_var = key_var / (nozeros_sum - 1.0 + 1e-8) key_std = torch.sqrt(key_var) key_thread = key_mean - scale * key_std # if attention_prob[i] >= key_thread, then attention_prob[i] # , otherwise "-inf" inf_tensor = attention_prob.new_zeros(attention_prob.size()).detach() inf_tensor[:] = float("-inf") attention_weights_float = torch.where( attention_prob < key_thread, inf_tensor, attention_weights.float(), ) return attention_weights_float.type_as(attention_weights) def layer_norm_backward_hook(module, grad_input, grad_output, clamp_value): return tuple(torch.clamp(v, min=-clamp_value, max=clamp_value) for v in grad_input)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/speech_to_text/utils.py
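A short hedged demonstration of the mask helpers defined in utils.py above, assuming the module is importable under its path as `fairseq.models.speech_to_text.utils`; the lengths and context sizes are made-up toy values.

# Toy demonstration of lengths_to_padding_mask / lengths_to_attention_mask above.
import torch
from fairseq.models.speech_to_text.utils import (
    lengths_to_attention_mask,
    lengths_to_padding_mask,
)

lengths = torch.tensor([4, 2])

# True marks padded positions beyond each sequence length.
print(lengths_to_padding_mask(lengths))
# tensor([[False, False, False, False],
#         [False, False,  True,  True]])

# With left_context=1 and right_context=0, position t may attend to t-1 and t only;
# True marks positions that are masked out of attention.
print(lengths_to_attention_mask(lengths, left_context=1, right_context=0))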