# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np

from dataclasses import dataclass

from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer


@dataclass
class MeteorScorerConfig(FairseqDataclass):
    pass


@register_scorer("meteor", dataclass=MeteorScorerConfig)
class MeteorScorer(BaseScorer):
    def __init__(self, args):
        super(MeteorScorer, self).__init__(args)
        try:
            import nltk
        except ImportError:
            raise ImportError("Please install nltk to use METEOR scorer")

        self.nltk = nltk
        self.scores = []

    def add_string(self, ref, pred):
        self.ref.append(ref)
        self.pred.append(pred)

    def score(self, order=4):
        self.scores = [
            self.nltk.translate.meteor_score.single_meteor_score(r, p)
            for r, p in zip(self.ref, self.pred)
        ]
        return np.mean(self.scores)

    def result_string(self, order=4):
        return f"METEOR: {self.score():.4f}"
EXA-1-master
exa/libraries/fairseq/fairseq/scoring/meteor.py
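A minimal usage sketch for the scorer above, not part of the original file. It assumes nltk (with its WordNet data) is installed and that the installed nltk version accepts raw strings for single_meteor_score; newer nltk releases expect pre-tokenized input, in which case the strings would need to be split into token lists first. The empty Namespace is only a stand-in for the argparse namespace fairseq normally supplies.

from argparse import Namespace

from fairseq.scoring.meteor import MeteorScorer

scorer = MeteorScorer(Namespace())  # placeholder args namespace
scorer.add_string("the cat sat on the mat", "the cat is on the mat")  # (reference, hypothesis)
scorer.add_string("hello world", "hello there world")
print(scorer.result_string())  # corpus-average METEOR, e.g. "METEOR: 0.7xxx" (value depends on nltk version)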
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from dataclasses import dataclass

from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer


@dataclass
class ChrFScorerConfig(FairseqDataclass):
    pass


@register_scorer("chrf", dataclass=ChrFScorerConfig)
class ChrFScorer(BaseScorer):
    def __init__(self, args):
        super(ChrFScorer, self).__init__(args)
        import sacrebleu

        self.sacrebleu = sacrebleu

    def add_string(self, ref, pred):
        self.ref.append(ref)
        self.pred.append(pred)

    def score(self, order=4):
        if order != 4:
            raise NotImplementedError
        # corpus_chrf returns a CHRFScore object; read its numeric .score directly
        # (result_string() returns a formatted string, which has no .score attribute).
        return self.sacrebleu.corpus_chrf(self.pred, [self.ref]).score

    def result_string(self, order=4):
        if order != 4:
            raise NotImplementedError
        return self.sacrebleu.corpus_chrf(self.pred, [self.ref]).format()
EXA-1-master
exa/libraries/fairseq/fairseq/scoring/chrf.py
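For reference, a small standalone sketch (not part of the original file) of the sacrebleu call that ChrFScorer wraps, mirroring the single-reference [self.ref] layout used above; it assumes sacrebleu is installed.

import sacrebleu

hyps = ["the cat is on the mat", "hello there world"]
refs = ["the cat sat on the mat", "hello world"]

chrf = sacrebleu.corpus_chrf(hyps, [refs])  # one reference stream covering all hypotheses
print(chrf.score)     # numeric chrF value, what score() returns
print(chrf.format())  # formatted string, what result_string() returns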
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import sys from dataclasses import _MISSING_TYPE, dataclass, field from typing import Any, List, Optional import torch from omegaconf import II, MISSING from fairseq.dataclass.constants import ( DATASET_IMPL_CHOICES, DDP_BACKEND_CHOICES, DDP_COMM_HOOK_CHOICES, GENERATION_CONSTRAINTS_CHOICES, GENERATION_DECODING_FORMAT_CHOICES, LOG_FORMAT_CHOICES, PIPELINE_CHECKPOINT_CHOICES, PRINT_ALIGNMENT_CHOICES, ZERO_SHARDING_CHOICES, ) @dataclass class FairseqDataclass: """fairseq base dataclass that supported fetching attributes and metas""" _name: Optional[str] = None @staticmethod def name(): return None def _get_all_attributes(self) -> List[str]: return [k for k in self.__dataclass_fields__.keys()] def _get_meta( self, attribute_name: str, meta: str, default: Optional[Any] = None ) -> Any: return self.__dataclass_fields__[attribute_name].metadata.get(meta, default) def _get_name(self, attribute_name: str) -> str: return self.__dataclass_fields__[attribute_name].name def _get_default(self, attribute_name: str) -> Any: if hasattr(self, attribute_name): if str(getattr(self, attribute_name)).startswith("${"): return str(getattr(self, attribute_name)) elif str(self.__dataclass_fields__[attribute_name].default).startswith( "${" ): return str(self.__dataclass_fields__[attribute_name].default) elif ( getattr(self, attribute_name) != self.__dataclass_fields__[attribute_name].default ): return getattr(self, attribute_name) f = self.__dataclass_fields__[attribute_name] if not isinstance(f.default_factory, _MISSING_TYPE): return f.default_factory() return f.default def _get_type(self, attribute_name: str) -> Any: return self.__dataclass_fields__[attribute_name].type def _get_help(self, attribute_name: str) -> Any: return self._get_meta(attribute_name, "help") def _get_argparse_const(self, attribute_name: str) -> Any: return self._get_meta(attribute_name, "argparse_const") def _get_argparse_alias(self, attribute_name: str) -> Any: return self._get_meta(attribute_name, "argparse_alias") def _get_choices(self, attribute_name: str) -> Any: return self._get_meta(attribute_name, "choices") @classmethod def from_namespace(cls, args): if isinstance(args, cls): return args else: config = cls() for k in config.__dataclass_fields__.keys(): if k.startswith("_"): # private member, skip continue if hasattr(args, k): setattr(config, k, getattr(args, k)) return config @dataclass class CommonConfig(FairseqDataclass): # This is the core dataclass including common parameters shared by all different jobs. Please append your params to other dataclasses if they were # used for a particular purpose or task, such as those dedicated for `distributed training`, `optimization`, etc. no_progress_bar: bool = field( default=False, metadata={"help": "disable progress bar"} ) log_interval: int = field( default=100, metadata={ "help": "log progress every N batches (when progress bar is disabled)" }, ) log_format: Optional[LOG_FORMAT_CHOICES] = field( default=None, metadata={"help": "log format to use"} ) log_file: Optional[str] = field( default=None, metadata={"help": "log file to copy metrics to."} ) aim_repo: Optional[str] = field( default=None, metadata={"help": "path to Aim repository"}, ) aim_run_hash: Optional[str] = field( default=None, metadata={ "help": "Aim run hash. 
If skipped, creates or continues run " "based on save_dir" }, ) tensorboard_logdir: Optional[str] = field( default=None, metadata={ "help": "path to save logs for tensorboard, should match --logdir " "of running tensorboard (default: no tensorboard logging)" }, ) wandb_project: Optional[str] = field( default=None, metadata={"help": "Weights and Biases project name to use for logging"}, ) azureml_logging: Optional[bool] = field( default=False, metadata={"help": "Log scalars to AzureML context"}, ) seed: int = field( default=1, metadata={"help": "pseudo random number generator seed"} ) cpu: bool = field(default=False, metadata={"help": "use CPU instead of CUDA"}) tpu: bool = field(default=False, metadata={"help": "use TPU instead of CUDA"}) bf16: bool = field(default=False, metadata={"help": "use bfloat16; implies --tpu"}) memory_efficient_bf16: bool = field( default=False, metadata={ "help": "use a memory-efficient version of BF16 training; implies --bf16" }, ) fp16: bool = field(default=False, metadata={"help": "use FP16"}) memory_efficient_fp16: bool = field( default=False, metadata={ "help": "use a memory-efficient version of FP16 training; implies --fp16" }, ) fp16_no_flatten_grads: bool = field( default=False, metadata={"help": "don't flatten FP16 grads tensor"} ) fp16_init_scale: int = field( default=2**7, metadata={"help": "default FP16 loss scale"} ) fp16_scale_window: Optional[int] = field( default=None, metadata={"help": "number of updates before increasing loss scale"}, ) fp16_scale_tolerance: float = field( default=0.0, metadata={ "help": "pct of updates that can overflow before decreasing the loss scale" }, ) on_cpu_convert_precision: bool = field( default=False, metadata={ "help": "if set, the floating point conversion to fp16/bf16 runs on CPU. " "This reduces bus transfer time and GPU memory usage." 
}, ) min_loss_scale: float = field( default=1e-4, metadata={ "help": "minimum FP16/AMP loss scale, after which training is stopped" }, ) threshold_loss_scale: Optional[float] = field( default=None, metadata={"help": "threshold FP16 loss scale from below"} ) amp: bool = field(default=False, metadata={"help": "use automatic mixed precision"}) amp_batch_retries: int = field( default=2, metadata={ "help": "number of retries of same batch after reducing loss scale with AMP" }, ) amp_init_scale: int = field( default=2**7, metadata={"help": "default AMP loss scale"} ) amp_scale_window: Optional[int] = field( default=None, metadata={"help": "number of updates before increasing AMP loss scale"}, ) user_dir: Optional[str] = field( default=None, metadata={ "help": "path to a python module containing custom extensions (tasks and/or architectures)" }, ) empty_cache_freq: int = field( default=0, metadata={"help": "how often to clear the PyTorch CUDA cache (0 to disable)"}, ) all_gather_list_size: int = field( default=16384, metadata={"help": "number of bytes reserved for gathering stats from workers"}, ) model_parallel_size: int = field( default=1, metadata={"help": "total number of GPUs to parallelize model over"} ) quantization_config_path: Optional[str] = field( default=None, metadata={"help": "path to quantization config file"} ) profile: bool = field( default=False, metadata={"help": "enable autograd profiler emit_nvtx"} ) reset_logging: bool = field( default=False, metadata={ "help": "when using Hydra, reset the logging at the beginning of training" }, ) suppress_crashes: bool = field( default=False, metadata={ "help": "suppress crashes when training with the hydra_train entry point so that the " "main method can return a value (useful for sweeps)" }, ) use_plasma_view: bool = field( default=False, metadata={"help": "Store indices and sizes in shared memory"} ) plasma_path: Optional[str] = field( default="/tmp/plasma", metadata={ "help": "path to run plasma_store, defaults to /tmp/plasma. Paths outside /tmp tend to fail." 
}, ) @dataclass class DistributedTrainingConfig(FairseqDataclass): distributed_world_size: int = field( default=max(1, torch.cuda.device_count()), metadata={ "help": "total number of GPUs across all nodes (default: all visible GPUs)" }, ) distributed_num_procs: Optional[int] = field( default=max(1, torch.cuda.device_count()), metadata={ "help": "total number of processes to fork (default: all visible GPUs)" }, ) distributed_rank: Optional[int] = field( default=0, metadata={"help": "rank of the current worker"} ) distributed_backend: str = field( default="nccl", metadata={"help": "distributed backend"} ) distributed_init_method: Optional[str] = field( default=None, metadata={ "help": "typically tcp://hostname:port that will be used to " "establish initial connetion" }, ) distributed_port: int = field( default=-1, metadata={ "help": "port number (not required if using --distributed-init-method)" }, ) device_id: int = field( default=os.getenv("LOCAL_RANK", 0), metadata={ "help": "which GPU to use (by default looks for $LOCAL_RANK, usually configured automatically)", "argparse_alias": "--local_rank", }, ) distributed_no_spawn: bool = field( default=False, metadata={ "help": "do not spawn multiple processes even if multiple GPUs are visible" }, ) ddp_backend: DDP_BACKEND_CHOICES = field( default="pytorch_ddp", metadata={"help": "DistributedDataParallel backend"} ) ddp_comm_hook: DDP_COMM_HOOK_CHOICES = field( default="none", metadata={"help": "communication hook"} ) bucket_cap_mb: int = field( default=25, metadata={"help": "bucket size for reduction"} ) fix_batches_to_gpus: bool = field( default=False, metadata={ "help": "don't shuffle batches between GPUs; this reduces overall " "randomness and may affect precision but avoids the cost of re-reading the data" }, ) find_unused_parameters: bool = field( default=False, metadata={ "help": "disable unused parameter detection (not applicable to " "--ddp-backend=legacy_ddp)" }, ) gradient_as_bucket_view: bool = field( default=False, metadata={ "help": "when set to True, gradients will be views pointing to different offsets of allreduce communication buckets. This can reduce peak memory usage, where the saved memory size will be equal to the total gradients size. " "--gradient-as-bucket-view=gradient_as_bucket_view)" }, ) fast_stat_sync: bool = field( default=False, metadata={"help": "[deprecated] this is now defined per Criterion"}, ) heartbeat_timeout: int = field( default=-1, metadata={ "help": "kill the job if no progress is made in N seconds; " "set to -1 to disable" }, ) broadcast_buffers: bool = field( default=False, metadata={ "help": "Copy non-trainable parameters between GPUs, such as " "batchnorm population statistics" }, ) slowmo_momentum: Optional[float] = field( default=None, metadata={ "help": "SlowMo momentum term; by default use 0.0 for 16 GPUs, " "0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs" }, ) slowmo_base_algorithm: str = field( default="localsgd", metadata={ "help": "Base algorithm. Either 'localsgd' or 'sgp'. Please refer " "to the documentation of 'slowmo_base_algorithm' parameter in " "https://fairscale.readthedocs.io/en/latest/api/experimental/nn/slowmo_ddp.html " "for more details" }, ) localsgd_frequency: int = field( default=3, metadata={"help": "Local SGD allreduce frequency"} ) nprocs_per_node: int = field( default=max(1, torch.cuda.device_count()), metadata={ "help": "number of GPUs in each node. An allreduce operation across GPUs in " "a node is very fast. 
Hence, we do allreduce across GPUs in a node, " "and gossip across different nodes" }, ) pipeline_model_parallel: bool = field( default=False, metadata={"help": "if set, use pipeline model parallelism across GPUs"}, ) pipeline_balance: Optional[str] = field( default=None, metadata={ "help": "partition the model into N_K pieces, where each piece " "contains N_i layers. The sum(args.pipeline_balance) " "should equal the total number of layers in the model" }, ) pipeline_devices: Optional[str] = field( default=None, metadata={ "help": "a list of device indices indicating which device to place " "each of the N_K partitions. The length of this list should " "equal the length of the --pipeline-balance argument" }, ) pipeline_chunks: Optional[int] = field( default=0, metadata={"help": "microbatch count for pipeline model parallelism"} ) pipeline_encoder_balance: Optional[str] = field( default=None, metadata={ "help": "partition the pipeline parallel encoder into N_K pieces, where each piece " "contains N_i layers. The sum(args.pipeline_encoder_balance) " "should equal the total number of encoder layers in the model" }, ) pipeline_encoder_devices: Optional[str] = field( default=None, metadata={ "help": "a list of device indices indicating which device to place " "each of the N_K partitions. The length of this list should " "equal the length of the --pipeline-encoder-balance argument" }, ) pipeline_decoder_balance: Optional[str] = field( default=None, metadata={ "help": "partition the pipeline parallel decoder into N_K pieces, where each piece " "contains N_i layers. The sum(args.pipeline_decoder_balance) " "should equal the total number of decoder layers in the model" }, ) pipeline_decoder_devices: Optional[str] = field( default=None, metadata={ "help": "a list of device indices indicating which device to place " "each of the N_K partitions. 
The length of this list should " "equal the length of the --pipeline-decoder-balance argument" }, ) pipeline_checkpoint: PIPELINE_CHECKPOINT_CHOICES = field( default="never", metadata={"help": "checkpointing mode for pipeline model parallelism"}, ) zero_sharding: ZERO_SHARDING_CHOICES = field( default="none", metadata={"help": "ZeRO sharding"} ) fp16: bool = II("common.fp16") memory_efficient_fp16: bool = II("common.memory_efficient_fp16") tpu: bool = II("common.tpu") # configuration for --ddp-backend=fully_sharded no_reshard_after_forward: bool = field( default=False, metadata={"help": "don't reshard parameters after forward pass"}, ) fp32_reduce_scatter: bool = field( default=False, metadata={"help": "reduce-scatter grads in FP32"}, ) cpu_offload: bool = field( default=False, metadata={"help": "offload FP32 params to CPU"} ) use_sharded_state: bool = field( default=False, metadata={"help": "use sharded checkpoint files"}, ) not_fsdp_flatten_parameters: bool = field( default=False, metadata={"help": "not flatten parameter param for fsdp"}, ) @dataclass class DatasetConfig(FairseqDataclass): num_workers: int = field( default=1, metadata={"help": "how many subprocesses to use for data loading"} ) skip_invalid_size_inputs_valid_test: bool = field( default=False, metadata={"help": "ignore too long or too short lines in valid and test set"}, ) max_tokens: Optional[int] = field( default=None, metadata={"help": "maximum number of tokens in a batch"} ) batch_size: Optional[int] = field( default=None, metadata={ "help": "number of examples in a batch", "argparse_alias": "--max-sentences", }, ) required_batch_size_multiple: int = field( default=8, metadata={"help": "batch size will be a multiplier of this value"} ) required_seq_len_multiple: int = field( default=1, metadata={ "help": "maximum sequence length in batch will be a multiplier of this value" }, ) dataset_impl: Optional[DATASET_IMPL_CHOICES] = field( default=None, metadata={"help": "output dataset implementation"} ) data_buffer_size: int = field( default=10, metadata={"help": "Number of batches to preload"} ) train_subset: str = field( default="train", metadata={"help": "data subset to use for training (e.g. train, valid, test)"}, ) valid_subset: str = field( default="valid", metadata={ "help": "comma separated list of data subsets to use for validation" " (e.g. train, valid, test)" }, ) combine_valid_subsets: Optional[bool] = field( default=None, metadata={ "help": "comma separated list of data subsets to use for validation" " (e.g. 
train, valid, test)", "argparse_alias": "--combine-val", }, ) ignore_unused_valid_subsets: Optional[bool] = field( default=False, metadata={"help": "do not raise error if valid subsets are ignored"}, ) validate_interval: int = field( default=1, metadata={"help": "validate every N epochs"} ) validate_interval_updates: int = field( default=0, metadata={"help": "validate every N updates"} ) validate_after_updates: int = field( default=0, metadata={"help": "dont validate until reaching this many updates"} ) fixed_validation_seed: Optional[int] = field( default=None, metadata={"help": "specified random seed for validation"} ) disable_validation: bool = field( default=False, metadata={"help": "disable validation"} ) max_tokens_valid: Optional[int] = field( default=II("dataset.max_tokens"), metadata={ "help": "maximum number of tokens in a validation batch" " (defaults to --max-tokens)" }, ) batch_size_valid: Optional[int] = field( default=II("dataset.batch_size"), metadata={ "help": "batch size of the validation batch (defaults to --batch-size)", "argparse_alias": "--max-sentences-valid", }, ) max_valid_steps: Optional[int] = field( default=None, metadata={"help": "How many batches to evaluate", "argparse_alias": "--nval"}, ) curriculum: int = field( default=0, metadata={"help": "don't shuffle batches for first N epochs"} ) gen_subset: str = field( default="test", metadata={"help": "data subset to generate (train, valid, test)"}, ) num_shards: int = field( default=1, metadata={"help": "shard generation over N shards"} ) shard_id: int = field( default=0, metadata={"help": "id of the shard to generate (id < num_shards)"} ) grouped_shuffling: bool = field( default=False, metadata={ "help": "shuffle batches in groups of num_shards to enable similar sequence lengths on each GPU worker when batches are sorted by length", }, ) update_epoch_batch_itr: bool = field( default=II("dataset.grouped_shuffling"), metadata={ "help": "if true then prevents the reuse the epoch batch iterator by setting can_reuse_epoch_itr to false, defaults to --grouped-shuffling )", }, ) update_ordered_indices_seed: bool = field( default=False, metadata={ "help": "if true then increment seed with epoch for getting batch iterators, defautls to False.", }, ) @dataclass class OptimizationConfig(FairseqDataclass): max_epoch: int = field( default=0, metadata={"help": "force stop training at specified epoch"} ) max_update: int = field( default=0, metadata={"help": "force stop training at specified update"} ) stop_time_hours: float = field( default=0, metadata={ "help": "force stop training after specified cumulative time (if >0)" }, ) clip_norm: float = field( default=0.0, metadata={"help": "clip threshold of gradients"} ) sentence_avg: bool = field( default=False, metadata={ "help": "normalize gradients by the number of sentences in a batch" " (default is to normalize by number of tokens)" }, ) update_freq: List[int] = field( default_factory=lambda: [1], metadata={"help": "update parameters every N_i batches, when in epoch i"}, ) lr: List[float] = field( default_factory=lambda: [0.25], metadata={ "help": "learning rate for the first N epochs; all epochs >N using LR_N" " (note: this may be interpreted differently depending on --lr-scheduler)" }, ) stop_min_lr: float = field( default=-1.0, metadata={"help": "stop training when the learning rate reaches this minimum"}, ) use_bmuf: bool = field( default=False, metadata={ "help": "specify global optimizer for syncing models on different GPUs/shards" }, ) skip_remainder_batch: 
Optional[bool] = field( default=False, metadata={ "help": "if set, include the last (partial) batch of each epoch in training" " (default is to skip it)." }, ) debug_param_names: bool = False @dataclass class CheckpointConfig(FairseqDataclass): save_dir: str = field( default="checkpoints", metadata={"help": "path to save checkpoints"} ) restore_file: str = field( default="checkpoint_last.pt", metadata={ "help": "filename from which to load checkpoint " "(default: <save-dir>/checkpoint_last.pt" }, ) continue_once: Optional[str] = field( default=None, metadata={ "help": "continues from this checkpoint, unless a checkpoint indicated in 'restore_file' option is present" }, ) finetune_from_model: Optional[str] = field( default=None, metadata={ "help": "finetune from a pretrained model; note that meters and lr scheduler will be reset" }, ) reset_dataloader: bool = field( default=False, metadata={ "help": "if set, does not reload dataloader state from the checkpoint" }, ) reset_lr_scheduler: bool = field( default=False, metadata={ "help": "if set, does not load lr scheduler state from the checkpoint" }, ) reset_meters: bool = field( default=False, metadata={"help": "if set, does not load meters from the checkpoint"}, ) reset_optimizer: bool = field( default=False, metadata={"help": "if set, does not load optimizer state from the checkpoint"}, ) optimizer_overrides: str = field( default="{}", metadata={ "help": "a dictionary used to override optimizer args when loading a checkpoint" }, ) save_interval: int = field( default=1, metadata={"help": "save a checkpoint every N epochs"} ) save_interval_updates: int = field( default=0, metadata={"help": "save a checkpoint (and validate) every N updates"} ) keep_interval_updates: int = field( default=-1, metadata={ "help": "keep the last N checkpoints saved with --save-interval-updates" }, ) keep_interval_updates_pattern: int = field( default=-1, metadata={ "help": "when used with --keep-interval-updates, skips deleting " "any checkpoints with update X where " "X %% keep_interval_updates_pattern == 0" }, ) keep_last_epochs: int = field( default=-1, metadata={"help": "keep last N epoch checkpoints"} ) keep_best_checkpoints: int = field( default=-1, metadata={"help": "keep best N checkpoints based on scores"} ) no_save: bool = field( default=False, metadata={"help": "don't save models or checkpoints"} ) no_epoch_checkpoints: bool = field( default=False, metadata={"help": "only store last and best checkpoints"} ) no_last_checkpoints: bool = field( default=False, metadata={"help": "don't store last checkpoints"} ) no_save_optimizer_state: bool = field( default=False, metadata={"help": "don't save optimizer-state as part of checkpoint"}, ) best_checkpoint_metric: str = field( default="loss", metadata={"help": 'metric to use for saving "best" checkpoints'} ) maximize_best_checkpoint_metric: bool = field( default=False, metadata={ "help": 'select the largest metric value for saving "best" checkpoints' }, ) patience: int = field( default=-1, metadata={ "help": ( "early stop training if valid performance doesn't " "improve for N consecutive validation runs; note " "that this is influenced by --validate-interval" ) }, ) checkpoint_suffix: str = field( default="", metadata={"help": "suffix to add to the checkpoint file name"} ) checkpoint_shard_count: int = field( default=1, metadata={ "help": "Number of shards containing the checkpoint - " "if the checkpoint is over 300GB, it is preferable " "to split it into shards to prevent OOM on CPU while loading " "the 
checkpoint" }, ) load_checkpoint_on_all_dp_ranks: bool = field( default=False, metadata={ "help": "load checkpoints on all data parallel devices " "(default: only load on rank 0 and broadcast to other devices)" }, ) write_checkpoints_asynchronously: bool = field( default=False, metadata={ "help": ( "Write checkpoints asynchronously in a separate " "thread. NOTE: This feature is currently being tested." ), "argparse_alias": "--save-async", }, ) model_parallel_size: int = II("common.model_parallel_size") @dataclass class FairseqBMUFConfig(FairseqDataclass): block_lr: float = field( default=1, metadata={"help": "block learning rate for bmuf"} ) block_momentum: float = field( default=0.875, metadata={"help": "block momentum for bmuf"} ) global_sync_iter: int = field( default=50, metadata={"help": "Iteration for syncing global model"} ) warmup_iterations: int = field( default=500, metadata={"help": "warmup iterations for model to broadcast"} ) use_nbm: bool = field( default=False, metadata={"help": "Specify whether you want to use classical BM / Nesterov BM"}, ) average_sync: bool = field( default=False, metadata={ "help": "Specify whether you want to average the local momentum after each sync" }, ) distributed_world_size: int = II("distributed_training.distributed_world_size") @dataclass class GenerationConfig(FairseqDataclass): beam: int = field( default=5, metadata={"help": "beam size"}, ) beam_mt: int = field( default=0, metadata={"help": "beam size for the first-pass decoder"}, ) nbest: int = field( default=1, metadata={"help": "number of hypotheses to output"}, ) max_len_a: float = field( default=0, metadata={ "help": "generate sequences of maximum length ax + b, where x is the source length" }, ) max_len_b: int = field( default=200, metadata={ "help": "generate sequences of maximum length ax + b, where x is the source length" }, ) max_len_a_mt: float = field( default=0, metadata={ "help": "generate sequences of maximum length ax + b, where x is the source length for the first-pass decoder" }, ) max_len_b_mt: int = field( default=200, metadata={ "help": "generate sequences of maximum length ax + b, where x is the source length for the first-pass decoder" }, ) min_len: int = field( default=1, metadata={"help": "minimum generation length"}, ) match_source_len: bool = field( default=False, metadata={"help": "generations should match the source length"}, ) unnormalized: bool = field( default=False, metadata={"help": "compare unnormalized hypothesis scores"}, ) no_early_stop: bool = field( default=False, metadata={"help": "deprecated"}, ) no_beamable_mm: bool = field( default=False, metadata={"help": "don't use BeamableMM in attention layers"}, ) lenpen: float = field( default=1, metadata={ "help": "length penalty: <1.0 favors shorter, >1.0 favors longer sentences" }, ) lenpen_mt: float = field( default=1, metadata={ "help": "length penalty for the first-pass decoder: <1.0 favors shorter, >1.0 favors longer sentences" }, ) unkpen: float = field( default=0, metadata={ "help": "unknown word penalty: <0 produces more unks, >0 produces fewer" }, ) replace_unk: Optional[str] = field( default=None, metadata={ "help": "perform unknown replacement (optionally with alignment dictionary)", "argparse_const": "@@ ", }, ) sacrebleu: bool = field( default=False, metadata={"help": "score with sacrebleu"}, ) score_reference: bool = field( default=False, metadata={"help": "just score the reference translation"}, ) prefix_size: int = field( default=0, metadata={"help": "initialize generation by target prefix of 
given length"}, ) no_repeat_ngram_size: int = field( default=0, metadata={ "help": "ngram blocking such that this size ngram cannot be repeated in the generation" }, ) sampling: bool = field( default=False, metadata={"help": "sample hypotheses instead of using beam search"}, ) sampling_topk: int = field( default=-1, metadata={"help": "sample from top K likely next words instead of all words"}, ) sampling_topp: float = field( default=-1.0, metadata={ "help": "sample from the smallest set whose cumulative probability mass exceeds p for next words" }, ) constraints: Optional[GENERATION_CONSTRAINTS_CHOICES] = field( default=None, metadata={ "help": "enables lexically constrained decoding", "argparse_const": "ordered", }, ) temperature: float = field( default=1.0, metadata={"help": "temperature for generation"}, ) diverse_beam_groups: int = field( default=-1, metadata={"help": "number of groups for Diverse Beam Search"}, ) diverse_beam_strength: float = field( default=0.5, metadata={"help": "strength of diversity penalty for Diverse Beam Search"}, ) diversity_rate: float = field( default=-1.0, metadata={"help": "strength of diversity penalty for Diverse Siblings Search"}, ) print_alignment: Optional[PRINT_ALIGNMENT_CHOICES] = field( default=None, metadata={ "help": "if set, uses attention feedback to compute and print alignment to source tokens " "(valid options are: hard, soft, otherwise treated as hard alignment)", "argparse_const": "hard", }, ) print_step: bool = field( default=False, metadata={"help": "print steps"}, ) lm_path: Optional[str] = field( default=None, metadata={"help": "path to lm checkpoint for lm fusion"}, ) lm_weight: float = field( default=0.0, metadata={"help": "weight for lm probs for lm fusion"}, ) # arguments for iterative refinement generator iter_decode_eos_penalty: float = field( default=0.0, metadata={"help": "if > 0.0, it penalized early-stopping in decoding."}, ) iter_decode_max_iter: int = field( default=10, metadata={"help": "maximum iterations for iterative refinement."}, ) iter_decode_force_max_iter: bool = field( default=False, metadata={ "help": "if set, run exact the maximum number of iterations without early stop" }, ) iter_decode_with_beam: int = field( default=1, metadata={ "help": "if > 1, model will generate translations varying by the lengths." }, ) iter_decode_with_external_reranker: bool = field( default=False, metadata={ "help": "if set, the last checkpoint are assumed to be a reranker to rescore the translations" }, ) retain_iter_history: bool = field( default=False, metadata={ "help": "if set, decoding returns the whole history of iterative refinement" }, ) retain_dropout: bool = field( default=False, metadata={"help": "Use dropout at inference time"}, ) # temporarily set to Any until https://github.com/facebookresearch/hydra/issues/1117 is fixed # retain_dropout_modules: Optional[List[str]] = field( retain_dropout_modules: Any = field( default=None, metadata={ "help": "if set, only retain dropout for the specified modules; " "if not set, then dropout will be retained for all modules" }, ) # special decoding format for advanced decoding. 
decoding_format: Optional[GENERATION_DECODING_FORMAT_CHOICES] = field( default=None, metadata={"help": "special decoding format for advanced decoding."}, ) no_seed_provided: bool = field( default=False, metadata={"help": "if set, dont use seed for initializing random generators"}, ) eos_token: Optional[str] = field( default=None, metadata={"help": "EOS token"}, ) @dataclass class CommonEvalConfig(FairseqDataclass): path: Optional[str] = field( default=None, metadata={"help": "path(s) to model file(s), colon separated"}, ) post_process: Optional[str] = field( default=None, metadata={ "help": ( "post-process text by removing BPE, letter segmentation, etc. " "Valid options can be found in fairseq.data.utils.post_process." ), "argparse_const": "subword_nmt", "argparse_alias": "--remove-bpe", }, ) quiet: bool = field(default=False, metadata={"help": "only print final scores"}) model_overrides: str = field( default="{}", metadata={ "help": "a dictionary used to override model args at generation that were used during model training" }, ) results_path: Optional[str] = field( default=None, metadata={"help": "path to save eval results (optional)"} ) @dataclass class EvalLMConfig(FairseqDataclass): output_word_probs: bool = field( default=False, metadata={ "help": "if set, outputs words and their predicted log probabilities to standard output" }, ) output_word_stats: bool = field( default=False, metadata={ "help": "if set, outputs word statistics such as word count, average probability, etc" }, ) context_window: int = field( default=0, metadata={ "help": "ensures that every evaluated token has access to a context of at least this size, if possible" }, ) softmax_batch: int = field( default=sys.maxsize, metadata={ "help": "if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory" }, ) @dataclass class InteractiveConfig(FairseqDataclass): buffer_size: int = field( default=0, metadata={ "help": "read this many sentences into a buffer before processing them" }, ) input: str = field( default="-", metadata={"help": "file to read from; use - for stdin"}, ) @dataclass class EMAConfig(FairseqDataclass): store_ema: bool = field( default=False, metadata={help: "store exponential moving average shadow model"} ) ema_decay: float = field( default=0.9999, metadata={"help": "decay for exponential moving average model"} ) ema_start_update: int = field( default=0, metadata={"help": "start EMA update after this many model updates"} ) ema_seed_model: Optional[str] = field( default=None, metadata={ "help": "Seed to load EMA model from. " "Used to load EMA model separately from the actual model." 
}, ) ema_update_freq: int = field( default=1, metadata={"help": "Do EMA update every this many model updates"} ) ema_fp32: bool = field( default=False, metadata={"help": "If true, store EMA model in fp32 even if model is in fp16"}, ) @dataclass class FairseqConfig(FairseqDataclass): common: CommonConfig = CommonConfig() common_eval: CommonEvalConfig = CommonEvalConfig() distributed_training: DistributedTrainingConfig = DistributedTrainingConfig() dataset: DatasetConfig = DatasetConfig() optimization: OptimizationConfig = OptimizationConfig() checkpoint: CheckpointConfig = CheckpointConfig() bmuf: FairseqBMUFConfig = FairseqBMUFConfig() generation: GenerationConfig = GenerationConfig() eval_lm: EvalLMConfig = EvalLMConfig() interactive: InteractiveConfig = InteractiveConfig() model: Any = MISSING task: Any = None criterion: Any = None optimizer: Any = None lr_scheduler: Any = None scoring: Any = None bpe: Any = None tokenizer: Any = None ema: EMAConfig = EMAConfig()
EXA-1-master
exa/libraries/fairseq/fairseq/dataclass/configs.py
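A short illustrative sketch, based only on the FairseqDataclass helpers defined above, of how defaults and help metadata can be read back from one of these config groups, and how from_namespace copies matching attributes out of a flat argparse namespace. The namespace contents are made up for the example.

from argparse import Namespace

from fairseq.dataclass.configs import CommonConfig

cfg = CommonConfig()
print(cfg._get_default("seed"))        # 1
print(cfg._get_help("log_interval"))   # help string stored in the field metadata

# from_namespace copies only attributes that exist as fields on the dataclass
ns = Namespace(seed=7, cpu=True, some_unrelated_flag="ignored")  # hypothetical flat namespace
cfg2 = CommonConfig.from_namespace(ns)
print(cfg2.seed, cfg2.cpu)             # 7 True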
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""

import logging

from hydra.core.config_store import ConfigStore

from fairseq.dataclass.configs import FairseqConfig
from omegaconf import DictConfig, OmegaConf


logger = logging.getLogger(__name__)


def hydra_init(cfg_name="config") -> None:

    cs = ConfigStore.instance()
    cs.store(name=f"{cfg_name}", node=FairseqConfig)

    for k in FairseqConfig.__dataclass_fields__:
        v = FairseqConfig.__dataclass_fields__[k].default
        try:
            cs.store(name=k, node=v)
        except BaseException:
            logger.error(f"{k} - {v}")
            raise


def add_defaults(cfg: DictConfig) -> None:
    """This function adds default values that are stored in dataclasses that hydra doesn't know about"""

    from fairseq.registry import REGISTRIES
    from fairseq.tasks import TASK_DATACLASS_REGISTRY
    from fairseq.models import ARCH_MODEL_NAME_REGISTRY, MODEL_DATACLASS_REGISTRY
    from fairseq.dataclass.utils import merge_with_parent
    from typing import Any

    OmegaConf.set_struct(cfg, False)

    for k, v in FairseqConfig.__dataclass_fields__.items():
        field_cfg = cfg.get(k)
        if field_cfg is not None and v.type == Any:
            dc = None

            if isinstance(field_cfg, str):
                field_cfg = DictConfig({"_name": field_cfg})
                field_cfg.__dict__["_parent"] = field_cfg.__dict__["_parent"]

            name = getattr(field_cfg, "_name", None)

            if k == "task":
                dc = TASK_DATACLASS_REGISTRY.get(name)
            elif k == "model":
                name = ARCH_MODEL_NAME_REGISTRY.get(name, name)
                dc = MODEL_DATACLASS_REGISTRY.get(name)
            elif k in REGISTRIES:
                dc = REGISTRIES[k]["dataclass_registry"].get(name)

            if dc is not None:
                cfg[k] = merge_with_parent(dc, field_cfg)
EXA-1-master
exa/libraries/fairseq/fairseq/dataclass/initialize.py
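A brief sketch, added for illustration, of how hydra_init above is meant to be used: fairseq's Hydra entry points call it once before composing a config, so that the structured FairseqConfig (plus one node per top-level group) is registered in Hydra's ConfigStore under the chosen name. This is not a complete entry point.

from fairseq.dataclass.initialize import hydra_init

# Register the structured config tree; after this, Hydra can compose "config"
# (as convert_namespace_to_omegaconf() in fairseq.dataclass.utils does) and
# apply command-line or programmatic overrides on top of it.
hydra_init(cfg_name="config")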
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from enum import Enum, EnumMeta
from typing import List


class StrEnumMeta(EnumMeta):
    # this is workaround for submitit pickling leading to instance checks failing in hydra for StrEnum, see
    # https://github.com/facebookresearch/hydra/issues/1156
    @classmethod
    def __instancecheck__(cls, other):
        return "enum" in str(type(other))


class StrEnum(Enum, metaclass=StrEnumMeta):
    def __str__(self):
        return self.value

    def __eq__(self, other: str):
        return self.value == other

    def __repr__(self):
        return self.value

    def __hash__(self):
        return hash(str(self))


def ChoiceEnum(choices: List[str]):
    """return the Enum class used to enforce list of choices"""
    return StrEnum("Choices", {k: k for k in choices})


LOG_FORMAT_CHOICES = ChoiceEnum(["json", "none", "simple", "tqdm"])
DDP_BACKEND_CHOICES = ChoiceEnum(
    [
        "c10d",  # alias for pytorch_ddp
        "fully_sharded",  # FullyShardedDataParallel from fairscale
        "legacy_ddp",
        "no_c10d",  # alias for legacy_ddp
        "pytorch_ddp",
        "slowmo",
    ]
)
DDP_COMM_HOOK_CHOICES = ChoiceEnum(["none", "fp16"])
DATASET_IMPL_CHOICES = ChoiceEnum(["raw", "lazy", "cached", "mmap", "fasta", "huffman"])
GENERATION_CONSTRAINTS_CHOICES = ChoiceEnum(["ordered", "unordered"])
GENERATION_DECODING_FORMAT_CHOICES = ChoiceEnum(
    ["unigram", "ensemble", "vote", "dp", "bs"]
)
ZERO_SHARDING_CHOICES = ChoiceEnum(["none", "os"])
PIPELINE_CHECKPOINT_CHOICES = ChoiceEnum(["always", "never", "except_last"])
PRINT_ALIGNMENT_CHOICES = ChoiceEnum(["hard", "soft"])
EXA-1-master
exa/libraries/fairseq/fairseq/dataclass/constants.py
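A small sketch of the ChoiceEnum behaviour defined above: members stringify to, and compare equal with, their underlying values, which is what lets these enums double as argparse/Hydra choice lists. COLOR_CHOICES is a made-up choice set used only for illustration.

from fairseq.dataclass.constants import DDP_BACKEND_CHOICES, ChoiceEnum

COLOR_CHOICES = ChoiceEnum(["red", "green", "blue"])  # hypothetical choice set

print(COLOR_CHOICES.red == "red")              # True: StrEnum.__eq__ compares against the value
print(str(DDP_BACKEND_CHOICES.pytorch_ddp))    # "pytorch_ddp"
print([c.value for c in DDP_BACKEND_CHOICES])  # all registered DDP backend names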
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .configs import FairseqDataclass
from .constants import ChoiceEnum


__all__ = [
    "FairseqDataclass",
    "ChoiceEnum",
]
EXA-1-master
exa/libraries/fairseq/fairseq/dataclass/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import ast import inspect import logging import os import re from argparse import ArgumentError, ArgumentParser, Namespace from dataclasses import _MISSING_TYPE, MISSING, is_dataclass from enum import Enum from typing import Any, Dict, List, Optional, Tuple, Type from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.configs import FairseqConfig from hydra.core.global_hydra import GlobalHydra from hydra.experimental import compose, initialize from omegaconf import DictConfig, OmegaConf, open_dict, _utils logger = logging.getLogger(__name__) def eval_str_list(x, x_type=float): if x is None: return None if isinstance(x, str): if len(x) == 0: return [] x = ast.literal_eval(x) try: return list(map(x_type, x)) except TypeError: return [x_type(x)] def interpret_dc_type(field_type): if isinstance(field_type, str): raise RuntimeError("field should be a type") if field_type == Any: return str typestring = str(field_type) if re.match( r"(typing.|^)Union\[(.*), NoneType\]$", typestring ) or typestring.startswith("typing.Optional"): return field_type.__args__[0] return field_type def gen_parser_from_dataclass( parser: ArgumentParser, dataclass_instance: FairseqDataclass, delete_default: bool = False, with_prefix: Optional[str] = None, ) -> None: """ convert a dataclass instance to tailing parser arguments. If `with_prefix` is provided, prefix all the keys in the resulting parser with it. It means that we are building a flat namespace from a structured dataclass (see transformer_config.py for example). """ def argparse_name(name: str): if name == "data" and (with_prefix is None or with_prefix == ""): # normally data is positional args, so we don't add the -- nor the prefix return name if name == "_name": # private member, skip return None full_name = "--" + name.replace("_", "-") if with_prefix is not None and with_prefix != "": # if a prefix is specified, construct the prefixed arg name full_name = with_prefix + "-" + full_name[2:] # strip -- when composing return full_name def get_kwargs_from_dc( dataclass_instance: FairseqDataclass, k: str ) -> Dict[str, Any]: """k: dataclass attributes""" kwargs = {} field_type = dataclass_instance._get_type(k) inter_type = interpret_dc_type(field_type) field_default = dataclass_instance._get_default(k) if isinstance(inter_type, type) and issubclass(inter_type, Enum): field_choices = [t.value for t in list(inter_type)] else: field_choices = None field_help = dataclass_instance._get_help(k) field_const = dataclass_instance._get_argparse_const(k) if isinstance(field_default, str) and field_default.startswith("${"): kwargs["default"] = field_default else: if field_default is MISSING: kwargs["required"] = True if field_choices is not None: kwargs["choices"] = field_choices if ( isinstance(inter_type, type) and (issubclass(inter_type, List) or issubclass(inter_type, Tuple)) ) or ("List" in str(inter_type) or "Tuple" in str(inter_type)): if "int" in str(inter_type): kwargs["type"] = lambda x: eval_str_list(x, int) elif "float" in str(inter_type): kwargs["type"] = lambda x: eval_str_list(x, float) elif "str" in str(inter_type): kwargs["type"] = lambda x: eval_str_list(x, str) else: raise NotImplementedError( "parsing of type " + str(inter_type) + " is not implemented" ) if field_default is not MISSING: kwargs["default"] = ( ",".join(map(str, field_default)) if field_default is not 
None else None ) elif ( isinstance(inter_type, type) and issubclass(inter_type, Enum) ) or "Enum" in str(inter_type): kwargs["type"] = str if field_default is not MISSING: if isinstance(field_default, Enum): kwargs["default"] = field_default.value else: kwargs["default"] = field_default elif inter_type is bool: kwargs["action"] = ( "store_false" if field_default is True else "store_true" ) kwargs["default"] = field_default else: kwargs["type"] = inter_type if field_default is not MISSING: kwargs["default"] = field_default # build the help with the hierarchical prefix if with_prefix is not None and with_prefix != "" and field_help is not None: field_help = with_prefix[2:] + ": " + field_help kwargs["help"] = field_help if field_const is not None: kwargs["const"] = field_const kwargs["nargs"] = "?" return kwargs for k in dataclass_instance._get_all_attributes(): field_name = argparse_name(dataclass_instance._get_name(k)) field_type = dataclass_instance._get_type(k) if field_name is None: continue elif inspect.isclass(field_type) and issubclass(field_type, FairseqDataclass): # for fields that are of type FairseqDataclass, we can recursively # add their fields to the namespace (so we add the args from model, task, etc. to the root namespace) prefix = None if with_prefix is not None: # if a prefix is specified, then we don't want to copy the subfields directly to the root namespace # but we prefix them with the name of the current field. prefix = field_name gen_parser_from_dataclass(parser, field_type(), delete_default, prefix) continue kwargs = get_kwargs_from_dc(dataclass_instance, k) field_args = [field_name] alias = dataclass_instance._get_argparse_alias(k) if alias is not None: field_args.append(alias) if "default" in kwargs: if isinstance(kwargs["default"], str) and kwargs["default"].startswith( "${" ): if kwargs["help"] is None: # this is a field with a name that will be added elsewhere continue else: del kwargs["default"] if delete_default and "default" in kwargs: del kwargs["default"] try: parser.add_argument(*field_args, **kwargs) except ArgumentError: pass def _set_legacy_defaults(args, cls): """Helper to set default arguments based on *add_args*.""" if not hasattr(cls, "add_args"): return import argparse parser = argparse.ArgumentParser( argument_default=argparse.SUPPRESS, allow_abbrev=False ) cls.add_args(parser) # copied from argparse.py: defaults = argparse.Namespace() for action in parser._actions: if action.dest is not argparse.SUPPRESS: if not hasattr(defaults, action.dest): if action.default is not argparse.SUPPRESS: setattr(defaults, action.dest, action.default) for key, default_value in vars(defaults).items(): if not hasattr(args, key): setattr(args, key, default_value) def _override_attr( sub_node: str, data_class: Type[FairseqDataclass], args: Namespace ) -> List[str]: overrides = [] if not inspect.isclass(data_class) or not issubclass(data_class, FairseqDataclass): return overrides def get_default(f): if not isinstance(f.default_factory, _MISSING_TYPE): return f.default_factory() return f.default for k, v in data_class.__dataclass_fields__.items(): if k.startswith("_"): # private member, skip continue val = get_default(v) if not hasattr(args, k) else getattr(args, k) field_type = interpret_dc_type(v.type) if ( isinstance(val, str) and not val.startswith("${") # not interpolation and field_type != str and ( not inspect.isclass(field_type) or not issubclass(field_type, Enum) ) # not choices enum ): # upgrade old models that stored complex parameters as string val = 
ast.literal_eval(val) if isinstance(val, tuple): val = list(val) v_type = getattr(v.type, "__origin__", None) if ( (v_type is List or v_type is list or v_type is Optional) # skip interpolation and not (isinstance(val, str) and val.startswith("${")) ): # if type is int but val is float, then we will crash later - try to convert here if hasattr(v.type, "__args__"): t_args = v.type.__args__ if len(t_args) == 1 and (t_args[0] is float or t_args[0] is int): val = list(map(t_args[0], val)) elif val is not None and ( field_type is int or field_type is bool or field_type is float ): try: val = field_type(val) except: pass # ignore errors here, they are often from interpolation args if val is None: overrides.append("{}.{}=null".format(sub_node, k)) elif val == "": overrides.append("{}.{}=''".format(sub_node, k)) elif isinstance(val, str): val = val.replace("'", r"\'") overrides.append("{}.{}='{}'".format(sub_node, k, val)) elif isinstance(val, FairseqDataclass): overrides += _override_attr(f"{sub_node}.{k}", type(val), args) elif isinstance(val, Namespace): sub_overrides, _ = override_module_args(val) for so in sub_overrides: overrides.append(f"{sub_node}.{k}.{so}") else: overrides.append("{}.{}={}".format(sub_node, k, val)) return overrides def migrate_registry( name, value, registry, args, overrides, deletes, use_name_as_val=False ): if value in registry: overrides.append("{}={}".format(name, value)) overrides.append("{}._name={}".format(name, value)) overrides.extend(_override_attr(name, registry[value], args)) elif use_name_as_val and value is not None: overrides.append("{}={}".format(name, value)) else: deletes.append(name) def override_module_args(args: Namespace) -> Tuple[List[str], List[str]]: """use the field in args to overrides those in cfg""" overrides = [] deletes = [] for k in FairseqConfig.__dataclass_fields__.keys(): overrides.extend( _override_attr(k, FairseqConfig.__dataclass_fields__[k].type, args) ) if args is not None: if hasattr(args, "task"): from fairseq.tasks import TASK_DATACLASS_REGISTRY migrate_registry( "task", args.task, TASK_DATACLASS_REGISTRY, args, overrides, deletes ) else: deletes.append("task") # these options will be set to "None" if they have not yet been migrated # so we can populate them with the entire flat args CORE_REGISTRIES = {"criterion", "optimizer", "lr_scheduler"} from fairseq.registry import REGISTRIES for k, v in REGISTRIES.items(): if hasattr(args, k): migrate_registry( k, getattr(args, k), v["dataclass_registry"], args, overrides, deletes, use_name_as_val=k not in CORE_REGISTRIES, ) else: deletes.append(k) no_dc = True if hasattr(args, "arch"): from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_MODEL_NAME_REGISTRY if args.arch in ARCH_MODEL_REGISTRY: m_cls = ARCH_MODEL_REGISTRY[args.arch] dc = getattr(m_cls, "__dataclass", None) if dc is not None: m_name = ARCH_MODEL_NAME_REGISTRY[args.arch] overrides.append("model={}".format(m_name)) overrides.append("model._name={}".format(args.arch)) # override model params with those exist in args overrides.extend(_override_attr("model", dc, args)) no_dc = False if no_dc: deletes.append("model") return overrides, deletes class omegaconf_no_object_check: def __init__(self): # Changed in https://github.com/omry/omegaconf/pull/911 - both are kept for back compat. 
if hasattr(_utils, "is_primitive_type"): self.old_is_primitive = _utils.is_primitive_type else: self.old_is_primitive = _utils.is_primitive_type_annotation def __enter__(self): if hasattr(_utils, "is_primitive_type"): _utils.is_primitive_type = lambda _: True else: _utils.is_primitive_type_annotation = lambda _: True def __exit__(self, type, value, traceback): if hasattr(_utils, "is_primitive_type"): _utils.is_primitive_type = self.old_is_primitive else: _utils.is_primitive_type_annotation = self.old_is_primitive def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig: """Convert a flat argparse.Namespace to a structured DictConfig.""" # Here we are using field values provided in args to override counterparts inside config object overrides, deletes = override_module_args(args) # configs will be in fairseq/config after installation config_path = os.path.join("..", "config") GlobalHydra.instance().clear() with initialize(config_path=config_path): try: composed_cfg = compose("config", overrides=overrides, strict=False) except: logger.error("Error when composing. Overrides: " + str(overrides)) raise for k in deletes: composed_cfg[k] = None cfg = OmegaConf.create( OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True) ) # hack to be able to set Namespace in dict config. this should be removed when we update to newer # omegaconf version that supports object flags, or when we migrate all existing models from omegaconf import _utils with omegaconf_no_object_check(): if cfg.task is None and getattr(args, "task", None): cfg.task = Namespace(**vars(args)) from fairseq.tasks import TASK_REGISTRY _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task]) cfg.task._name = args.task if cfg.model is None and getattr(args, "arch", None): cfg.model = Namespace(**vars(args)) from fairseq.models import ARCH_MODEL_REGISTRY _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch]) cfg.model._name = args.arch if cfg.optimizer is None and getattr(args, "optimizer", None): cfg.optimizer = Namespace(**vars(args)) from fairseq.optim import OPTIMIZER_REGISTRY _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer]) cfg.optimizer._name = args.optimizer if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None): cfg.lr_scheduler = Namespace(**vars(args)) from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY _set_legacy_defaults( cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler] ) cfg.lr_scheduler._name = args.lr_scheduler if cfg.criterion is None and getattr(args, "criterion", None): cfg.criterion = Namespace(**vars(args)) from fairseq.criterions import CRITERION_REGISTRY _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion]) cfg.criterion._name = args.criterion OmegaConf.set_struct(cfg, True) return cfg def overwrite_args_by_name(cfg: DictConfig, overrides: Dict[str, any]): # this will be deprecated when we get rid of argparse and model_overrides logic from fairseq.registry import REGISTRIES with open_dict(cfg): for k in cfg.keys(): # "k in cfg" will return false if its a "mandatory value (e.g. 
???)" if k in cfg and isinstance(cfg[k], DictConfig): if k in overrides and isinstance(overrides[k], dict): for ok, ov in overrides[k].items(): if isinstance(ov, dict) and cfg[k][ok] is not None: overwrite_args_by_name(cfg[k][ok], ov) else: cfg[k][ok] = ov else: overwrite_args_by_name(cfg[k], overrides) elif k in cfg and isinstance(cfg[k], Namespace): for override_key, val in overrides.items(): setattr(cfg[k], override_key, val) elif k in overrides: if ( k in REGISTRIES and overrides[k] in REGISTRIES[k]["dataclass_registry"] ): cfg[k] = DictConfig( REGISTRIES[k]["dataclass_registry"][overrides[k]] ) overwrite_args_by_name(cfg[k], overrides) cfg[k]._name = overrides[k] else: cfg[k] = overrides[k] def merge_with_parent(dc: FairseqDataclass, cfg: DictConfig, remove_missing=False): if remove_missing: def remove_missing_rec(src_keys, target_cfg): if is_dataclass(target_cfg): target_keys = set(target_cfg.__dataclass_fields__.keys()) else: target_keys = set(target_cfg.keys()) for k in list(src_keys.keys()): if k not in target_keys: del src_keys[k] elif OmegaConf.is_config(src_keys[k]): tgt = getattr(target_cfg, k) if tgt is not None and (is_dataclass(tgt) or hasattr(tgt, "keys")): remove_missing_rec(src_keys[k], tgt) with open_dict(cfg): remove_missing_rec(cfg, dc) merged_cfg = OmegaConf.merge(dc, cfg) merged_cfg.__dict__["_parent"] = cfg.__dict__["_parent"] OmegaConf.set_struct(merged_cfg, True) return merged_cfg
EXA-1-master
exa/libraries/fairseq/fairseq/dataclass/utils.py
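A condensed, illustrative sketch of the two most self-contained pieces above: gen_parser_from_dataclass turning a config dataclass into argparse flags, and eval_str_list parsing list-valued options. CommonEvalConfig is used here only because it is small; any FairseqDataclass should work the same way.

from argparse import ArgumentParser

from fairseq.dataclass.configs import CommonEvalConfig
from fairseq.dataclass.utils import eval_str_list, gen_parser_from_dataclass

parser = ArgumentParser()
gen_parser_from_dataclass(parser, CommonEvalConfig())
args = parser.parse_args(["--path", "model.pt", "--quiet"])
print(args.path, args.quiet)                 # model.pt True

print(eval_str_list("[0.1, 0.01]", float))   # [0.1, 0.01] -- how List[float] fields are parsed
print(eval_str_list("3", int))               # [3]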
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn


class BeamableMM(nn.Module):
    """This module provides an optimized MM for beam decoding with attention.

    It leverages the fact that the source-side of the input is replicated beam
    times and the target-side of the input is of width one. This layer speeds up
    inference by replacing the inputs {(bsz x 1 x nhu), (bsz x sz2 x nhu)}
    with smaller inputs {(bsz/beam x beam x nhu), (bsz/beam x sz2 x nhu)}.
    """

    def __init__(self, beam_size=None):
        super(BeamableMM, self).__init__()
        self.beam_size = beam_size

    def forward(self, input1, input2):
        if (
            not self.training
            and self.beam_size is not None  # test mode
            and input1.dim() == 3  # beam size is set
            and input1.size(1)  # only support batched input
            == 1  # single time step update
        ):
            bsz, beam = input1.size(0), self.beam_size

            # bsz x 1 x nhu --> bsz/beam x beam x nhu
            input1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1)

            # bsz x sz2 x nhu --> bsz/beam x sz2 x nhu
            input2 = input2.unfold(0, beam, beam)[:, :, :, 0]

            # use non batched operation if bsz = beam
            if input1.size(0) == 1:
                output = torch.mm(input1[0, :, :], input2[0, :, :])
            else:
                output = input1.bmm(input2)
            return output.view(bsz, 1, -1)
        else:
            return input1.bmm(input2)

    def set_beam_size(self, beam_size):
        self.beam_size = beam_size
EXA-1-master
exa/libraries/fairseq/fairseq/modules/beamable_mm.py
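A usage sketch for BeamableMM, added for illustration. It assumes the usual beam-decoding layout in which both inputs carry bsz * beam rows and the source side is replicated across the beam; the sizes below are arbitrary and chosen only so the folded and unfolded paths can be compared.

import torch

from fairseq.modules.beamable_mm import BeamableMM

bsz, beam, sz2, nhu = 2, 5, 7, 16
mm = BeamableMM(beam_size=beam).eval()  # eval() so the beam-folding branch is taken

input1 = torch.randn(bsz * beam, 1, nhu)        # target side: a single time step per beam
source = torch.randn(bsz, nhu, sz2)
input2 = source.repeat_interleave(beam, dim=0)  # source side replicated beam times

out = mm(input1, input2)
print(out.shape)                                           # torch.Size([10, 1, 7])
print(torch.allclose(out, input1.bmm(input2), atol=1e-5))  # matches the plain bmm result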
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.scalar_bias import scalar_bias class SingleHeadAttention(nn.Module): """ Single-head attention that supports Gating and Downsampling """ def __init__( self, out_channels, embed_dim, head_dim, head_index, dropout=0.0, bias=True, project_input=True, gated=False, downsample=False, num_heads=1, ): super().__init__() self.embed_dim = embed_dim self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_index = head_index self.head_dim = head_dim self.project_input = project_input self.gated = gated self.downsample = downsample self.num_heads = num_heads self.projection = None k_layers = [] v_layers = [] if self.downsample: k_layers.append(Downsample(self.head_index)) v_layers.append(Downsample(self.head_index)) out_proj_size = self.head_dim else: out_proj_size = self.head_dim * self.num_heads if self.gated: k_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias)) self.in_proj_q = GatedLinear(self.embed_dim, out_proj_size, bias=bias) v_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias)) else: k_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias)) self.in_proj_q = Linear(self.embed_dim, out_proj_size, bias=bias) v_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias)) self.in_proj_k = nn.Sequential(*k_layers) self.in_proj_v = nn.Sequential(*v_layers) if self.downsample: self.out_proj = Linear(out_proj_size, self.head_dim, bias=bias) else: self.out_proj = Linear(out_proj_size, out_channels, bias=bias) self.scaling = self.head_dim**-0.5 def forward( self, query, key, value, mask_future_timesteps=False, key_padding_mask=None, use_scalar_bias=False, ): """Input shape: Time x Batch x Channel Self-attention can be implemented by passing in the same arguments for query, key and value. Future timesteps can be masked with the `mask_future_timesteps` argument. Padding elements can be excluded from the key by passing a binary ByteTensor (`key_padding_mask`) with shape: batch x src_len, where padding elements are indicated by 1s. 
""" src_len, bsz, out_channels = key.size() tgt_len = query.size(0) assert list(query.size()) == [tgt_len, bsz, out_channels] assert key.size() == value.size() if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.downsample: size = bsz else: size = bsz * self.num_heads k = key v = value q = query if self.project_input: q = self.in_proj_q(q) k = self.in_proj_k(k) v = self.in_proj_v(v) src_len = k.size()[0] q *= self.scaling if not self.downsample: q = q.view(tgt_len, size, self.head_dim) k = k.view(src_len, size, self.head_dim) v = v.view(src_len, size, self.head_dim) q = q.transpose(0, 1) k = k.transpose(0, 1) v = v.transpose(0, 1) attn_weights = torch.bmm(q, k.transpose(1, 2)) if mask_future_timesteps: assert ( query.size() == key.size() ), "mask_future_timesteps only applies to self-attention" attn_weights *= torch.tril( attn_weights.data.new([1]).expand(tgt_len, tgt_len).clone(), diagonal=-1, )[:, :: self.head_index + 1 if self.downsample else 1].unsqueeze(0) attn_weights += torch.triu( attn_weights.data.new([-math.inf]).expand(tgt_len, tgt_len).clone(), diagonal=0, )[:, :: self.head_index + 1 if self.downsample else 1].unsqueeze(0) tgt_size = tgt_len if use_scalar_bias: attn_weights = scalar_bias(attn_weights, 2) v = scalar_bias(v, 1) tgt_size += 1 if key_padding_mask is not None: # don't attend to padding symbols if key_padding_mask.max() > 0: if self.downsample: attn_weights = attn_weights.view(bsz, 1, tgt_len, src_len) else: attn_weights = attn_weights.view( size, self.num_heads, tgt_len, src_len ) attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2), -math.inf, ) attn_weights = attn_weights.view(size, tgt_len, src_len) attn_weights = F.softmax(attn_weights, dim=-1) attn_weights = self.dropout_module(attn_weights) attn = torch.bmm(attn_weights, v) if self.downsample: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.head_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim) attn = self.out_proj(attn) return attn, attn_weights class DownsampledMultiHeadAttention(nn.ModuleList): """ Multi-headed attention with Gating and Downsampling """ def __init__( self, out_channels, embed_dim, num_heads, dropout=0.0, bias=True, project_input=True, gated=False, downsample=False, ): self.embed_dim = embed_dim self.num_heads = num_heads self.head_dim = embed_dim // num_heads self.downsample = downsample self.gated = gated self.project_input = project_input assert self.head_dim * num_heads == embed_dim if self.downsample: attention_heads = [] for index in range(self.num_heads): attention_heads.append( SingleHeadAttention( out_channels, self.embed_dim, self.head_dim, index, dropout, bias, self.project_input, self.gated, self.downsample, self.num_heads, ) ) super().__init__(modules=attention_heads) self.out_proj = Linear(embed_dim, out_channels, bias=bias) else: # either we have a list of attention heads, or just one attention head # if not being downsampled, we can do the heads with one linear layer instead of separate ones super().__init__() self.attention_module = SingleHeadAttention( out_channels, self.embed_dim, self.head_dim, 1, dropout, bias, self.project_input, self.gated, self.downsample, self.num_heads, ) def forward( self, query, key, value, mask_future_timesteps=False, key_padding_mask=None, use_scalar_bias=False, ): src_len, bsz, embed_dim = key.size() tgt_len = query.size(0) assert embed_dim == self.embed_dim assert list(query.size()) == 
[tgt_len, bsz, embed_dim] assert key.size() == value.size() tgt_size = tgt_len if use_scalar_bias: tgt_size += 1 attn = [] attn_weights = [] if self.downsample: for attention_head_number in range(self.num_heads): # call the forward of each attention head _attn, _attn_weight = self[attention_head_number]( query, key, value, mask_future_timesteps, key_padding_mask, use_scalar_bias, ) attn.append(_attn) attn_weights.append(_attn_weight) full_attn = torch.cat(attn, dim=2) full_attn = self.out_proj(full_attn) return full_attn, attn_weights[0].clone() else: _attn, _attn_weight = self.attention_module( query, key, value, mask_future_timesteps, key_padding_mask, use_scalar_bias, ) attn.append(_attn) attn_weights.append(_attn_weight) full_attn = torch.cat(attn, dim=2) full_attn_weights = torch.cat(attn_weights) full_attn_weights = full_attn_weights.view( bsz, self.num_heads, tgt_size, src_len ) full_attn_weights = full_attn_weights.sum(dim=1) / self.num_heads return full_attn, full_attn_weights class Downsample(nn.Module): """ Selects every nth element, where n is the index """ def __init__(self, index): super().__init__() self.index = index def forward(self, x): return x[:: self.index + 1] def Linear(in_features, out_features, dropout=0.0, bias=True): """Weight-normalized Linear layer (input: B x T x C)""" m = nn.Linear(in_features, out_features, bias=bias) m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features)) m.bias.data.zero_() return nn.utils.weight_norm(m) def GatedLinear(in_features, out_features, dropout=0.0, bias=True): """Weight-normalized Linear layer (input: B x T x C) with interspersed GLU units""" return nn.Sequential( Linear(in_features, out_features * 4, dropout, bias), nn.GLU(), Linear(out_features * 2, out_features * 2, dropout, bias), nn.GLU(), Linear(out_features, out_features, dropout, bias), )
EXA-1-master
exa/libraries/fairseq/fairseq/modules/downsampled_multihead_attention.py
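A small, hypothetical smoke test for the DownsampledMultiHeadAttention module defined above (import path taken from the file path; all sizes are arbitrary). Inputs follow the Time x Batch x Channel convention described in the docstring:

import torch

from fairseq.modules.downsampled_multihead_attention import (
    DownsampledMultiHeadAttention,
)

tgt_len, src_len, bsz, embed_dim, heads = 4, 6, 2, 32, 4
attn = DownsampledMultiHeadAttention(
    out_channels=embed_dim,
    embed_dim=embed_dim,
    num_heads=heads,
    dropout=0.0,
    gated=True,        # use the gated (GLU) input projections
    downsample=False,  # all heads handled by one fused SingleHeadAttention
)

query = torch.randn(tgt_len, bsz, embed_dim)
key = torch.randn(src_len, bsz, embed_dim)
value = torch.randn(src_len, bsz, embed_dim)

out, weights = attn(query, key, value)
print(out.shape)      # (tgt_len, bsz, embed_dim)
print(weights.shape)  # (bsz, tgt_len, src_len), averaged over heads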
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging

import torch
import torch.nn.functional as F

logger = logging.getLogger(__name__)


def _cross_entropy_pytorch(logits, target, ignore_index=None, reduction="mean"):
    lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
    return F.nll_loss(
        lprobs,
        target,
        ignore_index=ignore_index,
        reduction=reduction,
    )


try:
    import xentropy_cuda
    from apex.contrib import xentropy

    def cross_entropy(logits, target, ignore_index=-100, reduction="mean"):
        if logits.device == torch.device("cpu"):
            return _cross_entropy_pytorch(logits, target, ignore_index, reduction)
        else:
            if not getattr(cross_entropy, "_has_logged_once", False):
                logger.info("using fused cross entropy")
                cross_entropy._has_logged_once = True

            half_to_float = logits.dtype == torch.half
            losses = xentropy.SoftmaxCrossEntropyLoss.apply(
                logits,
                target,
                0.0,
                ignore_index,
                half_to_float,
            )

            if reduction == "sum":
                return losses.sum()
            elif reduction == "mean":
                if ignore_index >= 0:
                    return losses.sum() / target.ne(ignore_index).sum()
                else:
                    return losses.mean()
            elif reduction == "none":
                return losses
            else:
                raise NotImplementedError

except ImportError:

    def cross_entropy(logits, target, ignore_index=-100, reduction="mean"):
        return _cross_entropy_pytorch(logits, target, ignore_index, reduction)
EXA-1-master
exa/libraries/fairseq/fairseq/modules/cross_entropy.py
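A short usage sketch for the cross_entropy helper above. On CPU, or when the apex extension is not installed, it falls back to the plain PyTorch path; the sizes and ignore value below are illustrative:

import torch

from fairseq.modules.cross_entropy import cross_entropy

logits = torch.randn(8, 100, requires_grad=True)  # (batch, vocab)
target = torch.randint(0, 100, (8,))
target[0] = -100  # this position is ignored via ignore_index

loss = cross_entropy(logits, target, ignore_index=-100, reduction="mean")
loss.backward()
print(float(loss), logits.grad.shape)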
import torch


class RotaryPositionalEmbedding(torch.nn.Module):
    def __init__(self, dim, base=10000, precision=torch.half):
        """Rotary positional embedding
        Reference : https://blog.eleuther.ai/rotary-embeddings/
        Paper: https://arxiv.org/pdf/2104.09864.pdf

        Args:
            dim: Dimension of embedding
            base: Base value for exponential
            precision: precision to use for numerical values
        """
        super().__init__()
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)

        self.seq_len_cached = None
        self.cos_cached = None
        self.sin_cached = None
        self.precision = precision

    def forward(self, x, seq_len=None):
        """
        Args:
            x: Input x with T X B X C
            seq_len: Sequence length of input x
        """
        if seq_len != self.seq_len_cached:
            self.seq_len_cached = seq_len
            t = torch.arange(seq_len, device=x.device).type_as(self.inv_freq)
            freqs = torch.einsum("i,j->ij", t, self.inv_freq)
            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
            self.cos_cached = emb.cos()[:, None, None, :]
            self.sin_cached = emb.sin()[:, None, None, :]
        return self.cos_cached, self.sin_cached


# rotary pos emb helpers:
def rotate_half(x):
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat(
        (-x2, x1), dim=x1.ndim - 1
    )  # dim=-1 triggers a bug in earlier torch versions


def apply_rotary_pos_emb(q, k, cos, sin, offset: int = 0):
    cos, sin = (
        cos[offset : q.shape[0] + offset, ...],
        sin[offset : q.shape[0] + offset, ...],
    )
    return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
EXA-1-master
exa/libraries/fairseq/fairseq/modules/rotary_positional_embedding.py
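A minimal sketch of how the helpers above are typically combined: build the cached cos/sin tables once per sequence length, then rotate query and key tensors shaped (seq_len, batch, heads, head_dim). Sizes are illustrative:

import torch

from fairseq.modules.rotary_positional_embedding import (
    RotaryPositionalEmbedding,
    apply_rotary_pos_emb,
)

seq_len, bsz, heads, head_dim = 10, 2, 4, 16
rope = RotaryPositionalEmbedding(dim=head_dim)

q = torch.randn(seq_len, bsz, heads, head_dim)
k = torch.randn(seq_len, bsz, heads, head_dim)

cos, sin = rope(q, seq_len=seq_len)  # each: (seq_len, 1, 1, head_dim)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
print(q_rot.shape, k_rot.shape)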
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, List, Optional, Tuple import torch import torch.nn.functional as F from torch import Tensor, nn from torch.nn import Parameter try: from xformers.components.attention import build_attention from xformers.components.attention.utils import maybe_merge_masks _xformers_available = True except ImportError: _xformers_available = False from fairseq import utils from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.quant_noise import quant_noise from fairseq.models.fairseq_incremental_decoder import FairseqIncrementalDecoder # TODO: move this into xformers? # TODO: uint8 input type should just output a bool def _mask_for_xformers(mask: Tensor, to_dtype: Optional[torch.dtype] = None): """ call to pytorch multihead accepts three mask types: - ByteTensor where non-zero means to mask - FloatTensor which is an additive mask - BoolTensor where True means to mask xFormers currently accepts boolean and additive maks. For boolean masks the values have opposite meaning. For a BoolTensor True mean to keep the value. """ float_types = [torch.float, torch.float16] # If an input mask is a float it is an additive mask. Otherwise it is either uint8 or bool. additive = mask.dtype in float_types # If to_dype is not specified, keep same dtype as mask. to_dtype = mask.dtype if to_dtype is None else to_dtype to_additive = to_dtype in float_types if additive: if to_additive: return mask.to(to_dtype) mask = mask < 0 if to_additive: # return additive mask new_mask = torch.zeros_like(mask, dtype=to_dtype) new_mask = new_mask.masked_fill_(mask, -float("inf")) return new_mask # In xFormers True is value to keep rather than value to mask mask = ~mask.to(torch.bool) mask = mask.to(to_dtype) return mask class MultiheadAttention(FairseqIncrementalDecoder): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, dictionary=None, q_noise=0.0, qn_block_size=8, # TODO: pass in config rather than string. 
# config defined in xformers.components.attention.AttentionConfig xformers_att_config: Optional[str] = None, xformers_blocksparse_layout: Optional[ torch.Tensor ] = None, # This should be part of the config xformers_blocksparse_blocksize: Optional[ int ] = 16, # This should be part of the config ): super().__init__(dictionary) xformers_att_config = utils.eval_str_dict(xformers_att_config) self.use_xformers = xformers_att_config is not None if self.use_xformers and not _xformers_available: raise ImportError("\n\n Please install xFormers.") self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim**-0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) self.k_proj = quant_noise( nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.v_proj = quant_noise( nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.q_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) self.out_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.beam_size = 1 self.reset_parameters() if self.use_xformers: xformers_att_config["dropout"] = xformers_att_config.get("dropout", dropout) xformers_att_config["num_heads"] = xformers_att_config.get( "num_heads", num_heads ) if xformers_blocksparse_layout is not None: # Could be part of a single config passed only once xformers_att_config["block_size"] = xformers_blocksparse_blocksize xformers_att_config["layout"] = xformers_blocksparse_layout xformers_att_config["name"] = "blocksparse" self.attention = build_attention(xformers_att_config) self.onnx_trace = False self.skip_embed_dim_check = False self.init_incremental_state() def prepare_for_onnx_export_(self): self.onnx_trace = True def reset_parameters(self): if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.out_proj.bias is not None: nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def _get_reserve_head_index(self, num_heads_to_keep: int): k_proj_heads_norm = [] q_proj_heads_norm = [] v_proj_heads_norm = [] for i in range(self.num_heads): start_idx = i * self.head_dim end_idx = (i + 1) * self.head_dim k_proj_heads_norm.append( torch.sum( torch.abs( 
self.k_proj.weight[ start_idx:end_idx, ] ) ).tolist() + torch.sum(torch.abs(self.k_proj.bias[start_idx:end_idx])).tolist() ) q_proj_heads_norm.append( torch.sum( torch.abs( self.q_proj.weight[ start_idx:end_idx, ] ) ).tolist() + torch.sum(torch.abs(self.q_proj.bias[start_idx:end_idx])).tolist() ) v_proj_heads_norm.append( torch.sum( torch.abs( self.v_proj.weight[ start_idx:end_idx, ] ) ).tolist() + torch.sum(torch.abs(self.v_proj.bias[start_idx:end_idx])).tolist() ) heads_norm = [] for i in range(self.num_heads): heads_norm.append( k_proj_heads_norm[i] + q_proj_heads_norm[i] + v_proj_heads_norm[i] ) sorted_head_index = sorted( range(self.num_heads), key=lambda k: heads_norm[k], reverse=True ) reserve_head_index = [] for i in range(num_heads_to_keep): start = sorted_head_index[i] * self.head_dim end = (sorted_head_index[i] + 1) * self.head_dim reserve_head_index.append((start, end)) return reserve_head_index def _adaptive_prune_heads(self, reserve_head_index: List[Tuple[int, int]]): new_q_weight = [] new_q_bias = [] new_k_weight = [] new_k_bias = [] new_v_weight = [] new_v_bias = [] new_out_proj_weight = [] for ele in reserve_head_index: start_idx, end_idx = ele new_q_weight.append( self.q_proj.weight[ start_idx:end_idx, ] ) new_q_bias.append(self.q_proj.bias[start_idx:end_idx]) new_k_weight.append( self.k_proj.weight[ start_idx:end_idx, ] ) new_k_bias.append(self.k_proj.bias[start_idx:end_idx]) new_v_weight.append( self.v_proj.weight[ start_idx:end_idx, ] ) new_v_bias.append(self.v_proj.bias[start_idx:end_idx]) new_out_proj_weight.append(self.out_proj.weight[:, start_idx:end_idx]) new_q_weight = torch.cat(new_q_weight).detach() new_k_weight = torch.cat(new_k_weight).detach() new_v_weight = torch.cat(new_v_weight).detach() new_out_proj_weight = torch.cat(new_out_proj_weight, dim=-1).detach() new_q_weight.requires_grad = True new_k_weight.requires_grad = True new_v_weight.requires_grad = True new_out_proj_weight.requires_grad = True new_q_bias = torch.cat(new_q_bias).detach() new_q_bias.requires_grad = True new_k_bias = torch.cat(new_k_bias).detach() new_k_bias.requires_grad = True new_v_bias = torch.cat(new_v_bias).detach() new_v_bias.requires_grad = True self.q_proj.weight = torch.nn.Parameter(new_q_weight) self.q_proj.bias = torch.nn.Parameter(new_q_bias) self.k_proj.weight = torch.nn.Parameter(new_k_weight) self.k_proj.bias = torch.nn.Parameter(new_k_bias) self.v_proj.weight = torch.nn.Parameter(new_v_weight) self.v_proj.bias = torch.nn.Parameter(new_v_bias) self.out_proj.weight = torch.nn.Parameter(new_out_proj_weight) self.num_heads = len(reserve_head_index) self.embed_dim = self.head_dim * self.num_heads self.q_proj.out_features = self.embed_dim self.k_proj.out_features = self.embed_dim self.v_proj.out_features = self.embed_dim def _set_skip_embed_dim_check(self): self.skip_embed_dim_check = True def _pad_masks( self, key_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor], ) -> Tuple[Optional[Tensor], Optional[Tensor]]: if attn_mask is not None: shape = attn_mask.size()[:-1] + torch.Size([1]) attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(shape)], dim=-1) if key_padding_mask is not None: shape = key_padding_mask.size()[:-1] + torch.Size([1]) key_padding_mask = torch.cat( [ key_padding_mask, key_padding_mask.new_zeros(shape), ], dim=-1, ) return key_padding_mask, attn_mask def _add_bias( self, k: Tensor, v: Tensor, key_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor], bsz: int, ) -> Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]: assert self.bias_k 
is not None assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) key_padding_mask, attn_mask = self._pad_masks( key_padding_mask=key_padding_mask, attn_mask=attn_mask ) return k, v, key_padding_mask, attn_mask def _append_zero_attn( self, k: Tensor, v: Tensor, key_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor], ) -> Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]: zero_attn_shape = k.size()[:-2] + torch.Size([1]) + k.size()[-1:] k = torch.cat( [k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=-2 ) v = torch.cat( [v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=-2 ) key_padding_mask, attn_mask = self._pad_masks( key_padding_mask=key_padding_mask, attn_mask=attn_mask ) return k, v, key_padding_mask, attn_mask def _xformers_attn_forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, need_weights: bool = True, attn_mask: Optional[Tensor] = None, ) -> Tuple[Tensor, Optional[Tensor]]: tgt_len, bsz, embed_dim = query.size() if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == tgt_len if self.self_attention: key = query value = query elif self.encoder_decoder_attention: value = key q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) if self.bias_k is not None: assert self.bias_v is not None k, v, attn_mask, key_padding_mask = self._add_bias( k, v, attn_mask, key_padding_mask, bsz ) def fold_heads(x): return ( x.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) def split_heads(x): return ( x.contiguous() .view(-1, bsz, self.num_heads, self.head_dim) .transpose(0, 1) .transpose(1, 2) ) massage = split_heads if self.attention.requires_head_dimension else fold_heads q = massage(q) if k is not None: k = massage(k) if v is not None: v = massage(v) if self.add_zero_attn: k, v, key_padding_mask, attn_mask = self._append_zero_attn( k=k, v=v, key_padding_mask=key_padding_mask, attn_mask=attn_mask ) kwargs = {} if attn_mask is not None and self.attention.supports_attention_mask: attn_mask = _mask_for_xformers(attn_mask, to_dtype=q.dtype) kwargs["att_mask"] = attn_mask if key_padding_mask is not None: key_padding_mask = _mask_for_xformers(key_padding_mask, to_dtype=torch.bool) if not self.attention.requires_separate_masks: attn_mask = maybe_merge_masks( attn_mask, key_padding_mask, batch_size=bsz, src_len=k.size(-2), tgt_len=q.size(-2), num_heads=self.num_heads, ) key_padding_mask = None kwargs["att_mask"] = attn_mask if self.attention.supports_key_padding_mask: kwargs["key_padding_mask"] = key_padding_mask y = self.attention(q, k, v, **kwargs) y = ( y.view(bsz, self.num_heads, tgt_len, self.head_dim) .transpose(1, 2) .flatten(start_dim=2, end_dim=3) .transpose(0, 1) ) assert list(y.size()) == [tgt_len, bsz, embed_dim] # Dropout not needed because already applied in attention. # It is applied to the attention weights before matmul with v. y = self.out_proj(y) # TODO: support returning attention weights if needed. 
return y, None def forward( self, query: Tensor, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). before_softmax (bool, optional): return the raw attention weights and values before the attention softmax. need_head_weights (bool, optional): return the attention weights for each head. Implies *need_weights*. Default: return the average attention weights over all heads. """ if need_head_weights: need_weights = True is_tpu = query.device.type == "xla" tgt_len, bsz, embed_dim = query.size() src_len = tgt_len if not self.skip_embed_dim_check: assert ( embed_dim == self.embed_dim ), f"query dim {embed_dim} != {self.embed_dim}" assert list(query.size()) == [tgt_len, bsz, embed_dim] if key is not None: src_len, key_bsz, _ = key.size() if not torch.jit.is_scripting(): assert value is not None assert src_len, key_bsz == value.shape[:2] if ( not self.onnx_trace and not is_tpu # don't use PyTorch version on TPUs and incremental_state is None and not static_kv # A workaround for quantization to work. Otherwise JIT compilation # treats bias in linear module as method. and not torch.jit.is_scripting() # The Multihead attention implemented in pytorch forces strong dimension check # for input embedding dimention and K,Q,V projection dimension. 
# Since pruning will break the dimension check and it is not easy to modify the pytorch API, # it is preferred to bypass the pytorch MHA when we need to skip embed_dim_check and not self.skip_embed_dim_check ): assert key is not None and value is not None if self.use_xformers: return self._xformers_attn_forward( query, key, value, key_padding_mask, need_weights, attn_mask ) else: return F.multi_head_attention_forward( query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), self.bias_k, self.bias_v, self.add_zero_attn, self.dropout_module.p, self.out_proj.weight, self.out_proj.bias, self.training or self.dropout_module.apply_during_inference, key_padding_mask.bool() if key_padding_mask is not None else None, need_weights, attn_mask, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, ) if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if saved_state is not None and "prev_key" in saved_state: # previous time steps are cached - no need to recompute # key and value if they are static if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) elif self.encoder_decoder_attention: # encoder-decoder attention q = self.q_proj(query) if key is None: assert value is None k = v = None else: if self.beam_size > 1 and bsz == key.size(1): # key is [T, bsz*beam_size, C], reduce to [T, bsz, C] key = key.view(key.size(0), -1, self.beam_size, key.size(2))[ :, :, 0, : ] if key_padding_mask is not None: key_padding_mask = key_padding_mask.view( -1, self.beam_size, key_padding_mask.size(1) )[:, 0, :] k = self.k_proj(key) v = self.v_proj(key) else: assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q *= self.scaling if self.bias_k is not None: assert self.bias_v is not None k, v, attn_mask, key_padding_mask = self._add_bias( k, v, attn_mask, key_padding_mask, bsz ) q = ( q.contiguous() .view(tgt_len, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) kv_bsz = bsz # need default value for scripting if k is not None: kv_bsz = k.size(1) k = ( k.contiguous() .view(-1, kv_bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if v is not None: v = ( v.contiguous() .view(-1, kv_bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if saved_state is not None: # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None kv_bsz = _prev_key.size(0) prev_key = _prev_key.view(kv_bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) src_len = k.size(1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None assert kv_bsz == _prev_value.size(0) prev_value = _prev_value.view( kv_bsz * self.num_heads, -1, self.head_dim ) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: Optional[Tensor] = None if "prev_key_padding_mask" in saved_state: prev_key_padding_mask = saved_state["prev_key_padding_mask"] assert k is not None and v is not None key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( 
key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=kv_bsz, src_len=k.size(1), static_kv=static_kv, ) saved_state["prev_key"] = k.view(kv_bsz, self.num_heads, -1, self.head_dim) saved_state["prev_value"] = v.view( kv_bsz, self.num_heads, -1, self.head_dim ) saved_state["prev_key_padding_mask"] = key_padding_mask # In this branch incremental_state is never None assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) assert k is not None assert k.size(1) == src_len # This is part of a workaround to get around fork/join parallelism # not supporting Optional types. if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: assert key_padding_mask.size(0) == kv_bsz assert key_padding_mask.size(1) == src_len if self.add_zero_attn: assert v is not None src_len += 1 k, v, key_padding_mask, attn_mask = self._append_zero_attn( k=k, v=v, key_padding_mask=key_padding_mask, attn_mask=attn_mask ) if self.encoder_decoder_attention and bsz != kv_bsz: attn_weights = torch.einsum( "bxhtd,bhsd->bxhts", q.view((kv_bsz, -1, self.num_heads) + q.size()[1:]), k.view((kv_bsz, self.num_heads) + k.size()[1:]), ) attn_weights = attn_weights.reshape((-1,) + attn_weights.size()[-2:]) else: attn_weights = torch.bmm(q, k.transpose(1, 2)) attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) if self.onnx_trace: attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1) attn_weights += attn_mask if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) if not is_tpu: attn_weights = attn_weights.view( kv_bsz, -1, self.num_heads, tgt_len, src_len ) attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1) .unsqueeze(2) .unsqueeze(3) .to(torch.bool), float("-inf"), ) else: attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf")) attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if before_softmax: return attn_weights, v attn_weights_float = utils.softmax( attn_weights, dim=-1, onnx_trace=self.onnx_trace ) attn_weights = attn_weights_float.type_as(attn_weights) attn_probs = self.dropout_module(attn_weights) assert v is not None attn: Optional[Tensor] = None if self.encoder_decoder_attention and bsz != kv_bsz: attn = torch.einsum( "bxhts,bhsd->bxhtd", attn_probs.view( ( kv_bsz, -1, self.num_heads, ) + attn_probs.size()[1:] ), v.view( ( kv_bsz, self.num_heads, ) + v.size()[1:] ), ) attn = attn.reshape((-1,) + attn.size()[-2:]) else: attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] if self.onnx_trace and attn.size(1) == 1: # when ONNX tracing a single decoder step (sequence length == 1) # the transpose is a no-op copy before view, thus unnecessary attn = attn.contiguous().view(tgt_len, bsz, self.embed_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim) attn = self.out_proj(attn) attn_weights: Optional[Tensor] = None if need_weights: attn_weights = attn_weights_float.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) if not need_head_weights: # average attention weights over heads attn_weights = 
attn_weights.mean(dim=0) return attn, attn_weights @staticmethod def _append_prev_key_padding_mask( key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool, ) -> Optional[Tensor]: # saved key padding masks have shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 ) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: if src_len > prev_key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - prev_key_padding_mask.size(1)), device=prev_key_padding_mask.device, ) new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), filler.float()], dim=1 ) else: new_key_padding_mask = prev_key_padding_mask.float() elif key_padding_mask is not None: if src_len > key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - key_padding_mask.size(1)), device=key_padding_mask.device, ) new_key_padding_mask = torch.cat( [filler.float(), key_padding_mask.float()], dim=1 ) else: new_key_padding_mask = key_padding_mask.float() else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask @torch.jit.export def reorder_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_order: Tensor, ): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer_k = input_buffer[k] if input_buffer_k is not None: if self.encoder_decoder_attention: if input_buffer_k.size(0) * self.beam_size == new_order.size(0): return incremental_state elif self.beam_size > 1: input_buffer[k] = input_buffer_k.index_select( 0, new_order.reshape(-1, self.beam_size)[:, 0] // self.beam_size, ) else: input_buffer[k] = input_buffer_k.index_select(0, new_order) else: input_buffer[k] = input_buffer_k.index_select(0, new_order) incremental_state = self._set_input_buffer(incremental_state, input_buffer) return incremental_state def set_beam_size(self, beam_size): """Used for effiecient beamable enc-dec attention""" self.beam_size = beam_size def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = self.get_incremental_state(incremental_state, "attn_state") if result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, "attn_state", buffer) def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int): return attn_weights def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." 
if name != "" else "" items_to_add = {} keys_to_remove = [] for k in state_dict.keys(): if k.endswith(prefix + "in_proj_weight"): # in_proj_weight used to be q + k + v with same dimensions dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim] items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim] items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :] keys_to_remove.append(k) k_bias = prefix + "in_proj_bias" if k_bias in state_dict.keys(): dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim] items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][ dim : 2 * dim ] items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :] keys_to_remove.append(prefix + "in_proj_bias") for k in keys_to_remove: del state_dict[k] for key, value in items_to_add.items(): state_dict[key] = value
EXA-1-master
exa/libraries/fairseq/fairseq/modules/multihead_attention.py
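A minimal self-attention example for the MultiheadAttention module above (import path from the file path shown; the sizes and causal mask are illustrative). Inputs are Time x Batch x Channel:

import torch

from fairseq.modules.multihead_attention import MultiheadAttention

tgt_len, bsz, embed_dim, heads = 5, 2, 16, 4
mha = MultiheadAttention(embed_dim, heads, dropout=0.0, self_attention=True)

x = torch.randn(tgt_len, bsz, embed_dim)
# Additive causal mask: -inf above the diagonal blocks attention to future steps.
attn_mask = torch.triu(x.new_full((tgt_len, tgt_len), float("-inf")), diagonal=1)

out, weights = mha(query=x, key=x, value=x, attn_mask=attn_mask, need_weights=True)
print(out.shape)      # (tgt_len, bsz, embed_dim)
print(weights.shape)  # attention weights averaged over heads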
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import absolute_import, division, print_function, unicode_literals

from collections.abc import Iterable
from itertools import repeat

import torch
import torch.nn as nn


def _pair(v):
    if isinstance(v, Iterable):
        assert len(v) == 2, "len(v) != 2"
        return v
    return tuple(repeat(v, 2))


def infer_conv_output_dim(conv_op, input_dim, sample_inchannel):
    sample_seq_len = 200
    sample_bsz = 10
    x = torch.randn(sample_bsz, sample_inchannel, sample_seq_len, input_dim)
    # N x C x H x W
    # N: sample_bsz, C: sample_inchannel, H: sample_seq_len, W: input_dim
    x = conv_op(x)
    # N x C x H x W
    x = x.transpose(1, 2)
    # N x H x C x W
    bsz, seq = x.size()[:2]
    per_channel_dim = x.size()[3]
    # bsz: N, seq: H, CxW the rest
    return x.contiguous().view(bsz, seq, -1).size(-1), per_channel_dim


class VGGBlock(torch.nn.Module):
    """
    VGG-motivated CNN module https://arxiv.org/pdf/1409.1556.pdf

    Args:
        in_channels: (int) number of input channels (typically 1)
        out_channels: (int) number of output channels
        conv_kernel_size: size of the convolving kernel
        pooling_kernel_size: the size of the pooling window to take a max over
        num_conv_layers: (int) number of convolution layers
        input_dim: (int) input dimension
        conv_stride: the stride of the convolving kernel.
            Can be a single number or a tuple (sH, sW)  Default: 1
        padding: implicit paddings on both sides of the input.
            Can be a single number or a tuple (padH, padW). Default: None
        layer_norm: (bool) if layer norm is going to be applied. Default: False

    Shape:
        Input: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)
        Output: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        conv_kernel_size,
        pooling_kernel_size,
        num_conv_layers,
        input_dim,
        conv_stride=1,
        padding=None,
        layer_norm=False,
    ):
        assert (
            input_dim is not None
        ), "Need input_dim for LayerNorm and infer_conv_output_dim"
        super(VGGBlock, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.conv_kernel_size = _pair(conv_kernel_size)
        self.pooling_kernel_size = _pair(pooling_kernel_size)
        self.num_conv_layers = num_conv_layers
        self.padding = (
            tuple(e // 2 for e in self.conv_kernel_size)
            if padding is None
            else _pair(padding)
        )
        self.conv_stride = _pair(conv_stride)

        self.layers = nn.ModuleList()
        for layer in range(num_conv_layers):
            conv_op = nn.Conv2d(
                in_channels if layer == 0 else out_channels,
                out_channels,
                self.conv_kernel_size,
                stride=self.conv_stride,
                padding=self.padding,
            )
            self.layers.append(conv_op)
            if layer_norm:
                conv_output_dim, per_channel_dim = infer_conv_output_dim(
                    conv_op, input_dim, in_channels if layer == 0 else out_channels
                )
                self.layers.append(nn.LayerNorm(per_channel_dim))
                input_dim = per_channel_dim
            self.layers.append(nn.ReLU())

        if self.pooling_kernel_size is not None:
            pool_op = nn.MaxPool2d(kernel_size=self.pooling_kernel_size, ceil_mode=True)
            self.layers.append(pool_op)
            self.total_output_dim, self.output_dim = infer_conv_output_dim(
                pool_op, input_dim, out_channels
            )

    def forward(self, x):
        for i, _ in enumerate(self.layers):
            x = self.layers[i](x)
        return x
EXA-1-master
exa/libraries/fairseq/fairseq/modules/vggblock.py
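An illustrative forward pass through the VGGBlock above, using the BxCxTxfeat layout from its docstring (all sizes are made up):

import torch

from fairseq.modules.vggblock import VGGBlock

block = VGGBlock(
    in_channels=1,
    out_channels=32,
    conv_kernel_size=3,
    pooling_kernel_size=2,
    num_conv_layers=2,
    input_dim=40,
    layer_norm=True,
)

x = torch.randn(8, 1, 100, 40)  # (batch, channels, timesteps, features)
y = block(x)
print(y.shape)           # (8, 32, 50, 20) after the 2x2 max-pooling
print(block.output_dim)  # per-channel feature dim seen by downstream layers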
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Dict, Optional

import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from torch import Tensor


class LearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    Padding ids are ignored by either offsetting based on padding_idx
    or by setting padding_idx to None and ensuring that the appropriate
    position ids are passed to the forward function.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.onnx_trace = False
        if self.padding_idx is not None:
            self.max_positions = self.num_embeddings - self.padding_idx - 1
        else:
            self.max_positions = self.num_embeddings

    def forward(
        self,
        input: Tensor,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        positions: Optional[Tensor] = None,
    ):
        """Input is expected to be of size [bsz x seqlen]."""
        assert (positions is None) or (
            self.padding_idx is None
        ), "If positions is pre-computed then padding_idx should not be set."

        if positions is None:
            if incremental_state is not None:
                # positions is the same for every token when decoding a single step
                # Without the int() cast, it doesn't work in some cases when exporting to ONNX
                positions = torch.zeros(
                    (1, 1), device=input.device, dtype=input.dtype
                ).fill_(int(self.padding_idx + input.size(1)))
            else:
                positions = utils.make_positions(
                    input, self.padding_idx, onnx_trace=self.onnx_trace
                )
        return F.embedding(
            positions,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )
EXA-1-master
exa/libraries/fairseq/fairseq/modules/learned_positional_embedding.py
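A small usage sketch for LearnedPositionalEmbedding (the padding index and sizes are illustrative). Position ids are derived from the token tensor, skipping padding positions:

import torch

from fairseq.modules.learned_positional_embedding import LearnedPositionalEmbedding

pad_idx = 1
max_positions = 128
emb = LearnedPositionalEmbedding(
    num_embeddings=max_positions + pad_idx + 1,
    embedding_dim=16,
    padding_idx=pad_idx,
)

tokens = torch.tensor([[5, 6, 7, pad_idx], [8, 9, pad_idx, pad_idx]])  # bsz x seqlen
pos_emb = emb(tokens)
print(pos_emb.shape)      # (2, 4, 16)
print(emb.max_positions)  # 128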
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch


class GradMultiply(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, scale):
        ctx.scale = scale
        res = x.new(x)
        return res

    @staticmethod
    def backward(ctx, grad):
        return grad * ctx.scale, None
EXA-1-master
exa/libraries/fairseq/fairseq/modules/grad_multiply.py
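GradMultiply is an identity in the forward pass and scales gradients in the backward pass (fairseq uses it, for example, to damp encoder gradients). A tiny check, with an arbitrary scale of 0.5:

import torch

from fairseq.modules.grad_multiply import GradMultiply

x = torch.randn(3, requires_grad=True)
y = GradMultiply.apply(x, 0.5)  # same values as x; gradients scaled by 0.5
y.sum().backward()
print(x.grad)  # every entry is 0.5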
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import List, Tuple import torch import torch.nn.functional as F from fairseq.data import Dictionary from torch import nn CHAR_PAD_IDX = 0 CHAR_EOS_IDX = 257 logger = logging.getLogger(__name__) class CharacterTokenEmbedder(torch.nn.Module): def __init__( self, vocab: Dictionary, filters: List[Tuple[int, int]], char_embed_dim: int, word_embed_dim: int, highway_layers: int, max_char_len: int = 50, char_inputs: bool = False, ): super(CharacterTokenEmbedder, self).__init__() self.onnx_trace = False self.embedding_dim = word_embed_dim self.max_char_len = max_char_len self.char_embeddings = nn.Embedding(257, char_embed_dim, padding_idx=0) self.symbol_embeddings = nn.Parameter(torch.FloatTensor(2, word_embed_dim)) self.eos_idx, self.unk_idx = 0, 1 self.char_inputs = char_inputs self.convolutions = nn.ModuleList() for width, out_c in filters: self.convolutions.append( nn.Conv1d(char_embed_dim, out_c, kernel_size=width) ) last_dim = sum(f[1] for f in filters) self.highway = Highway(last_dim, highway_layers) if highway_layers > 0 else None self.projection = nn.Linear(last_dim, word_embed_dim) assert ( vocab is not None or char_inputs ), "vocab must be set if not using char inputs" self.vocab = None if vocab is not None: self.set_vocab(vocab, max_char_len) self.reset_parameters() def prepare_for_onnx_export_(self): self.onnx_trace = True def set_vocab(self, vocab, max_char_len): word_to_char = torch.LongTensor(len(vocab), max_char_len) truncated = 0 for i in range(len(vocab)): if i < vocab.nspecial: char_idxs = [0] * max_char_len else: chars = vocab[i].encode() # +1 for padding char_idxs = [c + 1 for c in chars] + [0] * (max_char_len - len(chars)) if len(char_idxs) > max_char_len: truncated += 1 char_idxs = char_idxs[:max_char_len] word_to_char[i] = torch.LongTensor(char_idxs) if truncated > 0: logger.info( "truncated {} words longer than {} characters".format( truncated, max_char_len ) ) self.vocab = vocab self.word_to_char = word_to_char @property def padding_idx(self): return Dictionary().pad() if self.vocab is None else self.vocab.pad() def reset_parameters(self): nn.init.xavier_normal_(self.char_embeddings.weight) nn.init.xavier_normal_(self.symbol_embeddings) nn.init.xavier_uniform_(self.projection.weight) nn.init.constant_( self.char_embeddings.weight[self.char_embeddings.padding_idx], 0.0 ) nn.init.constant_(self.projection.bias, 0.0) def forward( self, input: torch.Tensor, ): if self.char_inputs: chars = input.view(-1, self.max_char_len) pads = chars[:, 0].eq(CHAR_PAD_IDX) eos = chars[:, 0].eq(CHAR_EOS_IDX) if eos.any(): if self.onnx_trace: chars = torch.where(eos.unsqueeze(1), chars.new_zeros(1), chars) else: chars[eos] = 0 unk = None else: flat_words = input.view(-1) chars = self.word_to_char[flat_words.type_as(self.word_to_char)].type_as( input ) pads = flat_words.eq(self.vocab.pad()) eos = flat_words.eq(self.vocab.eos()) unk = flat_words.eq(self.vocab.unk()) word_embs = self._convolve(chars) if self.onnx_trace: if pads.any(): word_embs = torch.where( pads.unsqueeze(1), word_embs.new_zeros(1), word_embs ) if eos.any(): word_embs = torch.where( eos.unsqueeze(1), self.symbol_embeddings[self.eos_idx], word_embs ) if unk is not None and unk.any(): word_embs = torch.where( unk.unsqueeze(1), self.symbol_embeddings[self.unk_idx], word_embs ) else: if pads.any(): word_embs[pads] = 0 if eos.any(): 
word_embs[eos] = self.symbol_embeddings[self.eos_idx] if unk is not None and unk.any(): word_embs[unk] = self.symbol_embeddings[self.unk_idx] return word_embs.view(input.size()[:2] + (-1,)) def _convolve( self, char_idxs: torch.Tensor, ): char_embs = self.char_embeddings(char_idxs) char_embs = char_embs.transpose(1, 2) # BTC -> BCT conv_result = [] for conv in self.convolutions: x = conv(char_embs) x, _ = torch.max(x, -1) x = F.relu(x) conv_result.append(x) x = torch.cat(conv_result, dim=-1) if self.highway is not None: x = self.highway(x) x = self.projection(x) return x class Highway(torch.nn.Module): """ A `Highway layer <https://arxiv.org/abs/1505.00387>`_. Adopted from the AllenNLP implementation. """ def __init__(self, input_dim: int, num_layers: int = 1): super(Highway, self).__init__() self.input_dim = input_dim self.layers = nn.ModuleList( [nn.Linear(input_dim, input_dim * 2) for _ in range(num_layers)] ) self.activation = nn.ReLU() self.reset_parameters() def reset_parameters(self): for layer in self.layers: # As per comment in AllenNLP: # We should bias the highway layer to just carry its input forward. We do that by # setting the bias on `B(x)` to be positive, because that means `g` will be biased to # be high, so we will carry the input forward. The bias on `B(x)` is the second half # of the bias vector in each Linear layer. nn.init.constant_(layer.bias[self.input_dim :], 1) nn.init.constant_(layer.bias[: self.input_dim], 0) nn.init.xavier_normal_(layer.weight) def forward(self, x: torch.Tensor): for layer in self.layers: projection = layer(x) proj_x, gate = projection.chunk(2, dim=-1) proj_x = self.activation(proj_x) gate = torch.sigmoid(gate) x = gate * x + (gate.new_tensor([1]) - gate) * proj_x return x
EXA-1-master
exa/libraries/fairseq/fairseq/modules/character_token_embedder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import functools from typing import Any, Dict, List, Tuple, Union import torch import torch.utils.checkpoint as checkpoint from fairseq import utils def checkpoint_wrapper(m, offload_to_cpu=False): """ A friendlier wrapper for performing activation checkpointing. Compared to the PyTorch version, this version: - wraps an nn.Module, so that all subsequent calls will use checkpointing - handles keyword arguments in the forward - handles non-Tensor outputs from the forward Usage:: checkpointed_module = checkpoint_wrapper(my_module, offload_to_cpu=True) a, b = checkpointed_module(x, y=3, z=torch.Tensor([1])) """ # should I check whether original_forward has already been set? assert not hasattr( m, "precheckpoint_forward" ), "checkpoint function has already been applied?" m.precheckpoint_forward = m.forward m.forward = functools.partial( _checkpointed_forward, m.precheckpoint_forward, # original_forward offload_to_cpu, ) return m def unwrap_checkpoint(m: torch.nn.Module): """ unwrap a module and its children from checkpoint_wrapper """ for module in m.modules(): if hasattr(module, "precheckpoint_forward"): module.forward = module.precheckpoint_forward del module.precheckpoint_forward if hasattr(module, "old_deepcopy_method"): module.__deepcopy__ = module.old_deepcopy_method del module.old_deepcopy_method return m def _checkpointed_forward(original_forward, offload_to_cpu, *args, **kwargs): # Autograd Functions in PyTorch work best with positional args, since # the backward must return gradients (or None) for every input argument. # We can flatten keyword arguments to make this easier. 
kwarg_keys, flat_args = pack_kwargs(*args, **kwargs) parent_ctx_dict = {"offload": offload_to_cpu} output = CheckpointFunction.apply( original_forward, parent_ctx_dict, kwarg_keys, *flat_args ) if isinstance(output, torch.Tensor): return output else: packed_non_tensor_outputs = parent_ctx_dict["packed_non_tensor_outputs"] if packed_non_tensor_outputs: output = unpack_non_tensors(output, packed_non_tensor_outputs) return output def pack_kwargs(*args, **kwargs) -> Tuple[List[str], List[Any]]: """ Usage:: kwarg_keys, flat_args = pack_kwargs(1, 2, a=3, b=4) args, kwargs = unpack_kwargs(kwarg_keys, flat_args) assert args == [1, 2] assert kwargs == {"a": 3, "b": 4} """ kwarg_keys = [] flat_args = list(args) for k, v in kwargs.items(): kwarg_keys.append(k) flat_args.append(v) return kwarg_keys, flat_args def unpack_kwargs( kwarg_keys: List[str], flat_args: List[Any] ) -> Tuple[List[Any], Dict[str, Any]]: if len(kwarg_keys) == 0: return flat_args, {} args = flat_args[: -len(kwarg_keys)] kwargs = {k: v for k, v in zip(kwarg_keys, flat_args[-len(kwarg_keys) :])} return args, kwargs def split_non_tensors( mixed: Union[torch.Tensor, Tuple[Any]] ) -> Tuple[Tuple[torch.Tensor], Dict[str, List[Any]]]: """ Usage:: x = torch.Tensor([1]) y = torch.Tensor([2]) tensors, packed_non_tensors = split_non_tensors((x, y, None, 3)) recon = unpack_non_tensors(tensors, packed_non_tensors) assert recon == (x, y, None, 3) """ if isinstance(mixed, torch.Tensor): return (mixed,), None tensors = [] packed_non_tensors = {"is_tensor": [], "objects": []} for o in mixed: if isinstance(o, torch.Tensor): packed_non_tensors["is_tensor"].append(True) tensors.append(o) else: packed_non_tensors["is_tensor"].append(False) packed_non_tensors["objects"].append(o) return tuple(tensors), packed_non_tensors def unpack_non_tensors( tensors: Tuple[torch.Tensor], packed_non_tensors: Dict[str, List[Any]], ) -> Tuple[Any]: if packed_non_tensors is None: return tensors assert isinstance(packed_non_tensors, dict) mixed = [] is_tensor_list = packed_non_tensors["is_tensor"] objects = packed_non_tensors["objects"] assert len(tensors) + len(objects) == len(is_tensor_list) obj_i = tnsr_i = 0 for is_tensor in is_tensor_list: if is_tensor: mixed.append(tensors[tnsr_i]) tnsr_i += 1 else: mixed.append(objects[obj_i]) obj_i += 1 return tuple(mixed) class CheckpointFunction(torch.autograd.Function): """Similar to the torch version, but support non-Tensor outputs. The caller is expected to provide a dict (*parent_ctx_dict*) that will hold the non-Tensor outputs. These should be combined with the Tensor *outputs* by calling ``unpack_non_tensors``. 
""" @staticmethod def forward(ctx, run_function, parent_ctx_dict, kwarg_keys, *args): if torch.is_grad_enabled(): # grad may be disabled, e.g., during validation checkpoint.check_backward_validity(args) ctx.run_function = run_function ctx.kwarg_keys = kwarg_keys ctx.fwd_rng_state = utils.get_rng_state() tensor_inputs, packed_non_tensor_inputs = split_non_tensors(args) if parent_ctx_dict["offload"]: ctx.fwd_device = tuple(x.device for x in tensor_inputs) ctx.grad_requirements = tuple(x.requires_grad for x in tensor_inputs) tensor_inputs = tuple( x.to(torch.device("cpu"), non_blocking=True) for x in tensor_inputs ) else: ctx.fwd_device, ctx.grad_requirements = None, None ctx.save_for_backward(*tensor_inputs) ctx.packed_non_tensor_inputs = packed_non_tensor_inputs with torch.no_grad(): unpacked_args, unpacked_kwargs = unpack_kwargs(kwarg_keys, args) outputs = run_function(*unpacked_args, **unpacked_kwargs) if isinstance(outputs, torch.Tensor): return outputs else: # Autograd Functions don't like non-Tensor outputs. We can split the # non-Tensor and Tensor outputs, returning the former by reference # through *parent_ctx_dict* and returning the latter directly. outputs, packed_non_tensor_outputs = split_non_tensors(outputs) parent_ctx_dict["packed_non_tensor_outputs"] = packed_non_tensor_outputs return outputs @staticmethod def backward(ctx, *args): if not torch.autograd._is_checkpoint_valid(): raise RuntimeError( "Checkpointing is not compatible with .grad(), please use .backward() if possible" ) tensor_inputs: Tuple = ctx.saved_tensors tensor_inputs = checkpoint.detach_variable(tensor_inputs) if ctx.fwd_device is not None: tensor_inputs = [ t.to(ctx.fwd_device[i], non_blocking=True) for i, t in enumerate(tensor_inputs) ] for i, need_grad in enumerate(ctx.grad_requirements): tensor_inputs[i].requires_grad = need_grad inputs = unpack_non_tensors(tensor_inputs, ctx.packed_non_tensor_inputs) # Store the current states. bwd_rng_state = utils.get_rng_state() # Set the states to what it used to be before the forward pass. utils.set_rng_state(ctx.fwd_rng_state) with torch.enable_grad(): unpacked_args, unpacked_kwargs = unpack_kwargs(ctx.kwarg_keys, inputs) outputs = ctx.run_function(*unpacked_args, **unpacked_kwargs) tensor_outputs, _ = split_non_tensors(outputs) # Set the states back to what it was at the start of this function. utils.set_rng_state(bwd_rng_state) # Run backward() with only Tensors that require grad outputs_with_grad = [] args_with_grad = [] for i in range(len(tensor_outputs)): if tensor_outputs[i].requires_grad: outputs_with_grad.append(tensor_outputs[i]) args_with_grad.append(args[i]) if len(outputs_with_grad) == 0: raise RuntimeError( "None of the outputs have requires_grad=True, " "this checkpoint() is not necessary" ) torch.autograd.backward(outputs_with_grad, args_with_grad) grads = tuple( inp.grad if isinstance(inp, torch.Tensor) else None for inp in inputs ) return (None, None, None) + grads
EXA-1-master
exa/libraries/fairseq/fairseq/modules/checkpoint_activations.py
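A minimal sketch of wrapping a module with checkpoint_wrapper from the file above; the wrapped module is called exactly like the original, while activations are recomputed during the backward pass. The layer sizes are illustrative:

import torch
import torch.nn as nn

from fairseq.modules.checkpoint_activations import checkpoint_wrapper

layer = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 16))
layer = checkpoint_wrapper(layer, offload_to_cpu=False)

x = torch.randn(4, 16, requires_grad=True)
y = layer(x)         # forward runs under no_grad inside the checkpoint
y.sum().backward()   # the forward is re-run here to rebuild activations
print(x.grad.shape)  # (4, 16)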
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.modules import TransformerSentenceEncoderLayer
from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention


class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
    """
    Implements a Sparse Transformer Encoder Layer (see SparseMultiheadAttention)
    """

    def __init__(
        self,
        embedding_dim: int = 768,
        ffn_embedding_dim: int = 3072,
        num_attention_heads: int = 8,
        dropout: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        activation_fn: str = "relu",
        export: bool = False,
        is_bidirectional: bool = True,
        stride: int = 32,
        expressivity: int = 8,
    ) -> None:
        super().__init__(
            embedding_dim,
            ffn_embedding_dim,
            num_attention_heads,
            dropout,
            attention_dropout,
            activation_dropout,
            activation_fn,
            export,
        )

        self.self_attn = SparseMultiheadAttention(
            self.embedding_dim,
            num_attention_heads,
            dropout=attention_dropout,
            add_bias_kv=False,
            add_zero_attn=False,
            self_attention=True,
            is_bidirectional=is_bidirectional,
            stride=stride,
            expressivity=expressivity,
        )
EXA-1-master
exa/libraries/fairseq/fairseq/modules/sparse_transformer_sentence_encoder_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import functools import operator import torch import torch.nn.functional as F from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.quant_noise import quant_noise from torch import nn class TiedLinear(nn.Module): def __init__(self, weight, transpose): super().__init__() self.weight = weight self.transpose = transpose def forward(self, input): return F.linear(input, self.weight.t() if self.transpose else self.weight) class TiedHeadModule(nn.Module): def __init__(self, weights, input_dim, num_classes, q_noise, qn_block_size): super().__init__() tied_emb, _ = weights self.num_words, emb_dim = tied_emb.size() self.word_proj = quant_noise( TiedLinear(tied_emb, transpose=False), q_noise, qn_block_size ) if input_dim != emb_dim: self.word_proj = nn.Sequential( quant_noise( nn.Linear(input_dim, emb_dim, bias=False), q_noise, qn_block_size ), self.word_proj, ) self.class_proj = quant_noise( nn.Linear(input_dim, num_classes, bias=False), q_noise, qn_block_size ) self.out_dim = self.num_words + num_classes self.register_buffer("_float_tensor", torch.FloatTensor(1)) def forward(self, input): inp_sz = functools.reduce(operator.mul, input.shape[:-1], 1) out = self._float_tensor.new(inp_sz, self.out_dim) out[:, : self.num_words] = self.word_proj(input.view(inp_sz, -1)) out[:, self.num_words :] = self.class_proj(input.view(inp_sz, -1)) return out class AdaptiveSoftmax(nn.Module): """ This is an implementation of the efficient softmax approximation for graphical processing units (GPU), described in the paper "Efficient softmax approximation for GPUs" (http://arxiv.org/abs/1609.04309). 
""" def __init__( self, vocab_size, input_dim, cutoff, dropout, factor=4.0, adaptive_inputs=None, tie_proj=False, q_noise=0, qn_block_size=8, ): super().__init__() if vocab_size > cutoff[-1]: cutoff = cutoff + [vocab_size] else: assert ( vocab_size == cutoff[-1] ), "cannot specify cutoff larger than vocab size" output_dim = cutoff[0] + len(cutoff) - 1 self.vocab_size = vocab_size self.cutoff = cutoff self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.input_dim = input_dim self.factor = factor self.q_noise = q_noise self.qn_block_size = qn_block_size self.lsm = nn.LogSoftmax(dim=1) if adaptive_inputs is not None: self.head = TiedHeadModule( adaptive_inputs.weights_for_band(0), input_dim, len(cutoff) - 1, self.q_noise, self.qn_block_size, ) else: self.head = quant_noise( nn.Linear(input_dim, output_dim, bias=False), self.q_noise, self.qn_block_size, ) self._make_tail(adaptive_inputs, tie_proj) def init_weights(m): if ( hasattr(m, "weight") and not isinstance(m, TiedLinear) and not isinstance(m, TiedHeadModule) ): nn.init.xavier_uniform_(m.weight) self.apply(init_weights) self.register_buffer("version", torch.LongTensor([1])) def _make_tail(self, adaptive_inputs=None, tie_proj=False): self.tail = nn.ModuleList() for i in range(len(self.cutoff) - 1): dim = int(self.input_dim // self.factor ** (i + 1)) tied_emb, tied_proj = ( adaptive_inputs.weights_for_band(i + 1) if adaptive_inputs is not None else (None, None) ) if tied_proj is not None: if tie_proj: proj = quant_noise( TiedLinear(tied_proj, transpose=True), self.q_noise, self.qn_block_size, ) else: proj = quant_noise( nn.Linear(tied_proj.size(0), tied_proj.size(1), bias=False), self.q_noise, self.qn_block_size, ) else: proj = quant_noise( nn.Linear(self.input_dim, dim, bias=False), self.q_noise, self.qn_block_size, ) if tied_emb is None: out_proj = nn.Linear( dim, self.cutoff[i + 1] - self.cutoff[i], bias=False ) else: out_proj = TiedLinear(tied_emb, transpose=False) m = nn.Sequential( proj, nn.Dropout(self.dropout_module.p), quant_noise(out_proj, self.q_noise, self.qn_block_size), ) self.tail.append(m) def upgrade_state_dict_named(self, state_dict, name): version_name = name + ".version" if version_name not in state_dict: raise Exception("This version of the model is no longer supported") def adapt_target(self, target): """ In order to be efficient, the AdaptiveSoftMax does not compute the scores for all the word of the vocabulary for all the examples. It is thus necessary to call the method adapt_target of the AdaptiveSoftMax layer inside each forward pass. 
""" target = target.view(-1) new_target = [target.clone()] target_idxs = [] for i in range(len(self.cutoff) - 1): mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1])) new_target[0][mask] = self.cutoff[0] + i if mask.any(): target_idxs.append(mask.nonzero(as_tuple=False).squeeze(1)) new_target.append(target[mask].add(-self.cutoff[i])) else: target_idxs.append(None) new_target.append(None) return new_target, target_idxs def forward(self, input, target): """ Args: input: (b x t x d) target: (b x t) Returns: 2 lists: output for each cutoff section and new targets by cut off """ input = input.contiguous().view(-1, input.size(-1)) input = self.dropout_module(input) new_target, target_idxs = self.adapt_target(target) output = [self.head(input)] for i in range(len(target_idxs)): if target_idxs[i] is not None: output.append(self.tail[i](input.index_select(0, target_idxs[i]))) else: output.append(None) return output, new_target def get_log_prob(self, input, target): """ Computes the log probabilities for all the words of the vocabulary, given a 2D tensor of hidden vectors. """ bsz, length, dim = input.size() input = input.contiguous().view(-1, dim) if target is not None: _, target_idxs = self.adapt_target(target) else: target_idxs = None head_y = self.head(input) log_probs = head_y.new_zeros(input.size(0), self.vocab_size) head_sz = self.cutoff[0] + len(self.tail) log_probs[:, :head_sz] = self.lsm(head_y) tail_priors = log_probs[:, self.cutoff[0] : head_sz].clone() for i in range(len(self.tail)): start = self.cutoff[i] end = self.cutoff[i + 1] if target_idxs is None: tail_out = log_probs[:, start:end] tail_out.copy_(self.tail[i](input)) log_probs[:, start:end] = self.lsm(tail_out).add_( tail_priors[:, i, None] ) elif target_idxs[i] is not None: idxs = target_idxs[i] tail_out = log_probs[idxs, start:end] tail_out.copy_(self.tail[i](input[idxs])) log_probs[idxs, start:end] = self.lsm(tail_out).add_( tail_priors[idxs, i, None] ) log_probs = log_probs.view(bsz, length, -1) return log_probs
EXA-1-master
exa/libraries/fairseq/fairseq/modules/adaptive_softmax.py
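# A minimal usage sketch for the AdaptiveSoftmax module in the file above (assumes
# fairseq is installed; the vocabulary size, cutoffs and tensor shapes are illustrative).
import torch

from fairseq.modules import AdaptiveSoftmax

torch.manual_seed(0)
vocab_size, input_dim = 1000, 64
asm = AdaptiveSoftmax(vocab_size, input_dim, cutoff=[100, 500], dropout=0.1)
x = torch.randn(2, 7, input_dim)               # B x T x D decoder hidden states
target = torch.randint(0, vocab_size, (2, 7))  # B x T target tokens
outputs, new_targets = asm(x, target)          # one output tensor per cutoff section
print([o.size() if o is not None else None for o in outputs])
log_probs = asm.get_log_prob(x, target=None)   # full B x T x V log-distribution
print(log_probs.size())                        # torch.Size([2, 7, 1000])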
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch import nn from torch.nn.modules.utils import _single from torch import Tensor class ConvTBC(torch.nn.Module): """1D convolution over an input of shape (time x batch x channel) The implementation uses gemm to perform the convolution. This implementation is faster than cuDNN for small kernel sizes. """ def __init__(self, in_channels, out_channels, kernel_size, padding=0): super(ConvTBC, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _single(kernel_size) self.padding = _single(padding) self.weight = torch.nn.Parameter( torch.Tensor(self.kernel_size[0], in_channels, out_channels) ) self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) self.reset_parameters() def reset_parameters(self): nn.init.xavier_normal_(self.weight) nn.init.zeros_(self.bias) def conv_tbc(self, input: Tensor): return torch.conv_tbc( input.contiguous(), self.weight, self.bias, self.padding[0] ) def forward(self, input: Tensor): return self.conv_tbc(input) def __repr__(self): s = ( "{name}({in_channels}, {out_channels}, kernel_size={kernel_size}" ", padding={padding}" ) if self.bias is None: s += ", bias=False" s += ")" return s.format(name=self.__class__.__name__, **self.__dict__)
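# A minimal usage sketch: ConvTBC expects time-first input (T x B x C); `padding`
# is applied on both sides of the time axis, so kernel_size=3 with padding=1 keeps
# the sequence length. The sizes below are illustrative.
if __name__ == "__main__":
    conv = ConvTBC(in_channels=8, out_channels=16, kernel_size=3, padding=1)
    x = torch.randn(10, 4, 8)  # T x B x C
    y = conv(x)
    print(conv)
    print(y.size())            # torch.Size([10, 4, 16])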
EXA-1-master
exa/libraries/fairseq/fairseq/modules/conv_tbc.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ batch norm done in fp32 (for fp16 training) """ import torch import torch.nn as nn class Fp32BatchNorm(nn.Module): def __init__(self, sync=False, *args, **kwargs): super().__init__() if sync: from fairseq.distributed import utils if utils.get_global_world_size() == 1: sync = False if sync: self.bn = nn.SyncBatchNorm(*args, **kwargs) else: self.bn = nn.BatchNorm1d(*args, **kwargs) self.sync = sync def forward(self, input): if self.bn.running_mean.dtype != torch.float: if self.sync: self.bn.running_mean = self.bn.running_mean.float() self.bn.running_var = self.bn.running_var.float() if self.bn.affine: try: self.bn.weight = self.bn.weight.float() self.bn.bias = self.bn.bias.float() except: self.bn.float() else: self.bn.float() output = self.bn(input.float()) return output.type_as(input)
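# A minimal usage sketch: arguments after `sync` are forwarded to BatchNorm1d, and
# the statistics are kept in fp32 even when the activations are fp16. The feature
# and batch sizes below are illustrative.
if __name__ == "__main__":
    bn = Fp32BatchNorm(sync=False, num_features=32)
    x = torch.randn(8, 32, 20)  # N x C x L
    y = bn(x)
    print(y.size(), y.dtype)    # torch.Size([8, 32, 20]) torch.float32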
EXA-1-master
exa/libraries/fairseq/fairseq/modules/fp32_batch_norm.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
Instance norm done in fp32 (for fp16 training)
"""
import torch.nn as nn
import torch.nn.functional as F


class Fp32InstanceNorm(nn.InstanceNorm1d):
    def __init__(self, *args, **kwargs):
        self.transpose_last = "transpose_last" in kwargs and kwargs["transpose_last"]
        if "transpose_last" in kwargs:
            del kwargs["transpose_last"]
        super().__init__(*args, **kwargs)

    def forward(self, input):
        if self.transpose_last:
            input = input.transpose(1, 2)
        output = F.instance_norm(
            input.float(),
            running_mean=self.running_mean,
            running_var=self.running_var,
            weight=self.weight.float() if self.weight is not None else None,
            bias=self.bias.float() if self.bias is not None else None,
            use_input_stats=self.training or not self.track_running_stats,
            momentum=self.momentum,
            eps=self.eps,
        )
        if self.transpose_last:
            output = output.transpose(1, 2)
        return output.type_as(input)
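# A minimal usage sketch: with transpose_last=True the module accepts channel-last
# input (B x T x C), normalizes per channel in fp32, and transposes back. Sizes are
# illustrative.
if __name__ == "__main__":
    import torch

    inorm = Fp32InstanceNorm(16, affine=True, transpose_last=True)
    x = torch.randn(4, 50, 16)  # B x T x C
    y = inorm(x)
    print(y.size())             # torch.Size([4, 50, 16])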
EXA-1-master
exa/libraries/fairseq/fairseq/modules/fp32_instance_norm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn.functional as F def unfold1d(x, kernel_size: int, padding_l: int, pad_value: float = 0): """unfold T x B x C to T x B x C x K""" if kernel_size > 1: T, B, C = x.size() x = F.pad( x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value ) x = x.as_strided((T, B, C, kernel_size), (B * C, C, 1, B * C)) else: x = x.unsqueeze(3) return x
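# A minimal usage sketch: unfold1d turns a T x B x C sequence into T x B x C x K
# sliding windows; with padding_l = kernel_size - 1 the windows are causal (each
# timestep only sees itself and earlier positions). Sizes are illustrative.
if __name__ == "__main__":
    import torch

    x = torch.arange(5 * 2 * 3, dtype=torch.float).view(5, 2, 3)  # T x B x C
    windows = unfold1d(x, kernel_size=3, padding_l=2)
    print(windows.size())  # torch.Size([5, 2, 3, 3])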
EXA-1-master
exa/libraries/fairseq/fairseq/modules/unfold.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn import torch import sys from fairseq import utils from fairseq.distributed import utils as distributed_utils from fairseq.modules.layer_norm import LayerNorm class BaseLayer(nn.Module): def __init__(self, args): super().__init__() self.num_workers = distributed_utils.get_data_parallel_world_size() expert_centroids = torch.empty(self.num_workers, args.decoder_embed_dim) torch.nn.init.orthogonal_(expert_centroids, gain=0.1) self.register_parameter( "expert_centroids", torch.nn.Parameter(expert_centroids) ) self.expert_network = nn.Sequential( *([BaseSublayer(args) for _ in range(args.base_sublayers)]) ) self.expert_id = distributed_utils.get_data_parallel_rank() self.shuffle = args.base_shuffle self.cpp = self.load_assignment() # Add a special attribute to the expert parameters, so we know not to sync their gradients for param in self.expert_network.parameters(): param.expert = True def forward(self, input_features, *args, **kwargs): features = input_features.reshape(-1, input_features.size(-1)) is_training = input_features.requires_grad if self.shuffle and is_training: # Send each token to a random worker, to break correlations within the batch shuffle_sort = torch.randperm(features.size(0), device=features.device) features = All2All.apply(features[shuffle_sort]) with torch.no_grad(): # Compute similarity of each token to each expert, for routing token_expert_affinities = features.matmul( self.expert_centroids.transpose(0, 1) ) # Compute which token goes to which expert sort_by_expert, input_splits, output_splits = ( self.balanced_assignment(token_expert_affinities) if is_training else self.greedy_assignment(token_expert_affinities) ) # Swap these tokens for the right ones for our expert routed_features = All2All.apply( features[sort_by_expert], output_splits, input_splits ) if routed_features.size(0) > 0: # Mix in the expert network based on how appropriate it is for these tokens alpha = torch.sigmoid( routed_features.mv(self.expert_centroids[self.expert_id]) ).unsqueeze(1) routed_features = ( alpha * self.expert_network(routed_features) + (1 - alpha) * routed_features ) # Return to original worker and ordering result = All2All.apply(routed_features, input_splits, output_splits)[ self.inverse_sort(sort_by_expert) ] if self.shuffle and is_training: # Undo shuffling result = All2All.apply(result)[self.inverse_sort(shuffle_sort)] # Return additional Nones for compatibility with TransformerDecoderLayer return result.view(input_features.size()), None, None def inverse_sort(self, order): # Creates an index that undoes a sort: xs==xs[order][inverse_sort(order)] return torch.empty_like(order).scatter_( 0, order, torch.arange(0, order.size(0), device=order.device) ) def balanced_assignment(self, scores): ok = scores.isfinite() if not ok.all(): # NaNs here can break the assignment algorithm scores[~ok] = scores[ok].min() return self.cpp.balanced_assignment(scores), None, None # Assigns each token to the top k experts def greedy_assignment(self, scores, k=1): token_to_workers = torch.topk(scores, dim=1, k=k, largest=True).indices.view(-1) token_to_workers, sort_ordering = torch.sort(token_to_workers) worker2token = sort_ordering // k # Find how many tokens we're sending to each other worker (being careful for sending 0 tokens to some workers) output_splits = torch.zeros( (self.num_workers,), 
dtype=torch.long, device=scores.device ) workers, counts = torch.unique_consecutive(token_to_workers, return_counts=True) output_splits[workers] = counts # Tell other workers how many tokens to expect from us input_splits = All2All.apply(output_splits) return worker2token, input_splits.tolist(), output_splits.tolist() def load_assignment(self): try: from fairseq import libbase return libbase except ImportError as e: sys.stderr.write( "ERROR: missing libbase. run `python setup.py build_ext --inplace`\n" ) raise e class BaseSublayer(nn.Module): def __init__(self, args): super().__init__() self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") or "relu" ) self.norm = LayerNorm(args.decoder_embed_dim, export=False) self.ff1 = torch.nn.Linear(args.decoder_embed_dim, args.decoder_ffn_embed_dim) self.ff2 = torch.nn.Linear(args.decoder_ffn_embed_dim, args.decoder_embed_dim) self.ff2.weight.data.zero_() def forward(self, xs): return xs + self.ff2(self.activation_fn(self.ff1(self.norm(xs)))) # Wraps torch.distributed.all_to_all_single as a function that supports autograd class All2All(torch.autograd.Function): @staticmethod def forward(ctx, xs, input_splits=None, output_splits=None): ctx.input_splits = input_splits ctx.output_splits = output_splits ys = ( torch.empty_like(xs) if output_splits is None else xs.new_empty(size=[sum(output_splits)] + list(xs.size()[1:])) ) torch.distributed.all_to_all_single( ys, xs, output_split_sizes=output_splits, input_split_sizes=input_splits ) return ys @staticmethod def backward(ctx, grad_output): result = ( torch.empty_like(grad_output) if ctx.input_splits is None else grad_output.new_empty( size=[sum(ctx.input_splits)] + list(grad_output.size()[1:]) ) ) torch.distributed.all_to_all_single( result, grad_output, output_split_sizes=ctx.input_splits, input_split_sizes=ctx.output_splits, ) return result, None, None
EXA-1-master
exa/libraries/fairseq/fairseq/modules/base_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ This file is to re-implemented the low-rank and beam approximation of CRF layer Proposed by: Sun, Zhiqing, et al. Fast Structured Decoding for Sequence Models https://arxiv.org/abs/1910.11555 The CRF implementation is mainly borrowed from https://github.com/kmkurn/pytorch-crf/blob/master/torchcrf/__init__.py """ import numpy as np import torch import torch.nn as nn def logsumexp(x, dim=1): return torch.logsumexp(x.float(), dim=dim).type_as(x) class DynamicCRF(nn.Module): """Dynamic CRF layer is used to approximate the traditional Conditional Random Fields (CRF) $P(y | x) = 1/Z(x) exp(sum_i s(y_i, x) + sum_i t(y_{i-1}, y_i, x))$ where in this function, we assume the emition scores (s) are given, and the transition score is a |V| x |V| matrix $M$ in the following two aspects: (1) it used a low-rank approximation for the transition matrix: $M = E_1 E_2^T$ (2) it used a beam to estimate the normalizing factor Z(x) """ def __init__(self, num_embedding, low_rank=32, beam_size=64): super().__init__() self.E1 = nn.Embedding(num_embedding, low_rank) self.E2 = nn.Embedding(num_embedding, low_rank) self.vocb = num_embedding self.rank = low_rank self.beam = beam_size def extra_repr(self): return "vocab_size={}, low_rank={}, beam_size={}".format( self.vocb, self.rank, self.beam ) def forward(self, emissions, targets, masks, beam=None): """ Compute the conditional log-likelihood of a sequence of target tokens given emission scores Args: emissions (`~torch.Tensor`): Emission score are usually the unnormalized decoder output ``(batch_size, seq_len, vocab_size)``. We assume batch-first targets (`~torch.LongTensor`): Sequence of target token indices ``(batch_size, seq_len) masks (`~torch.ByteTensor`): Mask tensor with the same size as targets Returns: `~torch.Tensor`: approximated log-likelihood """ numerator = self._compute_score(emissions, targets, masks) denominator = self._compute_normalizer(emissions, targets, masks, beam) return numerator - denominator def forward_decoder(self, emissions, masks=None, beam=None): """ Find the most likely output sequence using Viterbi algorithm. Args: emissions (`~torch.Tensor`): Emission score are usually the unnormalized decoder output ``(batch_size, seq_len, vocab_size)``. We assume batch-first masks (`~torch.ByteTensor`): Mask tensor with the same size as targets Returns: `~torch.LongTensor`: decoded sequence from the CRF model """ return self._viterbi_decode(emissions, masks, beam) def _compute_score(self, emissions, targets, masks=None): batch_size, seq_len = targets.size() emission_scores = emissions.gather(2, targets[:, :, None])[:, :, 0] # B x T transition_scores = (self.E1(targets[:, :-1]) * self.E2(targets[:, 1:])).sum(2) scores = emission_scores scores[:, 1:] += transition_scores if masks is not None: scores = scores * masks.type_as(scores) return scores.sum(-1) def _compute_normalizer(self, emissions, targets=None, masks=None, beam=None): # HACK: we include "target" which is a hueristic for training # HACK: we use a beam of tokens to approximate the normalizing factor (which is bad?) 
        beam = beam if beam is not None else self.beam
        batch_size, seq_len = emissions.size()[:2]
        if targets is not None:
            # np.float was removed in recent NumPy releases; the builtin float works the same here
            _emissions = emissions.scatter(2, targets[:, :, None], float("inf"))
            beam_targets = _emissions.topk(beam, 2)[1]
            beam_emission_scores = emissions.gather(2, beam_targets)
        else:
            beam_emission_scores, beam_targets = emissions.topk(beam, 2)
        beam_transition_score1 = self.E1(beam_targets[:, :-1])  # B x (T-1) x K x D
        beam_transition_score2 = self.E2(beam_targets[:, 1:])  # B x (T-1) x K x D
        beam_transition_matrix = torch.bmm(
            beam_transition_score1.view(-1, beam, self.rank),
            beam_transition_score2.view(-1, beam, self.rank).transpose(1, 2),
        )
        beam_transition_matrix = beam_transition_matrix.view(batch_size, -1, beam, beam)

        # compute the normalizer in the log-space
        score = beam_emission_scores[:, 0]  # B x K

        for i in range(1, seq_len):
            next_score = score[:, :, None] + beam_transition_matrix[:, i - 1]
            next_score = logsumexp(next_score, dim=1) + beam_emission_scores[:, i]

            if masks is not None:
                score = torch.where(masks[:, i : i + 1], next_score, score)
            else:
                score = next_score

        # Sum (log-sum-exp) over all possible tags
        return logsumexp(score, dim=1)

    def _viterbi_decode(self, emissions, masks=None, beam=None):
        # HACK: we use a beam of tokens to approximate the normalizing factor (which is bad?)
        beam = beam if beam is not None else self.beam
        batch_size, seq_len = emissions.size()[:2]
        beam_emission_scores, beam_targets = emissions.topk(beam, 2)
        beam_transition_score1 = self.E1(beam_targets[:, :-1])  # B x (T-1) x K x D
        beam_transition_score2 = self.E2(beam_targets[:, 1:])  # B x (T-1) x K x D
        beam_transition_matrix = torch.bmm(
            beam_transition_score1.view(-1, beam, self.rank),
            beam_transition_score2.view(-1, beam, self.rank).transpose(1, 2),
        )
        beam_transition_matrix = beam_transition_matrix.view(batch_size, -1, beam, beam)

        traj_tokens, traj_scores = [], []
        finalized_tokens, finalized_scores = [], []

        # compute the normalizer in the log-space
        score = beam_emission_scores[:, 0]  # B x K
        dummy = (
            torch.arange(beam, device=score.device).expand(*score.size()).contiguous()
        )

        for i in range(1, seq_len):
            traj_scores.append(score)
            _score = score[:, :, None] + beam_transition_matrix[:, i - 1]
            _score, _index = _score.max(dim=1)
            _score = _score + beam_emission_scores[:, i]

            if masks is not None:
                score = torch.where(masks[:, i : i + 1], _score, score)
                index = torch.where(masks[:, i : i + 1], _index, dummy)
            else:
                score, index = _score, _index
            traj_tokens.append(index)

        # now running the back-tracing and find the best
        best_score, best_index = score.max(dim=1)
        finalized_tokens.append(best_index[:, None])
        finalized_scores.append(best_score[:, None])

        for idx, scs in zip(reversed(traj_tokens), reversed(traj_scores)):
            previous_index = finalized_tokens[-1]
            finalized_tokens.append(idx.gather(1, previous_index))
            finalized_scores.append(scs.gather(1, previous_index))

        finalized_tokens.reverse()
        finalized_tokens = torch.cat(finalized_tokens, 1)
        finalized_tokens = beam_targets.gather(2, finalized_tokens[:, :, None])[:, :, 0]

        finalized_scores.reverse()
        finalized_scores = torch.cat(finalized_scores, 1)
        finalized_scores[:, 1:] = finalized_scores[:, 1:] - finalized_scores[:, :-1]

        return finalized_scores, finalized_tokens
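# A minimal usage sketch with illustrative sizes: score a random target sequence and
# run the beam-approximated Viterbi decoding. A small beam keeps the example cheap.
if __name__ == "__main__":
    torch.manual_seed(0)
    B, T, V = 2, 6, 50
    crf = DynamicCRF(num_embedding=V, low_rank=8, beam_size=4)
    emissions = torch.randn(B, T, V)
    targets = torch.randint(0, V, (B, T))
    masks = torch.ones(B, T, dtype=torch.bool)
    loglik = crf(emissions, targets, masks)            # B-dim approximate log-likelihood
    scores, tokens = crf.forward_decoder(emissions, masks)
    print(loglik.size(), tokens.size())                # torch.Size([2]) torch.Size([2, 6])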
EXA-1-master
exa/libraries/fairseq/fairseq/modules/dynamic_crf_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with the corresponding GitHub repo: https://github.com/hendrycks/GELUs """ import math import torch import torch.nn as nn def gelu_accurate(x): if not hasattr(gelu_accurate, "_a"): gelu_accurate._a = math.sqrt(2 / math.pi) return ( 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) ) def gelu(x: torch.Tensor) -> torch.Tensor: return torch.nn.functional.gelu(x.float()).type_as(x)
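# A minimal sketch comparing the exact (erf-based) `gelu` wrapper with the tanh
# approximation `gelu_accurate`; the two agree closely for typical activations.
if __name__ == "__main__":
    x = torch.linspace(-3.0, 3.0, steps=7)
    print(gelu(x))
    print(gelu_accurate(x))
    print((gelu(x) - gelu_accurate(x)).abs().max())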
EXA-1-master
exa/libraries/fairseq/fairseq/modules/gelu.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ LayerDrop as described in https://arxiv.org/abs/1909.11556. """ import torch import torch.nn as nn class LayerDropModuleList(nn.ModuleList): """ A LayerDrop implementation based on :class:`torch.nn.ModuleList`. We refresh the choice of which layers to drop every time we iterate over the LayerDropModuleList instance. During evaluation we always iterate over all layers. Usage:: layers = LayerDropList(p=0.5, modules=[layer1, layer2, layer3]) for layer in layers: # this might iterate over layers 1 and 3 x = layer(x) for layer in layers: # this might iterate over all layers x = layer(x) for layer in layers: # this might not iterate over any layers x = layer(x) Args: p (float): probability of dropping out each layer modules (iterable, optional): an iterable of modules to add """ def __init__(self, p, modules=None): super().__init__(modules) self.p = p def __iter__(self): dropout_probs = torch.empty(len(self)).uniform_() for i, m in enumerate(super().__iter__()): if not self.training or (dropout_probs[i] > self.p): yield m
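# A minimal usage sketch: in training mode roughly half of the layers are skipped on
# each pass over the list, while in eval mode every layer runs. Sizes are illustrative.
if __name__ == "__main__":
    layers = LayerDropModuleList(p=0.5, modules=[nn.Linear(8, 8) for _ in range(4)])
    x = torch.randn(2, 8)
    layers.train()
    for layer in layers:  # a random subset of the 4 layers
        x = layer(x)
    layers.eval()
    for layer in layers:  # always all 4 layers
        x = layer(x)
    print(x.size())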
EXA-1-master
exa/libraries/fairseq/fairseq/modules/layer_drop.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Optional, Tuple import torch import torch.nn as nn from fairseq.modules import ( FairseqDropout, LayerDropModuleList, LayerNorm, MultiheadAttention, PositionalEmbedding, TransformerSentenceEncoderLayer, ) from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ def init_bert_params(module): """ Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then weights of linear layer will be initialized using the normal distribution and bais will be set to the specified value. 2. If normal_init_embed_weights is set then weights of embedding layer will be initialized using the normal distribution. 3. If normal_init_proj_weights is set then weights of in_project_weight for MultiHeadAttention initialized using the normal distribution (to be validated). """ def normal_(data): # with FSDP, module params will be on CUDA, so we cast them back to CPU # so that the RNG is consistent with and without FSDP data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device)) if isinstance(module, nn.Linear): normal_(module.weight.data) if module.bias is not None: module.bias.data.zero_() if isinstance(module, nn.Embedding): normal_(module.weight.data) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if isinstance(module, MultiheadAttention): normal_(module.q_proj.weight.data) normal_(module.k_proj.weight.data) normal_(module.v_proj.weight.data) class TransformerSentenceEncoder(nn.Module): """ Implementation for a Bi-directional Transformer based Sentence Encoder used in BERT/XLM style pre-trained models. This first computes the token embedding using the token embedding matrix, position embeddings (if specified) and segment embeddings (if specified). After applying the specified number of TransformerEncoderLayers, it outputs all the internal states of the encoder as well as the final representation associated with the first token (usually CLS token). Input: - tokens: B x T matrix representing sentences - segment_labels: B x T matrix representing segment label for tokens Output: - a tuple of the following: - a list of internal model states used to compute the predictions where each tensor has shape T x B x C - sentence representation associated with first input token in format B x C. 
""" def __init__( self, padding_idx: int, vocab_size: int, num_encoder_layers: int = 6, embedding_dim: int = 768, ffn_embedding_dim: int = 3072, num_attention_heads: int = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, layerdrop: float = 0.0, max_seq_len: int = 256, num_segments: int = 2, use_position_embeddings: bool = True, offset_positions_by_padding: bool = True, encoder_normalize_before: bool = False, apply_bert_init: bool = False, activation_fn: str = "relu", learned_pos_embedding: bool = True, embed_scale: float = None, freeze_embeddings: bool = False, n_trans_layers_to_freeze: int = 0, export: bool = False, traceable: bool = False, q_noise: float = 0.0, qn_block_size: int = 8, ) -> None: super().__init__() self.padding_idx = padding_idx self.vocab_size = vocab_size self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.layerdrop = layerdrop self.max_seq_len = max_seq_len self.embedding_dim = embedding_dim self.num_segments = num_segments self.use_position_embeddings = use_position_embeddings self.apply_bert_init = apply_bert_init self.learned_pos_embedding = learned_pos_embedding self.traceable = traceable self.embed_tokens = self.build_embedding( self.vocab_size, self.embedding_dim, self.padding_idx ) self.embed_scale = embed_scale if q_noise > 0: self.quant_noise = apply_quant_noise_( nn.Linear(self.embedding_dim, self.embedding_dim, bias=False), q_noise, qn_block_size, ) else: self.quant_noise = None self.segment_embeddings = ( nn.Embedding(self.num_segments, self.embedding_dim, padding_idx=None) if self.num_segments > 0 else None ) self.embed_positions = ( PositionalEmbedding( self.max_seq_len, self.embedding_dim, padding_idx=(self.padding_idx if offset_positions_by_padding else None), learned=self.learned_pos_embedding, ) if self.use_position_embeddings else None ) if encoder_normalize_before: self.emb_layer_norm = LayerNorm(self.embedding_dim, export=export) else: self.emb_layer_norm = None if self.layerdrop > 0.0: self.layers = LayerDropModuleList(p=self.layerdrop) else: self.layers = nn.ModuleList([]) self.layers.extend( [ self.build_transformer_sentence_encoder_layer( embedding_dim=self.embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=self.dropout_module.p, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, export=export, q_noise=q_noise, qn_block_size=qn_block_size, ) for _ in range(num_encoder_layers) ] ) # Apply initialization of model params after building the model if self.apply_bert_init: self.apply(init_bert_params) def freeze_module_params(m): if m is not None: for p in m.parameters(): p.requires_grad = False if freeze_embeddings: freeze_module_params(self.embed_tokens) freeze_module_params(self.segment_embeddings) freeze_module_params(self.embed_positions) freeze_module_params(self.emb_layer_norm) for layer in range(n_trans_layers_to_freeze): freeze_module_params(self.layers[layer]) def build_embedding(self, vocab_size, embedding_dim, padding_idx): return nn.Embedding(vocab_size, embedding_dim, padding_idx) def build_transformer_sentence_encoder_layer( self, embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, activation_fn, export, q_noise, qn_block_size, ): return TransformerSentenceEncoderLayer( embedding_dim=embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=dropout, 
attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, export=export, q_noise=q_noise, qn_block_size=qn_block_size, ) def forward( self, tokens: torch.Tensor, segment_labels: torch.Tensor = None, last_state_only: bool = False, positions: Optional[torch.Tensor] = None, token_embeddings: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: is_tpu = tokens.device.type == "xla" # compute padding mask. This is needed for multi-head attention padding_mask = tokens.eq(self.padding_idx) if not self.traceable and not is_tpu and not padding_mask.any(): padding_mask = None if token_embeddings is not None: x = token_embeddings else: x = self.embed_tokens(tokens) if self.embed_scale is not None: x = x * self.embed_scale if self.embed_positions is not None: x = x + self.embed_positions(tokens, positions=positions) if self.segment_embeddings is not None and segment_labels is not None: x = x + self.segment_embeddings(segment_labels) if self.quant_noise is not None: x = self.quant_noise(x) if self.emb_layer_norm is not None: x = self.emb_layer_norm(x) x = self.dropout_module(x) # account for padding while computing the representation if padding_mask is not None: x = x * (1 - padding_mask.unsqueeze(-1).type_as(x)) # B x T x C -> T x B x C x = x.transpose(0, 1) inner_states = [] if not last_state_only: inner_states.append(x) for layer in self.layers: x, _ = layer( x, self_attn_padding_mask=padding_mask, self_attn_mask=attn_mask ) if not last_state_only: inner_states.append(x) sentence_rep = x[0, :, :] if last_state_only: inner_states = [x] if self.traceable: return torch.stack(inner_states), sentence_rep else: return inner_states, sentence_rep
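# A minimal usage sketch with illustrative sizes (assumes fairseq is installed, since
# the encoder builds fairseq's MultiheadAttention and PositionalEmbedding internally).
if __name__ == "__main__":
    encoder = TransformerSentenceEncoder(
        padding_idx=1,
        vocab_size=100,
        num_encoder_layers=2,
        embedding_dim=64,
        ffn_embedding_dim=128,
        num_attention_heads=4,
        max_seq_len=32,
    )
    tokens = torch.randint(2, 100, (3, 16))  # B x T, avoiding the padding index
    inner_states, sentence_rep = encoder(tokens)
    print(inner_states[-1].size(), sentence_rep.size())  # T x B x C and B x C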
EXA-1-master
exa/libraries/fairseq/fairseq/modules/transformer_sentence_encoder.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""

from .adaptive_input import AdaptiveInput
from .adaptive_softmax import AdaptiveSoftmax
from .base_layer import BaseLayer
from .beamable_mm import BeamableMM
from .character_token_embedder import CharacterTokenEmbedder
from .conv_tbc import ConvTBC
from .cross_entropy import cross_entropy
from .downsampled_multihead_attention import DownsampledMultiHeadAttention
from .dynamic_convolution import DynamicConv, DynamicConv1dTBC, DynamicConv_scripatable
from .dynamic_crf_layer import DynamicCRF
from .ema_module import EMAModuleConfig, EMAModule
from .fairseq_dropout import FairseqDropout
from .fp32_batch_norm import Fp32BatchNorm
from .fp32_group_norm import Fp32GroupNorm
from .fp32_instance_norm import Fp32InstanceNorm
from .gelu import gelu, gelu_accurate
from .grad_multiply import GradMultiply
from .gumbel_vector_quantizer import GumbelVectorQuantizer
from .kmeans_vector_quantizer import KmeansVectorQuantizer
from .layer_drop import LayerDropModuleList
from .layer_norm import Fp32LayerNorm, LayerNorm
from .learned_positional_embedding import LearnedPositionalEmbedding
from .lightweight_convolution import LightweightConv, LightweightConv1dTBC
from .linearized_convolution import LinearizedConvolution
from .location_attention import LocationAttention
from .lstm_cell_with_zoneout import LSTMCellWithZoneOut
from .multihead_attention import MultiheadAttention
from .positional_embedding import PositionalEmbedding
from .same_pad import SamePad, SamePad2d
from .scalar_bias import ScalarBias
from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding
from .transformer_sentence_encoder_layer import TransformerSentenceEncoderLayer
from .transformer_sentence_encoder import TransformerSentenceEncoder
from .transpose_last import TransposeLast
from .unfold import unfold1d
from .transformer_layer import TransformerDecoderLayer, TransformerEncoderLayer
from .vggblock import VGGBlock
from .espnet_multihead_attention import (
    ESPNETMultiHeadedAttention,
    RelPositionMultiHeadedAttention,
    RotaryPositionMultiHeadedAttention,
)
from .rotary_positional_embedding import RotaryPositionalEmbedding
from .positional_encoding import (
    RelPositionalEncoding,
)

__all__ = [
    "AdaptiveInput",
    "AdaptiveSoftmax",
    "BaseLayer",
    "BeamableMM",
    "CharacterTokenEmbedder",
    "ConvTBC",
    "cross_entropy",
    "DownsampledMultiHeadAttention",
    "DynamicConv1dTBC",
    "DynamicConv",
    "DynamicConv_scripatable",
    "DynamicCRF",
    "EMAModule",
    "EMAModuleConfig",
    "FairseqDropout",
    "Fp32BatchNorm",
    "Fp32GroupNorm",
    "Fp32LayerNorm",
    "Fp32InstanceNorm",
    "gelu",
    "gelu_accurate",
    "GradMultiply",
    "GumbelVectorQuantizer",
    "KmeansVectorQuantizer",
    "LayerDropModuleList",
    "LayerNorm",
    "LearnedPositionalEmbedding",
    "LightweightConv1dTBC",
    "LightweightConv",
    "LinearizedConvolution",
    "LocationAttention",
    "LSTMCellWithZoneOut",
    "MultiheadAttention",
    "PositionalEmbedding",
    "SamePad",
    "SamePad2d",
    "ScalarBias",
    "SinusoidalPositionalEmbedding",
    "TransformerSentenceEncoderLayer",
    "TransformerSentenceEncoder",
    "TransformerDecoderLayer",
    "TransformerEncoderLayer",
    "TransposeLast",
    "VGGBlock",
    "unfold1d",
    "ESPNETMultiHeadedAttention",
    "RelPositionMultiHeadedAttention",
    "RelPositionalEncoding",
    "RotaryPositionalEmbedding",
    "RotaryPositionMultiHeadedAttention",
]
EXA-1-master
exa/libraries/fairseq/fairseq/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from fairseq.modules import TransformerSentenceEncoder from fairseq.modules.sparse_transformer_sentence_encoder_layer import ( SparseTransformerSentenceEncoderLayer, ) class SparseTransformerSentenceEncoder(TransformerSentenceEncoder): """ Sparse implementation of the TransformerSentenceEncoder - see SparseMultiheadAttention """ def __init__( self, padding_idx: int, vocab_size: int, num_encoder_layers: int = 6, embedding_dim: int = 768, ffn_embedding_dim: int = 3072, num_attention_heads: int = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, max_seq_len: int = 256, num_segments: int = 2, use_position_embeddings: bool = True, offset_positions_by_padding: bool = True, encoder_normalize_before: bool = False, apply_bert_init: bool = False, activation_fn: str = "relu", learned_pos_embedding: bool = True, embed_scale: float = None, freeze_embeddings: bool = False, n_trans_layers_to_freeze: int = 0, export: bool = False, is_bidirectional: bool = True, stride: int = 32, expressivity: int = 8, ) -> None: super().__init__( padding_idx, vocab_size, num_encoder_layers, embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, max_seq_len, num_segments, use_position_embeddings, offset_positions_by_padding, encoder_normalize_before, apply_bert_init, activation_fn, learned_pos_embedding, embed_scale, freeze_embeddings, n_trans_layers_to_freeze, export, ) self.layers = nn.ModuleList( [ SparseTransformerSentenceEncoderLayer( embedding_dim=self.embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=dropout, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, export=export, is_bidirectional=is_bidirectional, stride=stride, expressivity=expressivity, ) for _ in range(num_encoder_layers) ] ) def freeze_module_params(m): if m is not None: for p in m.parameters(): p.requires_grad = False for layer in range(n_trans_layers_to_freeze): freeze_module_params(self.layers[layer])
EXA-1-master
exa/libraries/fairseq/fairseq/modules/sparse_transformer_sentence_encoder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn.functional as F from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from .conv_tbc import ConvTBC from typing import Dict, Optional from torch import Tensor @with_incremental_state class LinearizedConvolution(ConvTBC): """An optimized version of nn.Conv1d. At training time, this module uses ConvTBC, which is an optimized version of Conv1d. At inference time, it optimizes incremental generation (i.e., one time step at a time) by replacing the convolutions with linear layers. Note that the input order changes from training to inference. """ def __init__(self, in_channels, out_channels, kernel_size, **kwargs): super().__init__(in_channels, out_channels, kernel_size, **kwargs) self._linearized_weight = None self.register_backward_hook(self._clear_linearized_weight) def state_dict(self, destination=None, prefix="", keep_vars=False): state = ConvTBC.state_dict(self, destination, prefix, keep_vars=keep_vars) # don't store redundant _linearized_weight in checkpoints if prefix + "_linearized_weight" in state: del state[prefix + "_linearized_weight"] return state def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." if name != "" else "" if prefix + "_linearized_weight" in state_dict: del state_dict[prefix + "_linearized_weight"] @torch.jit.export def forward( self, input, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): """ Args: incremental_state: Used to buffer signal; if not None, then input is expected to contain a single frame. If the input order changes between time steps, call reorder_incremental_state. 
Input: Time x Batch x Channel during training Batch x Time x Channel during inference """ if incremental_state is None: output = self.conv_tbc(input) if self.kernel_size[0] > 1 and self.padding[0] > 0: # remove future timesteps added by padding output = output[: -self.padding[0], :, :] return output # reshape weight weight = self._get_linearized_weight() kw = self.kernel_size[0] bsz = input.size(0) # input: bsz x len x dim if kw > 1: input = input.data input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = input.new(bsz, kw, input.size(2)).zero_() self._set_input_buffer(incremental_state, input_buffer) else: # shift buffer input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone() # append next input input_buffer[:, -1, :] = input[:, -1, :] input = input_buffer with torch.no_grad(): output = F.linear(input.view(bsz, -1), weight, self.bias) return output.view(bsz, 1, -1) @torch.jit.unused def reorder_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_order, ): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(0, new_order) self._set_input_buffer(incremental_state, input_buffer) @torch.jit.unused def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ): return utils.get_incremental_state(self, incremental_state, "input_buffer") @torch.jit.unused def _set_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_buffer, ): return utils.set_incremental_state( self, incremental_state, "input_buffer", new_buffer ) @torch.jit.unused def _get_linearized_weight(self): if self._linearized_weight is None: kw = self.kernel_size[0] weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous() assert weight.size() == (self.out_channels, kw, self.in_channels) return weight.view(self.out_channels, -1) return self._linearized_weight @torch.jit.unused def _clear_linearized_weight(self, *args): self._linearized_weight = None
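# A minimal usage sketch: at training time the module behaves like ConvTBC on a
# T x B x C input; at inference time it consumes one B x 1 x C frame at a time and
# keeps a rolling buffer in `incremental_state` (a plain dict here, following
# fairseq's incremental-decoding convention). Sizes are illustrative.
if __name__ == "__main__":
    conv = LinearizedConvolution(in_channels=8, out_channels=8, kernel_size=3, padding=2)
    x = torch.randn(5, 2, 8)                     # T x B x C (training)
    print(conv(x).size())                        # torch.Size([5, 2, 8]) after trimming future steps
    incremental_state = {}
    step = torch.randn(2, 1, 8)                  # B x 1 x C (single inference frame)
    print(conv(step, incremental_state).size())  # torch.Size([2, 1, 8])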
EXA-1-master
exa/libraries/fairseq/fairseq/modules/linearized_convolution.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional import torch import torch.nn as nn from torch import Tensor from fairseq import utils from fairseq.models.transformer import TransformerConfig from fairseq.modules import LayerNorm, MultiheadAttention from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.quant_noise import quant_noise class TransformerEncoderLayerBase(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *cfg.encoder.normalize_before* to ``True``. Args: cfg (argparse.Namespace): parsed command-line arguments """ def __init__(self, cfg, return_fc=False): super().__init__() self.cfg = cfg self.return_fc = return_fc self.embed_dim = cfg.encoder.embed_dim self.quant_noise = cfg.quant_noise.pq self.quant_noise_block_size = cfg.quant_noise.pq_block_size self.self_attn = self.build_self_attention(self.embed_dim, cfg) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) self.dropout_module = FairseqDropout( cfg.dropout, module_name=self.__class__.__name__ ) self.activation_fn = utils.get_activation_fn(activation=cfg.activation_fn) activation_dropout_p = cfg.activation_dropout if activation_dropout_p == 0: # for backwards compatibility with models that use cfg.relu_dropout activation_dropout_p = cfg.relu_dropout or 0 self.activation_dropout_module = FairseqDropout( float(activation_dropout_p), module_name=self.__class__.__name__ ) self.normalize_before = cfg.encoder.normalize_before self.fc1 = self.build_fc1( self.embed_dim, cfg.encoder.ffn_embed_dim, self.quant_noise, self.quant_noise_block_size, ) self.fc2 = self.build_fc2( cfg.encoder.ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size, ) self.final_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise( nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size ) def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise( nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size ) def _get_fc_rank(self, remove_num: int) -> List[int]: f1_filter_param = [] for i in range(self.fc1.out_features): f1_filter_param.append( torch.sum(torch.abs(self.fc1.weight[i])) + torch.sum(torch.abs(self.fc2.weight[:, i])) + torch.abs(self.fc1.bias[i]) ) return sorted( range(len(f1_filter_param)), key=lambda k: f1_filter_param[k], reverse=False )[0:remove_num] def _prune_fc_layer(self, remove_index: List[int]): new_fc1_weight = [] new_fc1_bias = [] for i in range(self.fc1.out_features): if i not in remove_index: new_fc1_weight.append(self.fc1.weight[i]) new_fc1_bias.append(self.fc1.bias[i]) new_fc1_weight = torch.stack(new_fc1_weight).detach() new_fc1_weight.requires_grad = True new_fc1_bias = torch.stack(new_fc1_bias).detach() new_fc1_bias.requires_grad = True self.fc1 = quant_noise( nn.Linear(self.fc1.in_features, self.fc1.out_features - len(remove_index)), p=self.quant_noise, 
block_size=self.quant_noise_block_size, ) self.fc1.weight = torch.nn.Parameter(new_fc1_weight) self.fc1.bias = torch.nn.Parameter(new_fc1_bias) new_fc2_weight = [] new_fc2_bias = [] for i in range(self.fc2.in_features): if i not in remove_index: new_fc2_weight.append(self.fc2.weight[:, i]) new_fc2_bias = self.fc2.bias.detach() new_fc2_weight = torch.stack(new_fc2_weight, dim=-1).detach() new_fc2_weight.requires_grad = True new_fc2_bias = self.fc2.bias.detach() new_fc2_bias.requires_grad = True self.fc2 = quant_noise( nn.Linear(self.fc2.in_features - len(remove_index), self.fc2.out_features), p=self.quant_noise, block_size=self.quant_noise_block_size, ) self.fc2.weight = torch.nn.Parameter(new_fc2_weight) self.fc2.bias = torch.nn.Parameter(new_fc2_bias) def build_self_attention(self, embed_dim, cfg): return MultiheadAttention( embed_dim, cfg.encoder.attention_heads, dropout=cfg.attention_dropout, self_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, xformers_att_config=cfg.encoder.xformers_att_config, ) def residual_connection(self, x, residual): return residual + x def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"} for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layer_norms.{}.{}".format(name, old, m) if k in state_dict: state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k] del state_dict[k] def forward( self, x, encoder_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor] = None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, seq_len)` where padding elements are indicated by ``1``. attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`, where `tgt_len` is the length of output and `src_len` is the length of input, though here both are equal to `seq_len`. `attn_mask[tgt_i, src_j] = 1` means that when calculating the embedding for `tgt_i`, we exclude (mask out) `src_j`. This is useful for strided self-attention. 
Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ # anything in original attn_mask = 1, becomes -1e8 # anything in original attn_mask = 0, becomes 0 # Note that we cannot use -inf here, because at some edge cases, # the attention weight (before softmax) for some padded element in query # will become -inf, which results in NaN in model parameters if attn_mask is not None: attn_mask = attn_mask.masked_fill( attn_mask.to(torch.bool), -1e8 if x.dtype == torch.float32 else -1e4 ) residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) x, _ = self.self_attn( query=x, key=x, value=x, key_padding_mask=encoder_padding_mask, need_weights=False, attn_mask=attn_mask, ) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.self_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) fc_result = x x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.final_layer_norm(x) if self.return_fc and not torch.jit.is_scripting(): return x, fc_result return x # backward compatible with the legacy argparse format class TransformerEncoderLayer(TransformerEncoderLayerBase): def __init__(self, args): super().__init__(TransformerConfig.from_namespace(args)) self.args = args def build_self_attention(self, embed_dim, args): return super().build_self_attention( embed_dim, TransformerConfig.from_namespace(args) ) class TransformerDecoderLayerBase(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *cfg.decoder.normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__( self, cfg, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False ): super().__init__() self.embed_dim = cfg.decoder.embed_dim self.dropout_module = FairseqDropout( cfg.dropout, module_name=self.__class__.__name__ ) self.quant_noise = cfg.quant_noise.pq self.quant_noise_block_size = cfg.quant_noise.pq_block_size self.cross_self_attention = cfg.cross_self_attention self.self_attn = self.build_self_attention( self.embed_dim, cfg, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.attn_ln = ( LayerNorm(self.embed_dim) if utils.safe_getattr(cfg, "scale_attn", False) else None ) self.nh = self.self_attn.num_heads self.head_dim = self.self_attn.head_dim scale_heads = utils.safe_getattr(cfg, "scale_heads", False) self.c_attn = ( nn.Parameter(torch.ones((self.nh,)), requires_grad=True) if scale_heads else None ) self.activation_fn = utils.get_activation_fn(activation=cfg.activation_fn) activation_dropout_p = cfg.activation_dropout if activation_dropout_p == 0: # for backwards compatibility with models that use cfg.relu_dropout activation_dropout_p = cfg.relu_dropout or 0 self.activation_dropout_module = FairseqDropout( float(activation_dropout_p), module_name=self.__class__.__name__ ) self.normalize_before = cfg.decoder.normalize_before self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = self.build_encoder_attention(self.embed_dim, cfg) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) self.ffn_layernorm = ( LayerNorm(cfg.decoder.ffn_embed_dim) if utils.safe_getattr(cfg, "scale_fc", False) else None ) self.w_resid = ( nn.Parameter( torch.ones( self.embed_dim, ), requires_grad=True, ) if utils.safe_getattr(cfg, "scale_resids", False) else None ) self.fc1 = self.build_fc1( self.embed_dim, cfg.decoder.ffn_embed_dim, self.quant_noise, self.quant_noise_block_size, ) self.fc2 = self.build_fc2( cfg.decoder.ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size, ) self.final_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) self.need_attn = True self.onnx_trace = False def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) def build_self_attention( self, embed_dim, cfg, add_bias_kv=False, add_zero_attn=False ): return MultiheadAttention( embed_dim, cfg.decoder.attention_heads, dropout=cfg.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=not cfg.cross_self_attention, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, xformers_att_config=cfg.decoder.xformers_att_config, ) def build_encoder_attention(self, embed_dim, cfg): return MultiheadAttention( embed_dim, cfg.decoder.attention_heads, kdim=cfg.encoder.embed_dim, vdim=cfg.encoder.embed_dim, dropout=cfg.attention_dropout, encoder_decoder_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, xformers_att_config=cfg.encoder.xformers_att_config, ) def prepare_for_onnx_export_(self): self.onnx_trace = True def residual_connection(self, x, residual): return residual + x def forward( self, x, encoder_out: Optional[torch.Tensor] = None, encoder_padding_mask: Optional[torch.Tensor] = None, incremental_state: Optional[Dict[str, 
Dict[str, Optional[Tensor]]]] = None, prev_self_attn_state: Optional[List[torch.Tensor]] = None, prev_attn_state: Optional[List[torch.Tensor]] = None, self_attn_mask: Optional[torch.Tensor] = None, self_attn_padding_mask: Optional[torch.Tensor] = None, need_attn: bool = False, need_head_weights: bool = False, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor, optional): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. need_attn (bool, optional): return attention weights need_head_weights (bool, optional): return attention weights for each head (default: return average over heads). Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ if need_head_weights: need_attn = True residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) if prev_self_attn_state is not None: prev_key, prev_value = prev_self_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_self_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_self_attn_state[2] assert incremental_state is not None self.self_attn._set_input_buffer(incremental_state, saved_state) _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state) if self.cross_self_attention and not ( incremental_state is not None and _self_attn_input_buffer is not None and "prev_key" in _self_attn_input_buffer ): if self_attn_mask is not None: assert encoder_out is not None self_attn_mask = torch.cat( (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1 ) if self_attn_padding_mask is not None: if encoder_padding_mask is None: assert encoder_out is not None encoder_padding_mask = self_attn_padding_mask.new_zeros( encoder_out.size(1), encoder_out.size(0) ) self_attn_padding_mask = torch.cat( (encoder_padding_mask, self_attn_padding_mask), dim=1 ) assert encoder_out is not None y = torch.cat((encoder_out, x), dim=0) else: y = x x, attn = self.self_attn( query=x, key=y, value=y, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) if self.c_attn is not None: tgt_len, bsz = x.size(0), x.size(1) x = x.view(tgt_len, bsz, self.nh, self.head_dim) x = torch.einsum("tbhd,h->tbhd", x, self.c_attn) x = x.reshape(tgt_len, bsz, self.embed_dim) if self.attn_ln is not None: x = self.attn_ln(x) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.self_attn_layer_norm(x) if self.encoder_attn is not None and encoder_out is not None: residual = x if self.normalize_before: x = self.encoder_attn_layer_norm(x) if prev_attn_state is not None: prev_key, prev_value = prev_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_attn_state[2] assert incremental_state is not None self.encoder_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=need_attn or (not self.training and self.need_attn), need_head_weights=need_head_weights, ) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.encoder_attn_layer_norm(x) residual = x if self.normalize_before: x = 
self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) if self.ffn_layernorm is not None: x = self.ffn_layernorm(x) x = self.fc2(x) x = self.dropout_module(x) if self.w_resid is not None: residual = torch.mul(self.w_resid, residual) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.final_layer_norm(x) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) assert saved_state is not None if self_attn_padding_mask is not None: self_attn_state = [ saved_state["prev_key"], saved_state["prev_value"], saved_state["prev_key_padding_mask"], ] else: self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]] return x, attn, self_attn_state return x, attn, None def make_generation_fast_(self, need_attn: bool = False, **kwargs): self.need_attn = need_attn # backward compatible with the legacy argparse format class TransformerDecoderLayer(TransformerDecoderLayerBase): def __init__( self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False ): super().__init__( TransformerConfig.from_namespace(args), no_encoder_attn=no_encoder_attn, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.args = args def build_self_attention( self, embed_dim, args, add_bias_kv=False, add_zero_attn=False ): return super().build_self_attention( embed_dim, TransformerConfig.from_namespace(args), add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) def build_encoder_attention(self, embed_dim, args): return super().build_encoder_attention( embed_dim, TransformerConfig.from_namespace(args), )
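# A minimal usage sketch (assumes fairseq is installed and that the default
# TransformerConfig values are usable as-is): build an encoder layer and run a
# forward pass. The sequence and batch sizes are illustrative.
if __name__ == "__main__":
    cfg = TransformerConfig()
    layer = TransformerEncoderLayerBase(cfg)
    x = torch.randn(10, 2, cfg.encoder.embed_dim)  # T x B x C
    out = layer(x, encoder_padding_mask=None)
    print(out.size())                              # torch.Size([10, 2, 512]) with the defaults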
EXA-1-master
exa/libraries/fairseq/fairseq/modules/transformer_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Callable, Optional import torch import torch.nn as nn from fairseq import utils from fairseq.modules import LayerNorm, MultiheadAttention from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.quant_noise import quant_noise class TransformerSentenceEncoderLayer(nn.Module): """ Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained models. """ def __init__( self, embedding_dim: int = 768, ffn_embedding_dim: int = 3072, num_attention_heads: int = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, activation_fn: str = "relu", export: bool = False, q_noise: float = 0.0, qn_block_size: int = 8, init_fn: Callable = None, ) -> None: super().__init__() if init_fn is not None: init_fn() # Initialize parameters self.embedding_dim = embedding_dim self.num_attention_heads = num_attention_heads self.attention_dropout = attention_dropout self.q_noise = q_noise self.qn_block_size = qn_block_size self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.activation_dropout_module = FairseqDropout( activation_dropout, module_name=self.__class__.__name__ ) # Initialize blocks self.activation_fn = utils.get_activation_fn(activation_fn) self.self_attn = self.build_self_attention( self.embedding_dim, num_attention_heads, dropout=attention_dropout, self_attention=True, q_noise=q_noise, qn_block_size=qn_block_size, ) # layer norm associated with the self attention layer self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export) self.fc1 = self.build_fc1( self.embedding_dim, ffn_embedding_dim, q_noise=q_noise, qn_block_size=qn_block_size, ) self.fc2 = self.build_fc2( ffn_embedding_dim, self.embedding_dim, q_noise=q_noise, qn_block_size=qn_block_size, ) # layer norm associated with the position wise feed-forward NN self.final_layer_norm = LayerNorm(self.embedding_dim, export=export) def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) def build_self_attention( self, embed_dim, num_attention_heads, dropout, self_attention, q_noise, qn_block_size, ): return MultiheadAttention( embed_dim, num_attention_heads, dropout=dropout, self_attention=True, q_noise=q_noise, qn_block_size=qn_block_size, ) def forward( self, x: torch.Tensor, self_attn_mask: Optional[torch.Tensor] = None, self_attn_padding_mask: Optional[torch.Tensor] = None, ): """ LayerNorm is applied either before or after the self-attention/ffn modules similar to the original Transformer implementation. """ residual = x x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, need_weights=False, attn_mask=self_attn_mask, ) x = self.dropout_module(x) x = residual + x x = self.self_attn_layer_norm(x) residual = x x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x x = self.final_layer_norm(x) return x, attn
EXA-1-master
exa/libraries/fairseq/fairseq/modules/transformer_sentence_encoder_layer.py
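# --- Usage sketch (illustrative; not part of the fairseq sources above) ---
# Runs one BERT-style encoder layer on a toy batch. Assumes fairseq is installed so the
# layer and its MultiheadAttention dependency are importable; sizes are arbitrary.
import torch
from fairseq.modules.transformer_sentence_encoder_layer import (
    TransformerSentenceEncoderLayer,
)

layer = TransformerSentenceEncoderLayer(
    embedding_dim=64, ffn_embedding_dim=256, num_attention_heads=4
)
x = torch.randn(10, 2, 64)  # seq_len x batch x embed_dim (the TBC layout used above)
out, attn = layer(x)        # out: 10 x 2 x 64; attn is None because need_weights=False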
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn


def quant_noise(module, p, block_size):
    """
    Wraps modules and applies quantization noise to the weights for
    subsequent quantization with Iterative Product Quantization as
    described in "Training with Quantization Noise for Extreme Model Compression"

    Args:
        - module: nn.Module
        - p: amount of Quantization Noise
        - block_size: size of the blocks for subsequent quantization with iPQ

    Remarks:
        - Module weights must have the right sizes wrt the block size
        - Only Linear, Embedding and Conv2d modules are supported for the moment
        - For more detail on how to quantize by blocks with convolutional weights,
          see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks"
        - We implement the simplest form of noise here as stated in the paper
          which consists in randomly dropping blocks
    """

    # if no quantization noise, don't register hook
    if p <= 0:
        return module

    # supported modules
    assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))

    # test whether module.weight has the right sizes wrt block_size
    is_conv = module.weight.ndim == 4

    # 2D matrix
    if not is_conv:
        assert (
            module.weight.size(1) % block_size == 0
        ), "Input features must be a multiple of block sizes"

    # 4D matrix
    else:
        # 1x1 convolutions
        if module.kernel_size == (1, 1):
            assert (
                module.in_channels % block_size == 0
            ), "Input channels must be a multiple of block sizes"
        # regular convolutions
        else:
            k = module.kernel_size[0] * module.kernel_size[1]
            assert k % block_size == 0, "Kernel size must be a multiple of block size"

    def _forward_pre_hook(mod, input):
        # no noise for evaluation
        if mod.training:
            if not is_conv:
                # gather weight and sizes
                weight = mod.weight
                in_features = weight.size(1)
                out_features = weight.size(0)

                # split weight matrix into blocks and randomly drop selected blocks
                mask = torch.zeros(
                    in_features // block_size * out_features, device=weight.device
                )
                mask.bernoulli_(p)
                mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)

            else:
                # gather weight and sizes
                weight = mod.weight
                in_channels = mod.in_channels
                out_channels = mod.out_channels

                # split weight matrix into blocks and randomly drop selected blocks
                if mod.kernel_size == (1, 1):
                    mask = torch.zeros(
                        int(in_channels // block_size * out_channels),
                        device=weight.device,
                    )
                    mask.bernoulli_(p)
                    mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)
                else:
                    mask = torch.zeros(
                        weight.size(0), weight.size(1), device=weight.device
                    )
                    mask.bernoulli_(p)
                    mask = (
                        mask.unsqueeze(2)
                        .unsqueeze(3)
                        .repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])
                    )

            # scale weights and apply mask
            mask = mask.to(
                torch.bool
            )  # x.bool() is not currently supported in TorchScript
            s = 1 / (1 - p)
            mod.weight.data = s * weight.masked_fill(mask, 0)

    module.register_forward_pre_hook(_forward_pre_hook)
    return module
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quant_noise.py
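# --- Usage sketch (illustrative; not part of the fairseq sources above) ---
# Wraps a Linear layer with quantization noise: during training the registered pre-hook
# randomly drops blocks of the weight matrix and rescales the rest by 1 / (1 - p).
import torch
import torch.nn as nn
from fairseq.modules.quant_noise import quant_noise

linear = nn.Linear(16, 32)                 # in_features must be a multiple of block_size
noisy = quant_noise(linear, p=0.1, block_size=8)
noisy.train()
y = noisy(torch.randn(4, 16))              # forward pre-hook masks weight blocks here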
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn
import torch
import torch.nn.functional as F


class LocationAttention(nn.Module):
    """
    Attention-Based Models for Speech Recognition
    https://arxiv.org/pdf/1506.07503.pdf

    :param int encoder_dim: # projection-units of encoder
    :param int decoder_dim: # units of decoder
    :param int attn_dim: attention dimension
    :param int conv_dim: # channels of attention convolution
    :param int conv_kernel_size: filter size of attention convolution
    """

    def __init__(
        self,
        attn_dim,
        encoder_dim,
        decoder_dim,
        attn_state_kernel_size,
        conv_dim,
        conv_kernel_size,
        scaling=2.0,
    ):
        super(LocationAttention, self).__init__()
        self.attn_dim = attn_dim
        self.decoder_dim = decoder_dim
        self.scaling = scaling
        self.proj_enc = nn.Linear(encoder_dim, attn_dim)
        self.proj_dec = nn.Linear(decoder_dim, attn_dim, bias=False)
        self.proj_attn = nn.Linear(conv_dim, attn_dim, bias=False)
        self.conv = nn.Conv1d(
            attn_state_kernel_size,
            conv_dim,
            2 * conv_kernel_size + 1,
            padding=conv_kernel_size,
            bias=False,
        )
        self.proj_out = nn.Sequential(nn.Tanh(), nn.Linear(attn_dim, 1))

        self.proj_enc_out = None  # cache

    def clear_cache(self):
        self.proj_enc_out = None

    def forward(self, encoder_out, encoder_padding_mask, decoder_h, attn_state):
        """
        :param torch.Tensor encoder_out: padded encoder hidden state B x T x D
        :param torch.Tensor encoder_padding_mask: encoder padding mask
        :param torch.Tensor decoder_h: decoder hidden state B x D
        :param torch.Tensor attn_state: previous attention weight B x K x T
        :return: attention weighted encoder state (B, D)
        :rtype: torch.Tensor
        :return: attention weights (B x T)
        :rtype: torch.Tensor
        """
        bsz, seq_len, _ = encoder_out.size()
        if self.proj_enc_out is None:
            self.proj_enc_out = self.proj_enc(encoder_out)

        # B x K x T -> B x C x T
        attn = self.conv(attn_state)
        # B x C x T -> B x T x C -> B x T x D
        attn = self.proj_attn(attn.transpose(1, 2))

        if decoder_h is None:
            decoder_h = encoder_out.new_zeros(bsz, self.decoder_dim)
        dec_h = self.proj_dec(decoder_h).view(bsz, 1, self.attn_dim)

        out = self.proj_out(attn + self.proj_enc_out + dec_h).squeeze(2)
        out.masked_fill_(encoder_padding_mask, -float("inf"))

        w = F.softmax(self.scaling * out, dim=1)
        c = torch.sum(encoder_out * w.view(bsz, seq_len, 1), dim=1)
        return c, w
EXA-1-master
exa/libraries/fairseq/fairseq/modules/location_attention.py
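# --- Usage sketch (illustrative; not part of the fairseq sources above); shapes are assumptions ---
import torch
from fairseq.modules.location_attention import LocationAttention

B, T, enc_dim, dec_dim = 2, 10, 32, 24
attn = LocationAttention(
    attn_dim=16, encoder_dim=enc_dim, decoder_dim=dec_dim,
    attn_state_kernel_size=1, conv_dim=8, conv_kernel_size=5,
)
enc_out = torch.randn(B, T, enc_dim)
pad_mask = torch.zeros(B, T, dtype=torch.bool)   # True marks padded positions
dec_h = torch.randn(B, dec_dim)
attn_state = torch.zeros(B, 1, T)                # previous attention weights (K = 1)
context, weights = attn(enc_out, pad_mask, dec_h, attn_state)  # (B, enc_dim), (B, T)
attn.clear_cache()                               # drop the cached encoder projection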
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional import torch from numpy.random import uniform from torch import Tensor from fairseq.modules import LayerNorm from fairseq.modules.transformer_layer import TransformerDecoderLayerBase class AugTransformerDecoderLayerBase(TransformerDecoderLayerBase): """Decoder layer block augmented with an additional cross-attention. This decoder block is processed with the sequence of the following sub-modules. self-attention -> cross-attention (first) -> cross-attention (second) -> FFN Args: cfg (argparse.Namespace): parsed command-line arguments encoder_attn_merge_type (str, optional): the way to combine outputs from two cross-attention modules. If "sequential" is set, two cross-attention modules are stacked sequentially. If "parallel" is set, they are processed in parallel and combined before feeding it to FFN (default: sequential). dropnet_ratio (float, optional): a probability to drop each cross-attention module during training (default: 0.0). """ def __init__( self, cfg, add_bias_kv=False, add_zero_attn=False, encoder_attn_merge_type="sequential", dropnet_ratio=0.0, ): super().__init__( cfg, no_encoder_attn=False, add_bias_kv=add_bias_kv, add_zero_attn=False, ) self.encoder_attn = self.build_encoder_attention(self.embed_dim, cfg) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) self.encoder_attn2 = self.build_encoder_attention(self.embed_dim, cfg) if encoder_attn_merge_type == "sequential": self.encoder_attn_layer_norm2 = LayerNorm(self.embed_dim, export=cfg.export) else: self.encoder_attn_layer_norm2 = None self.encoder_attn_merge_type = encoder_attn_merge_type self.dropnet_ratio = dropnet_ratio def forward( self, x, encoder_out: Optional[torch.Tensor] = None, encoder_padding_mask: Optional[torch.Tensor] = None, encoder_out_aug: Optional[torch.Tensor] = None, encoder_padding_mask2: Optional[torch.Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, prev_self_attn_state: Optional[List[torch.Tensor]] = None, prev_attn_state: Optional[List[torch.Tensor]] = None, self_attn_mask: Optional[torch.Tensor] = None, self_attn_padding_mask: Optional[torch.Tensor] = None, need_attn: bool = False, need_head_weights: bool = False, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor, optional): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. need_attn (bool, optional): return attention weights need_head_weights (bool, optional): return attention weights for each head (default: return average over heads). 
Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ if need_head_weights: need_attn = True residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) if prev_self_attn_state is not None: prev_key, prev_value = prev_self_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_self_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_self_attn_state[2] assert incremental_state is not None self.self_attn._set_input_buffer(incremental_state, saved_state) _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state) if self.cross_self_attention and not ( incremental_state is not None and _self_attn_input_buffer is not None and "prev_key" in _self_attn_input_buffer ): if self_attn_mask is not None: assert encoder_out is not None self_attn_mask = torch.cat( (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1 ) if self_attn_padding_mask is not None: if encoder_padding_mask is None: assert encoder_out is not None encoder_padding_mask = self_attn_padding_mask.new_zeros( encoder_out.size(1), encoder_out.size(0) ) self_attn_padding_mask = torch.cat( (encoder_padding_mask, self_attn_padding_mask), dim=1 ) assert encoder_out is not None y = torch.cat((encoder_out, x), dim=0) else: y = x x, attn = self.self_attn( query=x, key=y, value=y, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) if self.c_attn is not None: tgt_len, bsz = x.size(0), x.size(1) x = x.view(tgt_len, bsz, self.nh, self.head_dim) x = torch.einsum("tbhd,h->tbhd", x, self.c_attn) x = x.reshape(tgt_len, bsz, self.embed_dim) if self.attn_ln is not None: x = self.attn_ln(x) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.self_attn_layer_norm(x) assert encoder_out is not None assert encoder_out_aug is not None if self.encoder_attn_merge_type == "sequential": ratios = self.get_dropnet_ratio() # first encoder attention if ratios[0] > 0: residual = x if self.normalize_before: x = self.encoder_attn_layer_norm(x) if prev_attn_state is not None: prev_key, prev_value = prev_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_attn_state[2] assert incremental_state is not None self.encoder_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=need_attn or (not self.training and self.need_attn), need_head_weights=need_head_weights, ) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.encoder_attn_layer_norm(x) x = ratios[0] * x # second encoder attention if ratios[1] > 0: residual = x if self.normalize_before: x = self.encoder_attn_layer_norm2(x) if prev_attn_state is not None: prev_key, prev_value = prev_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_attn_state[2] assert incremental_state is not None self.encoder_attn2._set_input_buffer(incremental_state, saved_state) x, attn2 = self.encoder_attn2( query=x, key=encoder_out_aug, value=encoder_out_aug, 
key_padding_mask=encoder_padding_mask2, incremental_state=incremental_state, static_kv=True, need_weights=need_attn or (not self.training and self.need_attn), need_head_weights=need_head_weights, ) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.encoder_attn_layer_norm2(x) x = ratios[1] * x elif self.encoder_attn_merge_type == "parallel": residual = x if self.normalize_before: x = self.encoder_attn_layer_norm(x) if prev_attn_state is not None: prev_key, prev_value = prev_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_attn_state[2] assert incremental_state is not None self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=need_attn or (not self.training and self.need_attn), need_head_weights=need_head_weights, ) x2, attn2 = self.encoder_attn2( query=x, key=encoder_out_aug, value=encoder_out_aug, key_padding_mask=encoder_padding_mask2, incremental_state=incremental_state, static_kv=True, need_weights=need_attn or (not self.training and self.need_attn), need_head_weights=need_head_weights, ) x1 = self.dropout_module(x1) x2 = self.dropout_module(x2) ratios = self.get_dropnet_ratio() x = ratios[0] * x1 + ratios[1] * x2 x = self.residual_connection(x, residual) if not self.normalize_before: x = self.encoder_attn_layer_norm(x) else: raise NotImplementedError(self.encoder_attn_merge_type) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) if self.ffn_layernorm is not None: x = self.ffn_layernorm(x) x = self.fc2(x) x = self.dropout_module(x) if self.w_resid is not None: residual = torch.mul(self.w_resid, residual) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.final_layer_norm(x) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) assert saved_state is not None if self_attn_padding_mask is not None: self_attn_state = [ saved_state["prev_key"], saved_state["prev_value"], saved_state["prev_key_padding_mask"], ] else: self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]] return x, attn, attn2, self_attn_state return x, attn, attn2, None def get_dropnet_ratio(self): if self.encoder_attn_merge_type == "sequential": if self.dropnet_ratio > 0: frand = float(uniform(0, 1)) if frand < self.dropnet_ratio and self.training: return [2, 0] elif frand > 1 - self.dropnet_ratio and self.training: return [0, 2] else: return [1, 1] else: return [1, 1] elif self.encoder_attn_merge_type == "parallel": if self.dropnet_ratio > 0: frand = float(uniform(0, 1)) if frand < self.dropnet_ratio and self.training: return [1, 0] elif frand > 1 - self.dropnet_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [0.5, 0.5]
EXA-1-master
exa/libraries/fairseq/fairseq/modules/transformer_layer_aug.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
Layer norm done in fp32 (for fp16 training)
"""

import torch.nn as nn
import torch.nn.functional as F


class Fp32GroupNorm(nn.GroupNorm):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, input):
        output = F.group_norm(
            input.float(),
            self.num_groups,
            self.weight.float() if self.weight is not None else None,
            self.bias.float() if self.bias is not None else None,
            self.eps,
        )
        return output.type_as(input)
EXA-1-master
exa/libraries/fairseq/fairseq/modules/fp32_group_norm.py
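# --- Usage sketch (illustrative; not part of the fairseq sources above) ---
# Group normalization whose statistics are always computed in float32; the output is cast
# back to the input dtype, which is what makes it safe under fp16 training.
import torch
from fairseq.modules import Fp32GroupNorm

gn = Fp32GroupNorm(num_groups=4, num_channels=32)
x = torch.randn(8, 32, 50)   # (batch, channels, time)
y = gn(x)                    # same shape and dtype as x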
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, Optional import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.incremental_decoding_utils import ( FairseqIncrementalState, with_incremental_state, ) from fairseq.modules.fairseq_dropout import FairseqDropout from torch import Tensor from .unfold import unfold1d def DynamicConv( input_size, kernel_size=1, padding_l=None, num_heads=1, weight_dropout=0.0, weight_softmax=False, renorm_padding=False, bias=False, conv_bias=False, query_size=None, in_proj=False, ): if torch.cuda.is_available(): try: from fairseq.modules.dynamicconv_layer import DynamicconvLayer return DynamicconvLayer( input_size, kernel_size=kernel_size, padding_l=padding_l, num_heads=num_heads, weight_dropout=weight_dropout, weight_softmax=weight_softmax, renorm_padding=renorm_padding, bias=bias, conv_bias=conv_bias, query_size=query_size, ) except ImportError as e: print(e) return DynamicConv1dTBC( input_size, kernel_size=kernel_size, padding_l=padding_l, num_heads=num_heads, weight_dropout=weight_dropout, weight_softmax=weight_softmax, renorm_padding=renorm_padding, bias=bias, conv_bias=conv_bias, query_size=query_size, ) def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m @with_incremental_state class DynamicConv1dTBC(nn.Module): """Dynamic lightweight convolution taking T x B x C inputs Args: input_size: # of channels of the input kernel_size: convolution channels padding_l: padding to the left when using "same" padding num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size) weight_dropout: the drop rate of the DropConnect to drop the weight weight_softmax: normalize the weight with softmax before the convolution renorm_padding: re-normalize the filters to ignore the padded part (only the non-padding parts sum up to 1) bias: use bias conv_bias: bias of the convolution query_size: specified when feeding a different input as the query in_proj: project the input and generate the filter together Shape: Input: TxBxC, i.e. (timesteps, batch_size, input_size) Output: TxBxC, i.e. 
(timesteps, batch_size, input_size) Attributes: weight: the learnable weights of the module of shape `(num_heads, 1, kernel_size)` bias: the learnable bias of the module of shape `(input_size)` """ def __init__( self, input_size, kernel_size=1, padding_l=None, num_heads=1, weight_dropout=0.0, weight_softmax=False, renorm_padding=False, bias=False, conv_bias=False, query_size=None, in_proj=False, ): super().__init__() self.input_size = input_size self.query_size = input_size if query_size is None else query_size self.kernel_size = kernel_size self.padding_l = padding_l self.num_heads = num_heads self.weight_dropout_module = FairseqDropout( weight_dropout, module_name=self.__class__.__name__ ) self.weight_softmax = weight_softmax self.renorm_padding = renorm_padding if in_proj: self.weight_linear = Linear( self.input_size, self.input_size + num_heads * kernel_size * 1 ) else: self.weight_linear = Linear( self.query_size, num_heads * kernel_size * 1, bias=bias ) if conv_bias: self.conv_bias = nn.Parameter(torch.Tensor(input_size)) else: self.conv_bias = None self.reset_parameters() @property def in_proj(self): return ( self.weight_linear.out_features == self.input_size + self.num_heads * self.kernel_size ) def reset_parameters(self): self.weight_linear.reset_parameters() if self.conv_bias is not None: nn.init.constant_(self.conv_bias, 0.0) def forward(self, x, incremental_state=None, query=None, unfold=None): """Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C args: x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size) incremental_state: A dict to keep the state unfold: unfold the input or not. If not, we use the matrix trick instead query: use the specified query to predict the conv filters """ unfold = ( x.size(0) > 512 if unfold is None else unfold ) # use unfold mode as default for long sequence to save memory unfold = unfold or (incremental_state is not None) assert query is None or not self.in_proj if query is None: query = x if unfold: output = self._forward_unfolded(x, incremental_state, query) else: output = self._forward_expanded(x, incremental_state, query) if self.conv_bias is not None: output = output + self.conv_bias.view(1, 1, -1) return output def _forward_unfolded(self, x, incremental_state, query): """The conventional implementation of convolutions. 
Unfolding the input by having a window shifting to the right.""" T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size if self.in_proj: proj = self.weight_linear(x) x = proj.narrow(2, 0, self.input_size).contiguous() weight = ( proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1) ) else: weight = self.weight_linear(query).view(T * B * H, -1) # renorm_padding is only implemented in _forward_expanded assert not self.renorm_padding or incremental_state is not None if incremental_state is not None: input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer( incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :] ) x_unfold = x_unfold.view(T * B * H, R, -1) else: padding_l = self.padding_l if K > T and padding_l == K - 1: weight = weight.narrow(1, K - T, T) K, padding_l = T, T - 1 # unfold the input: T x B x C --> T' x B x C x K x_unfold = unfold1d(x, K, padding_l, 0) x_unfold = x_unfold.view(T * B * H, R, K) if self.weight_softmax and not self.renorm_padding: weight = F.softmax(weight, dim=1) weight = weight.narrow(1, 0, K) if incremental_state is not None: weight = weight[:, -x_unfold.size(2) :] K = weight.size(1) if self.weight_softmax and self.renorm_padding: weight = F.softmax(weight, dim=1) weight = self.weight_dropout_module(weight, inplace=False) output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1 output = output.view(T, B, C) return output def _forward_expanded(self, x, incremental_stat, query): """Turn the convolution filters into band matrices and do matrix multiplication. This is faster when the sequence is short, but less memory efficient. This is not used in the decoder during inference. 
""" T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size if self.in_proj: proj = self.weight_linear(x) x = proj.narrow(2, 0, self.input_size).contiguous() weight = ( proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1) ) else: weight = self.weight_linear(query).view(T * B * H, -1) if not self.renorm_padding: if self.weight_softmax: weight = F.softmax(weight, dim=1) weight = self.weight_dropout_module(weight, inplace=False) weight = weight.narrow(1, 0, K).contiguous() weight = weight.view(T, B * H, K).transpose(0, 1) x = x.view(T, B * H, R).transpose(0, 1) if self.weight_softmax and self.renorm_padding: # turn the convolution filters into band matrices weight_expanded = weight.new(B * H, T, T + K - 1).fill_(float("-inf")) weight_expanded.as_strided( (B * H, T, K), (T * (T + K - 1), T + K, 1) ).copy_(weight) weight_expanded = weight_expanded.narrow(2, self.padding_l, T) # normalize the weight over valid positions like self-attention weight_expanded = F.softmax(weight_expanded, dim=2) weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False) else: P = self.padding_l # For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length if K > T and P == K - 1: weight = weight.narrow(2, K - T, T) K, P = T, T - 1 # turn the convolution filters into band matrices weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False) weight_expanded.as_strided( (B * H, T, K), (T * (T + K - 1), T + K, 1) ).copy_(weight) weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T output = torch.bmm(weight_expanded, x) output = output.transpose(0, 1).contiguous().view(T, B, C) return output def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state(self, incremental_state, "input_buffer") def _set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state( self, incremental_state, "input_buffer", new_buffer ) def extra_repr(self): s = "{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, conv_bias={}, renorm_padding={}, in_proj={}".format( self.input_size, self.kernel_size, self.padding_l, self.num_heads, self.weight_softmax, self.conv_bias is not None, self.renorm_padding, self.in_proj, ) if self.query_size != self.input_size: s += ", query_size={}".format(self.query_size) if self.weight_dropout_module.p > 0.0: s += ", weight_dropout={}".format(self.weight_dropout_module.p) return s class DynamicConv_scripatable(nn.Module, FairseqIncrementalState): """Dynamic lightweight convolution taking T x B x C inputs Args: input_size: # of channels of the input kernel_size: convolution channels padding_l: padding to the left when using "same" padding num_heads: number of heads used. 
The weight is of shape (num_heads, 1, kernel_size) weight_dropout: the drop rate of the DropConnect to drop the weight weight_softmax: normalize the weight with softmax before the convolution renorm_padding: re-normalize the filters to ignore the padded part (only the non-padding parts sum up to 1) bias: use bias conv_bias: bias of the convolution query_size: specified when feeding a different input as the query in_proj: project the input and generate the filter together Shape: Input: TxBxC, i.e. (timesteps, batch_size, input_size) Output: TxBxC, i.e. (timesteps, batch_size, input_size) Attributes: weight: the learnable weights of the module of shape `(num_heads, 1, kernel_size)` bias: the learnable bias of the module of shape `(input_size)` """ def __init__( self, input_size, kernel_size=1, padding_l=None, num_heads=1, weight_dropout=0.0, weight_softmax=False, renorm_padding=False, bias=False, conv_bias=False, query_size=None, in_proj=False, ): super().__init__() self.input_size = input_size self.query_size = input_size if query_size is None else query_size self.kernel_size = kernel_size self.padding_l = padding_l self.num_heads = num_heads self.weight_dropout_module = FairseqDropout( weight_dropout, module_name=self.__class__.__name__ ) self.weight_softmax = weight_softmax self.renorm_padding = renorm_padding if in_proj: self.weight_linear = Linear( self.input_size, self.input_size + num_heads * kernel_size * 1 ) else: self.weight_linear = Linear( self.query_size, num_heads * kernel_size * 1, bias=bias ) self.in_proj = ( self.weight_linear.out_features == self.input_size + self.num_heads * self.kernel_size ) self.has_conv_bias = conv_bias self.conv_bias = nn.Parameter(torch.Tensor(input_size).view(1, 1, -1)) self.init_incremental_state() self.reset_parameters() def reset_parameters(self): self.weight_linear.reset_parameters() if self.has_conv_bias: nn.init.constant_(self.conv_bias, 0.0) def forward( self, x, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, query: Optional[Tensor] = None, ): """Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C args: x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size) incremental_state: A dict to keep the state unfold: unfold the input or not. If not, we use the matrix trick instead query: use the specified query to predict the conv filters """ assert query is None or not self.in_proj if query is None: query = x output = self._forward_unfolded(x, incremental_state, query) if self.has_conv_bias: output = output + self.conv_bias return output def _forward_unfolded( self, x, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], query, ): """The conventional implementation of convolutions. 
Unfolding the input by having a window shifting to the right.""" T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size TxBxH = T * B * H if self.in_proj: proj = self.weight_linear(x) x = proj.narrow(2, 0, self.input_size).contiguous() weight = proj.narrow(2, self.input_size, H * K).contiguous().view(TxBxH, -1) else: weight = self.weight_linear(query).view(TxBxH, -1) # renorm_padding is only implemented in _forward_expanded assert not self.renorm_padding or incremental_state is not None if incremental_state is not None: input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) else: x_unfold = x.unsqueeze(3).clone() if self.kernel_size > 1: self._set_input_buffer( incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :] ) x_unfold = x_unfold.view(TxBxH, R, -1) else: padding_l = self.padding_l if K > T and padding_l == K - 1: weight = weight.narrow(1, K - T, T) K, padding_l = T, T - 1 # unfold the input: T x B x C --> T' x B x C x K x_unfold = unfold1d(x, K, padding_l, 0.0) x_unfold = x_unfold.view(TxBxH, R, K) if self.weight_softmax and not self.renorm_padding: weight = F.softmax(weight, dim=1) weight = weight.narrow(1, 0, K) if incremental_state is not None: weight = weight[:, -(x_unfold.size(2)) :] K = weight.size(1) if self.weight_softmax and self.renorm_padding: weight = F.softmax(weight, dim=1) weight = self.weight_dropout_module(weight, inplace=False) output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T x B x H x R x 1 output = output.view(T, B, C) return output def reorder_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_order: Tensor, ): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ): result = self.get_incremental_state(incremental_state, "input_buffer") if result is not None and "input_buffer" in result: return result["input_buffer"] else: return None def _set_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_buffer: Optional[Tensor], ): result = self.set_incremental_state( incremental_state, "input_buffer", {"input_buffer": new_buffer} ) if result is not None: incremental_state = result return incremental_state def extra_repr(self): s = "{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, conv_bias={}, renorm_padding={}, in_proj={}".format( # noqa self.input_size, self.kernel_size, self.padding_l, self.num_heads, self.weight_softmax, self.conv_bias is not None, self.renorm_padding, self.in_proj, ) if self.query_size != self.input_size: s += ", query_size={}".format(self.query_size) if self.weight_dropout_module.p > 0.0: s += ", weight_dropout={}".format(self.weight_dropout_module.p) return s
EXA-1-master
exa/libraries/fairseq/fairseq/modules/dynamic_convolution.py
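# --- Usage sketch (illustrative; not part of the fairseq sources above) ---
# DynamicConv1dTBC predicts a per-timestep convolution kernel from the input itself.
import torch
from fairseq.modules.dynamic_convolution import DynamicConv1dTBC

conv = DynamicConv1dTBC(
    input_size=64, kernel_size=3, padding_l=2, num_heads=8, weight_softmax=True
)
x = torch.randn(20, 4, 64)   # T x B x C
y = conv(x)                  # T x B x C; padding_l = kernel_size - 1 gives causal padding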
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
from typing import List, Optional

import torch.nn as nn
import torch.nn.functional as F

logger = logging.getLogger(__name__)


class FairseqDropout(nn.Module):
    def __init__(self, p, module_name=None):
        super().__init__()
        self.p = p
        self.module_name = module_name
        self.apply_during_inference = False

    def forward(self, x, inplace: bool = False):
        if self.p > 0 and (self.training or self.apply_during_inference):
            return F.dropout(x, p=self.p, training=True, inplace=inplace)
        else:
            return x

    def make_generation_fast_(
        self,
        name: str,
        retain_dropout: bool = False,
        retain_dropout_modules: Optional[List[str]] = None,
        **kwargs
    ):
        if retain_dropout:
            if retain_dropout_modules is not None and self.module_name is None:
                logger.warning(
                    "Cannot enable dropout during inference for module {} "
                    "because module_name was not set".format(name)
                )
            elif (
                retain_dropout_modules is None  # if None, apply to all modules
                or self.module_name in retain_dropout_modules
            ):
                logger.info(
                    "Enabling dropout during inference for module: {}".format(name)
                )
                self.apply_during_inference = True
            else:
                logger.info("Disabling dropout for module: {}".format(name))
EXA-1-master
exa/libraries/fairseq/fairseq/modules/fairseq_dropout.py
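# --- Usage sketch (illustrative; not part of the fairseq sources above) ---
# FairseqDropout behaves like regular dropout but can be kept active at inference time
# for selected modules via make_generation_fast_; "MyLayer" is an arbitrary label here.
import torch
from fairseq.modules.fairseq_dropout import FairseqDropout

drop = FairseqDropout(p=0.1, module_name="MyLayer")
drop.eval()
y = drop(torch.randn(4, 8))                           # identity in eval mode by default
drop.make_generation_fast_(
    "decoder.layers.0", retain_dropout=True, retain_dropout_modules=["MyLayer"]
)
y = drop(torch.randn(4, 8))                           # dropout now applied at inference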
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F class GumbelVectorQuantizer(nn.Module): def __init__( self, dim, num_vars, temp, groups, combine_groups, vq_dim, time_first, activation=nn.GELU(), weight_proj_depth=1, weight_proj_factor=1, hard=True, std=0, ): """Vector quantization using gumbel softmax Args: dim: input dimension (channels) num_vars: number of quantized vectors per group temp: temperature for training. this should be a tuple of 3 elements: (start, stop, decay factor) groups: number of groups for vector quantization combine_groups: whether to use the vectors for all groups vq_dim: dimensionality of the resulting quantized vector time_first: if true, expect input in BxTxC format, otherwise in BxCxT activation: what activation to use (should be a module). this is only used if weight_proj_depth is > 1 weight_proj_depth: number of layers (with activation in between) to project input before computing logits weight_proj_factor: this is used only if weight_proj_depth is > 1. scales the inner dimensionality of projections by this factor """ super().__init__() self.groups = groups self.combine_groups = combine_groups self.input_dim = dim self.num_vars = num_vars self.time_first = time_first self.hard = hard assert ( vq_dim % groups == 0 ), f"dim {vq_dim} must be divisible by groups {groups} for concatenation" var_dim = vq_dim // groups num_groups = groups if not combine_groups else 1 self.vars = nn.Parameter(torch.FloatTensor(1, num_groups * num_vars, var_dim)) if std == 0: nn.init.uniform_(self.vars) else: nn.init.normal_(self.vars, mean=0, std=std) if weight_proj_depth > 1: def block(input_dim, output_dim): return nn.Sequential(nn.Linear(input_dim, output_dim), activation) inner_dim = self.input_dim * weight_proj_factor self.weight_proj = nn.Sequential( *[ block(self.input_dim if i == 0 else inner_dim, inner_dim) for i in range(weight_proj_depth - 1) ], nn.Linear(inner_dim, groups * num_vars), ) else: self.weight_proj = nn.Linear(self.input_dim, groups * num_vars) nn.init.normal_(self.weight_proj.weight, mean=0, std=1) nn.init.zeros_(self.weight_proj.bias) if isinstance(temp, str): import ast temp = ast.literal_eval(temp) assert len(temp) == 3, f"{temp}, {len(temp)}" self.max_temp, self.min_temp, self.temp_decay = temp self.curr_temp = self.max_temp self.codebook_indices = None def set_num_updates(self, num_updates): self.curr_temp = max( self.max_temp * self.temp_decay**num_updates, self.min_temp ) def get_codebook_indices(self): if self.codebook_indices is None: from itertools import product p = [range(self.num_vars)] * self.groups inds = list(product(*p)) self.codebook_indices = torch.tensor( inds, dtype=torch.long, device=self.vars.device ).flatten() if not self.combine_groups: self.codebook_indices = self.codebook_indices.view( self.num_vars**self.groups, -1 ) for b in range(1, self.groups): self.codebook_indices[:, b] += self.num_vars * b self.codebook_indices = self.codebook_indices.flatten() return self.codebook_indices def codebook(self): indices = self.get_codebook_indices() return ( self.vars.squeeze(0) .index_select(0, indices) .view(self.num_vars**self.groups, -1) ) def sample_from_codebook(self, b, n): indices = self.get_codebook_indices() indices = indices.view(-1, self.groups) cb_size = indices.size(0) assert ( n < cb_size ), f"sample size {n} is greater than size of 
codebook {cb_size}" sample_idx = torch.randint(low=0, high=cb_size, size=(b * n,)) indices = indices[sample_idx] z = self.vars.squeeze(0).index_select(0, indices.flatten()).view(b, n, -1) return z def to_codebook_index(self, indices): res = indices.new_full(indices.shape[:-1], 0) for i in range(self.groups): exponent = self.groups - i - 1 res += indices[..., i] * (self.num_vars**exponent) return res def forward_idx(self, x): res = self.forward(x, produce_targets=True) return res["x"], res["targets"] def forward(self, x, produce_targets=False): result = {"num_vars": self.num_vars * self.groups} if not self.time_first: x = x.transpose(1, 2) bsz, tsz, fsz = x.shape x = x.reshape(-1, fsz) x = self.weight_proj(x) x = x.view(bsz * tsz * self.groups, -1) with torch.no_grad(): _, k = x.max(-1) hard_x = ( x.new_zeros(*x.shape) .scatter_(-1, k.view(-1, 1), 1.0) .view(bsz * tsz, self.groups, -1) ) hard_probs = torch.mean(hard_x.float(), dim=0) result["code_perplexity"] = torch.exp( -torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1) ).sum() avg_probs = torch.softmax( x.view(bsz * tsz, self.groups, -1).float(), dim=-1 ).mean(dim=0) result["prob_perplexity"] = torch.exp( -torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1) ).sum() result["temp"] = self.curr_temp if self.training: x = F.gumbel_softmax(x.float(), tau=self.curr_temp, hard=self.hard).type_as( x ) else: x = hard_x x = x.view(bsz * tsz, -1) vars = self.vars if self.combine_groups: vars = vars.repeat(1, self.groups, 1) if produce_targets: result["targets"] = ( x.view(bsz * tsz * self.groups, -1) .argmax(dim=-1) .view(bsz, tsz, self.groups) .detach() ) x = x.unsqueeze(-1) * vars x = x.view(bsz * tsz, self.groups, self.num_vars, -1) x = x.sum(-2) x = x.view(bsz, tsz, -1) if not self.time_first: x = x.transpose(1, 2) # BTC -> BCT result["x"] = x return result
EXA-1-master
exa/libraries/fairseq/fairseq/modules/gumbel_vector_quantizer.py
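# --- Usage sketch (illustrative; not part of the fairseq sources above); sizes are assumptions ---
import torch
from fairseq.modules import GumbelVectorQuantizer

quantizer = GumbelVectorQuantizer(
    dim=64, num_vars=320, temp=(2.0, 0.5, 0.999995),
    groups=2, combine_groups=False, vq_dim=64, time_first=True,
)
x = torch.randn(4, 50, 64)                  # B x T x C
out = quantizer(x, produce_targets=True)
print(out["x"].shape)                       # quantized features: (4, 50, 64)
print(out["targets"].shape)                 # chosen codebook entry per group: (4, 50, 2)
quantizer.set_num_updates(1000)             # anneal the gumbel temperature over training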
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn from fairseq.modules import Fp32GroupNorm class KmeansVectorQuantizer(nn.Module): def __init__( self, dim, num_vars, groups, combine_groups, vq_dim, time_first, gamma=0.25 ): """Vector quantization using straight pass-through estimator (i.e. kmeans) Args: dim: input dimension (channels) num_vars: number of quantized vectors per group groups: number of groups for vector quantization combine_groups: whether to use the vectors for all groups vq_dim: dimensionality of the resulting quantized vector time_first: if true, expect input in BxTxC format, otherwise in BxCxT gamma: commitment loss coefficient """ super().__init__() self.groups = groups self.combine_groups = combine_groups self.input_dim = dim self.num_vars = num_vars self.vq_dim = vq_dim self.time_first = time_first assert ( vq_dim % groups == 0 ), f"dim {vq_dim} must be divisible by groups {groups} for concatenation" self.var_dim = vq_dim // groups num_groups = groups if not combine_groups else 1 self.embedding = nn.Parameter( 0.01 * torch.randn(num_vars, num_groups, self.var_dim) ) self.projection = nn.Sequential( nn.Conv1d(dim, dim, kernel_size=1, groups=groups, bias=False), Fp32GroupNorm(groups, dim), ) self.gamma = gamma self.mse_mean = nn.MSELoss(reduction="mean") def _pass_grad(self, x, y): """Manually set gradient for backward pass. for y = f(x), ensure that during the backward pass, dL/dy = dL/dx regardless of f(x). Returns: y, with the gradient forced to be dL/dy = dL/dx. """ return y.detach() + (x - x.detach()) @property def expand_embedding(self): if self.combine_groups: return self.embedding.expand(self.num_vars, self.groups, self.var_dim) return self.embedding def forward_idx(self, x): res = self.forward(x, produce_targets=True) return res["x"], res["targets"] def forward(self, x, produce_targets=False): result = {"num_vars": self.num_vars} if self.time_first: x = x.transpose(1, 2) bsz, fsz, tsz = x.shape ze = self.projection(x) ze_ = ze.view(bsz, self.groups, self.var_dim, tsz).permute(0, 3, 1, 2) d = ( (ze_.unsqueeze(0) - self.expand_embedding.unsqueeze(1).unsqueeze(1)) .view(self.num_vars, bsz, tsz, self.groups, -1) .norm(dim=-1, p=2) ) idx = d.argmin(dim=0) zq = ( torch.stack( [ self.expand_embedding[idx[..., group], group] for group in range(self.groups) ], dim=-2, ) .view(bsz, tsz, self.groups * self.var_dim) .permute(0, 2, 1) ) assert ze.shape == zq.shape, (ze.shape, zq.shape) x = self._pass_grad(ze, zq) with torch.no_grad(): hard_x = ( idx.new_zeros(bsz * tsz * self.groups, self.num_vars) .scatter_(-1, idx.view(-1, 1), 1.0) .view(bsz * tsz, self.groups, -1) ) hard_probs = torch.mean(hard_x.float(), dim=0) result["code_perplexity"] = torch.exp( -torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1) ).sum() if produce_targets: result["targets"] = idx if self.time_first: x = x.transpose(1, 2) # BCT -> BTC result["x"] = x ze = ze.float() zq = zq.float() latent_loss = self.mse_mean(zq, ze.detach()) commitment_loss = self.mse_mean(ze, zq.detach()) result["kmeans_loss"] = latent_loss + self.gamma * commitment_loss return result
EXA-1-master
exa/libraries/fairseq/fairseq/modules/kmeans_vector_quantizer.py
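# --- Usage sketch (illustrative; not part of the fairseq sources above); sizes are assumptions ---
import torch
from fairseq.modules import KmeansVectorQuantizer

quantizer = KmeansVectorQuantizer(
    dim=64, num_vars=320, groups=2, combine_groups=False,
    vq_dim=64, time_first=True, gamma=0.25,
)
x = torch.randn(4, 50, 64)                  # B x T x C
out = quantizer(x, produce_targets=True)
print(out["x"].shape)                       # straight-through quantized features: (4, 50, 64)
print(out["kmeans_loss"])                   # latent loss + gamma * commitment loss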
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn


class LSTMCellWithZoneOut(nn.Module):
    """
    Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations
    https://arxiv.org/abs/1606.01305
    """

    def __init__(
        self, prob: float, input_size: int, hidden_size: int, bias: bool = True
    ):
        super(LSTMCellWithZoneOut, self).__init__()
        self.lstm_cell = nn.LSTMCell(input_size, hidden_size, bias=bias)
        self.prob = prob
        if prob > 1.0 or prob < 0.0:
            raise ValueError(
                "zoneout probability must be in the range from 0.0 to 1.0."
            )

    def zoneout(self, h, next_h, prob):
        if isinstance(h, tuple):
            return tuple([self.zoneout(h[i], next_h[i], prob) for i in range(len(h))])

        if self.training:
            mask = h.new_zeros(*h.size()).bernoulli_(prob)
            return mask * h + (1 - mask) * next_h

        return prob * h + (1 - prob) * next_h

    def forward(self, x, h):
        return self.zoneout(h, self.lstm_cell(x, h), self.prob)
EXA-1-master
exa/libraries/fairseq/fairseq/modules/lstm_cell_with_zoneout.py
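# --- Usage sketch (illustrative; not part of the fairseq sources above) ---
import torch
from fairseq.modules.lstm_cell_with_zoneout import LSTMCellWithZoneOut

cell = LSTMCellWithZoneOut(prob=0.1, input_size=16, hidden_size=32)
x = torch.randn(4, 16)
state = (torch.zeros(4, 32), torch.zeros(4, 32))   # (hidden, cell)
state = cell(x, state)                             # zoned-out (hidden, cell) tuple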
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import List

import torch
from torch import nn

from fairseq.modules.quant_noise import quant_noise


class AdaptiveInput(nn.Module):
    def __init__(
        self,
        vocab_size: int,
        padding_idx: int,
        initial_dim: int,
        factor: float,
        output_dim: int,
        cutoff: List[int],
        q_noise: float = 0,
        qn_block_size: int = 8,
    ):
        super().__init__()

        if vocab_size > cutoff[-1]:
            cutoff = cutoff + [vocab_size]
        else:
            assert (
                vocab_size == cutoff[-1]
            ), "cannot specify cutoff larger than vocab size"

        self.cutoff = cutoff
        self.embedding_dim = output_dim
        self.padding_idx = padding_idx

        self.embeddings = nn.ModuleList()
        for i in range(len(self.cutoff)):
            prev = self.cutoff[i - 1] if i > 0 else 0
            size = self.cutoff[i] - prev
            dim = int(initial_dim // (factor**i))
            seq = nn.Sequential(
                nn.Embedding(size, dim, self.padding_idx),
                quant_noise(
                    nn.Linear(dim, output_dim, bias=False), q_noise, qn_block_size
                ),
            )

            self.embeddings.append(seq)
            self.padding_idx = None
        self.padding_idx = padding_idx

        def init_weights(m):
            if isinstance(m, nn.Embedding):
                nn.init.normal_(m.weight, mean=0, std=m.weight.shape[1] ** -0.5)
                nn.init.constant_(m.weight[padding_idx], 0)
            elif hasattr(m, "weight"):
                nn.init.xavier_uniform_(m.weight)

        self.apply(init_weights)

        self.register_buffer("_float_tensor", torch.FloatTensor(1))

    def weights_for_band(self, band: int):
        return self.embeddings[band][0].weight, self.embeddings[band][1].weight

    def forward(self, input: torch.Tensor):
        result = self._float_tensor.new(input.shape + (self.embedding_dim,))
        for i in range(len(self.cutoff)):
            mask = input.lt(self.cutoff[i])
            if i > 0:
                mask.mul_(input.ge(self.cutoff[i - 1]))
                chunk_input = input[mask] - self.cutoff[i - 1]
            else:
                chunk_input = input[mask]
            if mask.any():
                result[mask] = self.embeddings[i](chunk_input)
        return result
EXA-1-master
exa/libraries/fairseq/fairseq/modules/adaptive_input.py
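# --- Usage sketch (illustrative; not part of the fairseq sources above); the cutoffs are assumptions ---
# Frequent tokens (the first band) get full-size embeddings; rarer bands use dimensions
# reduced by `factor`, and every band is projected back to `output_dim`.
import torch
from fairseq.modules import AdaptiveInput

emb = AdaptiveInput(
    vocab_size=10000, padding_idx=1, initial_dim=512, factor=4.0,
    output_dim=512, cutoff=[2000, 6000],   # bands: [0, 2000), [2000, 6000), [6000, 10000)
)
tokens = torch.randint(0, 10000, (2, 12))
out = emb(tokens)                          # (2, 12, 512)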
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.unfold import unfold1d def LightweightConv( input_size, kernel_size=1, padding_l=None, num_heads=1, weight_dropout=0.0, weight_softmax=False, bias=False, ): if torch.cuda.is_available(): try: from fairseq.modules.lightconv_layer import LightconvLayer return LightconvLayer( input_size, kernel_size=kernel_size, padding_l=padding_l, num_heads=num_heads, weight_dropout=weight_dropout, weight_softmax=weight_softmax, bias=bias, ) except ImportError as e: print(e) return LightweightConv1dTBC( input_size, kernel_size=kernel_size, padding_l=padding_l, num_heads=num_heads, weight_dropout=weight_dropout, weight_softmax=weight_softmax, bias=bias, ) class LightweightConv1d(nn.Module): """Lightweight Convolution assuming the input is BxCxT This is just an example that explains LightConv clearer than the TBC version. We don't use this module in the model. Args: input_size: # of channels of the input and output kernel_size: convolution channels padding: padding num_heads: number of heads used. The weight is of shape `(num_heads, 1, kernel_size)` weight_softmax: normalize the weight with softmax before the convolution Shape: Input: BxCxT, i.e. (batch_size, input_size, timesteps) Output: BxCxT, i.e. (batch_size, input_size, timesteps) Attributes: weight: the learnable weights of the module of shape `(num_heads, 1, kernel_size)` bias: the learnable bias of the module of shape `(input_size)` """ def __init__( self, input_size, kernel_size=1, padding=0, num_heads=1, weight_softmax=False, bias=False, weight_dropout=0.0, ): super().__init__() self.input_size = input_size self.kernel_size = kernel_size self.num_heads = num_heads self.padding = padding self.weight_softmax = weight_softmax self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(input_size)) else: self.bias = None self.weight_dropout_module = FairseqDropout( weight_dropout, module_name=self.__class__.__name__ ) self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.weight) if self.bias is not None: nn.init.constant_(self.bias, 0.0) def forward(self, input): """ input size: B x C x T output size: B x C x T """ B, C, T = input.size() H = self.num_heads weight = self.weight if self.weight_softmax: weight = F.softmax(weight, dim=-1) weight = self.weight_dropout_module(weight) # Merge every C/H entries into the batch dimension (C = self.input_size) # B x C x T -> (B * C/H) x H x T # One can also expand the weight to C x 1 x K by a factor of C/H # and do not reshape the input instead, which is slow though input = input.view(-1, H, T) output = F.conv1d(input, weight, padding=self.padding, groups=self.num_heads) output = output.view(B, C, T) if self.bias is not None: output = output + self.bias.view(1, -1, 1) return output @with_incremental_state class LightweightConv1dTBC(nn.Module): """Lightweight Convolution assuming the input is TxBxC Args: input_size: # of channels of the input kernel_size: convolution channels padding_l: padding to the left when using "same" padding num_heads: number of heads used. 
The weight is of shape (num_heads, 1, kernel_size) weight_dropout: the drop rate of the DropConnect to drop the weight weight_softmax: normalize the weight with softmax before the convolution bias: use bias Shape: Input: TxBxC, i.e. (timesteps, batch_size, input_size) Output: TxBxC, i.e. (timesteps, batch_size, input_size) Attributes: weight: the learnable weights of the module of shape `(num_heads, 1, kernel_size)` bias: the learnable bias of the module of shape `(input_size)` """ def __init__( self, input_size, kernel_size=1, padding_l=None, num_heads=1, weight_dropout=0.0, weight_softmax=False, bias=False, ): super().__init__() self.input_size = input_size self.kernel_size = kernel_size self.padding_l = padding_l self.num_heads = num_heads self.weight_dropout_module = FairseqDropout( weight_dropout, module_name=self.__class__.__name__ ) self.weight_softmax = weight_softmax self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(input_size)) else: self.bias = None self.reset_parameters() self.onnx_trace = False def reset_parameters(self): nn.init.xavier_uniform_(self.weight) if self.bias is not None: nn.init.constant_(self.bias, 0.0) def forward(self, x, incremental_state=None, unfold=False): """Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C args: x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size) incremental_state: A dict to keep the state unfold: unfold the input or not. If not, we use the matrix trick instead """ unfold = unfold or (incremental_state is not None) if unfold: output = self._forward_unfolded(x, incremental_state) else: output = self._forward_expanded(x, incremental_state) if self.bias is not None: output = output + self.bias.view(1, 1, -1) return output def prepare_for_onnx_export_(self): self.onnx_trace = True def _forward_unfolded(self, x, incremental_state): """The conventional implementation of convolutions. Unfolding the input by having a window shifting to the right.""" T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size weight = self.weight.view(H, K) if incremental_state is not None: input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer( incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :] ) x_unfold = x_unfold.view(T * B * H, R, -1) else: # unfold the input: T x B x C --> T' x B x C x K x_unfold = unfold1d(x, self.kernel_size, self.padding_l, 0) x_unfold = x_unfold.view(T * B * H, R, K) if self.weight_softmax: weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as( weight ) if incremental_state is not None: weight = weight[:, -x_unfold.size(2) :] K = weight.size(1) weight = ( weight.view(1, H, K).expand(T * B, H, K).contiguous().view(T * B * H, K, 1) ) weight = self.weight_dropout_module(weight) output = torch.bmm(x_unfold, weight) # T*B*H x R x 1 output = output.view(T, B, C) return output def _forward_expanded(self, x, incremental_state): """Turn the convolution filters into band matrices and do matrix multiplication. This is faster when the sequence is short, but less memory efficient. This is not used in the decoder during inference. 
""" T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size weight = self.weight.view(H, K) if self.weight_softmax: weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as( weight ) weight = weight.view(1, H, K).expand(T * B, H, K).contiguous() weight = weight.view(T, B * H, K).transpose(0, 1) x = x.view(T, B * H, R).transpose(0, 1) P = self.padding_l if K > T and P == K - 1: weight = weight.narrow(2, K - T, T) K, P = T, T - 1 # turn the convolution filters into band matrices weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False) weight_expanded.as_strided((B * H, T, K), (T * (T + K - 1), T + K, 1)).copy_( weight ) weight_expanded = weight_expanded.narrow(2, P, T) weight_expanded = self.weight_dropout_module(weight_expanded) output = torch.bmm(weight_expanded, x) output = output.transpose(0, 1).contiguous().view(T, B, C) return output def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state(self, incremental_state, "input_buffer") def _set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state( self, incremental_state, "input_buffer", new_buffer ) def extra_repr(self): s = "{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, bias={}".format( self.input_size, self.kernel_size, self.padding_l, self.num_heads, self.weight_softmax, self.bias is not None, ) if self.weight_dropout_module.p > 0.0: s += ", weight_dropout={}".format(self.weight_dropout_module.p) return s
EXA-1-master
exa/libraries/fairseq/fairseq/modules/lightweight_convolution.py
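# --- Usage sketch (illustrative; not part of the fairseq sources above) ---
# LightweightConv1dTBC shares one learned kernel per head across all positions and channels.
import torch
from fairseq.modules.lightweight_convolution import LightweightConv1dTBC

conv = LightweightConv1dTBC(
    input_size=64, kernel_size=3, padding_l=2, num_heads=8, weight_softmax=True
)
x = torch.randn(20, 4, 64)   # T x B x C
y = conv(x)                  # T x B x C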
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

import torch


class ScalarBias(torch.autograd.Function):
    """
    Adds a vector of scalars, used in self-attention mechanism to allow
    the model to optionally attend to this vector instead of the past
    """

    @staticmethod
    def forward(ctx, input, dim, bias_init):
        size = list(input.size())
        size[dim] += 1
        output = input.new(*size).fill_(bias_init)
        output.narrow(dim, 1, size[dim] - 1).copy_(input)
        ctx.dim = dim
        return output

    @staticmethod
    def backward(ctx, grad):
        return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None


def scalar_bias(input, dim, bias_init=0):
    return ScalarBias.apply(input, dim, bias_init)
EXA-1-master
exa/libraries/fairseq/fairseq/modules/scalar_bias.py
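# --- Usage sketch (illustrative; not part of the fairseq sources above) ---
import torch
from fairseq.modules.scalar_bias import scalar_bias

x = torch.randn(2, 5, 8)
y = scalar_bias(x, dim=1, bias_init=0)   # (2, 6, 8); y[:, 0, :] holds the bias slot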
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn

from .learned_positional_embedding import LearnedPositionalEmbedding
from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding


def PositionalEmbedding(
    num_embeddings: int,
    embedding_dim: int,
    padding_idx: int,
    learned: bool = False,
):
    if learned:
        # if padding_idx is specified then offset the embedding ids by
        # this index and adjust num_embeddings appropriately
        # TODO: The right place for this offset would be inside
        # LearnedPositionalEmbedding. Move this there for a cleaner implementation.
        if padding_idx is not None:
            num_embeddings = num_embeddings + padding_idx + 1
        m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
        nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
        if padding_idx is not None:
            nn.init.constant_(m.weight[padding_idx], 0)
    else:
        m = SinusoidalPositionalEmbedding(
            embedding_dim,
            padding_idx,
            init_size=num_embeddings + padding_idx + 1,
        )
    return m
EXA-1-master
exa/libraries/fairseq/fairseq/modules/positional_embedding.py
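# --- Usage sketch (illustrative; not part of the fairseq sources above) ---
# Builds either a learned or a sinusoidal positional embedding table for 1024 positions;
# both variants are consumed like regular embedding modules by fairseq encoders/decoders.
from fairseq.modules import PositionalEmbedding

learned_pos = PositionalEmbedding(1024, 512, padding_idx=1, learned=True)
sinus_pos = PositionalEmbedding(1024, 512, padding_idx=1, learned=False)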
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch from .multihead_attention import MultiheadAttention class SparseMultiheadAttention(MultiheadAttention): """Sparse Multi-Headed Attention. "Generating Long Sequences with Sparse Transformers". Implements fixed factorized self attention, where l=stride and c=expressivity. A(1) includes all words in the stride window and A(2) takes a summary of c words from the end of each stride window. If is_bidirectional=False, we do not include any words past the current word, as in the paper. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, stride=32, expressivity=8, is_bidirectional=True, ): super().__init__( embed_dim, num_heads, kdim, vdim, dropout, bias, add_bias_kv, add_zero_attn, self_attention, encoder_decoder_attention, ) self.is_bidirectional = is_bidirectional self.stride = stride self.expressivity = expressivity assert self.stride > 0 and self.stride >= self.expressivity # Used for Ai(2) calculations - beginning of [l-c, l] range def compute_checkpoint(self, word_index): if word_index % self.stride == 0 and word_index != 0: checkpoint_index = word_index - self.expressivity else: checkpoint_index = ( math.floor(word_index / self.stride) * self.stride + self.stride - self.expressivity ) return checkpoint_index # Computes Ai(2) def compute_subset_summaries(self, absolute_max): checkpoint_index = self.compute_checkpoint(0) subset_two = set() while checkpoint_index <= absolute_max - 1: summary = set( range( checkpoint_index, min(checkpoint_index + self.expressivity + 1, absolute_max), ) ) subset_two = subset_two.union(summary) checkpoint_index = self.compute_checkpoint(checkpoint_index + self.stride) return subset_two # Sparse Transformer Fixed Attention Pattern: https://arxiv.org/pdf/1904.10509.pdf def compute_fixed_attention_subset(self, word_index, tgt_len): # +1s account for range function; [min, max) -> [min, max] if not self.is_bidirectional: absolute_max = word_index + 1 else: absolute_max = tgt_len # Subset 1 - whole window rounded_index = ( math.floor((word_index + self.stride) / self.stride) * self.stride ) if word_index % self.stride == 0 and word_index != 0: subset_one = set( range(word_index - self.stride, min(absolute_max, word_index + 1)) ) else: subset_one = set( range( max(0, rounded_index - self.stride), min(absolute_max, rounded_index + 1), ) ) # Subset 2 - summary per window # If bidirectional, subset 2 is the same for every index subset_two = set() if not self.is_bidirectional: subset_two = self.compute_subset_summaries(absolute_max) return subset_one.union(subset_two) # Compute sparse mask - if bidirectional, can pre-compute and store def buffered_sparse_mask(self, tensor, tgt_len, src_len): assert tgt_len > self.stride sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float("-inf")) # If bidirectional, subset 2 is the same for every index subset_summaries = set() if self.is_bidirectional: subset_summaries = self.compute_subset_summaries(tgt_len) for i in range(tgt_len): fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len) fixed_attention_subset = fixed_attention_subset.union(subset_summaries) included_word_indices = torch.LongTensor(list(fixed_attention_subset)) sparse_mask[i].index_fill_(0, 
included_word_indices, 0) return sparse_mask.type_as(tensor) def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz): sparse_mask = self.buffered_sparse_mask(attn_weights, tgt_len, src_len) sparse_mask = sparse_mask.unsqueeze(0).expand( bsz * self.num_heads, tgt_len, src_len ) attn_weights += sparse_mask
EXA-1-master
exa/libraries/fairseq/fairseq/modules/sparse_multihead_attention.py
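A minimal usage sketch for the SparseMultiheadAttention module above, assuming fairseq is installed; the stride, expressivity, and sequence length are illustrative toy values, not values from the dataset:

import torch
from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention

# fixed factorized attention with stride l=8 and expressivity c=4
attn = SparseMultiheadAttention(
    embed_dim=64, num_heads=4, self_attention=True, stride=8, expressivity=4
)

# buffered_sparse_mask requires tgt_len > stride; the first argument is only
# used for dtype/device via type_as
mask = attn.buffered_sparse_mask(torch.zeros(1), tgt_len=16, src_len=16)
print(mask.shape)  # torch.Size([16, 16]); 0 where attention is allowed, -inf elsewhere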
#!/usr/bin/env python3 """ Used for EMA tracking a given pytorch module. The user is responsible for calling step() and setting the appropriate decay """ import copy from dataclasses import dataclass, field import logging import torch from omegaconf import II from fairseq.dataclass import FairseqDataclass try: from amp_C import multi_tensor_l2norm multi_tensor_l2norm_available = True except ImportError: multi_tensor_l2norm_available = False logger = logging.getLogger(__name__) @dataclass class EMAModuleConfig(FairseqDataclass): ema_decay: float = field( default=0.9999, metadata={"help": "decay for exponential moving average model"} ) ema_fp32: bool = field( default=False, metadata={"help": "If true, store EMA model in fp32 even if model is in fp16"}, ) add_missing_params: bool = True log_norms: bool = False class EMAModule: """Exponential Moving Average of Fairseq Models""" def __init__( self, model, config: EMAModuleConfig, copy_model=True, device=None, skip_keys=None, ): """ @param model model to initialize the EMA with @param config EMAConfig object with configuration like ema_decay, ema_update_freq, ema_fp32 @param device If provided, copy EMA to this device (e.g. gpu). Otherwise EMA is in the same device as the model. """ self.config = config if copy_model: self.model = copy.deepcopy(model) self.model.requires_grad_(False) else: self.model = model self.config = config self.decay = config.ema_decay self.skip_keys = skip_keys or set() self.add_missing_params = config.add_missing_params self.fp32_params = {} if device is not None: logging.info(f"Copying EMA model to device {device}") self.model = self.model.to(device=device) if self.config.ema_fp32: self.build_fp32_params() self.log_norms = config.log_norms and multi_tensor_l2norm_available self.logs = {} def build_fp32_params(self, state_dict=None): """ Store a copy of the EMA params in fp32. If state dict is passed, the EMA params is copied from the provided state dict. Otherwise, it is copied from the current EMA model parameters. """ if not self.config.ema_fp32: raise RuntimeError( "build_fp32_params should not be called if ema_fp32=False. " "Use ema_fp32=True if this is really intended." 
) if state_dict is None: state_dict = self.model.state_dict() def _to_float(t): return t.float() if torch.is_floating_point(t) else t for param_key in state_dict: if param_key in self.fp32_params: if param_key == "__sq_mom": self.fp32_params[param_key] = state_dict[param_key] else: self.fp32_params[param_key].copy_(state_dict[param_key]) else: self.fp32_params[param_key] = _to_float(state_dict[param_key]) if "__sq_mom" in self.fp32_params: self.fp32_params["__sq_mom"][param_key] = torch.zeros_like( self.fp32_params[param_key] ) def restore(self, state_dict, build_fp32_params=False): """Load data from a model spec into EMA model""" self.model.load_state_dict(state_dict, strict=False) if build_fp32_params: self.build_fp32_params(state_dict) def set_decay(self, decay, weight_decay=None): self.decay = decay if weight_decay is not None: self.weight_decay = weight_decay def get_decay(self): return self.decay def _step_internal(self, new_model): """One update of the EMA model based on new model weights""" decay = self.decay ema_state_dict = {} ema_params = ( self.fp32_params if self.config.ema_fp32 else self.model.state_dict() ) new_p = [] ema_p = [] for key, param in new_model.named_parameters(): if isinstance(param, dict): continue if not self.add_missing_params and key not in ema_params: continue try: ema_param = ema_params[key] except KeyError: ema_param = ( param.float().clone() if param.ndim == 1 else copy.deepcopy(param) ) ema_params[key] = ema_param if param.shape != ema_param.shape: raise ValueError( "incompatible tensor shapes between model param and ema param" + "{} vs. {}".format(param.shape, ema_param.shape) ) if "version" in key: # Do not decay a model.version pytorch param continue lr = 1 - decay if key in self.skip_keys or not param.requires_grad: ema_params[key].copy_(param.to(dtype=ema_param.dtype).data) ema_param = ema_params[key] else: if self.log_norms: new_p.append(param) ema_p.append(ema_param) ema_param.mul_(1 - lr) ema_param.add_(param.data.to(dtype=ema_param.dtype), alpha=lr) ema_state_dict[key] = ema_param for key, param in new_model.named_buffers(): ema_state_dict[key] = param if self.log_norms: if "model_norm" in self.logs: self.prev_model_norm = self.logs["model_norm"] chunk_size = 2048 * 32 has_inf = torch.zeros( (1, 1), dtype=torch.int, device=next(new_model.parameters()).device ) new_norm = multi_tensor_l2norm(chunk_size, has_inf, [new_p], False) old_norm = multi_tensor_l2norm(chunk_size, has_inf, [ema_p], False) self.logs["model_norm"] = new_norm[0] self.logs["ema_norm"] = old_norm[0] self.restore(ema_state_dict, build_fp32_params=False) @torch.no_grad() def step(self, new_model): self._step_internal(new_model) def reverse(self, model): """ Load the model parameters from EMA model. Useful for inference or fine-tuning from the EMA model. """ d = self.model.state_dict() if "_ema" in d: del d["_ema"] model.load_state_dict(d, strict=False) return model
EXA-1-master
exa/libraries/fairseq/fairseq/modules/ema_module.py
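An illustrative sketch of the EMA module above, assuming fairseq is installed; the tiny Linear model and the fake parameter update are placeholders for a real training loop:

import torch
from fairseq.modules.ema_module import EMAModule, EMAModuleConfig

model = torch.nn.Linear(8, 8)
ema = EMAModule(model, EMAModuleConfig(ema_decay=0.999, ema_fp32=False))

# stand-in for optimizer.step(); in practice call ema.step(model) after each update
with torch.no_grad():
    model.weight.add_(0.01)
ema.step(model)

# copy the EMA weights into a fresh module, e.g. for evaluation
ema_model = ema.reverse(torch.nn.Linear(8, 8))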
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn import math import torch class PositionalEncoding(nn.Module): """Positional encoding. Args: d_model: Embedding dimension. dropout_rate: Dropout rate. max_len: Maximum input length. reverse: Whether to reverse the input position. """ def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False): """Construct an PositionalEncoding object.""" super(PositionalEncoding, self).__init__() self.d_model = d_model self.reverse = reverse self.xscale = math.sqrt(self.d_model) self.dropout = nn.Dropout(p=dropout_rate) self.pe = None self.extend_pe(torch.tensor(0.0).expand(1, max_len)) def extend_pe(self, x): """Reset the positional encodings.""" if self.pe is not None: if self.pe.size(1) >= x.size(1): if self.pe.dtype != x.dtype or self.pe.device != x.device: self.pe = self.pe.to(dtype=x.dtype, device=x.device) return pe = torch.zeros(x.size(1), self.d_model) if self.reverse: position = torch.arange( x.size(1) - 1, -1, -1.0, dtype=torch.float32 ).unsqueeze(1) else: position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1) div_term = torch.exp( torch.arange(0, self.d_model, 2, dtype=torch.float32) * -(math.log(10000.0) / self.d_model) ) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.pe = pe.to(device=x.device, dtype=x.dtype) def forward(self, x: torch.Tensor): """Add positional encoding. Args: x (torch.Tensor): Input tensor B X T X C Returns: torch.Tensor: Encoded tensor B X T X C """ self.extend_pe(x) x = x * self.xscale + self.pe[:, : x.size(1)] return self.dropout(x) class RelPositionalEncoding(nn.Module): """Relative positional encoding module (new implementation). Args: d_model: Embedding dimension. dropout_rate: Dropout rate. max_len: Maximum input length. """ def __init__(self, max_len, d_model): """Construct an PositionalEncoding object.""" super(RelPositionalEncoding, self).__init__() self.d_model = d_model self.pe = None self.extend_pe(torch.tensor(0.0).expand(1, max_len)) def extend_pe(self, x): """Reset the positional encodings.""" if self.pe is not None: # self.pe contains both positive and negative parts # the length of self.pe is 2 * input_len - 1 if self.pe.size(1) >= x.size(1) * 2 - 1: if self.pe.dtype != x.dtype or self.pe.device != x.device: self.pe = self.pe.to(dtype=x.dtype, device=x.device) return # Suppose `i` means to the position of query vecotr and `j` means the # position of key vector. We use position relative positions when keys # are to the left (i>j) and negative relative positions otherwise (i<j). pe_positive = torch.zeros(x.size(1), self.d_model) pe_negative = torch.zeros(x.size(1), self.d_model) position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1) div_term = torch.exp( torch.arange(0, self.d_model, 2, dtype=torch.float32) * -(math.log(10000.0) / self.d_model) ) pe_positive[:, 0::2] = torch.sin(position * div_term) pe_positive[:, 1::2] = torch.cos(position * div_term) pe_negative[:, 0::2] = torch.sin(-1 * position * div_term) pe_negative[:, 1::2] = torch.cos(-1 * position * div_term) # Reserve the order of positive indices and concat both positive and # negative indices. 
This is used to support the shifting trick # as in https://arxiv.org/abs/1901.02860 pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0) pe_negative = pe_negative[1:].unsqueeze(0) pe = torch.cat([pe_positive, pe_negative], dim=1) self.pe = pe.to(device=x.device, dtype=x.dtype) def forward(self, x: torch.Tensor): """Add positional encoding. Args: x : Input tensor T X B X C. Returns: torch.Tensor: Encoded tensor T X B X C. """ x = x.transpose(0, 1) # Change TBC to BTC self.extend_pe(x) pos_emb = self.pe[ :, self.pe.size(1) // 2 - x.size(1) + 1 : self.pe.size(1) // 2 + x.size(1), ] pos_emb = pos_emb.transpose(0, 1) # change to TBC return pos_emb
EXA-1-master
exa/libraries/fairseq/fairseq/modules/positional_encoding.py
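An illustrative sketch of the two encodings above, assuming fairseq is installed; batch size, length, and dimension are toy values:

import torch
from fairseq.modules.positional_encoding import PositionalEncoding, RelPositionalEncoding

x_btc = torch.randn(2, 10, 16)                      # B x T x C
abs_enc = PositionalEncoding(d_model=16, dropout_rate=0.1)
y = abs_enc(x_btc)                                  # B x T x C, scaled and position-encoded

x_tbc = torch.randn(10, 2, 16)                      # T x B x C
rel_enc = RelPositionalEncoding(max_len=10, d_model=16)
pos_emb = rel_enc(x_tbc)                            # relative positions, (2*T - 1) x 1 x C
print(pos_emb.shape)                                # torch.Size([19, 1, 16])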
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Optional import torch from fairseq.modules import ( ESPNETMultiHeadedAttention, LayerNorm, MultiheadAttention, RelPositionMultiHeadedAttention, RotaryPositionMultiHeadedAttention, ) from fairseq.utils import get_activation_fn class ConvolutionModule(torch.nn.Module): """Convolution block used in the conformer block""" def __init__( self, embed_dim, channels, depthwise_kernel_size, dropout, activation_fn="swish", bias=False, export=False, ): """ Args: embed_dim: Embedding dimension channels: Number of channels in depthwise conv layers depthwise_kernel_size: Depthwise conv layer kernel size dropout: dropout value activation_fn: Activation function to use after depthwise convolution kernel bias: If bias should be added to conv layers export: If layernorm should be exported to jit """ super(ConvolutionModule, self).__init__() assert ( depthwise_kernel_size - 1 ) % 2 == 0, "kernel_size should be a odd number for 'SAME' padding" self.layer_norm = LayerNorm(embed_dim, export=export) self.pointwise_conv1 = torch.nn.Conv1d( embed_dim, 2 * channels, kernel_size=1, stride=1, padding=0, bias=bias, ) self.glu = torch.nn.GLU(dim=1) self.depthwise_conv = torch.nn.Conv1d( channels, channels, depthwise_kernel_size, stride=1, padding=(depthwise_kernel_size - 1) // 2, groups=channels, bias=bias, ) self.batch_norm = torch.nn.BatchNorm1d(channels) self.activation = get_activation_fn(activation_fn)(channels) self.pointwise_conv2 = torch.nn.Conv1d( channels, embed_dim, kernel_size=1, stride=1, padding=0, bias=bias, ) self.dropout = torch.nn.Dropout(dropout) def forward(self, x): """ Args: x: Input of shape B X T X C Returns: Tensor of shape B X T X C """ x = self.layer_norm(x) # exchange the temporal dimension and the feature dimension x = x.transpose(1, 2) # GLU mechanism x = self.pointwise_conv1(x) # (batch, 2*channel, dim) x = self.glu(x) # (batch, channel, dim) # 1D Depthwise Conv x = self.depthwise_conv(x) x = self.batch_norm(x) x = self.activation(x) x = self.pointwise_conv2(x) x = self.dropout(x) return x.transpose(1, 2) class FeedForwardModule(torch.nn.Module): """Positionwise feed forward layer used in conformer""" def __init__( self, input_feat, hidden_units, dropout1, dropout2, activation_fn="swish", bias=True, ): """ Args: input_feat: Input feature dimension hidden_units: Hidden unit dimension dropout1: dropout value for layer1 dropout2: dropout value for layer2 activation_fn: Name of activation function bias: If linear layers should have bias """ super(FeedForwardModule, self).__init__() self.layer_norm = LayerNorm(input_feat) self.w_1 = torch.nn.Linear(input_feat, hidden_units, bias=bias) self.w_2 = torch.nn.Linear(hidden_units, input_feat, bias=bias) self.dropout1 = torch.nn.Dropout(dropout1) self.dropout2 = torch.nn.Dropout(dropout2) self.activation = get_activation_fn(activation_fn)(hidden_units) def forward(self, x): """ Args: x: Input Tensor of shape T X B X C Returns: Tensor of shape T X B X C """ x = self.layer_norm(x) x = self.w_1(x) x = self.activation(x) x = self.dropout1(x) x = self.w_2(x) return self.dropout2(x) class ConformerEncoderLayer(torch.nn.Module): """Conformer block based on https://arxiv.org/abs/2005.08100. 
We currently don't support relative positional encoding in MHA""" def __init__( self, embed_dim, ffn_embed_dim, attention_heads, dropout, use_fp16, depthwise_conv_kernel_size=31, activation_fn="swish", attn_type=None, pos_enc_type="abs", ): """ Args: embed_dim: Input embedding dimension ffn_embed_dim: FFN layer dimension attention_heads: Number of attention heads in MHA dropout: dropout value depthwise_conv_kernel_size: Size of kernel in depthwise conv layer in convolution module activation_fn: Activation function name to use in convulation block and feed forward block attn_type: MHA implementation from ESPNET vs fairseq pos_enc_type: Positional encoding type - abs, rope, rel_pos """ self.pos_enc_type = pos_enc_type super(ConformerEncoderLayer, self).__init__() self.ffn1 = FeedForwardModule( embed_dim, ffn_embed_dim, dropout, dropout, ) self.self_attn_layer_norm = LayerNorm(embed_dim, export=False) self.self_attn_dropout = torch.nn.Dropout(dropout) if attn_type == "espnet": if self.pos_enc_type == "rel_pos": self.self_attn = RelPositionMultiHeadedAttention( embed_dim, attention_heads, dropout=dropout, ) elif self.pos_enc_type == "rope": self.self_attn = RotaryPositionMultiHeadedAttention( embed_dim, attention_heads, dropout=dropout, precision=use_fp16 ) elif self.pos_enc_type == "abs": self.self_attn = ESPNETMultiHeadedAttention( embed_dim, attention_heads, dropout=dropout, ) else: raise Exception(f"Unsupported attention type {self.pos_enc_type}") else: # Default to fairseq MHA self.self_attn = MultiheadAttention( embed_dim, attention_heads, dropout=dropout, ) self.conv_module = ConvolutionModule( embed_dim=embed_dim, channels=embed_dim, depthwise_kernel_size=depthwise_conv_kernel_size, dropout=dropout, activation_fn=activation_fn, ) self.ffn2 = FeedForwardModule( embed_dim, ffn_embed_dim, dropout, dropout, activation_fn=activation_fn, ) self.final_layer_norm = LayerNorm(embed_dim, export=False) def forward( self, x, encoder_padding_mask: Optional[torch.Tensor], position_emb: Optional[torch.Tensor] = None, ): """ Args: x: Tensor of shape T X B X C encoder_padding_mask: Optional mask tensor positions: Returns: Tensor of shape T X B X C """ residual = x x = self.ffn1(x) x = x * 0.5 + residual residual = x x = self.self_attn_layer_norm(x) if self.pos_enc_type == "rel_pos": x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=encoder_padding_mask, pos_emb=position_emb, need_weights=False, ) else: x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=encoder_padding_mask, need_weights=False, ) x = self.self_attn_dropout(x) x = x + residual residual = x # TBC to BTC x = x.transpose(0, 1) x = self.conv_module(x) # BTC to TBC x = x.transpose(0, 1) x = residual + x residual = x x = self.ffn2(x) layer_result = x x = x * 0.5 + residual x = self.final_layer_norm(x) return x, (attn, layer_result) class ConformerWav2Vec2EncoderLayer(ConformerEncoderLayer): """Encoder layer for Wav2vec2 encoder""" def forward( self, x: torch.Tensor, self_attn_mask: torch.Tensor = None, self_attn_padding_mask: torch.Tensor = None, need_weights: bool = False, att_args=None, position_emb=None, ): return super().forward(x, self_attn_padding_mask, position_emb)
EXA-1-master
exa/libraries/fairseq/fairseq/modules/conformer_layer.py
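An illustrative sketch of one conformer block from the file above, assuming fairseq is installed; it uses the ESPNET attention with absolute positions, and all sizes are toy values:

import torch
from fairseq.modules.conformer_layer import ConformerEncoderLayer

layer = ConformerEncoderLayer(
    embed_dim=64,
    ffn_embed_dim=256,
    attention_heads=4,
    dropout=0.1,
    use_fp16=False,
    attn_type="espnet",
    pos_enc_type="abs",
)

x = torch.randn(10, 2, 64)                           # T x B x C
out, (attn, layer_result) = layer(x, encoder_padding_mask=None)
print(out.shape)                                     # torch.Size([10, 2, 64])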
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2019 Shigeki Karita # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Multi-Head Attention layer definition.""" import math import torch from torch import nn from fairseq.modules.rotary_positional_embedding import ( RotaryPositionalEmbedding, apply_rotary_pos_emb, ) class ESPNETMultiHeadedAttention(nn.Module): """Multi-Head Attention layer. Args: n_head: The number of heads. n_feat: The number of features. dropout: Dropout rate. """ def __init__(self, n_feat, n_head, dropout): """Construct an MultiHeadedAttention object.""" super(ESPNETMultiHeadedAttention, self).__init__() assert n_feat % n_head == 0 # We assume d_v always equals d_k self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.attn = None self.dropout = nn.Dropout(p=dropout) def forward_qkv(self, query, key, value, **kwargs): """Transform query, key and value. Args: query: Query tensor B X T1 X C key: Key tensor B X T2 X C value: Value tensor B X T2 X C Returns: torch.Tensor: Transformed query tensor B X n_head X T1 X d_k torch.Tensor: Transformed key tensor B X n_head X T2 X d_k torch.Tensor: Transformed value tensor B X n_head X T2 X d_k """ n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) # (batch, head, time1, d_k) k = k.transpose(1, 2) # (batch, head, time2, d_k) v = v.transpose(1, 2) # (batch, head, time2, d_k) return q, k, v def forward_attention(self, value, scores, mask): """Compute attention context vector. Args: value: Transformed value B X n_head X T2 X d_k. scores: Attention score B X n_head X T1 X T2 mask: Mask T2 X B Returns: torch.Tensor: Transformed value B X T1 X d_model weighted by the attention score B X T1 X T2 """ n_batch = value.size(0) if mask is not None: scores = scores.masked_fill( mask.unsqueeze(1).unsqueeze(2).to(bool), float("-inf"), # (batch, head, time1, time2) ) self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2) else: self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2) p_attn = self.dropout(self.attn) x = torch.matmul(p_attn, value) # (batch, head, time1, d_k) x = ( x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) ) # (batch, time1, d_model) return self.linear_out(x) # (batch, time1, d_model) def forward(self, query, key, value, key_padding_mask=None, **kwargs): """Compute scaled dot product attention. Args: query (torch.Tensor): Query tensor T X B X C key (torch.Tensor): Key tensor T X B X C value (torch.Tensor): Value tensor T X B X C mask (torch.Tensor): Mask tensor T X B Returns: torch.Tensor: Output tensor T X B X D. """ query = query.transpose(0, 1) key = key.transpose(0, 1) value = value.transpose(0, 1) q, k, v = self.forward_qkv(query, key, value) scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) scores = self.forward_attention(v, scores, key_padding_mask) scores = scores.transpose(0, 1) return scores, None class RelPositionMultiHeadedAttention(ESPNETMultiHeadedAttention): """Multi-Head Attention layer with relative position encoding. Paper: https://arxiv.org/abs/1901.02860 Args: n_head: The number of heads. n_feat: The number of features. dropout: Dropout rate. 
zero_triu: Whether to zero the upper triangular part of attention matrix. """ def __init__(self, n_feat, n_head, dropout, zero_triu=False): """Construct an RelPositionMultiHeadedAttention object.""" super().__init__(n_feat, n_head, dropout) self.zero_triu = zero_triu # linear transformation for positional encoding self.linear_pos = nn.Linear(n_feat, n_feat, bias=False) # these two learnable bias are used in matrix c and matrix d # as described in https://arxiv.org/abs/1901.02860 Section 3.3 self.pos_bias_u = nn.Parameter(torch.zeros(self.h, self.d_k)) self.pos_bias_v = nn.Parameter(torch.zeros(self.h, self.d_k)) torch.nn.init.xavier_uniform_(self.pos_bias_u) torch.nn.init.xavier_uniform_(self.pos_bias_v) def rel_shift(self, x): """Compute relative positional encoding. Args: x: Input tensor B X n_head X T X 2T-1 Returns: torch.Tensor: Output tensor. """ zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=-1) x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2)) x = x_padded[:, :, 1:].view_as(x)[ :, :, :, : x.size(-1) // 2 + 1 ] # only keep the positions from 0 to time2 if self.zero_triu: ones = torch.ones((x.size(2), x.size(3)), device=x.device) x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :] return x def forward(self, query, key, value, pos_emb, key_padding_mask=None, **kwargs): """Compute scaled dot product attention. Args: query: Query tensor T X B X C key: Key tensor T X B X C value: Value tensor T X B X C pos_emb: Positional embedding tensor B X 2T-1 X C key_padding_mask: Mask tensor T X B Returns: torch.Tensor: Output tensor T X B X C. """ query = query.transpose(0, 1) key = key.transpose(0, 1) value = value.transpose(0, 1) pos_emb = pos_emb.transpose(0, 1) q, k, v = self.forward_qkv(query, key, value) q = q.transpose(1, 2) # (batch, time1, head, d_k) n_batch_pos = pos_emb.size(0) p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k) p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k) # (batch, head, time1, d_k) q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2) # (batch, head, time1, d_k) q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2) # compute attention score # first compute matrix a and matrix c # as described in https://arxiv.org/abs/1901.02860 Section 3.3 # (batch, head, time1, time2) matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1)) # compute matrix b and matrix d # (batch, head, time1, 2*time1-1) matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1)) matrix_bd = self.rel_shift(matrix_bd) scores = (matrix_ac + matrix_bd) / math.sqrt( self.d_k ) # (batch, head, time1, time2) scores = self.forward_attention(v, scores, key_padding_mask) scores = scores.transpose(0, 1) return scores, None class RotaryPositionMultiHeadedAttention(ESPNETMultiHeadedAttention): def __init__( self, n_feat, n_head, dropout, precision, rotary_emd_base=10000, ): """Construct an RotaryPositionMultiHeadedAttention object.""" super().__init__(n_feat, n_head, dropout) precision = torch.float self.rotary_ndims = self.d_k # also try self.d_k//2 if precision == "fp16": precision = torch.half self.rotary_emb = RotaryPositionalEmbedding( self.rotary_ndims, base=rotary_emd_base, precision=precision ) def forward(self, query, key, value, key_padding_mask=None, **kwargs): """Compute rotary position attention. Args: query: Query tensor T X B X C key: Key tensor T X B X C value: Value tensor T X B X C key_padding_mask: Mask tensor T X B Returns: torch.Tensor: Output tensor T X B X D. 
Notes: Assumes self attn """ T, B, C = value.size() query = query.view(T, B, self.h, self.d_k) key = key.view(T, B, self.h, self.d_k) value = value.view(T, B, self.h, self.d_k) cos, sin = self.rotary_emb(value, seq_len=T) query, key = apply_rotary_pos_emb( query, key, cos, sin, offset=0 ) # offset is based on layer_past query = query.view(T, B, self.h * self.d_k) key = key.view(T, B, self.h * self.d_k) value = value.view(T, B, self.h * self.d_k) # TBD to BTD query = query.transpose(0, 1) key = key.transpose(0, 1) value = value.transpose(0, 1) q, k, v = self.forward_qkv(query, key, value) scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) scores = self.forward_attention(v, scores, key_padding_mask) scores = scores.transpose(0, 1) return scores, None
EXA-1-master
exa/libraries/fairseq/fairseq/modules/espnet_multihead_attention.py
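A minimal self-attention sketch for the ESPNET attention above, assuming fairseq is installed; shapes are toy values:

import torch
from fairseq.modules.espnet_multihead_attention import ESPNETMultiHeadedAttention

mha = ESPNETMultiHeadedAttention(n_feat=64, n_head=4, dropout=0.1)
x = torch.randn(10, 2, 64)                 # T x B x C
out, _ = mha(x, x, x, key_padding_mask=None)
print(out.shape)                           # torch.Size([10, 2, 64])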
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
transpose last 2 dimensions of the input
"""

import torch.nn as nn


class TransposeLast(nn.Module):
    def __init__(self, deconstruct_idx=None, tranpose_dim=-2):
        super().__init__()
        self.deconstruct_idx = deconstruct_idx
        self.tranpose_dim = tranpose_dim

    def forward(self, x):
        if self.deconstruct_idx is not None:
            x = x[self.deconstruct_idx]
        return x.transpose(self.tranpose_dim, -1)
EXA-1-master
exa/libraries/fairseq/fairseq/modules/transpose_last.py
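A one-line usage sketch for the module above, assuming fairseq is installed:

import torch
from fairseq.modules.transpose_last import TransposeLast

x = torch.randn(4, 8, 16)        # B x C x T
y = TransposeLast()(x)           # swaps the last two dims -> B x T x C
print(y.shape)                   # torch.Size([4, 16, 8])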
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Any, Optional import torch import torch.onnx.operators from fairseq import utils from torch import Tensor, nn class SinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length. Padding symbols are ignored. """ def __init__(self, embedding_dim, padding_idx, init_size=1024): super().__init__() self.embedding_dim = embedding_dim self.padding_idx = padding_idx if padding_idx is not None else 0 self.weights = SinusoidalPositionalEmbedding.get_embedding( init_size, embedding_dim, padding_idx ) self.onnx_trace = False self.register_buffer("_float_tensor", torch.FloatTensor(1)) self.max_positions = int(1e5) def prepare_for_onnx_export_(self): self.onnx_trace = True @staticmethod def get_embedding( num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None ): """Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze( 1 ) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view( num_embeddings, -1 ) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb def forward( self, input, incremental_state: Optional[Any] = None, timestep: Optional[Tensor] = None, positions: Optional[Any] = None, ): """Input is expected to be of size [bsz x seqlen].""" bspair = torch.onnx.operators.shape_as_tensor(input) bsz, seq_len = bspair[0], bspair[1] max_pos = self.padding_idx + 1 + seq_len if self.weights is None or max_pos > self.weights.size(0): # recompute/expand embeddings if needed self.weights = SinusoidalPositionalEmbedding.get_embedding( max_pos, self.embedding_dim, self.padding_idx ) self.weights = self.weights.to(self._float_tensor) if incremental_state is not None: # positions is the same for every token when decoding a single step pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len if self.onnx_trace: return ( self.weights.index_select(index=self.padding_idx + pos, dim=0) .unsqueeze(1) .repeat(bsz, 1, 1) ) return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1) positions = utils.make_positions( input, self.padding_idx, onnx_trace=self.onnx_trace ) if self.onnx_trace: flat_embeddings = self.weights.detach().index_select(0, positions.view(-1)) embedding_shape = torch.cat( (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long)) ) embeddings = torch.onnx.operators.reshape_from_tensor_shape( flat_embeddings, embedding_shape ) return embeddings return ( self.weights.index_select(0, positions.view(-1)) .view(bsz, seq_len, -1) .detach() )
EXA-1-master
exa/libraries/fairseq/fairseq/modules/sinusoidal_positional_embedding.py
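An illustrative sketch of the sinusoidal embedding above, assuming fairseq is installed; padding_idx=1 follows the usual fairseq dictionary convention and the token ids are toy values:

import torch
from fairseq.modules.sinusoidal_positional_embedding import SinusoidalPositionalEmbedding

emb = SinusoidalPositionalEmbedding(embedding_dim=16, padding_idx=1, init_size=32)
tokens = torch.tensor([[5, 6, 7, 1], [5, 6, 1, 1]])   # 1 marks padding
pos = emb(tokens)                                     # padding positions get the zero row
print(pos.shape)                                      # torch.Size([2, 4, 16])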
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn
import torch.nn.functional as F

try:
    from apex.normalization import FusedLayerNorm as _FusedLayerNorm

    has_fused_layernorm = True

    class FusedLayerNorm(_FusedLayerNorm):
        @torch.jit.unused
        def forward(self, x):
            if not x.is_cuda:
                return super().forward(x)
            else:
                with torch.cuda.device(x.device):
                    return super().forward(x)

except ImportError:
    has_fused_layernorm = False


def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
    if torch.jit.is_scripting() or torch.jit.is_tracing():
        export = True
    if not export and torch.cuda.is_available() and has_fused_layernorm:
        return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
    return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)


class Fp32LayerNorm(nn.LayerNorm):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, input):
        output = F.layer_norm(
            input.float(),
            self.normalized_shape,
            self.weight.float() if self.weight is not None else None,
            self.bias.float() if self.bias is not None else None,
            self.eps,
        )
        return output.type_as(input)
EXA-1-master
exa/libraries/fairseq/fairseq/modules/layer_norm.py
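An illustrative sketch of the two entry points above, assuming fairseq is installed; LayerNorm() returns the fused Apex kernel only when Apex and a GPU are available, otherwise plain torch.nn.LayerNorm:

import torch
from fairseq.modules.layer_norm import LayerNorm, Fp32LayerNorm

x = torch.randn(2, 10, 64)

ln = LayerNorm(64)
print(ln(x).shape)            # torch.Size([2, 10, 64])

ln32 = Fp32LayerNorm(64)
y = ln32(x.half())            # statistics computed in fp32, result cast back to fp16
print(y.dtype)                # torch.float16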
import math from functools import reduce, wraps from inspect import isfunction from operator import mul import torch import torch.nn as nn import torch.nn.functional as F from aml.multimodal_video.utils.einops.lib import rearrange, repeat from aml.multimodal_video.utils.einops.lib.layers.torch import Rearrange from fairseq.modules.local_attention import LocalAttention # constants TOKEN_SELF_ATTN_VALUE = -5e4 KMEAN_INIT_ITERS = 10 # helper functions def exists(val): return val is not None def identity(x, *args, **kwargs): return x def default(x, d): if not exists(x): return d if not isfunction(d) else d() return x def cast_tuple(x): return x if isinstance(x, tuple) else (x,) def cache_fn(f): cache = None @wraps(f) def cached_fn(*args, **kwargs): nonlocal cache if exists(cache): return cache cache = f(*args, **kwargs) return cache return cached_fn def to(t): return {"device": t.device, "dtype": t.dtype} def find_modules(nn_module, type): return [module for module in nn_module.modules() if isinstance(module, type)] def is_empty(t): return t.nelement() == 0 def max_neg_value(tensor): return -torch.finfo(tensor.dtype).max def batched_index_select(values, indices): last_dim = values.shape[-1] return values.gather(2, expand_dim(indices, -1, last_dim)) def merge_dims(ind_from, ind_to, tensor): shape = list(tensor.shape) arr_slice = slice(ind_from, ind_to + 1) shape[arr_slice] = [reduce(mul, shape[arr_slice])] return tensor.reshape(*shape) def expand_dim(t, dim, k): t = t.unsqueeze(dim) expand_shape = [-1] * len(t.shape) expand_shape[dim] = k return t.expand(*expand_shape) def scatter_mean(src, t, index, dim, eps=1e-5): numer = src.scatter_add(dim, index, t) denom = src.scatter_add(dim, index, torch.ones_like(t)) return numer / (denom + eps) def split_at_index(dim, index, t): pre_slices = (slice(None),) * dim l = (*pre_slices, slice(None, index)) r = (*pre_slices, slice(index, None)) return t[l], t[r] def reshape_dim(t, dim, split_dims): shape = list(t.shape) num_dims = len(shape) dim = (dim + num_dims) % num_dims shape[dim : dim + 1] = split_dims return t.reshape(shape) def ema(old, new, decay): if not exists(old): return new return old * decay + new * (1 - decay) def ema_inplace(moving_avg, new, decay): if is_empty(moving_avg): moving_avg.data.copy_(new) return moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay)) # helper classes def map_first_tuple_or_el(x, fn): if isinstance(x, tuple): return (fn(x[0]),) + x[1:] return fn(x) class Chunk(nn.Module): def __init__(self, chunks, fn, along_dim=-1): super().__init__() self.dim = along_dim self.chunks = chunks self.fn = fn def forward(self, x, **kwargs): if self.chunks <= 1: return self.fn(x, **kwargs) chunks = x.chunk(self.chunks, dim=self.dim) return torch.cat([self.fn(c, **kwargs) for c in chunks], dim=self.dim) class PreNorm(nn.ModuleList): def __init__(self, norm_class, dim, fn): super().__init__() self.norm = norm_class(dim) self.fn = fn def forward(self, x, **kwargs): x = self.norm(x) return self.fn(x, **kwargs) class ReZero(nn.Module): def __init__(self, fn): super().__init__() self.residual_weight = nn.Parameter(torch.zeros(1)) self.fn = fn def forward(self, x, **kwargs): x = self.fn(x, **kwargs) return map_first_tuple_or_el(x, lambda t: t * self.residual_weight) class ScaleNorm(nn.Module): def __init__(self, dim, eps=1e-5): super().__init__() self.g = nn.Parameter(torch.ones(1)) self.eps = eps def forward(self, x): def norm(t): n = torch.norm(t, dim=-1, keepdim=True).clamp(min=self.eps) return t / n * self.g return 
map_first_tuple_or_el(x, norm) class ProjectInOut(nn.Module): def __init__(self, fn, dim_in, dim_out, project_out=True): super().__init__() self.fn = fn self.project_in = nn.Linear(dim_in, dim_out) self.project_out = nn.Linear(dim_out, dim_in) if project_out else identity def forward(self, x, **kwargs): x = self.project_in(x) x, loss = self.fn(x, **kwargs) x = self.project_out(x) return x, loss class MatrixMultiply(nn.Module): def __init__(self, tensor, transpose=False): super().__init__() self.tensor = tensor self.transpose = transpose def forward(self, x): tensor = self.tensor if self.transpose: tensor = tensor.t() return x @ tensor # positional embeddings class DepthWiseConv1d(nn.Module): def __init__(self, dim_in, dim_out, kernel_size, stride=1, bias=True, causal=False): super().__init__() self.padding = ( ((kernel_size - 1), 0) if causal else (kernel_size // 2, kernel_size // 2) ) self.net = nn.Sequential( nn.Conv1d( dim_in, dim_in, kernel_size=kernel_size, groups=dim_in, stride=stride, bias=bias, ), nn.Conv1d(dim_in, dim_out, 1, bias=bias), ) def forward(self, x): x = F.pad(x, self.padding, value=0.0) return self.net(x) class FixedPositionalEmbedding(nn.Module): def __init__(self, dim, max_seq_len): super().__init__() inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim)) position = torch.arange(0, max_seq_len, dtype=torch.float) sinusoid_inp = torch.einsum("i,j->ij", position, inv_freq) emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) self.register_buffer("emb", emb) def forward(self, x): return self.emb[None, : x.shape[1], :].to(x) def rotate_every_two(x): x = rearrange(x, "... (d j) -> ... d j", j=2) x1, x2 = x.unbind(dim=-1) x = torch.stack((-x2, x1), dim=-1) return rearrange(x, "... d j -> ... (d j)") def apply_rotary_pos_emb(q, k, sinu_pos): sinu_pos = rearrange(sinu_pos, "() n (j d) -> n j d", j=2) sin, cos = sinu_pos.unbind(dim=-2) sin, cos = map(lambda t: repeat(t, "b n -> b (n j)", j=2), (sin, cos)) q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k)) return q, k # kmeans related function and class def update_kmeans_on_backwards(module): module.kmean_modules = find_modules(module, Kmeans) def hook(_, grad_in, grad_out): for m in module.kmean_modules: m.update() return module.register_backward_hook(hook) def similarity(x, means): return torch.einsum("bhld,hcd->bhlc", x, means) def dists_and_buckets(x, means): dists = similarity(x, means) _, buckets = torch.max(dists, dim=-1) return dists, buckets def batched_bincount(index, num_classes, dim=-1): shape = list(index.shape) shape[dim] = num_classes out = index.new_zeros(shape) out.scatter_add_(dim, index, torch.ones_like(index, dtype=index.dtype)) return out def kmeans_iter(x, means, buckets=None): b, h, _, d, dtype, num_clusters = *x.shape, x.dtype, means.shape[1] if not exists(buckets): _, buckets = dists_and_buckets(x, means) bins = batched_bincount(buckets, num_clusters).sum(0, keepdim=True) zero_mask = bins.long() == 0 means_ = buckets.new_zeros(b, h, num_clusters, d, dtype=dtype) means_.scatter_add_(-2, expand_dim(buckets, -1, d), x) means_ = F.normalize(means_.sum(0, keepdim=True), dim=-1).type(dtype) means = torch.where(zero_mask.unsqueeze(-1), means, means_) means = means.squeeze(0) return means def distribution(dists, window_size): _, topk_indices = dists.topk(k=window_size, dim=-2) indices = topk_indices.transpose(-2, -1) return indices.reshape(*indices.size()[:2], -1) class Kmeans(nn.Module): def __init__( self, num_heads, head_dim, num_clusters, ema_decay=0.999, 
commitment=1e-4 ): super().__init__() self.commitment = commitment self.ema_decay = ema_decay self.register_buffer("means", torch.randn(num_heads, num_clusters, head_dim)) self.register_buffer("initted", torch.tensor(False)) self.num_new_means = 0 self.new_means = None @torch.no_grad() def init(self, x): if self.initted: return _, h, _, d, device, _ = *x.shape, x.device, x.dtype num_clusters = self.means.shape[1] means = x.transpose(0, 1).contiguous().view(h, -1, d) num_samples = means.shape[1] if num_samples >= num_clusters: indices = torch.randperm(num_samples, device=device)[:num_clusters] else: indices = torch.randint(0, num_samples, (num_clusters,), device=device) means = means[:, indices] for _ in range(KMEAN_INIT_ITERS): means = kmeans_iter(x, means) self.num_new_means = 0 self.means.data.copy_(means) self.initted.data.copy_(torch.tensor(True)) @torch.no_grad() def update(self, new_means=None): new_means = default(new_means, self.new_means) assert exists(new_means), "new kmeans has not been supplied" ema_inplace(self.means, new_means, self.ema_decay) del self.new_means self.new_means = None self.num_new_means = 0 def forward(self, x, update_means=False): self.init(x) b, dtype = x.shape[0], x.dtype means = self.means.type(dtype) x = F.normalize(x, 2, dim=-1).type(dtype) with torch.no_grad(): dists, buckets = dists_and_buckets(x, means) routed_means = batched_index_select(expand_dim(means, 0, b), buckets) loss = F.mse_loss(x, routed_means) * self.commitment if update_means: with torch.no_grad(): means = kmeans_iter(x, means, buckets) self.new_means = ema( self.new_means, means, self.num_new_means / (self.num_new_means + 1) ) self.num_new_means += 1 return dists, loss # kmeans attention class class KmeansAttention(nn.Module): def __init__( self, num_clusters, window_size, num_heads, head_dim, causal=False, dropout=0.0, ema_decay=0.999, commitment=1e-4, context_window_size=None, receives_context=False, num_mem_kv=0, shared_qk=False, ): super().__init__() self.num_heads = num_heads self.num_clusters = num_clusters self.head_dim = head_dim self.window_size = window_size self.context_window_size = default(context_window_size, window_size) self.causal = causal self.shared_qk = shared_qk self.receives_context = receives_context self.kmeans = Kmeans(num_heads, head_dim, num_clusters, ema_decay, commitment) self.dropout = nn.Dropout(dropout) self.num_mem_kv = max(num_mem_kv, 1 if causal and not shared_qk else 0) self.mem_key = nn.Parameter( torch.randn(num_heads, num_clusters, self.num_mem_kv, head_dim) ) self.mem_value = nn.Parameter( torch.randn(num_heads, num_clusters, self.num_mem_kv, head_dim) ) def forward(self, q, k, v, query_mask=None, key_mask=None, **kwargs): b, h, t, d, kv_t, wsz, c_wsz, nc, device, dtype = ( *q.shape, k.shape[2], self.window_size, self.context_window_size, self.num_clusters, q.device, q.dtype, ) is_reverse = kwargs.pop("_reverse", False) out = torch.zeros_like(q, dtype=dtype) update_kmeans = self.training and not is_reverse key_mask = ( default(key_mask, query_mask) if not self.receives_context else key_mask ) kv_wsz = wsz if not self.receives_context else c_wsz wsz = min(wsz, t) kv_wsz = min(kv_wsz, kv_t) if not self.shared_qk or self.receives_context: dists, aux_loss = self.kmeans(torch.cat((q, k), dim=2), update_kmeans) q_dists, k_dists = split_at_index(2, t, dists) indices = distribution(q_dists, wsz) kv_indices = distribution(k_dists, kv_wsz) else: dists, aux_loss = self.kmeans(q, update_kmeans) k = F.normalize(k, dim=-1).to(q) indices = distribution(dists, 
wsz) kv_indices = indices q = batched_index_select(q, indices) k = batched_index_select(k, kv_indices) v = batched_index_select(v, kv_indices) reshape_with_window = lambda x: x.reshape(b, h, nc, -1, d) q, k, v = map(reshape_with_window, (q, k, v)) m_k, m_v = map( lambda x: expand_dim(x, 0, b).to(q), (self.mem_key, self.mem_value) ) k, v = map(lambda x: torch.cat(x, dim=3), ((m_k, k), (m_v, v))) dots = torch.einsum("bhnid,bhnjd->bhnij", q, k) * (d**-0.5) mask_value = max_neg_value(dots) if exists(query_mask) or exists(key_mask): query_mask = default( query_mask, lambda: torch.ones((b, t), device=device).bool() ) key_mask = default( key_mask, lambda: torch.ones((b, kv_t), device=device).bool() ) q_mask = expand_dim(query_mask, 1, h).gather(2, indices) kv_mask = expand_dim(key_mask, 1, h).gather(2, kv_indices) q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (q_mask, kv_mask)) mask = q_mask[:, :, :, :, None] * kv_mask[:, :, :, None, :] mask = F.pad(mask, (self.num_mem_kv, 0), value=1) dots.masked_fill_(~mask, mask_value) del mask if self.causal: q_mask, kv_mask = map( lambda t: t.reshape(b, h, nc, -1), (indices, kv_indices) ) mask = q_mask[:, :, :, :, None] >= kv_mask[:, :, :, None, :] mask = F.pad(mask, (self.num_mem_kv, 0), value=1) dots.masked_fill_(~mask, mask_value) del mask if self.shared_qk: q_mask, kv_mask = map( lambda t: t.reshape(b, h, nc, -1), (indices, kv_indices) ) mask = q_mask[:, :, :, :, None] == kv_mask[:, :, :, None, :] mask = F.pad(mask, (self.num_mem_kv, 0), value=0) dots.masked_fill_(mask, TOKEN_SELF_ATTN_VALUE) del mask dots = dots.softmax(dim=-1) dots = self.dropout(dots) bo = torch.einsum("bhcij,bhcjd->bhcid", dots, v) so = torch.reshape(bo, (b, h, -1, bo.shape[-1])).type(dtype) out = scatter_mean(out, so, indices.unsqueeze(-1).expand_as(so), -2) return out, aux_loss # feedforward class GELU_(nn.Module): def forward(self, x): return ( 0.5 * x * ( 1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))) ) ) GELU = nn.GELU if hasattr(nn, "GELU") else GELU_ class FeedForward(nn.Module): def __init__(self, dim, mult=4, dropout=0.0, activation=None, glu=False): super().__init__() activation = default(activation, GELU) self.glu = glu self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1)) self.act = activation() self.dropout = nn.Dropout(dropout) self.w2 = nn.Linear(dim * mult, dim) def forward(self, x, **kwargs): if not self.glu: x = self.w1(x) x = self.act(x) else: x, v = self.w1(x).chunk(2, dim=-1) x = self.act(x) * v x = self.dropout(x) x = self.w2(x) return x # self attention class SelfAttention(nn.Module): def __init__( self, dim, max_seq_len, heads, local_attn_heads, window_size, dim_head=None, local_attn_window_size=None, local_attn_radius_blocks=1, causal=False, attn_dropout=0.0, dropout=0.0, kmeans_ema_decay=0.999, commitment_factor=1e-4, receives_context=False, context_window_size=None, rel_pos_emb=True, num_mem_kv=0, shared_qk=False, conv_query_kernel=9, ): super().__init__() assert ( dim_head or (dim % heads) == 0 ), "hidden dimension must be divisible by number of heads" assert ( max_seq_len % window_size ) == 0, "maximum sequence length must be divisible by the target window size" assert ( local_attn_heads <= heads ), "number of local attention heads must be less than total heads" assert not ( receives_context and local_attn_heads > 0 ), "local attention cannot be used for self attention with context" assert not ( receives_context and causal ), "contextual attention layer cannot be causal" local_attn_window_size = 
default(local_attn_window_size, window_size) context_window_size = default(context_window_size, window_size) self.shared_qk = shared_qk self.receives_context = receives_context self.heads = heads self.local_attn_heads = local_attn_heads self.global_attn_heads = heads - local_attn_heads self.causal = causal self.window_size = window_size dim_head = default(dim_head, dim // heads) dim_heads = dim_head * heads self.dim_head = dim_head num_clusters = max_seq_len // window_size # local local_dim_heads = dim_head * self.local_attn_heads if self.local_attn_heads > 0: rel_pos_emb_config = (dim_head, local_attn_heads) if rel_pos_emb else None self.local_attn = LocalAttention( dim=dim_head, window_size=local_attn_window_size, causal=causal, dropout=attn_dropout, rel_pos_emb_config=rel_pos_emb_config, look_backward=local_attn_radius_blocks, look_forward=0 if causal else local_attn_radius_blocks, ) self.local_to_qkv = nn.Linear(dim, 3 * local_dim_heads) # global global_dim_heads = dim_head * self.global_attn_heads if self.global_attn_heads > 0: self.global_attn = KmeansAttention( num_clusters, window_size, self.global_attn_heads, dim_head, causal=causal, dropout=attn_dropout, ema_decay=kmeans_ema_decay, commitment=commitment_factor, receives_context=receives_context, num_mem_kv=num_mem_kv, shared_qk=shared_qk, ) self.to_q = nn.Sequential( Rearrange("b n c -> b c n"), DepthWiseConv1d(dim, global_dim_heads, conv_query_kernel, causal=causal), Rearrange("b c n -> b n c"), ) self.to_v = nn.Linear(dim, global_dim_heads, bias=False) if not self.shared_qk: self.to_k = nn.Linear(dim, global_dim_heads, bias=False) # out self.to_out = nn.Linear(dim_heads, dim, bias=False) self.dropout = nn.Dropout(dropout) def forward( self, query, key, value, context=None, key_padding_mask=None, context_mask=None, pos_emb=None, **kwargs ): assert not ( self.receives_context and not exists(context) ), "context must be passed if self attention is set to receive context" input_mask = key_padding_mask x = query.transpose(0, 1) b, t, _, h, dh = *x.shape, self.heads, self.dim_head has_local, has_global = map( lambda x: x > 0, (self.local_attn_heads, self.global_attn_heads) ) split_heads = ( lambda v: reshape_dim(v, -1, (-1, dh)).transpose(1, 2).contiguous() ) if has_local: local_qkv = self.local_to_qkv(x).chunk(3, dim=-1) lq, lk, lv = map(split_heads, local_qkv) if has_global: kv_input = x if not self.receives_context else context q, v = self.to_q(x), self.to_v(kv_input) if not self.shared_qk: k = self.to_k(kv_input) else: k = self.to_q(kv_input) if self.receives_context else q q, k, v = map(split_heads, (q, k, v)) out = [] total_loss = torch.tensor(0.0, requires_grad=True, **to(x)) if has_local: local_out = self.local_attn(lq, lk, lv, input_mask=input_mask) out.append(local_out) if has_global: if not self.receives_context and exists(pos_emb): q, k = apply_rotary_pos_emb(q, k, pos_emb) global_out, loss = self.global_attn( q, k, v, query_mask=input_mask, key_mask=context_mask ) total_loss = total_loss + loss out.append(global_out) out = torch.cat(out, dim=1) out = out.reshape(b, h, t, -1).transpose(1, 2).reshape(b, t, -1) out = self.dropout(out.transpose(0, 1)) # out = self.to_out(out) return out, total_loss
EXA-1-master
exa/libraries/fairseq/fairseq/modules/kmeans_attention.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from torch import nn


class SamePad(nn.Module):
    def __init__(self, kernel_size, causal=False):
        super().__init__()
        if causal:
            self.remove = kernel_size - 1
        else:
            self.remove = 1 if kernel_size % 2 == 0 else 0

    def forward(self, x):
        if self.remove > 0:
            x = x[:, :, : -self.remove]
        return x


class SamePad2d(nn.Module):
    def __init__(self, kernel_size):
        super().__init__()
        self.remove = 1 if kernel_size % 2 == 0 else 0

    def forward(self, x):
        assert len(x.size()) == 4
        if self.remove > 0:
            x = x[:, :, : -self.remove, : -self.remove]
        return x
EXA-1-master
exa/libraries/fairseq/fairseq/modules/same_pad.py
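A short usage sketch for SamePad above, assuming fairseq is installed; it trims the extra frame produced by an even kernel with symmetric padding so the output length matches the input:

import torch
from fairseq.modules.same_pad import SamePad

conv = torch.nn.Conv1d(8, 8, kernel_size=4, padding=2)   # stride-1 conv, even kernel
trim = SamePad(kernel_size=4)

x = torch.randn(2, 8, 100)         # B x C x T
y = trim(conv(x))                  # conv yields length 101; SamePad trims back to 100
print(y.shape)                     # torch.Size([2, 8, 100])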
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


def parse_config_yaml(yaml_data):
    # Initialize to default options.
    quantization_options = {
        "n_centroids": {
            "Linear": ["in_features", {"*": 256}],
            "Embedding": ["embedding_dim", {"*": 256}],
        },
        "block_sizes": {
            "Linear": ["fuzzy_name", {"fc": 8, "attn": 4, "emb": 4}],
            "Embedding": ["fuzzy_name", {"emb": 8}],
        },
        "layers_to_quantize": [
            "decoder\\.layers\\.\\d+\\.fc[12]",
            "decoder\\.embed_tokens\\.embeddings\\.[012]\\.[01]",
            "decoder\\.layers\\.\\d+\\.self_attn\\.(k_proj|v_proj|q_proj|out_proj)",
        ],
    }

    if "n_centroids" in yaml_data:
        quantization_options["n_centroids"] = {
            layer: convert_yaml_to_tuple(layer_data)
            for layer, layer_data in yaml_data["n_centroids"].items()
        }

    if "block_sizes" in yaml_data:
        quantization_options["block_sizes"] = {
            layer: convert_yaml_to_tuple(layer_data)
            for layer, layer_data in yaml_data["block_sizes"].items()
        }

    if "layers_to_quantize" in yaml_data:
        quantization_options["layers_to_quantize"] = yaml_data["layers_to_quantize"]

    return quantization_options


def convert_yaml_to_tuple(yaml_dictionary):
    """Converts a yaml dictionary with two keys: `key` and `value` into a two
    argument tuple of those values."""
    return (yaml_dictionary["key"], yaml_dictionary["value"])
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/quantization_options.py
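An illustrative sketch of parse_config_yaml above, assuming fairseq is installed; the YAML is written inline as an already-parsed dict with toy values:

from fairseq.modules.quantization.quantization_options import parse_config_yaml

yaml_data = {
    "n_centroids": {"Linear": {"key": "in_features", "value": {"*": 256}}},
    "block_sizes": {"Linear": {"key": "fuzzy_name", "value": {"fc": 8}}},
    "layers_to_quantize": ["decoder\\.layers\\.\\d+\\.fc[12]"],
}
options = parse_config_yaml(yaml_data)
print(options["n_centroids"]["Linear"])   # ('in_features', {'*': 256})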
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .utils import SizeTracker, get_param, attrsetter, quantize_model_  # NOQA
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/pq/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .em import EM, EmptyClusterResolveError class PQ(EM): """ Quantizes the layer weights W with the standard Product Quantization technique. This learns a codebook of codewords or centroids of size block_size from W. For further reference on using PQ to quantize neural networks, see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks", Stock et al., ICLR 2020. PQ is performed in two steps: (1) The matrix W (weights or fully-connected or convolutional layer) is reshaped to (block_size, -1). - If W is fully-connected (2D), its columns are split into blocks of size block_size. - If W is convolutional (4D), its filters are split along the spatial dimension. (2) We apply the standard EM/k-means algorithm to the resulting reshaped matrix. Args: - W: weight matrix to quantize of size (in_features x out_features) - block_size: size of the blocks (subvectors) - n_centroids: number of centroids - n_iter: number of k-means iterations - eps: for cluster reassignment when an empty cluster is found - max_tentatives for cluster reassignment when an empty cluster is found - verbose: print information after each iteration Remarks: - block_size be compatible with the shape of W """ def __init__( self, W, block_size, n_centroids=256, n_iter=20, eps=1e-6, max_tentatives=30, verbose=True, ): self.block_size = block_size W_reshaped = self._reshape(W) super(PQ, self).__init__( W_reshaped, n_centroids=n_centroids, n_iter=n_iter, eps=eps, max_tentatives=max_tentatives, verbose=verbose, ) def _reshape(self, W): """ Reshapes the matrix W as expained in step (1). """ # fully connected: by convention the weight has size out_features x in_features if len(W.size()) == 2: self.out_features, self.in_features = W.size() assert ( self.in_features % self.block_size == 0 ), "Linear: n_blocks must be a multiple of in_features" return ( W.reshape(self.out_features, -1, self.block_size) .permute(2, 1, 0) .flatten(1, 2) ) # convolutional: we reshape along the spatial dimension elif len(W.size()) == 4: self.out_channels, self.in_channels, self.k_h, self.k_w = W.size() assert ( self.in_channels * self.k_h * self.k_w ) % self.block_size == 0, ( "Conv2d: n_blocks must be a multiple of in_channels * k_h * k_w" ) return ( W.reshape(self.out_channels, -1, self.block_size) .permute(2, 1, 0) .flatten(1, 2) ) # not implemented else: raise NotImplementedError(W.size()) def encode(self): """ Performs self.n_iter EM steps. """ self.initialize_centroids() for i in range(self.n_iter): try: self.step(i) except EmptyClusterResolveError: break def decode(self): """ Returns the encoded full weight matrix. Must be called after the encode function. """ # fully connected case if "k_h" not in self.__dict__: return ( self.centroids[self.assignments] .reshape(-1, self.out_features, self.block_size) .permute(1, 0, 2) .flatten(1, 2) ) # convolutional case else: return ( self.centroids[self.assignments] .reshape(-1, self.out_channels, self.block_size) .permute(1, 0, 2) .reshape(self.out_channels, self.in_channels, self.k_h, self.k_w) )
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/pq/pq.py
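An illustrative sketch of the PQ class above, assuming fairseq is installed; the weight matrix, block size, and codebook size are toy values:

import torch
from fairseq.modules.quantization.pq.pq import PQ

W = torch.randn(32, 16)                                  # out_features x in_features
quantizer = PQ(W, block_size=4, n_centroids=8, n_iter=5, verbose=False)
quantizer.encode()                                       # run EM on the reshaped blocks
W_hat = quantizer.decode()                               # reconstruction from centroids
print((W - W_hat).norm() / W.norm())                     # relative reconstruction error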
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import random from collections import Counter import torch class EM: """ EM algorithm used to quantize the columns of W to minimize ||W - W_hat||^2 Args: - W: weight matrix of size (in_features x out_features) - n_iter: number of k-means iterations - n_centroids: number of centroids (size of codebook) - eps: for cluster reassignment when an empty cluster is found - max_tentatives for cluster reassignment when an empty cluster is found - verbose: print error after each iteration Remarks: - If one cluster is empty, the most populated cluster is split into two clusters - All the relevant dimensions are specified in the code """ def __init__( self, W, n_centroids=256, n_iter=20, eps=1e-6, max_tentatives=30, verbose=True ): self.W = W self.n_centroids = n_centroids self.n_iter = n_iter self.eps = eps self.max_tentatives = max_tentatives self.verbose = verbose self.centroids = torch.Tensor() self.assignments = torch.Tensor() self.objective = [] def initialize_centroids(self): """ Initializes the centroids by sampling random columns from W. """ in_features, out_features = self.W.size() indices = torch.randint( low=0, high=out_features, size=(self.n_centroids,) ).long() self.centroids = self.W[:, indices].t() # (n_centroids x in_features) def step(self, i): """ There are two standard steps for each iteration: expectation (E) and minimization (M). The E-step (assignment) is performed with an exhaustive search and the M-step (centroid computation) is performed with the exact solution. Args: - i: step number Remarks: - The E-step heavily uses PyTorch broadcasting to speed up computations and reduce the memory overhead """ # assignments (E-step) distances = self.compute_distances() # (n_centroids x out_features) self.assignments = torch.argmin(distances, dim=0) # (out_features) n_empty_clusters = self.resolve_empty_clusters() # centroids (M-step) for k in range(self.n_centroids): W_k = self.W[:, self.assignments == k] # (in_features x size_of_cluster_k) self.centroids[k] = W_k.mean(dim=1) # (in_features) # book-keeping obj = (self.centroids[self.assignments].t() - self.W).norm(p=2).item() self.objective.append(obj) if self.verbose: logging.info( f"Iteration: {i},\t" f"objective: {obj:.6f},\t" f"resolved empty clusters: {n_empty_clusters}" ) def resolve_empty_clusters(self): """ If one cluster is empty, the most populated cluster is split into two clusters by shifting the respective centroids. This is done iteratively for a fixed number of tentatives. 
""" # empty clusters counts = Counter(map(lambda x: x.item(), self.assignments)) empty_clusters = set(range(self.n_centroids)) - set(counts.keys()) n_empty_clusters = len(empty_clusters) tentatives = 0 while len(empty_clusters) > 0: # given an empty cluster, find most populated cluster and split it into two k = random.choice(list(empty_clusters)) m = counts.most_common(1)[0][0] e = torch.randn_like(self.centroids[m]) * self.eps self.centroids[k] = self.centroids[m].clone() self.centroids[k] += e self.centroids[m] -= e # recompute assignments distances = self.compute_distances() # (n_centroids x out_features) self.assignments = torch.argmin(distances, dim=0) # (out_features) # check for empty clusters counts = Counter(map(lambda x: x.item(), self.assignments)) empty_clusters = set(range(self.n_centroids)) - set(counts.keys()) # increment tentatives if tentatives == self.max_tentatives: logging.info( f"Could not resolve all empty clusters, {len(empty_clusters)} remaining" ) raise EmptyClusterResolveError tentatives += 1 return n_empty_clusters def compute_distances(self): """ For every centroid m, computes ||M - m[None, :]||_2 Remarks: - We rely on PyTorch's broadcasting to speed up computations and reduce the memory overhead - Without chunking, the sizes in the broadcasting are modified as: (n_centroids x n_samples x out_features) -> (n_centroids x out_features) - The broadcasting computation is automatically chunked so that the tensors fit into the memory of the GPU """ nb_centroids_chunks = 1 while True: try: return torch.cat( [ (self.W[None, :, :] - centroids_c[:, :, None]).norm(p=2, dim=1) for centroids_c in self.centroids.chunk( nb_centroids_chunks, dim=0 ) ], dim=0, ) except RuntimeError: nb_centroids_chunks *= 2 def assign(self): """ Assigns each column of W to its closest centroid, thus essentially performing the E-step in train(). Remarks: - The function must be called after train() or after loading centroids using self.load(), otherwise it will return empty tensors """ distances = self.compute_distances() # (n_centroids x out_features) self.assignments = torch.argmin(distances, dim=0) # (out_features) def save(self, path, layer): """ Saves centroids and assignments. Args: - path: folder used to save centroids and assignments """ torch.save(self.centroids, os.path.join(path, "{}_centroids.pth".format(layer))) torch.save( self.assignments, os.path.join(path, "{}_assignments.pth".format(layer)) ) torch.save(self.objective, os.path.join(path, "{}_objective.pth".format(layer))) def load(self, path, layer): """ Loads centroids and assignments from a given path Args: - path: folder use to load centroids and assignments """ self.centroids = torch.load( os.path.join(path, "{}_centroids.pth".format(layer)) ) self.assignments = torch.load( os.path.join(path, "{}_assignments.pth".format(layer)) ) self.objective = torch.load( os.path.join(path, "{}_objective.pth".format(layer)) ) class EmptyClusterResolveError(Exception): pass
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/pq/em.py
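A minimal usage sketch for the EM quantizer in the row above. The import path follows the file_path column, the sizes are illustrative, and the explicit initialize/step loop uses only methods defined in that file (this is a hypothetical driver, not fairseq's own entry point).

import torch

from fairseq.modules.quantization.pq.em import EM

# Hypothetical example: quantize the columns of a random weight matrix.
torch.manual_seed(0)
W = torch.randn(64, 512)              # (in_features x out_features), as in the docstring

em = EM(W, n_centroids=32, n_iter=10, verbose=False)
em.initialize_centroids()             # centroids: (n_centroids x in_features)
for i in range(em.n_iter):
    em.step(i)                        # E-step (assignments) then M-step (centroid update)
em.assign()

W_hat = em.centroids[em.assignments].t()   # reconstruction, same shape as W
print(W.shape, W_hat.shape, em.objective[-1])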
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import re from operator import attrgetter, itemgetter import torch import numpy as np import torch.distributed as dist import torch.nn as nn from .modules import PQConv2d, PQEmbedding, PQLinear from .pq import PQ def quantize_model_( model, size_tracker, layers_to_quantize, block_sizes_config, n_centroids_config, step=0, n_iter=15, eps=1e-6, max_tentatives=100, remove_weights=False, verbose=True, state_dict=None, ): """ Quantize a model in-place by stages. All the targeted layers are replaced by their quantized counterpart, and the model is ready for the finetuning of the centroids in a standard training loop (no modifications required). Note that we do not quantize biases. Args: - model: a nn.Module - size_tracker: useful for tracking quatization statistics - layers_to_quantize: a list containing regexps for filtering the layers to quantize at each stage according to their name (as in model.named_parameters()) - block_sizes_config: dict like { 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}), 'Linear': ('in_features', {'*': 8}) } For instance, all conv2d layers with kernel size 3x3 have a block size of 9 and all Linear layers are quantized with a block size of 8, irrespective of their size. - n_centroids_config: dict like { 'Conv2d': ('kernel_size', {'*': 256}), 'Linear': ('in_features', {'*': 256}) } For instance, all conv2d layers are quantized with 256 centroids - step: the layers to quantize inplace corresponding to layers_to_quantize[step] """ quantized_layers = get_layers( model, layers_to_quantize[step], remove_weights=remove_weights ) for layer in quantized_layers: # book-keeping is_master_process = (not dist.is_initialized()) or ( dist.is_initialized() and dist.get_rank() == 0 ) verbose = verbose and is_master_process # get block size and centroids module = attrgetter(layer)(model) block_size = get_param(module, layer, block_sizes_config) n_centroids = get_param(module, layer, n_centroids_config) if verbose: logging.info( f"Quantizing layer {layer} with block size {block_size} and {n_centroids} centroids" ) # quantize layer weight = module.weight.data.clone() is_bias = "bias" in [x[0] for x in module.named_parameters()] bias = module.bias.data.clone() if is_bias else None quantizer = PQ( weight, block_size, n_centroids=n_centroids, n_iter=n_iter, eps=eps, max_tentatives=max_tentatives, verbose=verbose, ) # quantization performed on all GPUs with same seed quantizer.encode() centroids = quantizer.centroids.contiguous() assignments = quantizer.assignments.contiguous() # If n_iter = 0 and state_dict is provided, then # we initialize random assignments and centroids to # random values of the appropriate dimensions # because the quantized model parameters will # overwritten by the state_dict later on. if n_iter == 0 and state_dict: # Initialize random centroids of the correct size centroids = torch.rand(centroids.size()) centroids.cuda() # Get counts and assignment keys from layer in loaded checkpoint. counts_key = layer + "." + "counts" assignment_key = layer + "." + "assignments" # Get number of different bins to include. counts = list(state_dict[counts_key].shape)[0] print(layer) print(state_dict[counts_key]) print(counts) # Initialize random assignments of the correct size # with an appropriate number of bins. 
num_assignments = list(state_dict[assignment_key].shape)[0] num_extra = num_assignments - counts print(num_assignments) print(num_extra) assignments_bins = torch.arange(counts) assignments_rand = torch.randint(0, counts - 1, (num_extra,)) assignments = torch.cat((assignments_bins, assignments_rand), 0) # assignments = assignments.type(torch.IntTensor) assignments.cuda() print("assignments") print(assignments) # broadcast results to make sure weights are up-to-date if dist.is_initialized(): dist.broadcast(centroids, 0) dist.broadcast(assignments, 0) # instantiate the quantized counterpart if isinstance(module, nn.Linear): out_features, in_features = map( lambda k: module.__dict__[k], ["out_features", "in_features"] ) quantized_module = PQLinear( centroids, assignments, bias, in_features, out_features ) elif isinstance(module, nn.Embedding): num_embeddings, embedding_dim = map( lambda k: module.__dict__[k], ["num_embeddings", "embedding_dim"] ) quantized_module = PQEmbedding( centroids, assignments, num_embeddings, embedding_dim ) elif isinstance(module, nn.Conv2d): out_channels, in_channels, kernel_size = map( lambda k: module.__dict__[k], ["out_channels", "in_channels", "kernel_size"], ) stride, padding, dilation, groups, padding_mode = map( lambda k: module.__dict__[k], ["stride", "padding", "dilation", "groups", "padding_mode"], ) quantized_module = PQConv2d( centroids, assignments, bias, in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, padding_mode=padding_mode, ) else: raise ValueError(f"Module {module} not yet supported for quantization") # replace layer by its quantized counterpart attrsetter(layer)(model, quantized_module) # update statistics size_tracker.update(weight, block_size, n_centroids) # return name of quantized layers return quantized_layers def get_layers(model, filter_regexp, remove_weights=False): """ Filters out the layers according to a regexp. Note that we omit biases. Args: - model: a nn.Module - filter_regexp: a regexp to filter the layers to keep according to their name in model.named_parameters(). For instance, the regexp: down_layers\\.[123456]\\.(conv[12]|identity\\.conv)) is keeping blocks down_layers from 1 to 6, and inside each block is keeping conv1, conv2 and identity.conv. Remarks: - We add (module\\.)? at the beginning of the regexp to account for the possible use of nn.parallel.DataParallel """ # get all parameter names all_layers = map(itemgetter(0), model.named_parameters()) # remove biases all_layers = filter(lambda x: "bias" not in x, all_layers) # remove .weight in all other names (or .weight_orig is spectral norm) all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers) # remove weights indicates whether the weights extension should be removed, in addition to # weight_orig and weight extension on names if remove_weights: all_layers = map(lambda x: x.replace(".weights", ""), all_layers) all_layers = map(lambda x: x.replace(".weight", ""), all_layers) # return filtered layers filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")" r = re.compile(filter_regexp) return list(filter(r.match, all_layers)) def get_param(module, layer_name, param_config): """ Given a quantization configuration, get the right parameter for the module to be quantized. 
Args: - module: a nn.Module - layer_name: the name of the layer - param_config: a dict like { 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}), 'Linear': ('in_features', {'*': 8}) } For instance, all conv2d layers with kernel size 3x3 have a block size of 9 and all Linear layers are quantized with a block size of 8, irrespective of their size. Remarks: - if 'fuzzy_name' is passed as a parameter, layers whose layer_name include 'fuzzy_name' will be assigned the given parameter. In the following example, conv.expand layers will have a block size of 9 while conv.reduce will have a block size of 4 and all other layers will have a block size of 2. { 'Conv2d': ('fuzzy_name', {'expand': 9, 'reduce': 4, '*': 2}), 'Linear': ('fuzzy_name', {'classifier': 8, 'projection': 4}) } """ layer_type = module.__class__.__name__ if layer_type not in param_config: raise KeyError(f"Layer type {layer_type} not in config for layer {module}") feature, params = param_config[module.__class__.__name__] if feature != "fuzzy_name": feature_value = str(getattr(module, feature)) if feature_value not in params: if "*" in params: feature_value = "*" else: raise KeyError( f"{feature}={feature_value} not in config for layer {module}" ) else: feature_values = [name for name in params if name in layer_name] if len(feature_values) == 0: if "*" in params: feature_value = "*" else: raise KeyError(f"name={layer_name} not in config for {module}") else: feature_value = feature_values[0] return params[feature_value] class SizeTracker(object): """ Class to keep track of the compressed network size with iPQ. Args: - model: a nn.Module Remarks: - The compressed size is the sum of three components for each layer in the network: (1) Storing the centroids given by iPQ in fp16 (2) Storing the assignments of the blocks in int8 (3) Storing all non-compressed elements such as biases - This cost in only valid if we use 256 centroids (then indexing can indeed by done with int8). """ def __init__(self, model): self.model = model self.size_non_compressed_model = self.compute_size() self.size_non_quantized = self.size_non_compressed_model self.size_index = 0 self.size_centroids = 0 self.n_quantized_layers = 0 def compute_size(self): """ Computes the size of the model (in MB). """ res = 0 for _, p in self.model.named_parameters(): res += p.numel() return res * 4 / 1024 / 1024 def update(self, W, block_size, n_centroids): """ Updates the running statistics when quantizing a new layer. """ # bits per weights bits_per_weight = np.log2(n_centroids) / block_size self.n_quantized_layers += 1 # size of indexing the subvectors of size block_size (in MB) size_index_layer = bits_per_weight * W.numel() / 8 / 1024 / 1024 self.size_index += size_index_layer # size of the centroids stored in float16 (in MB) size_centroids_layer = n_centroids * block_size * 2 / 1024 / 1024 self.size_centroids += size_centroids_layer # size of non-compressed layers, e.g. LayerNorms or biases (in MB) size_uncompressed_layer = W.numel() * 4 / 1024 / 1024 self.size_non_quantized -= size_uncompressed_layer def __repr__(self): size_compressed = ( self.size_index + self.size_centroids + self.size_non_quantized ) compression_ratio = self.size_non_compressed_model / size_compressed # NOQA return ( f"Non-compressed model size: {self.size_non_compressed_model:.2f} MB. 
" f"After quantizing {self.n_quantized_layers} layers, size " f"(indexing + centroids + other): {self.size_index:.2f} MB + " f"{self.size_centroids:.2f} MB + {self.size_non_quantized:.2f} MB = " f"{size_compressed:.2f} MB, compression ratio: {compression_ratio:.2f}x" ) def attrsetter(*items): def resolve_attr(obj, attr): attrs = attr.split(".") head = attrs[:-1] tail = attrs[-1] for name in head: obj = getattr(obj, name) return obj, tail def g(obj, val): for attr in items: resolved_obj, resolved_attr = resolve_attr(obj, attr) setattr(resolved_obj, resolved_attr, val) return g
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/pq/utils.py
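A hypothetical end-to-end sketch of the quantize_model_ / SizeTracker utilities above applied to a toy model. The config values are illustrative, and it assumes the full pq package (including the PQ class imported at the top of the file) is installed.

import torch.nn as nn

from fairseq.modules.quantization.pq.utils import SizeTracker, quantize_model_

# Toy model; the layer names seen by get_layers() are "0" and "2".
model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 8))

layers_to_quantize = ["0|2"]                                  # one regexp per quantization stage
block_sizes_config = {"Linear": ("in_features", {"*": 8})}    # block size 8 for every Linear
n_centroids_config = {"Linear": ("in_features", {"*": 16})}   # 16 centroids per layer

size_tracker = SizeTracker(model)
quantized = quantize_model_(
    model,
    size_tracker,
    layers_to_quantize,
    block_sizes_config,
    n_centroids_config,
    step=0,
    n_iter=5,
)
print(quantized)     # layers that were replaced by PQLinear modules
print(size_tracker)  # compressed-size statistics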
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F class PQLinear(nn.Module): """ Quantized counterpart of nn.Linear module. Stores the centroid, the assignments and the non-quantized biases. The full weight is re-instantiated at each forward pass. Args: - centroids: centroids of size n_centroids x block_size - assignments: assignments of the centroids to the subvectors of size self.out_features x n_blocks - bias: the non-quantized bias Remarks: - We refer the reader to the official documentation of the nn.Linear module for the other arguments and the behavior of the module - Performance tests on GPU show that this implementation is 15% slower than the non-quantized nn.Linear module for a standard training loop. """ def __init__(self, centroids, assignments, bias, in_features, out_features): super(PQLinear, self).__init__() self.block_size = centroids.size(1) self.n_centroids = centroids.size(0) self.in_features = in_features self.out_features = out_features # check compatibility if self.in_features % self.block_size != 0: raise ValueError("Wrong PQ sizes") if len(assignments) % self.out_features != 0: raise ValueError("Wrong PQ sizes") # define parameters self.centroids = nn.Parameter(centroids, requires_grad=True) self.register_buffer("assignments", assignments) self.register_buffer("counts", torch.bincount(assignments).type_as(centroids)) if bias is not None: self.bias = nn.Parameter(bias) else: self.register_parameter("bias", None) @property def weight(self): return ( self.centroids[self.assignments] .reshape(-1, self.out_features, self.block_size) .permute(1, 0, 2) .flatten(1, 2) ) def forward(self, x): return F.linear( x, self.weight, self.bias, ) def extra_repr(self): return f"in_features={self.in_features},\ out_features={self.out_features},\ n_centroids={self.n_centroids},\ block_size={self.block_size},\ bias={self.bias is not None}"
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/pq/modules/qlinear.py
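A small sketch (illustrative sizes, random codebook) showing how the PQLinear module above rebuilds its weight of shape (out_features, in_features) from centroids and assignments; the sizes are chosen so its compatibility checks pass.

import torch

from fairseq.modules.quantization.pq.modules import PQLinear

in_features, out_features, block_size, n_centroids = 16, 8, 4, 6
n_blocks = (in_features // block_size) * out_features      # one assignment per subvector

centroids = torch.randn(n_centroids, block_size)
assignments = torch.randint(0, n_centroids, (n_blocks,))
bias = torch.zeros(out_features)

layer = PQLinear(centroids, assignments, bias, in_features, out_features)
x = torch.randn(2, in_features)
print(layer.weight.shape, layer(x).shape)   # torch.Size([8, 16]) torch.Size([2, 8])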
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.modules.utils import _pair class PQConv2d(nn.Module): """ Quantized counterpart of nn.Conv2d module. Stores the centroid, the assignments and the non-quantized biases. The full weight is re-instantiated at each forward pass and autograd automatically computes the gradients with respect to the centroids. Args: - centroids: centroids of size n_centroids x block_size - assignments: assignments of the centroids to the subvectors of size self.out_channels x n_blocks - bias: the non-quantized bias, must be either torch.Tensor or None Remarks: - We refer the reader to the official documentation of the nn.Conv2d module for the other arguments and the behavior of the module. - Performance tests on GPU show that this implementation is 10% slower than the non-quantized nn.Conv2d module for a standard training loop. - During the backward, the gradients are averaged by cluster and not summed. This explains the hook registered to the centroids. """ def __init__( self, centroids, assignments, bias, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode="zeros", ): super(PQConv2d, self).__init__() self.block_size = centroids.size(1) self.n_centroids = centroids.size(0) self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _pair(kernel_size) self.stride = _pair(stride) self.padding = _pair(padding) self.dilation = _pair(dilation) self.groups = groups self.padding_mode = padding_mode # check compatibility if in_channels // groups * np.prod(self.kernel_size) % self.block_size != 0: raise ValueError("Wrong PQ sizes") if len(assignments) % out_channels != 0: raise ValueError("Wrong PQ sizes") if in_channels % groups != 0: raise ValueError("in_channels must be divisible by groups") if out_channels % groups != 0: raise ValueError("out_channels must be divisible by groups") # define parameters self.centroids = nn.Parameter(centroids, requires_grad=True) self.register_buffer("assignments", assignments) self.register_buffer("counts", torch.bincount(assignments).type_as(centroids)) if bias is not None: self.bias = nn.Parameter(bias) else: self.register_parameter("bias", None) # register hook for averaging gradients per centroids instead of summing self.centroids.register_hook(lambda x: x / self.counts[:, None]) @property def weight(self): return ( self.centroids[self.assignments] .reshape(-1, self.out_channels, self.block_size) .permute(1, 0, 2) .reshape( self.out_channels, self.in_channels // self.groups, *self.kernel_size ) ) def forward(self, x): return F.conv2d( x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, ) def extra_repr(self): s = "{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}" if self.padding != (0,) * len(self.padding): s += ", padding={padding}" if self.dilation != (1,) * len(self.dilation): s += ", dilation={dilation}" if self.groups != 1: s += ", groups={groups}" if self.bias is None: s += ", bias=False" if self.padding_mode != "zeros": s += ", padding_mode={padding_mode}" s += ", n_centroids={n_centroids}, block_size={block_size}" return s.format(**self.__dict__)
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/pq/modules/qconv.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .qconv import PQConv2d # NOQA from .qemb import PQEmbedding # NOQA from .qlinear import PQLinear # NOQA
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/pq/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F class PQEmbedding(nn.Module): """ Quantized counterpart of nn.Embedding module. Stores the centroids and the assignments. The full weight is re-instantiated at each forward pass. Args: - centroids: centroids of size n_centroids x block_size - assignments: assignments of the centroids to the subvectors of size self.out_features x n_blocks - bias: the non-quantized bias Remarks: - We refer the reader to the official documentation of the nn.Embedding module for the other arguments and the behavior of the module - Performance tests on GPU show that this implementation is 10% slower than the non-quantized nn.Embedding module for a standard training loop. """ def __init__( self, centroids, assignments, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None, ): super(PQEmbedding, self).__init__() self.block_size = centroids.size(1) self.n_centroids = centroids.size(0) self.num_embeddings = num_embeddings self.embedding_dim = embedding_dim if padding_idx is not None: if padding_idx > 0: assert ( padding_idx < self.num_embeddings ), "Padding_idx must be within num_embeddings" elif padding_idx < 0: assert ( padding_idx >= -self.num_embeddings ), "Padding_idx must be within num_embeddings" padding_idx = self.num_embeddings + padding_idx self.padding_idx = padding_idx self.max_norm = max_norm self.norm_type = norm_type self.scale_grad_by_freq = scale_grad_by_freq self.sparse = sparse # check compatibility if self.embedding_dim % self.block_size != 0: raise ValueError("Wrong PQ sizes") if len(assignments) % self.num_embeddings != 0: raise ValueError("Wrong PQ sizes") # define parameters self.centroids = nn.Parameter(centroids, requires_grad=True) self.register_buffer("assignments", assignments) self.register_buffer("counts", torch.bincount(assignments).type_as(centroids)) @property def weight(self): return ( self.centroids[self.assignments] .reshape(-1, self.num_embeddings, self.block_size) .permute(1, 0, 2) .flatten(1, 2) ) def forward(self, input): return F.embedding( input, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ) def extra_repr(self): s = "{num_embeddings}, {embedding_dim}" if self.padding_idx is not None: s += ", padding_idx={padding_idx}" if self.max_norm is not None: s += ", max_norm={max_norm}" if self.norm_type != 2: s += ", norm_type={norm_type}" if self.scale_grad_by_freq is not False: s += ", scale_grad_by_freq={scale_grad_by_freq}" if self.sparse is not False: s += ", sparse=True" s += ", n_centroids={n_centroids}, block_size={block_size}" return s.format(**self.__dict__)
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/pq/modules/qemb.py
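The same kind of illustrative round-trip for PQEmbedding above: a random codebook and assignments sized to satisfy the module's checks, used to embed a small batch of token ids.

import torch

from fairseq.modules.quantization.pq.modules import PQEmbedding

num_embeddings, embedding_dim, block_size, n_centroids = 10, 8, 4, 5
n_blocks = (embedding_dim // block_size) * num_embeddings

centroids = torch.randn(n_centroids, block_size)
assignments = torch.randint(0, n_centroids, (n_blocks,))

emb = PQEmbedding(centroids, assignments, num_embeddings, embedding_dim, padding_idx=0)
tokens = torch.tensor([[1, 2, 0], [4, 3, 0]])
print(emb(tokens).shape)   # torch.Size([2, 3, 8])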
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .utils import quantize_model_ # NOQA
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/scalar/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch try: import torch.ao.quantization as quantization except ImportError: import torch.quantization as quantization def emulate_int(w, bits, method, scale=None, zero_point=None): q = globals()[f"emulate_int8_{method}"] return q(w, scale=scale, zero_point=zero_point, bits=bits) def quantize(w, scale, zero_point, bits=8): # In the default behavior, max_val = 255. max_val = 2**bits - 1 return ( torch.clamp(torch.round(w / scale + zero_point), 0, max_val) - zero_point ) * scale def emulate_int8_histogram(w, scale=None, zero_point=None, bits=8): if scale is None: obs = quantization.observer.HistogramObserver() obs.to(device=w.device) _ = obs(w.float()) scale, zero_point = obs.calculate_qparams() scale = scale.cuda().type_as(w) zero_point = zero_point.cuda().type_as(w) return quantize(w, scale, zero_point, bits=bits), scale, zero_point def emulate_int8_channel(w, scale=None, zero_point=None, bits=8): if scale is None: obs = quantization.observer.PerChannelMinMaxObserver( ch_axis=-1, qscheme=torch.per_channel_symmetric ) obs.to(device=w.device) _ = obs(w) scale, zero_point, ch_axis = obs.get_qparams() scale = scale.cuda().type_as(w) zero_point = zero_point.cuda().type_as(w) return quantize(w, scale, zero_point, bits=bits), scale, zero_point def emulate_int8_tensor(w, scale=None, zero_point=None, bits=8): if scale is None: obs = quantization.observer.MinMaxObserver() obs.to(device=w.device) _ = obs(w) scale, zero_point = obs.calculate_qparams() scale = scale.cuda().type_as(w) zero_point = zero_point.cuda().type_as(w) return quantize(w, scale, zero_point, bits=bits), scale, zero_point
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/scalar/ops.py
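A small numeric sketch of the fake-quantization formula in quantize() above: values are snapped to a uniform 2^bits grid and mapped back. The scale/zero_point here are computed by hand, because the emulate_int8_* helpers rely on torch observers and move their parameters to CUDA.

import torch

from fairseq.modules.quantization.scalar.ops import quantize

w = torch.tensor([-1.0, -0.5, 0.0, 0.5, 1.0])
bits = 8
scale = (w.max() - w.min()) / (2 ** bits - 1)       # hand-rolled asymmetric min/max scaling
zero_point = torch.round(-w.min() / scale)

w_q = quantize(w, scale, zero_point, bits=bits)
print(w_q)                      # values snapped to the 8-bit grid
print((w - w_q).abs().max())    # quantization error, roughly bounded by scale / 2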
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from operator import attrgetter import torch.distributed as dist import torch.nn as nn from ..pq.utils import attrsetter, get_layers from .modules import ActivationQuantizer, IntConv2d, IntEmbedding, IntLinear MAPPING = {nn.Linear: IntLinear, nn.Embedding: IntEmbedding, nn.Conv2d: IntConv2d} def quantize_model_( model, p=0.2, bits=8, update_step=3000, method="histogram", remove_weights=False ): """ Replaces all modules with their scalar quantized counterpart and registers hooks to quantize the post-ativations of those modules. Args: - model: a nn.Module - p: amount of noise (0 for no noise, 1 to quantize all the weights/activations) - bits: number of bits - update_step: update quantization parameters every update_step steps """ # quantize all layers # remove weights indicates whether the weights extension should be removed, in addition to # weight_orig and weight extension on names quantized_layers = get_layers(model, "(.*?)", remove_weights=remove_weights) for layer in quantized_layers: # book-keeping is_master_process = (not dist.is_initialized()) or ( dist.is_initialized() and dist.get_rank() == 0 ) # recover module module = attrgetter(layer)(model) if is_master_process: logging.info( f"Quantizing layer {layer} with bits={bits} and QuantNoise={p}" ) # quantization params q_params = { "p": p, "update_step": update_step, "bits": bits, "method": method, "counter": 0, } # instantiate the quantized counterpart if isinstance(module, tuple(MAPPING.keys())): QuantizedModule = MAPPING[module.__class__] quantized_module = QuantizedModule.__new__(QuantizedModule) params = module.__dict__ params.update(q_params) quantized_module.__dict__.update(params) else: if is_master_process: logging.info(f"Module {module} not yet supported for quantization") continue # activation quantization a_q = ActivationQuantizer(quantized_module, p=0, bits=bits, method=method) # replace layer by its quantized counterpart attrsetter(layer)(model, quantized_module) # return name of quantized layers return quantized_layers
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/scalar/utils.py
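A hypothetical sketch of the scalar quantize_model_ above on a toy model. Only the layer replacement is exercised here; running a forward pass through the converted model assumes a GPU, since the emulate_int helpers in ops.py move scale/zero_point to CUDA.

import torch.nn as nn

from fairseq.modules.quantization.scalar import quantize_model_

model = nn.Sequential(nn.Embedding(100, 16), nn.Linear(16, 16), nn.ReLU())
converted = quantize_model_(model, p=0.1, bits=8, update_step=1000, method="histogram")

print(converted)   # parameterized layers that were visited, e.g. ['0', '1']
print(model)       # Embedding/Linear replaced by IntEmbedding/IntLinear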
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from ..ops import emulate_int class IntLinear(nn.Module): """ Quantized counterpart of the nn.Linear module that applies QuantNoise during training. Args: - in_features: input features - out_features: output features - bias: bias or not - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights) - bits: number of bits - method: choose among {"tensor", "histogram", "channel"} - update_step: recompute scale and zero_point every update_steps iterations Remarks: - We use the straight-through estimator so that the gradients back-propagate nicely in the network, this is implemented with the detach() trick. - Parameters scale and zero_point are recomputed every update_step forward pass to reduce the overhead - At test time, the weights are fully quantized """ def __init__( self, in_features, out_features, bias=True, p=0, update_step=3000, bits=8, method="histogram", ): super(IntLinear, self).__init__() self.in_features = int(in_features) self.out_features = int(out_features) self.weight = torch.nn.Parameter(torch.Tensor(out_features, in_features)) self.chosen_bias = bias if self.chosen_bias: self.bias = torch.nn.Parameter(torch.Tensor(out_features)) else: self.register_parameter("bias", None) self.reset_parameters() # quantization parameters self.p = p self.bits = bits self.method = method self.update_step = update_step self.counter = 0 def reset_parameters(self): nn.init.xavier_uniform_(self.weight) if self.chosen_bias: nn.init.constant_(self.bias, 0.0) return def forward(self, input): # train with QuantNoise and evaluate the fully quantized network p = self.p if self.training else 1 # update parameters every 100 iterations if self.counter % self.update_step == 0: self.scale = None self.zero_point = None self.counter += 1 # quantize weight weight_quantized, self.scale, self.zero_point = emulate_int( self.weight.detach(), bits=self.bits, method=self.method, scale=self.scale, zero_point=self.zero_point, ) # mask to apply noise mask = torch.zeros_like(self.weight) mask.bernoulli_(1 - p) noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0) # using straight-through estimator (STE) clamp_low = -self.scale * self.zero_point clamp_high = self.scale * (2**self.bits - 1 - self.zero_point) weight = ( torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) + noise.detach() ) # return output output = F.linear(input, weight, self.bias) return output def extra_repr(self): return "in_features={}, out_features={}, bias={}, quant_noise={}, bits={}, method={}".format( self.in_features, self.out_features, self.bias is not None, self.p, self.bits, self.method, )
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/scalar/modules/qlinear.py
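A standalone sketch of the QuantNoise trick used in IntLinear.forward above: during training only a random subset of weights sees its quantized value, and the straight-through estimator keeps gradients flowing. It reuses quantize() from ops.py with hand-computed scale/zero_point so it runs on CPU; the real module computes these with observers and additionally clamps the full-precision weight to the quantization range.

import torch

from fairseq.modules.quantization.scalar.ops import quantize

torch.manual_seed(0)
weight = torch.randn(4, 4, requires_grad=True)
p, bits = 0.5, 8

scale = (weight.max() - weight.min()).detach() / (2 ** bits - 1)
zero_point = torch.round(-weight.min().detach() / scale)
weight_quantized = quantize(weight.detach(), scale, zero_point, bits=bits)

mask = torch.zeros_like(weight).bernoulli_(1 - p)            # 1 = keep full precision
noise = (weight_quantized - weight.detach()).masked_fill(mask.bool(), 0)
weight_noisy = weight + noise                                # STE: the noise carries no gradient

weight_noisy.sum().backward()
print(weight.grad)   # all ones: gradients pass straight through the injected noise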
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn.functional as F from torch.nn.modules.conv import _ConvNd from torch.nn.modules.utils import _pair from ..ops import emulate_int class IntConv2d(_ConvNd): """ Quantized counterpart of the nn.Conv2d module that applies QuantNoise during training. Args: - standard nn.Conv2d parameters - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights) - bits: number of bits - method: choose among {"tensor", "histogram", "channel"} - update_step: recompute scale and zero_point every update_steps iterations Remarks: - We use the straight-thgourh estimator so that the gradients back-propagate nicely in the network, this is implemented with the detach() trick - Parameters scale and zero_point are recomputed every update_step forward pass to reduce the overhead - At test time, the weights are fully quantized """ def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode="zeros", p=0, bits=8, method="histogram", update_step=1000, ): kernel_size = _pair(kernel_size) stride = _pair(stride) padding = _pair(padding) dilation = _pair(dilation) super(IntConv2d, self).__init__( in_channels, out_channels, kernel_size, stride, padding, dilation, False, _pair(0), groups, bias, padding_mode, ) # quantization parameters self.p = p self.bits = bits self.method = method self.update_step = update_step self.counter = 0 def _conv_forward(self, input, weight): if self.padding_mode != "zeros": return F.conv2d( F.pad(input, self._padding_repeated_twice, mode=self.padding_mode), weight, self.bias, self.stride, _pair(0), self.dilation, self.groups, ) return F.conv2d( input, weight, self.bias, self.stride, self.padding, self.dilation, self.groups, ) def forward(self, input): # train with QuantNoise and evaluate the fully quantized network p = self.p if self.training else 1 # update parameters every 100 iterations if self.counter % self.update_step == 0: self.scale = None self.zero_point = None self.counter += 1 # quantize weight weight_quantized, self.scale, self.zero_point = emulate_int( self.weight.detach(), bits=self.bits, method=self.method, scale=self.scale, zero_point=self.zero_point, ) # mask to apply noise mask = torch.zeros_like(self.weight) mask.bernoulli_(1 - p) noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0) # using straight-through estimator (STE) clamp_low = -self.scale * self.zero_point clamp_high = self.scale * (2**self.bits - 1 - self.zero_point) weight = ( torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) + noise.detach() ) # return output output = self._conv_forward(input, weight) return output def extra_repr(self): return ( "in_channels={}, out_channels={}, kernel_size={}, stride={}, " "padding={}, dilation={}, groups={}, bias={}, quant_noise={}, " "bits={}, method={}".format( self.in_channels, self.out_channels, self.kernel_size, self.stride, self.padding, self.dilation, self.groups, self.bias is not None, self.p, self.bits, self.method, ) )
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/scalar/modules/qconv.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .qact import ActivationQuantizer # NOQA from .qconv import IntConv2d # NOQA from .qemb import IntEmbedding # NOQA from .qlinear import IntLinear # NOQA
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/scalar/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from ..ops import emulate_int class IntEmbedding(nn.Module): """ Quantized counterpart of the nn.Embedding module that applies QuantNoise during training. Args: - num_embeddings: number of tokens - embedding_dim: embedding dimension - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights) - bits: number of bits - method: choose among {"tensor", "histogram", "channel"} - update_step: recompute scale and zero_point every update_steps iterations Remarks: - We use the straight-through estimator so that the gradients back-propagate nicely in the network, this is implemented with the detach() trick - Parameters scale and zero_point are recomputed every update_step forward pass to reduce the overhead - At test time, the weights are fully quantized """ def __init__( self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None, p=0, update_step=1000, bits=8, method="histogram", ): super(IntEmbedding, self).__init__() self.num_embeddings = num_embeddings self.embedding_dim = embedding_dim if padding_idx is not None: if padding_idx > 0: assert ( padding_idx < self.num_embeddings ), "Padding_idx must be within num_embeddings" elif padding_idx < 0: assert ( padding_idx >= -self.num_embeddings ), "Padding_idx must be within num_embeddings" padding_idx = self.num_embeddings + padding_idx self.padding_idx = padding_idx self.max_norm = max_norm self.norm_type = norm_type self.scale_grad_by_freq = scale_grad_by_freq if _weight is None: self.weight = nn.Parameter(torch.Tensor(num_embeddings, embedding_dim)) self.reset_parameters() else: assert list(_weight.shape) == [ num_embeddings, embedding_dim, ], "Shape of weight does not match num_embeddings and embedding_dim" self.weight = nn.Parameter(_weight) self.sparse = sparse # quantization parameters self.p = p self.bits = bits self.method = method self.update_step = update_step self.counter = 0 def reset_parameters(self): nn.init.normal_(self.weight) if self.padding_idx is not None: with torch.no_grad(): self.weight[self.padding_idx].fill_(0) def forward(self, input): # train with QuantNoise and evaluate the fully quantized network p = self.p if self.training else 1 # update parameters every 1000 iterations if self.counter % self.update_step == 0: self.scale = None self.zero_point = None self.counter += 1 # quantize weight weight_quantized, self.scale, self.zero_point = emulate_int( self.weight.detach(), bits=self.bits, method=self.method, scale=self.scale, zero_point=self.zero_point, ) # mask to apply noise mask = torch.zeros_like(self.weight) mask.bernoulli_(1 - p) noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0) # using straight-through estimator (STE) clamp_low = -self.scale * self.zero_point clamp_high = self.scale * (2**self.bits - 1 - self.zero_point) weight = ( torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) + noise.detach() ) # return output output = F.embedding( input, weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ) return output def extra_repr(self): s = "{num_embeddings}, {embedding_dim}" if self.padding_idx is not None: s += ", padding_idx={padding_idx}" if self.max_norm is not None: s += ", max_norm={max_norm}" if 
self.norm_type != 2: s += ", norm_type={norm_type}" if self.scale_grad_by_freq is not False: s += ", scale_grad_by_freq={scale_grad_by_freq}" if self.sparse is not False: s += ", sparse=True" s += ", quant_noise={p}, bits={bits}, method={method}" return s.format(**self.__dict__)
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/scalar/modules/qemb.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from ..ops import emulate_int class ActivationQuantizer: """ Fake scalar quantization of the activations using a forward hook. Args: - module. a nn.Module for which we quantize the *post-activations* - p: proportion of activations to quantize, set by default to 1 - update_step: to recompute quantization parameters - bits: number of bits for quantization - method: choose among {"tensor", "histogram", "channel"} - clamp_threshold: to prevent gradients overflow Remarks: - Parameters scale and zero_point are recomputed every update_step forward pass to reduce the overhead - For the list of quantization methods and number of bits, see ops.py - To remove the hook from the module, simply call self.handle.remove() - At test time, the activations are fully quantized - We use the straight-through estimator so that the gradients back-propagate nicely in the network, this is implemented with the detach() trick - The activations are hard-clamped in [-clamp_threshold, clamp_threshold] to prevent overflow during the backward pass """ def __init__( self, module, p=1, update_step=1000, bits=8, method="histogram", clamp_threshold=5, ): self.module = module self.p = p self.update_step = update_step self.counter = 0 self.bits = bits self.method = method self.clamp_threshold = clamp_threshold self.handle = None self.register_hook() def register_hook(self): # forward hook def quantize_hook(module, x, y): # update parameters every 1000 iterations if self.counter % self.update_step == 0: self.scale = None self.zero_point = None self.counter += 1 # train with QuantNoise and evaluate the fully quantized network p = self.p if self.module.training else 1 # quantize activations y_q, self.scale, self.zero_point = emulate_int( y.detach(), bits=self.bits, method=self.method, scale=self.scale, zero_point=self.zero_point, ) # mask to apply noise mask = torch.zeros_like(y) mask.bernoulli_(1 - p) noise = (y_q - y).masked_fill(mask.bool(), 0) # using straight-through estimator (STE) clamp_low = -self.scale * self.zero_point clamp_high = self.scale * (2**self.bits - 1 - self.zero_point) return torch.clamp(y, clamp_low.item(), clamp_high.item()) + noise.detach() # register hook self.handle = self.module.register_forward_hook(quantize_hook)
EXA-1-master
exa/libraries/fairseq/fairseq/modules/quantization/scalar/modules/qact.py
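A hypothetical sketch of attaching the ActivationQuantizer above to fake-quantize the outputs of a linear layer via its forward hook. The forward pass is guarded because the scale/zero_point produced in ops.py are moved to CUDA.

import torch
import torch.nn as nn

from fairseq.modules.quantization.scalar.modules import ActivationQuantizer

layer = nn.Linear(8, 8)
quantizer = ActivationQuantizer(layer, p=1, bits=8, method="histogram")

if torch.cuda.is_available():
    layer.cuda()
    y = layer(torch.randn(4, 8, device="cuda"))   # outputs fake-quantized by the hook
    print(y.shape)

quantizer.handle.remove()   # detach the hook when it is no longer needed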
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. def gen_forward(): kernels = [3, 5, 7, 15, 31, 63, 127, 255] seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]] head = """ /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include "lightconv_cuda.cuh" std::vector<at::Tensor> lightconv_cuda_forward(at::Tensor input, at::Tensor filters, int padding_l) { at::DeviceGuard g(input.device()); const auto minibatch = input.size(0); const auto numFeatures = input.size(1); const auto sequenceLength = input.size(2); const auto numHeads = filters.size(0); const auto filterSize = filters.size(1); const auto numFiltersInBlock = numFeatures / numHeads; const dim3 blocks(minibatch, numFeatures); auto output = at::zeros_like(input); auto stream = at::cuda::getCurrentCUDAStream(); """ sequence_if = """ if (sequenceLength <= {seq}) {{ switch(filterSize) {{ """ case_k = """ case {k}: """ main_block = """ if (padding_l == {pad}) {{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_forward", ([&] {{ lightconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t> <<<blocks, {b_size}, 0, stream>>>( input.data<scalar_t>(), filters.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, output.data<scalar_t>()); }})); }} else """ bad_padding = """ { std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl; } break; """ bad_filter = """ default: std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl; } """ con_else = """ } else """ final_else = """ { switch(filterSize) { """ final_return = """ } return {output}; } """ with open("lightconv_cuda_forward.cu", "w") as forward: forward.write(head) for seq in seqs: forward.write(sequence_if.format(seq=seq)) for k in kernels: forward.write(case_k.format(k=k)) for pad in [k // 2, k - 1]: forward.write(main_block.format(k=k, b_size=seq, pad=pad)) forward.write(bad_padding) forward.write(bad_filter) forward.write(con_else) forward.write(final_else) for k in kernels: forward.write(case_k.format(k=k)) for pad in [k // 2, k - 1]: forward.write(main_block.format(k=k, b_size=seq, pad=pad)) forward.write(bad_padding) forward.write(bad_filter) forward.write(final_return) def gen_backward(): head = """ /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. 
*/ #include "lightconv_cuda.cuh" std::vector<at::Tensor> lightconv_cuda_backward( at::Tensor gradOutput, int padding_l, at::Tensor input, at::Tensor filters) { // gradWrtInput const int minibatch = input.size(0); const int numFeatures = input.size(1); const int sequenceLength = input.size(2); const int numHeads = filters.size(0); const int filterSize = filters.size(1); const dim3 gradBlocks(minibatch, numFeatures); const dim3 weightGradFirstpassShortBlocks(minibatch, numHeads); const dim3 weightGradSecondpassBlocks(numHeads, filterSize); const int numFiltersInBlock = numFeatures / numHeads; auto gradInput = at::zeros_like(input); auto gradFilters = at::zeros_like(filters); at::DeviceGuard g(input.device()); auto stream = at::cuda::getCurrentCUDAStream(); switch(filterSize) { """ sequence_if = """ if (sequenceLength <= {seq}) {{ """ case_k = """ case {k}: """ main_block = """ if (padding_l == {p}) {{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_backward", ([&] {{ lightconv_grad_wrt_input_kernel<{k}, {b_size}, {p}, scalar_t> <<<gradBlocks, {b_size}, 0, stream>>>( gradOutput.data<scalar_t>(), filters.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, gradInput.data<scalar_t>()); """ weight_grad_short = """ at::Tensor tempSumGradFilters = at::zeros({{minibatch, numHeads, filterSize}}, input.options().dtype(at::kFloat)); lightconv_grad_wrt_weights_firstpass_short_kernel<{k}, {b_size}, {p}, scalar_t> <<<weightGradFirstpassShortBlocks, {b_size}, 0, stream>>>( input.data<scalar_t>(), gradOutput.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, numHeads, tempSumGradFilters.data<float>() ); lightconv_grad_wrt_weights_secondpass_short_kernel<{k}, {b_size}, scalar_t> <<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>( tempSumGradFilters.data<float>(), minibatch, numFiltersInBlock, gradFilters.data<scalar_t>() ); }})); }} else """ weight_grad = """ at::Tensor tempSumGradFilters = at::zeros({{minibatch, numFeatures, filterSize}}, input.options().dtype(at::kFloat)); lightconv_grad_wrt_weights_firstpass_kernel<{k}, {b_size}, {p}, scalar_t> <<<gradBlocks, {b_size}, 0, stream>>>( input.data<scalar_t>(), gradOutput.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, tempSumGradFilters.data<float>() ); lightconv_grad_wrt_weights_secondpass_kernel<{k}, {b_size}, scalar_t> <<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>( tempSumGradFilters.data<float>(), minibatch, numFiltersInBlock, gradFilters.data<scalar_t>() ); }})); }} else """ bad_padding = """ { std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl; } """ breakout = """ break; """ bad_filter = """ default: std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl; """ con_else = """ } else """ final_else = """ { switch(filterSize) { """ last_return = """ } return {gradInput, gradFilters}; } """ kernels = [3, 5, 7, 15, 31, 63, 127, 255] seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]] thresh = [32, 32, 64, 128, 256, -1, -1, -1] max_mem = [-1, -1, -1, -1, -1, 192, 96, 64] with open("lightconv_cuda_backward.cu", "w") as backward: backward.write(head) for (k, t, mem) in zip(kernels, thresh, max_mem): backward.write(case_k.format(k=k)) for seq in seqs: if (t == -1 or seq <= t) and (mem == -1 or seq < mem): backward.write(sequence_if.format(seq=seq)) for p in [k // 2, k - 1]: backward.write(main_block.format(k=k, b_size=seq, p=p)) 
backward.write(weight_grad_short.format(k=k, b_size=seq, p=p)) backward.write(bad_padding) else: for p in [k // 2, k - 1]: backward.write(main_block.format(k=k, b_size=32, p=p)) backward.write(weight_grad.format(k=k, b_size=32, p=p)) backward.write(bad_padding) backward.write(breakout) break backward.write(con_else) backward.write(bad_filter) backward.write(last_return) if __name__ == "__main__": gen_forward() gen_backward()
EXA-1-master
exa/libraries/fairseq/fairseq/modules/lightconv_layer/cuda_function_gen.py
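The generator above is meant to be run as a script from the lightconv_layer directory so that the specialized CUDA sources exist before the extension is built; a minimal invocation sketch (the directory path is an assumption based on the file_path column):

import os
import subprocess

gen_dir = "fairseq/modules/lightconv_layer"   # assumed checkout-relative location
subprocess.run(["python", "cuda_function_gen.py"], cwd=gen_dir, check=True)
print(sorted(f for f in os.listdir(gen_dir) if f.endswith(".cu")))
# expected to include lightconv_cuda_forward.cu and lightconv_cuda_backward.cu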
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .lightconv_layer import LightconvLayer # noqa
EXA-1-master
exa/libraries/fairseq/fairseq/modules/lightconv_layer/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import lightconv_cuda import torch import torch.nn.functional as F from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.fairseq_dropout import FairseqDropout from torch import nn from torch.autograd import Function class lightconvFunction(Function): @staticmethod def forward(ctx, x, weights, padding_l): ctx.padding_l = padding_l outputs = lightconv_cuda.forward(x, weights, padding_l) variables = [x, weights] ctx.save_for_backward(*variables) return outputs[0] @staticmethod def backward(ctx, grad_output): outputs = lightconv_cuda.backward( grad_output.contiguous(), ctx.padding_l, *ctx.saved_tensors ) grad_input, grad_weights = outputs return grad_input, grad_weights, None @with_incremental_state class LightconvLayer(nn.Module): def __init__( self, input_size, kernel_size=1, padding_l=None, weight_softmax=False, num_heads=1, weight_dropout=0.0, bias=False, ): super(LightconvLayer, self).__init__() self.input_size = input_size self.kernel_size = kernel_size self.padding_l = padding_l self.num_heads = num_heads self.weight_softmax = weight_softmax self.weight_dropout_module = FairseqDropout( weight_dropout, module_name=self.__class__.__name__ ) self.weight = nn.Parameter(torch.Tensor(num_heads, kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(input_size)) else: self.bias = None self.reset_parameters() def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." if name != "" else "" for k, v in state_dict.items(): if k.endswith(prefix + "weight"): if v.dim() == 3 and v.size(1) == 1: state_dict[k] = v.squeeze(1) def reset_parameters(self): nn.init.xavier_uniform_(self.weight) if self.bias is not None: nn.init.constant_(self.bias, 0.0) def forward(self, x, incremental_state=None): # during inference time, incremental BMM is faster if incremental_state is not None: T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer( incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :] ) x_unfold = x_unfold.view(T * B * H, R, -1) weight = self.weight if self.weight_softmax: weight = F.softmax(weight.float(), dim=1).type_as(weight) weight = weight[:, -x_unfold.size(2) :] K = weight.size(1) weight = ( weight.view(1, H, K) .expand(T * B, H, K) .contiguous() .view(T * B * H, K, 1) ) weight = self.weight_dropout_module(weight) output = torch.bmm(x_unfold, weight) # T*B*H x R x 1 output = output.view(T, B, C) return output # during training time, use CUDA kernel else: x = x.permute(1, 2, 0).contiguous() weight = self.weight if self.weight_softmax: weight = F.softmax(self.weight, -1) if self.weight_dropout_module.p: weight = self.weight_dropout_module(weight) return lightconvFunction.apply(x, weight, self.padding_l).permute(2, 0, 1) def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state(self, incremental_state, "input_buffer") def 
_set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state( self, incremental_state, "input_buffer", new_buffer ) def half(self): return self._apply(lambda t: t.half() if t.is_floating_point() else t)
EXA-1-master
exa/libraries/fairseq/fairseq/modules/lightconv_layer/lightconv_layer.py
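A usage sketch for LightconvLayer above. It assumes the lightconv_cuda extension from the accompanying setup script has been built and a GPU is available, so both conditions are guarded to keep the snippet runnable elsewhere.

import torch

try:
    from fairseq.modules.lightconv_layer import LightconvLayer

    if torch.cuda.is_available():
        T, B, C, H, K = 20, 2, 64, 8, 7
        layer = LightconvLayer(
            C, kernel_size=K, padding_l=K - 1, weight_softmax=True, num_heads=H
        ).cuda()
        x = torch.randn(T, B, C, device="cuda")   # (time, batch, channels)
        y = layer(x)                              # training path: custom CUDA kernel
        print(y.shape)                            # torch.Size([20, 2, 64])
except ImportError:
    print("lightconv_cuda extension not built; skipping the example")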
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from setuptools import setup from torch.utils.cpp_extension import BuildExtension, CUDAExtension setup( name="lightconv_layer", ext_modules=[ CUDAExtension( "lightconv_cuda", [ "lightconv_cuda.cpp", "lightconv_cuda_kernel.cu", ], ), ], cmdclass={"build_ext": BuildExtension}, )
EXA-1-master
exa/libraries/fairseq/fairseq/modules/lightconv_layer/setup.py
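Once the extension declared in the setup script above has been compiled (for example with `python setup.py build_ext --inplace` or `pip install .` run from that directory, with CUDA available), the module name it registers should be importable; a quick smoke test:

import importlib.util

spec = importlib.util.find_spec("lightconv_cuda")
print("lightconv_cuda available:", spec is not None)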
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. def gen_forward(): kernels = [3, 5, 7, 15, 31, 63, 127, 255] blocks = [32, 64, 128, 256] head = """ /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include "dynamicconv_cuda.cuh" std::vector<at::Tensor> dynamicconv_cuda_forward(at::Tensor input, at::Tensor weight, int padding_l) { at::DeviceGuard g(input.device()); const auto minibatch = input.size(0); const auto numFeatures = input.size(1); const auto sequenceLength = input.size(2); const auto numHeads = weight.size(1); const auto filterSize = weight.size(2); const auto numFiltersInBlock = numFeatures / numHeads; const dim3 blocks(minibatch, numFeatures); auto output = at::zeros_like(input); auto stream = at::cuda::getCurrentCUDAStream(); """ switch = """ switch(filterSize) { """ case_k = """ case {k}: """ main_block = """ if (padding_l == {pad}) {{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "dynamicconv_forward", ([&] {{ dynamicconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t> <<<blocks, {b_size}, 0, stream>>>( input.data<scalar_t>(), weight.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, numHeads, output.data<scalar_t>()); }})); }} else """ bad_padding = """ { std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl; } break;\n """ end = """ default: std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl; } return {output}; } """ with open("dynamicconv_cuda_forward.cu", "w") as forward: forward.write(head) forward.write(switch) for k in kernels: b_size = 32 for b in blocks: if b > k: b_size = b break forward.write(case_k.format(k=k)) for pad in [k // 2, k - 1]: forward.write(main_block.format(k=k, b_size=b_size, pad=pad)) forward.write(bad_padding) forward.write(end) def gen_backward(): kernels = [3, 5, 7, 15, 31, 63, 127, 255] thresh = [512, 512, 512, 512, 512, 380, 256, 256] min_block = [64, 64, 64, 64, 64, 64, 128, 256] seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]] head = """ /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. 
*/ #include "dynamicconv_cuda.cuh" std::vector<at::Tensor> dynamicconv_cuda_backward(at::Tensor gradOutput, int padding_l, at::Tensor input, at::Tensor weight) { at::DeviceGuard g(input.device()); const auto minibatch = input.size(0); const auto numFeatures = input.size(1); const auto sequenceLength = input.size(2); const auto numHeads = weight.size(1); const auto filterSize = weight.size(2); const auto numFiltersInBlock = numFeatures / numHeads; auto numChunks = 1; auto gradInput = at::zeros_like(input); auto gradWeight = at::zeros_like(weight); auto stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(minibatch, numHeads, numChunks); """ sequence_if = """ if (sequenceLength < {seq}) {{ switch(filterSize) {{ """ case_k = """ case {k}: """ chunks_reset = """ numChunks = int(ceilf(sequenceLength/float({b_size}))); blocks = dim3(minibatch, numHeads, numChunks); """ main_block = """ if (padding_l == {p}) {{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {{ dynamicconv_backward_kernel<{k}, {b_size}, {p}, scalar_t> <<<blocks, {b_size}, 0, stream>>>( gradOutput.data<scalar_t>(), input.data<scalar_t>(), weight.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, numHeads, gradWeight.data<scalar_t>(), gradInput.data<scalar_t>()); }})); }} else """ bad_padding = """ { std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl; } break;\n """ bad_filter = """ default: std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl; } """ con_else = """ } else """ final_else = """ { switch(filterSize) { """ last_return = """ } return {gradInput, gradWeight}; } """ with open("dynamicconv_cuda_backward.cu", "w") as backward: backward.write(head) for seq in seqs: backward.write(sequence_if.format(seq=seq)) for k, t, m in zip(kernels, thresh, min_block): backward.write(case_k.format(k=k)) if seq <= t: b_size = seq else: b_size = m backward.write(chunks_reset.format(b_size=b_size)) for p in [k // 2, k - 1]: backward.write(main_block.format(k=k, b_size=b_size, p=p)) backward.write(bad_padding) backward.write(bad_filter) backward.write(con_else) backward.write(final_else) for k, m in zip(kernels, min_block): backward.write(case_k.format(k=k)) backward.write(chunks_reset.format(b_size=m)) for p in [k // 2, k - 1]: backward.write(main_block.format(k=k, b_size=m, p=p)) backward.write(bad_padding) backward.write(bad_filter) backward.write(last_return) if __name__ == "__main__": gen_forward() gen_backward()
EXA-1-master
exa/libraries/fairseq/fairseq/modules/dynamicconv_layer/cuda_function_gen.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .dynamicconv_layer import DynamicconvLayer # noqa
EXA-1-master
exa/libraries/fairseq/fairseq/modules/dynamicconv_layer/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import dynamicconv_cuda import torch import torch.nn.functional as F from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.unfold import unfold1d from torch import nn from torch.autograd import Function class dynamicconvFunction(Function): @staticmethod def forward(ctx, x, weights, padding_l): ctx.padding_l = padding_l outputs = dynamicconv_cuda.forward(x, weights, padding_l) variables = [x, weights] ctx.save_for_backward(*variables) return outputs[0] @staticmethod def backward(ctx, grad_output): outputs = dynamicconv_cuda.backward( grad_output.contiguous(), ctx.padding_l, *ctx.saved_tensors ) grad_input, grad_weights = outputs return grad_input, grad_weights, None @with_incremental_state class DynamicconvLayer(nn.Module): def __init__( self, input_size, kernel_size=1, padding_l=None, weight_softmax=False, num_heads=1, weight_dropout=0.0, bias=False, renorm_padding=False, conv_bias=False, query_size=None, ): super(DynamicconvLayer, self).__init__() self.input_size = input_size self.query_size = input_size if query_size is None else query_size self.kernel_size = kernel_size self.padding_l = padding_l self.num_heads = num_heads self.weight_softmax = weight_softmax self.weight_dropout_module = FairseqDropout( weight_dropout, module_name=self.__class__.__name__ ) self.renorm_padding = renorm_padding self.bias = bias self.weight_linear = nn.Linear(input_size, num_heads * kernel_size, bias) if conv_bias: self.conv_bias = nn.Parameter(torch.Tensor(input_size)) else: self.conv_bias = None self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.weight_linear.weight) if self.conv_bias is not None: nn.init.constant_(self.conv_bias, 0.0) nn.init.constant_(self.weight_linaer.bias, 0.0) def forward(self, x, incremental_state=None, query=None, unfold=None): T, B, C = x.size() K, H = self.kernel_size, self.num_heads # R = C // H # during inference time, incremental BMM is faster if incremental_state is not None: unfold = ( x.size(0) > 512 if unfold is None else unfold ) # use unfold mode as default for long sequence to save memory unfold = unfold or (incremental_state is not None) assert query is None if query is None: query = x if unfold: output = self._forward_unfolded(x, incremental_state, query) else: output = self._forward_expanded(x, incremental_state, query) if self.conv_bias is not None: output = output + self.conv_bias.view(1, 1, -1) return output # during training time, use CUDA kernel else: weight = self.weight_linear(x).view(T, B, H, K) if self.weight_softmax: weight = F.softmax(weight, dim=-1) if self.weight_dropout_module.p: weight = self.weight_dropout_module(weight) weight = weight.permute(1, 2, 3, 0).contiguous() self.filters = weight x = x.permute(1, 2, 0).contiguous() output = dynamicconvFunction.apply(x, weight, self.padding_l).permute( 2, 0, 1 ) if self.conv_bias is not None: output = output + self.conv_bias.view(1, 1, -1) return output def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return 
utils.get_incremental_state(self, incremental_state, "input_buffer") def _set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state( self, incremental_state, "input_buffer", new_buffer ) def _forward_unfolded(self, x, incremental_state, query): """The conventional implementation of convolutions. Unfolding the input by having a window shifting to the right.""" T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size weight = self.weight_linear(query).view(T * B * H, -1) # renorm_padding is only implemented in _forward_expanded assert not self.renorm_padding or incremental_state is not None if incremental_state is not None: input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer( incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :] ) x_unfold = x_unfold.view(T * B * H, R, -1) else: padding_l = self.padding_l if K > T and padding_l == K - 1: weight = weight.narrow(1, K - T, T) K, padding_l = T, T - 1 # unfold the input: T x B x C --> T' x B x C x K x_unfold = unfold1d(x, K, padding_l, 0) x_unfold = x_unfold.view(T * B * H, R, K) if self.weight_softmax and not self.renorm_padding: weight = F.softmax(weight, dim=1) weight = weight.narrow(1, 0, K) if incremental_state is not None: weight = weight[:, -x_unfold.size(2) :] K = weight.size(1) if self.weight_softmax and self.renorm_padding: weight = F.softmax(weight, dim=1) weight = self.weight_dropout_module(weight, inplace=False) output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1 output = output.view(T, B, C) return output def _forward_expanded(self, x, incremental_stat, query): """Turn the convolution filters into band matrices and do matrix multiplication. This is faster when the sequence is short, but less memory efficient. This is not used in the decoder during inference. 
""" T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size weight = self.weight_linear(query).view(T * B * H, -1) if not self.renorm_padding: if self.weight_softmax: weight = F.softmax(weight, dim=1) weight = self.weight_dropout_module(weight, inplace=False) weight = weight.narrow(1, 0, K).contiguous() weight = weight.view(T, B * H, K).transpose(0, 1) x = x.view(T, B * H, R).transpose(0, 1) if self.weight_softmax and self.renorm_padding: # turn the convolution filters into band matrices weight_expanded = weight.new(B * H, T, T + K - 1).fill_(float("-inf")) weight_expanded.as_strided( (B * H, T, K), (T * (T + K - 1), T + K, 1) ).copy_(weight) weight_expanded = weight_expanded.narrow(2, self.padding_l, T) # normalize the weight over valid positions like self-attention weight_expanded = F.softmax(weight_expanded, dim=2) weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False) else: P = self.padding_l # For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length if K > T and P == K - 1: weight = weight.narrow(2, K - T, T) K, P = T, T - 1 # turn the convolution filters into band matrices weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False) weight_expanded.as_strided( (B * H, T, K), (T * (T + K - 1), T + K, 1) ).copy_(weight) weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T output = torch.bmm(weight_expanded, x) output = output.transpose(0, 1).contiguous().view(T, B, C) return output
EXA-1-master
exa/libraries/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_layer.py
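A minimal usage sketch for the DynamicconvLayer defined above, not part of the repository dump: it assumes the dynamicconv_cuda extension has been built (see the setup.py row that follows) and that a CUDA device is available, since the training-time path dispatches to the custom kernel. The shapes and hyperparameters are made up for illustration.

import torch
from fairseq.modules.dynamicconv_layer.dynamicconv_layer import DynamicconvLayer

T, B, C, H, K = 20, 2, 64, 8, 3  # time, batch, channels, heads, kernel size (illustrative)
layer = DynamicconvLayer(
    input_size=C,
    kernel_size=K,
    padding_l=K - 1,       # left-only padding, as in a causal decoder layer
    num_heads=H,
    weight_softmax=True,
).cuda()

x = torch.randn(T, B, C, device="cuda")  # (time, batch, channels)
y = layer(x)                             # no incremental state -> CUDA kernel path
print(y.shape)                           # torch.Size([20, 2, 64])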
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name="dynamicconv_layer",
    ext_modules=[
        CUDAExtension(
            name="dynamicconv_cuda",
            sources=[
                "dynamicconv_cuda.cpp",
                "dynamicconv_cuda_kernel.cu",
            ],
        ),
    ],
    cmdclass={"build_ext": BuildExtension},
)
EXA-1-master
exa/libraries/fairseq/fairseq/modules/dynamicconv_layer/setup.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import BaseWrapperDataset


class OffsetTokensDataset(BaseWrapperDataset):
    def __init__(self, dataset, offset):
        super().__init__(dataset)
        self.offset = offset

    def __getitem__(self, idx):
        return self.dataset[idx] + self.offset
EXA-1-master
exa/libraries/fairseq/fairseq/data/offset_tokens_dataset.py
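A small usage sketch for the OffsetTokensDataset above, added for illustration and not part of the dump. It leans on fairseq's ListDataset wrapper and made-up label ids; the typical use is shifting label ids down by the number of special symbols in a label dictionary.

import torch
from fairseq.data import ListDataset
from fairseq.data.offset_tokens_dataset import OffsetTokensDataset

# label ids as a label dictionary might produce them, with real labels starting at index 4
labels = ListDataset([torch.tensor([4]), torch.tensor([6]), torch.tensor([5])])
shifted = OffsetTokensDataset(labels, offset=-4)  # first real label becomes 0
print(shifted[1])  # tensor([2])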
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import OrderedDict import torch from torch.utils.data.dataloader import default_collate from . import FairseqDataset def _flatten(dico, prefix=None): """Flatten a nested dictionary.""" new_dico = OrderedDict() if isinstance(dico, dict): prefix = prefix + "." if prefix is not None else "" for k, v in dico.items(): if v is None: continue new_dico.update(_flatten(v, prefix + k)) elif isinstance(dico, list): for i, v in enumerate(dico): new_dico.update(_flatten(v, prefix + ".[" + str(i) + "]")) else: new_dico = OrderedDict({prefix: dico}) return new_dico def _unflatten(dico): """Unflatten a flattened dictionary into a nested dictionary.""" new_dico = OrderedDict() for full_k, v in dico.items(): full_k = full_k.split(".") node = new_dico for k in full_k[:-1]: if k.startswith("[") and k.endswith("]"): k = int(k[1:-1]) if k not in node: node[k] = OrderedDict() node = node[k] node[full_k[-1]] = v return new_dico class NestedDictionaryDataset(FairseqDataset): def __init__(self, defn, sizes=None): super().__init__() self.defn = _flatten(defn) self.sizes = [sizes] if not isinstance(sizes, (list, tuple)) else sizes first = None for v in self.defn.values(): if not isinstance( v, ( FairseqDataset, torch.utils.data.Dataset, ), ): raise ValueError("Expected Dataset but found: {}".format(v.__class__)) first = first or v if len(v) > 0: assert len(v) == len(first), "dataset lengths must match" self._len = len(first) def __getitem__(self, index): return OrderedDict((k, ds[index]) for k, ds in self.defn.items()) def __len__(self): return self._len def collater(self, samples): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate Returns: dict: a mini-batch suitable for forwarding with a Model """ if len(samples) == 0: return {} sample = OrderedDict() for k, ds in self.defn.items(): try: sample[k] = ds.collater([s[k] for s in samples]) except NotImplementedError: sample[k] = default_collate([s[k] for s in samples]) return _unflatten(sample) def num_tokens(self, index): """Return the number of tokens in a sample. This value is used to enforce ``--max-tokens`` during batching.""" return max(s[index] for s in self.sizes) def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" if len(self.sizes) == 1: return self.sizes[0][index] else: return (s[index] for s in self.sizes) @property def supports_prefetch(self): """Whether this dataset supports prefetching.""" return any(ds.supports_prefetch for ds in self.defn.values()) def prefetch(self, indices): """Prefetch the data required for this epoch.""" for ds in self.defn.values(): if getattr(ds, "supports_prefetch", False): ds.prefetch(indices) @property def can_reuse_epoch_itr_across_epochs(self): return all(ds.can_reuse_epoch_itr_across_epochs for ds in self.defn.values()) def set_epoch(self, epoch): super().set_epoch(epoch) for ds in self.defn.values(): ds.set_epoch(epoch)
EXA-1-master
exa/libraries/fairseq/fairseq/data/nested_dictionary_dataset.py
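An illustrative composition with the NestedDictionaryDataset above, not part of the dump. It assumes fairseq's ListDataset and RightPadDataset helpers (defined elsewhere in the library) and uses pad index 1, the default in fairseq dictionaries; IdDataset and NumSamplesDataset appear later in this same dump.

import torch
from fairseq.data import (
    IdDataset,
    ListDataset,
    NestedDictionaryDataset,
    NumSamplesDataset,
    RightPadDataset,
)

tokens = ListDataset([torch.tensor([5, 6, 7]), torch.tensor([8, 9])], sizes=[3, 2])
dataset = NestedDictionaryDataset(
    {
        "id": IdDataset(),
        "net_input": {"src_tokens": RightPadDataset(tokens, pad_idx=1)},
        "nsentences": NumSamplesDataset(),
    },
    sizes=[tokens.sizes],
)
print(list(dataset[0].keys()))  # ['id', 'net_input.src_tokens', 'nsentences']
batch = dataset.collater([dataset[0], dataset[1]])
print(batch["net_input"]["src_tokens"])  # padded 2 x 3 tensor
print(batch["nsentences"])               # 2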
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import asyncio import logging import time from collections import OrderedDict from typing import Dict, List, Optional import numpy as np from fairseq.data import data_utils from . import FairseqDataset logger = logging.getLogger(__name__) class MultiCorpusDataset(FairseqDataset): """ Stores multiple instances of FairseqDataset together. Unless batch_sample=True, requires each instance to be the same dataset, as the collate method needs to work on batches with samples from each dataset. Allows specifying a distribution over the datasets to use. Note that unlike MultiCorpusSampledDataset, this distribution allows sampling for each item, rather than on a batch level. Note that datasets with sampling probabilty of 0 will be skipped. Each time ordered_indices() is called, a new sample is generated with the specified distribution. Args: datasets: a OrderedDict of FairseqDataset instances. distribution: a List containing the probability of getting an utterance from corresponding dataset seed: random seed for sampling the datsets sort_indices: if true, will sort the ordered indices by size batch_sample: if true, will ensure each batch is from a single dataset """ def __init__( self, datasets: Dict[str, FairseqDataset], distribution: List[float], seed: int, sort_indices: bool = False, batch_sample: bool = False, distributed_rank: Optional[int] = None, ): super().__init__() assert isinstance(datasets, OrderedDict) assert len(datasets) == len(distribution) assert sum(distribution) == 1 self.datasets = datasets self.distribution = distribution self.seed = seed self.sort_indices = sort_indices self.batch_sample = batch_sample self.distributed_rank = distributed_rank # Avoid repeated conversions to list later self.dataset_list = list(datasets.values()) self.total_num_instances = 0 first_dataset = self.dataset_list[0] self.num_instances_per_dataset = [] self.dataset_offsets = [] for i, dataset in enumerate(self.dataset_list): assert isinstance(dataset, FairseqDataset) assert type(dataset) is type(first_dataset) self.num_instances_per_dataset.append( 0 if self.distribution[i] == 0 else len(dataset) ) self.dataset_offsets.append(self.total_num_instances) self.total_num_instances += self.num_instances_per_dataset[i] def ordered_indices(self): start = time.time() with data_utils.numpy_seed(self.seed, self.epoch): logger.info( f"sampling new dataset with seed {self.seed} epoch {self.epoch}" ) sampled_indices = [] num_selected_instances = 0 # For each dataset i, sample self.distribution[i] * self.total_num_instances for i, key in enumerate(self.datasets): if self.distribution[i] == 0: # skip dataset if sampling probability is 0 continue if i < len(self.datasets) - 1: num_instances = int(self.distribution[i] * self.total_num_instances) high = self.dataset_offsets[i + 1] else: num_instances = self.total_num_instances - num_selected_instances high = self.total_num_instances logger.info(f"sampling {num_instances} from {key} dataset") num_selected_instances += num_instances # First, add k copies of the dataset where k = num_instances // len(dataset). # This ensures an equal distribution of the data points as much as possible. 
# For the remaining entries randomly sample them dataset_size = len(self.datasets[key]) num_copies = num_instances // dataset_size dataset_indices = ( np.random.permutation(high - self.dataset_offsets[i]) + self.dataset_offsets[i] )[: num_instances - num_copies * dataset_size] if num_copies > 0: sampled_indices += list( np.concatenate( ( np.repeat( np.arange(self.dataset_offsets[i], high), num_copies ), dataset_indices, ) ) ) else: sampled_indices += list(dataset_indices) assert ( len(sampled_indices) == self.total_num_instances ), f"{len(sampled_indices)} vs {self.total_num_instances}" np.random.shuffle(sampled_indices) if self.sort_indices: sampled_indices.sort(key=lambda i: self.num_tokens(i)) logger.info( "multi_corpus_dataset ordered_indices took {}s".format( time.time() - start ) ) return np.array(sampled_indices, dtype=np.int64) def _map_index(self, index: int): """ If dataset A has length N and dataset B has length M then index 1 maps to index 1 of dataset A, and index N + 1 maps to index 1 of B. """ counter = 0 for num_instances, key in zip(self.num_instances_per_dataset, self.datasets): if index < counter + num_instances: return index - counter, key counter += num_instances raise ValueError( "Invalid index: {}, max: {}".format(index, self.total_num_instances) ) def __len__(self): """ Length of this dataset is the sum of individual datasets """ return self.total_num_instances async def getitem(self, index): new_index, key = self._map_index(index) try: if hasattr(self.datasets[key], "getitem"): item = await self.datasets[key].getitem(new_index) else: item = self.datasets[key][new_index] item["full_id"] = index return item except Exception as e: e.args = (f"Error from {key} dataset", *e.args) raise def __getitem__(self, index): return asyncio.run(self.getitem(index)) async def getitems(self, indices): # initialize a bunch of everstore read operations # wait in the end to reduce overhead # very helpful if io is latency bounded max_concurrency = 32 sem = asyncio.Semaphore(max_concurrency) async def controlled_getitem(index): async with sem: return await self.getitem(index) coroutines = [] for index in indices: coroutines.append(controlled_getitem(index)) results = await asyncio.gather(*coroutines) return results def __getitems__(self, indices): return asyncio.run(self.getitems(indices)) def collater(self, samples): """ If we are doing batch sampling, then pick the right collater to use. Otherwise we assume all collaters are the same. 
""" if len(samples) == 0: return None if "full_id" in samples[0]: _, key = self._map_index(samples[0]["full_id"]) try: batch = self.datasets[key].collater(samples) except Exception: print(f"Collating failed for key {key}", flush=True) raise return batch else: # Subclasses may override __getitem__ to not specify full_id return list(self.datasets.values())[0].collater(samples) def num_tokens(self, index: int): index, key = self._map_index(index) return self.datasets[key].num_tokens(index) def size(self, index: int): index, key = self._map_index(index) return self.datasets[key].size(index) @property def can_reuse_epoch_itr_across_epochs(self): return False def set_epoch(self, epoch, **unused): super().set_epoch(epoch) logger.info(f"setting epoch of multi_corpus_dataset to {epoch}") self.epoch = epoch @property def supports_prefetch(self): return False @property def supports_fetch_outside_dataloader(self): return all( self.datasets[key].supports_fetch_outside_dataloader for key in self.datasets ) def batch_by_size( self, indices, max_tokens=None, max_sentences=None, required_batch_size_multiple=1, ): if not self.batch_sample: return super().batch_by_size( indices, max_tokens, max_sentences, required_batch_size_multiple ) dataset_indices = {key: [] for key in self.datasets} for i in indices: _, key = self._map_index(i) dataset_indices[key].append(i) batches = [] for key in dataset_indices: cur_batches = super().batch_by_size( np.array(dataset_indices[key], dtype=np.int64), max_tokens, max_sentences, required_batch_size_multiple, ) logger.info(f"Created {len(cur_batches)} batches for dataset {key}") batches += cur_batches # If this dataset is used in a distributed training setup, # then shuffle such that the order is seeded by the distributed rank # as well if self.distributed_rank is not None: with data_utils.numpy_seed(self.seed, self.epoch, self.distributed_rank): np.random.shuffle(batches) return batches
EXA-1-master
exa/libraries/fairseq/fairseq/data/multi_corpus_dataset.py
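A sketch of the MultiCorpusDataset above with two toy corpora, added for illustration only. Items must be dicts (the dataset injects a "full_id" key), so the example defines a throwaway FairseqDataset subclass rather than using a real corpus; set_epoch must be called before ordered_indices because the sampling seed depends on the epoch. All names and sizes here are made up.

from collections import OrderedDict

import torch
from fairseq.data import FairseqDataset
from fairseq.data.multi_corpus_dataset import MultiCorpusDataset


class ToyDataset(FairseqDataset):
    """Minimal FairseqDataset returning dict items, for illustration only."""

    def __init__(self, tensors):
        self.tensors = tensors

    def __getitem__(self, index):
        return {"source": self.tensors[index]}

    def __len__(self):
        return len(self.tensors)

    def num_tokens(self, index):
        return len(self.tensors[index])

    def size(self, index):
        return len(self.tensors[index])


corpus_a = ToyDataset([torch.tensor([1, 2]), torch.tensor([3, 4, 5])])
corpus_b = ToyDataset([torch.tensor([6]), torch.tensor([7, 8])])

dataset = MultiCorpusDataset(
    OrderedDict([("a", corpus_a), ("b", corpus_b)]),
    distribution=[0.75, 0.25],
    seed=42,
)
dataset.set_epoch(1)                 # sampling depends on (seed, epoch)
indices = dataset.ordered_indices()  # resampled every time it is called
item = dataset[int(indices[0])]      # dict with "source" plus an added "full_id"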
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import bisect import numpy as np from torch.utils.data.dataloader import default_collate from . import FairseqDataset class ConcatDataset(FairseqDataset): @staticmethod def cumsum(sequence, sample_ratios): r, s = [], 0 for e, ratio in zip(sequence, sample_ratios): curr_len = int(ratio * len(e)) r.append(curr_len + s) s += curr_len return r def __init__(self, datasets, sample_ratios=1): super(ConcatDataset, self).__init__() assert len(datasets) > 0, "datasets should not be an empty iterable" self.datasets = list(datasets) if isinstance(sample_ratios, int): sample_ratios = [sample_ratios] * len(self.datasets) self.sample_ratios = sample_ratios self.cumulative_sizes = self.cumsum(self.datasets, sample_ratios) self.real_sizes = [len(d) for d in self.datasets] def __len__(self): return self.cumulative_sizes[-1] def __getitem__(self, idx): dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx) return self.datasets[dataset_idx][sample_idx] def _get_dataset_and_sample_index(self, idx: int): dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) if dataset_idx == 0: sample_idx = idx else: sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] sample_idx = sample_idx % self.real_sizes[dataset_idx] return dataset_idx, sample_idx def collater(self, samples, **extra_args): # For now only supports datasets with same underlying collater implementations if hasattr(self.datasets[0], "collater"): return self.datasets[0].collater(samples, **extra_args) else: return default_collate(samples, **extra_args) def size(self, idx: int): """ Return an example's size as a float or tuple. """ dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx) return self.datasets[dataset_idx].size(sample_idx) def num_tokens(self, index: int): return np.max(self.size(index)) def attr(self, attr: str, index: int): dataset_idx = bisect.bisect_right(self.cumulative_sizes, index) return getattr(self.datasets[dataset_idx], attr, None) @property def sizes(self): _dataset_sizes = [] for ds, sr in zip(self.datasets, self.sample_ratios): if isinstance(ds.sizes, np.ndarray): _dataset_sizes.append(np.tile(ds.sizes, sr)) else: # Only support underlying dataset with single size array. assert isinstance(ds.sizes, list) _dataset_sizes.append(np.tile(ds.sizes[0], sr)) return np.concatenate(_dataset_sizes) @property def supports_prefetch(self): return all(d.supports_prefetch for d in self.datasets) def ordered_indices(self): """ Returns indices sorted by length. So less padding is needed. 
""" if isinstance(self.sizes, np.ndarray) and len(self.sizes.shape) > 1: # special handling for concatenating lang_pair_datasets indices = np.arange(len(self)) sizes = self.sizes tgt_sizes = ( sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None ) src_sizes = ( sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes ) # sort by target length, then source length if tgt_sizes is not None: indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")] return indices[np.argsort(src_sizes[indices], kind="mergesort")] else: return np.argsort(self.sizes) def prefetch(self, indices): frm = 0 for to, ds in zip(self.cumulative_sizes, self.datasets): real_size = len(ds) if getattr(ds, "supports_prefetch", False): ds.prefetch([(i - frm) % real_size for i in indices if frm <= i < to]) frm = to @property def can_reuse_epoch_itr_across_epochs(self): return all(d.can_reuse_epoch_itr_across_epochs for d in self.datasets) def set_epoch(self, epoch): super().set_epoch(epoch) for ds in self.datasets: if hasattr(ds, "set_epoch"): ds.set_epoch(epoch)
EXA-1-master
exa/libraries/fairseq/fairseq/data/concat_dataset.py
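A usage sketch for the ConcatDataset above, not part of the dump. It assumes fairseq's ListDataset wrapper and toy tensors; sample_ratios upsamples the second corpus so that both contribute a comparable number of examples.

import torch
from fairseq.data import ConcatDataset, ListDataset

a = ListDataset([torch.tensor([1, 2]), torch.tensor([3, 4, 5])], sizes=[2, 3])
b = ListDataset([torch.tensor([6])], sizes=[1])

concat = ConcatDataset([a, b], sample_ratios=[1, 3])  # repeat corpus b three times
print(len(concat))  # 2 + 3 * 1 = 5
print(concat[4])    # wraps around into corpus b -> tensor([6])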
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import BaseWrapperDataset


class ReplaceDataset(BaseWrapperDataset):
    """Replaces tokens found in the dataset by a specified replacement token

    Args:
        dataset (~torch.utils.data.Dataset): dataset to replace tokens in
        replace_map(Dictionary[int,int]): map of token to replace -> replacement token
        offsets (List[int]): do not replace tokens before (from left if pos, right if neg) this offset.
            should be as many as the number of objects returned by the underlying dataset __getitem__ method.
    """

    def __init__(self, dataset, replace_map, offsets):
        super().__init__(dataset)
        assert len(replace_map) > 0
        self.replace_map = replace_map
        self.offsets = offsets

    def __getitem__(self, index):
        item = self.dataset[index]
        is_tuple = isinstance(item, tuple)
        srcs = item if is_tuple else [item]

        for offset, src in zip(self.offsets, srcs):
            for k, v in self.replace_map.items():
                src_off = src[offset:] if offset >= 0 else src[:offset]
                src_off.masked_fill_(src_off == k, v)

        item = srcs if is_tuple else srcs[0]
        return item
EXA-1-master
exa/libraries/fairseq/fairseq/data/replace_dataset.py
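A usage sketch for the ReplaceDataset above with made-up token ids, added for illustration. The offsets argument protects a prefix (or suffix, if negative) from replacement, e.g. a BOS symbol at position 0; note that the replacement happens in place on the returned tensor.

import torch
from fairseq.data import ListDataset
from fairseq.data.replace_dataset import ReplaceDataset

tokens = ListDataset([torch.tensor([0, 7, 7, 2]), torch.tensor([0, 9, 7, 2])])
replaced = ReplaceDataset(tokens, replace_map={7: 42}, offsets=[1])  # never touch position 0
print(replaced[0])  # tensor([ 0, 42, 42,  2])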
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq import utils from . import FairseqDataset def backtranslate_samples(samples, collate_fn, generate_fn, cuda=True): """Backtranslate a list of samples. Given an input (*samples*) of the form: [{'id': 1, 'source': 'hallo welt'}] this will return: [{'id': 1, 'source': 'hello world', 'target': 'hallo welt'}] Args: samples (List[dict]): samples to backtranslate. Individual samples are expected to have a 'source' key, which will become the 'target' after backtranslation. collate_fn (callable): function to collate samples into a mini-batch generate_fn (callable): function to generate backtranslations cuda (bool): use GPU for generation (default: ``True``) Returns: List[dict]: an updated list of samples with a backtranslated source """ collated_samples = collate_fn(samples) s = utils.move_to_cuda(collated_samples) if cuda else collated_samples generated_sources = generate_fn(s) id_to_src = {sample["id"]: sample["source"] for sample in samples} # Go through each tgt sentence in batch and its corresponding best # generated hypothesis and create a backtranslation data pair # {id: id, source: generated backtranslation, target: original tgt} return [ { "id": id.item(), "target": id_to_src[id.item()], "source": hypos[0]["tokens"].cpu(), } for id, hypos in zip(collated_samples["id"], generated_sources) ] class BacktranslationDataset(FairseqDataset): """ Sets up a backtranslation dataset which takes a tgt batch, generates a src using a tgt-src backtranslation function (*backtranslation_fn*), and returns the corresponding `{generated src, input tgt}` batch. Args: tgt_dataset (~fairseq.data.FairseqDataset): the dataset to be backtranslated. Only the source side of this dataset will be used. After backtranslation, the source sentences in this dataset will be returned as the targets. src_dict (~fairseq.data.Dictionary): the dictionary of backtranslated sentences. tgt_dict (~fairseq.data.Dictionary, optional): the dictionary of sentences to be backtranslated. backtranslation_fn (callable, optional): function to call to generate backtranslations. This is typically the `generate` method of a :class:`~fairseq.sequence_generator.SequenceGenerator` object. Pass in None when it is not available at initialization time, and use set_backtranslation_fn function to set it when available. output_collater (callable, optional): function to call on the backtranslated samples to create the final batch (default: ``tgt_dataset.collater``). cuda: use GPU for generation """ def __init__( self, tgt_dataset, src_dict, tgt_dict=None, backtranslation_fn=None, output_collater=None, cuda=True, **kwargs ): self.tgt_dataset = tgt_dataset self.backtranslation_fn = backtranslation_fn self.output_collater = ( output_collater if output_collater is not None else tgt_dataset.collater ) self.cuda = cuda if torch.cuda.is_available() else False self.src_dict = src_dict self.tgt_dict = tgt_dict def __getitem__(self, index): """ Returns a single sample from *tgt_dataset*. Note that backtranslation is not applied in this step; use :func:`collater` instead to backtranslate a batch of samples. 
""" return self.tgt_dataset[index] def __len__(self): return len(self.tgt_dataset) def set_backtranslation_fn(self, backtranslation_fn): self.backtranslation_fn = backtranslation_fn def collater(self, samples): """Merge and backtranslate a list of samples to form a mini-batch. Using the samples from *tgt_dataset*, load a collated target sample to feed to the backtranslation model. Then take the backtranslation with the best score as the source and the original input as the target. Note: we expect *tgt_dataset* to provide a function `collater()` that will collate samples into the format expected by *backtranslation_fn*. After backtranslation, we will feed the new list of samples (i.e., the `(backtranslated source, original source)` pairs) to *output_collater* and return the result. Args: samples (List[dict]): samples to backtranslate and collate Returns: dict: a mini-batch with keys coming from *output_collater* """ if samples[0].get("is_dummy", False): return samples samples = backtranslate_samples( samples=samples, collate_fn=self.tgt_dataset.collater, generate_fn=(lambda net_input: self.backtranslation_fn(net_input)), cuda=self.cuda, ) return self.output_collater(samples) def num_tokens(self, index): """Just use the tgt dataset num_tokens""" return self.tgt_dataset.num_tokens(index) def ordered_indices(self): """Just use the tgt dataset ordered_indices""" return self.tgt_dataset.ordered_indices() def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``. Note: we use *tgt_dataset* to approximate the length of the source sentence, since we do not know the actual length until after backtranslation. """ tgt_size = self.tgt_dataset.size(index)[0] return (tgt_size, tgt_size) @property def supports_prefetch(self): return getattr(self.tgt_dataset, "supports_prefetch", False) def prefetch(self, indices): return self.tgt_dataset.prefetch(indices)
EXA-1-master
exa/libraries/fairseq/fairseq/data/backtranslation_dataset.py
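A self-contained sketch of how the BacktranslationDataset above is wired, for illustration only: toy_backtranslation_fn is a stand-in for SequenceGenerator.generate (it just reverses each padded source row), and the tiny Dictionary / LanguagePairDataset / ListDataset setup is made up. In real use the backtranslation function would wrap a trained reverse-direction model.

import torch
from fairseq.data import Dictionary, LanguagePairDataset, ListDataset
from fairseq.data.backtranslation_dataset import BacktranslationDataset

d = Dictionary()
for w in ["hallo", "welt", "heute"]:
    d.add_symbol(w)

sentences = [
    d.encode_line(s, add_if_not_exist=False).long()
    for s in ["hallo welt", "hallo welt heute"]
]
tgt = LanguagePairDataset(
    ListDataset(sentences, sizes=[len(t) for t in sentences]),
    [len(t) for t in sentences],
    d,
)


def toy_backtranslation_fn(sample):
    # stand-in for SequenceGenerator.generate: "translate" by reversing each padded row
    return [
        [{"tokens": row.cpu(), "score": 0.0}]
        for row in sample["net_input"]["src_tokens"].flip(dims=[1])
    ]


bt = BacktranslationDataset(
    tgt_dataset=tgt,
    src_dict=d,
    backtranslation_fn=toy_backtranslation_fn,
    cuda=False,
)
batch = bt.collater([bt[0], bt[1]])  # net_input = generated source, target = original sentence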
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from . import FairseqDataset


class IdDataset(FairseqDataset):
    def __getitem__(self, index):
        return index

    def __len__(self):
        return 0

    def collater(self, samples):
        return torch.tensor(samples)
EXA-1-master
exa/libraries/fairseq/fairseq/data/id_dataset.py
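A tiny illustration of the IdDataset above, not part of the dump: it simply echoes indices and collates them into a tensor. Its __len__ of 0 is deliberate, since it is meant to be composed inside NestedDictionaryDataset (seen earlier in this dump), where a sibling dataset defines the length.

import torch
from fairseq.data import IdDataset

ids = IdDataset()
print(ids[7])                    # 7 -- __getitem__ just returns the index
print(ids.collater([3, 1, 2]))   # tensor([3, 1, 2])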
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from fairseq.data import data_utils

from . import BaseWrapperDataset


class PaddingMaskDataset(BaseWrapperDataset):
    def __init__(self, dataset, left_pad, pad_length=None):
        super().__init__(dataset)
        self.left_pad = left_pad
        self.pad_length = pad_length

    def __getitem__(self, index):
        item = self.dataset[index]
        return torch.zeros_like(item).bool()

    def __len__(self):
        return len(self.dataset)

    def collater(self, samples):
        return data_utils.collate_tokens(
            samples, True, left_pad=self.left_pad, pad_to_length=self.pad_length
        )


class LeftPaddingMaskDataset(PaddingMaskDataset):
    def __init__(self, dataset):
        super().__init__(dataset, left_pad=True)


class RightPaddingMaskDataset(PaddingMaskDataset):
    def __init__(self, dataset):
        super().__init__(dataset, left_pad=False)
EXA-1-master
exa/libraries/fairseq/fairseq/data/padding_mask_dataset.py
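A sketch of RightPaddingMaskDataset from the file above, for illustration: it assumes fairseq's ListDataset wrapper and toy token tensors, and shows that the collated mask marks padded positions with True.

import torch
from fairseq.data import ListDataset
from fairseq.data.padding_mask_dataset import RightPaddingMaskDataset

tokens = ListDataset([torch.tensor([5, 6, 7]), torch.tensor([8, 9])])
masks = RightPaddingMaskDataset(tokens)
print(masks.collater([masks[0], masks[1]]))
# tensor([[False, False, False],
#         [False, False,  True]])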
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch

from . import BaseWrapperDataset


class PrependDataset(BaseWrapperDataset):
    def __init__(self, dataset, prepend_getter, ensure_first_token_is=None):
        super().__init__(dataset)
        self.prepend_getter = prepend_getter
        self.ensure_first_token = ensure_first_token_is

    def __getitem__(self, idx):
        item = self.dataset[idx]
        is_tuple = isinstance(item, tuple)
        src = item[0] if is_tuple else item

        assert self.ensure_first_token is None or src[0] == self.ensure_first_token
        prepend_idx = self.prepend_getter(self.dataset, idx)
        assert isinstance(prepend_idx, int)
        src[0] = prepend_idx
        item = tuple((src,) + item[1:]) if is_tuple else src
        return item
EXA-1-master
exa/libraries/fairseq/fairseq/data/prepend_dataset.py
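A usage sketch for the PrependDataset above with made-up ids, added for illustration. Despite its name, the class overwrites the first token of each item (commonly a placeholder BOS) with whatever prepend_getter returns, e.g. a per-example language id; it does not make the sequence longer.

import torch
from fairseq.data import ListDataset
from fairseq.data.prepend_dataset import PrependDataset

tokens = ListDataset([torch.tensor([0, 5, 6, 2]), torch.tensor([0, 7, 2])])
lang_ids = [101, 102]  # hypothetical per-example language ids
prepended = PrependDataset(
    tokens,
    prepend_getter=lambda dataset, index: lang_ids[index],
    ensure_first_token_is=0,  # sanity-check the placeholder before overwriting it
)
print(prepended[1])  # tensor([102,   7,   2])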
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from . import BaseWrapperDataset, data_utils from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel class AddTargetDataset(BaseWrapperDataset): def __init__( self, dataset, labels, pad, eos, batch_targets, process_label=None, label_len_fn=None, add_to_input=False, text_compression_level=TextCompressionLevel.none, ): super().__init__(dataset) self.labels = labels self.batch_targets = batch_targets self.pad = pad self.eos = eos self.process_label = process_label self.label_len_fn = label_len_fn self.add_to_input = add_to_input self.text_compressor = TextCompressor(level=text_compression_level) def get_label(self, index, process_fn=None): lbl = self.labels[index] lbl = self.text_compressor.decompress(lbl) return lbl if process_fn is None else process_fn(lbl) def __getitem__(self, index): item = self.dataset[index] item["label"] = self.get_label(index, process_fn=self.process_label) return item def size(self, index): sz = self.dataset.size(index) own_sz = self.label_len_fn(self.get_label(index)) return sz, own_sz def collater(self, samples): collated = self.dataset.collater(samples) if len(collated) == 0: return collated indices = set(collated["id"].tolist()) target = [s["label"] for s in samples if s["id"] in indices] if self.batch_targets: collated["target_lengths"] = torch.LongTensor([len(t) for t in target]) target = data_utils.collate_tokens(target, pad_idx=self.pad, left_pad=False) collated["ntokens"] = collated["target_lengths"].sum().item() else: collated["ntokens"] = sum([len(t) for t in target]) collated["target"] = target if self.add_to_input: eos = target.new_full((target.size(0), 1), self.eos) collated["target"] = torch.cat([target, eos], dim=-1).long() collated["net_input"]["prev_output_tokens"] = torch.cat( [eos, target], dim=-1 ).long() collated["ntokens"] += target.size(0) return collated def filter_indices_by_size(self, indices, max_sizes): indices, ignored = data_utils._filter_by_size_dynamic( indices, self.size, max_sizes ) return indices, ignored
EXA-1-master
exa/libraries/fairseq/fairseq/data/add_class_target_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import OrderedDict from typing import Callable, Dict, List import numpy as np from . import FairseqDataset def uniform_sampler(x): # Sample from uniform distribution return np.random.choice(x, 1).item() class MultiCorpusSampledDataset(FairseqDataset): """ Stores multiple instances of FairseqDataset together and in every iteration creates a batch by first sampling a dataset according to a specified probability distribution and then getting instances from that dataset. Args: datasets: an OrderedDict of FairseqDataset instances. sampling_func: A function for sampling over list of dataset keys. The default strategy is to sample uniformly. """ def __init__( self, datasets: Dict[str, FairseqDataset], sampling_func: Callable[[List], int] = None, ): super().__init__() assert isinstance(datasets, OrderedDict) self.datasets = datasets if sampling_func is None: sampling_func = uniform_sampler self.sampling_func = sampling_func self.total_num_instances = 0 for _, dataset in datasets.items(): assert isinstance(dataset, FairseqDataset) self.total_num_instances += len(dataset) self._ordered_indices = None def __len__(self): """ Length of this dataset is the sum of individual datasets """ return self.total_num_instances def ordered_indices(self): """ Ordered indices for batching. Here we call the underlying dataset's ordered_indices() so that we get the same random ordering as we would have from using the underlying dataset directly. """ if self._ordered_indices is None: self._ordered_indices = OrderedDict( [ (key, dataset.ordered_indices()) for key, dataset in self.datasets.items() ] ) return np.arange(len(self)) def _map_index_to_dataset(self, key: int, index: int): """ Different underlying datasets have different lengths. In order to ensure we are not accessing an index outside the range of the current dataset size, we wrap around. This function should be called after we have created an ordering for this and all underlying datasets. """ assert ( self._ordered_indices is not None ), "Must call MultiCorpusSampledDataset.ordered_indices() first" mapped_index = index % len(self.datasets[key]) return self._ordered_indices[key][mapped_index] def __getitem__(self, index: int): """ Get the item associated with index from each underlying dataset. Since index is in the range of [0, TotalNumInstances], we need to map the index to the dataset before retrieving the item. """ return OrderedDict( [ (key, dataset[self._map_index_to_dataset(key, index)]) for key, dataset in self.datasets.items() ] ) def collater(self, samples: List[Dict]): """ Generate a mini-batch for this dataset. To convert this into a regular mini-batch we use the following logic: 1. Select a dataset using the specified probability distribution. 2. Call the collater function of the selected dataset. """ if len(samples) == 0: return None selected_key = self.sampling_func(list(self.datasets.keys())) selected_samples = [sample[selected_key] for sample in samples] return self.datasets[selected_key].collater(selected_samples) def num_tokens(self, index: int): """ Return an example's length (number of tokens), used for batching. Here we return the max across all examples at index across all underlying datasets. 
""" return max( dataset.num_tokens(self._map_index_to_dataset(key, index)) for key, dataset in self.datasets.items() ) def size(self, index: int): """ Return an example's size as a float or tuple. Here we return the max across all underlying datasets. This value is used when filtering a dataset with max-positions. """ return max( dataset.size(self._map_index_to_dataset(key, index)) for key, dataset in self.datasets.items() ) @property def supports_prefetch(self): return all( getattr(dataset, "supports_prefetch", False) for dataset in self.datasets.values() ) def prefetch(self, indices): for key, dataset in self.datasets.items(): dataset.prefetch( [self._map_index_to_dataset(key, index) for index in indices] ) @property def supports_fetch_outside_dataloader(self): return all( self.datasets[key].supports_fetch_outside_dataloader for key in self.datasets )
EXA-1-master
exa/libraries/fairseq/fairseq/data/multi_corpus_sampled_dataset.py
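A small sketch of the MultiCorpusSampledDataset above with two toy corpora wrapped in fairseq's ListDataset, for illustration only. ordered_indices() must be called once before items are fetched; shorter corpora wrap around, and each item is an OrderedDict with one entry per corpus.

from collections import OrderedDict

import torch
from fairseq.data import ListDataset
from fairseq.data.multi_corpus_sampled_dataset import MultiCorpusSampledDataset

a = ListDataset([torch.tensor([1, 2]), torch.tensor([3, 4])], sizes=[2, 2])
b = ListDataset([torch.tensor([5, 6])], sizes=[2])

dataset = MultiCorpusSampledDataset(OrderedDict([("a", a), ("b", b)]))
dataset.ordered_indices()  # required before indexing
item = dataset[2]          # index 2 wraps around to b[0] for the shorter corpus
print(item["b"])           # tensor([5, 6])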
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import FairseqDataset


class NumSamplesDataset(FairseqDataset):
    def __getitem__(self, index):
        return 1

    def __len__(self):
        return 0

    def collater(self, samples):
        return sum(samples)
EXA-1-master
exa/libraries/fairseq/fairseq/data/num_samples_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from fairseq.data import data_utils class WordNoising(object): """Generate a noisy version of a sentence, without changing words themselves.""" def __init__(self, dictionary, bpe_cont_marker="@@", bpe_end_marker=None): self.dictionary = dictionary self.bpe_end = None if bpe_cont_marker: self.bpe_end = np.array( [ not self.dictionary[i].endswith(bpe_cont_marker) for i in range(len(self.dictionary)) ] ) elif bpe_end_marker: self.bpe_end = np.array( [ self.dictionary[i].endswith(bpe_end_marker) for i in range(len(self.dictionary)) ] ) self.get_word_idx = ( self._get_bpe_word_idx if self.bpe_end is not None else self._get_token_idx ) def noising(self, x, lengths, noising_prob=0.0): raise NotImplementedError() def _get_bpe_word_idx(self, x): """ Given a list of BPE tokens, for every index in the tokens list, return the index of the word grouping that it belongs to. For example, for input x corresponding to ["how", "are", "y@@", "ou"], return [[0], [1], [2], [2]]. """ # x: (T x B) bpe_end = self.bpe_end[x] if x.size(0) == 1 and x.size(1) == 1: # Special case when we only have one word in x. If x = [[N]], # bpe_end is a scalar (bool) instead of a 2-dim array of bools, # which makes the sum operation below fail. return np.array([[0]]) # do a reduce front sum to generate word ids word_idx = bpe_end[::-1].cumsum(0)[::-1] word_idx = word_idx.max(0)[None, :] - word_idx return word_idx def _get_token_idx(self, x): """ This is to extend noising functions to be able to apply to non-bpe tokens, e.g. word or characters. """ x = torch.t(x) word_idx = np.array([range(len(x_i)) for x_i in x]) return np.transpose(word_idx) class WordDropout(WordNoising): """Randomly drop input words. If not passing blank_idx (default is None), then dropped words will be removed. Otherwise, it will be replaced by the blank_idx.""" def __init__( self, dictionary, default_dropout_prob=0.1, bpe_cont_marker="@@", bpe_end_marker=None, ): super().__init__(dictionary, bpe_cont_marker, bpe_end_marker) self.default_dropout_prob = default_dropout_prob def noising(self, x, lengths, dropout_prob=None, blank_idx=None): if dropout_prob is None: dropout_prob = self.default_dropout_prob # x: (T x B), lengths: B if dropout_prob == 0: return x, lengths assert 0 < dropout_prob < 1 # be sure to drop entire words word_idx = self.get_word_idx(x) sentences = [] modified_lengths = [] for i in range(lengths.size(0)): # Since dropout probabilities need to apply over non-pad tokens, # it is not trivial to generate the keep mask without consider # input lengths; otherwise, this could be done outside the loop # We want to drop whole words based on word_idx grouping num_words = max(word_idx[:, i]) + 1 # ith example: [x0, x1, ..., eos, pad, ..., pad] # We should only generate keep probs for non-EOS tokens. Thus if the # input sentence ends in EOS, the last word idx is not included in # the dropout mask generation and we append True to always keep EOS. # Otherwise, just generate the dropout mask for all word idx # positions. has_eos = x[lengths[i] - 1, i] == self.dictionary.eos() if has_eos: # has eos? 
keep = np.random.rand(num_words - 1) >= dropout_prob keep = np.append(keep, [True]) # keep EOS symbol else: keep = np.random.rand(num_words) >= dropout_prob words = x[: lengths[i], i].tolist() # TODO: speed up the following loop # drop words from the input according to keep new_s = [ w if keep[word_idx[j, i]] else blank_idx for j, w in enumerate(words) ] new_s = [w for w in new_s if w is not None] # we need to have at least one word in the sentence (more than the # start / end sentence symbols) if len(new_s) <= 1: # insert at beginning in case the only token left is EOS # EOS should be at end of list. new_s.insert(0, words[np.random.randint(0, len(words))]) assert len(new_s) >= 1 and ( not has_eos # Either don't have EOS at end or last token is EOS or (len(new_s) >= 2 and new_s[-1] == self.dictionary.eos()) ), "New sentence is invalid." sentences.append(new_s) modified_lengths.append(len(new_s)) # re-construct input modified_lengths = torch.LongTensor(modified_lengths) modified_x = torch.LongTensor( modified_lengths.max(), modified_lengths.size(0) ).fill_(self.dictionary.pad()) for i in range(modified_lengths.size(0)): modified_x[: modified_lengths[i], i].copy_(torch.LongTensor(sentences[i])) return modified_x, modified_lengths class WordShuffle(WordNoising): """Shuffle words by no more than k positions.""" def __init__( self, dictionary, default_max_shuffle_distance=3, bpe_cont_marker="@@", bpe_end_marker=None, ): super().__init__(dictionary, bpe_cont_marker, bpe_end_marker) self.default_max_shuffle_distance = 3 def noising(self, x, lengths, max_shuffle_distance=None): if max_shuffle_distance is None: max_shuffle_distance = self.default_max_shuffle_distance # x: (T x B), lengths: B if max_shuffle_distance == 0: return x, lengths # max_shuffle_distance < 1 will return the same sequence assert max_shuffle_distance > 1 # define noise word scores noise = np.random.uniform( 0, max_shuffle_distance, size=(x.size(0), x.size(1)), ) noise[0] = -1 # do not move start sentence symbol # be sure to shuffle entire words word_idx = self.get_word_idx(x) x2 = x.clone() for i in range(lengths.size(0)): length_no_eos = lengths[i] if x[lengths[i] - 1, i] == self.dictionary.eos(): length_no_eos = lengths[i] - 1 # generate a random permutation scores = word_idx[:length_no_eos, i] + noise[word_idx[:length_no_eos, i], i] # ensure no reordering inside a word scores += 1e-6 * np.arange(length_no_eos.item()) permutation = scores.argsort() # shuffle words x2[:length_no_eos, i].copy_( x2[:length_no_eos, i][torch.from_numpy(permutation)] ) return x2, lengths class UnsupervisedMTNoising(WordNoising): """ Implements the default configuration for noising in UnsupervisedMT (github.com/facebookresearch/UnsupervisedMT) """ def __init__( self, dictionary, max_word_shuffle_distance, word_dropout_prob, word_blanking_prob, bpe_cont_marker="@@", bpe_end_marker=None, ): super().__init__(dictionary) self.max_word_shuffle_distance = max_word_shuffle_distance self.word_dropout_prob = word_dropout_prob self.word_blanking_prob = word_blanking_prob self.word_dropout = WordDropout( dictionary=dictionary, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker, ) self.word_shuffle = WordShuffle( dictionary=dictionary, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker, ) def noising(self, x, lengths): # 1. Word Shuffle noisy_src_tokens, noisy_src_lengths = self.word_shuffle.noising( x=x, lengths=lengths, max_shuffle_distance=self.max_word_shuffle_distance, ) # 2. 
Word Dropout noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising( x=noisy_src_tokens, lengths=noisy_src_lengths, dropout_prob=self.word_dropout_prob, ) # 3. Word Blanking noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising( x=noisy_src_tokens, lengths=noisy_src_lengths, dropout_prob=self.word_blanking_prob, blank_idx=self.dictionary.unk(), ) return noisy_src_tokens class NoisingDataset(torch.utils.data.Dataset): def __init__( self, src_dataset, src_dict, seed, noiser=None, noising_class=UnsupervisedMTNoising, **kwargs ): """ Wrap a :class:`~torch.utils.data.Dataset` and apply noise to the samples based on the supplied noising configuration. Args: src_dataset (~torch.utils.data.Dataset): dataset to wrap. to build self.src_dataset -- a LanguagePairDataset with src dataset as the source dataset and None as the target dataset. Should NOT have padding so that src_lengths are accurately calculated by language_pair_dataset collate function. We use language_pair_dataset here to encapsulate the tgt_dataset so we can re-use the LanguagePairDataset collater to format the batches in the structure that SequenceGenerator expects. src_dict (~fairseq.data.Dictionary): source dictionary seed (int): seed to use when generating random noise noiser (WordNoising): a pre-initialized :class:`WordNoising` instance. If this is None, a new instance will be created using *noising_class* and *kwargs*. noising_class (class, optional): class to use to initialize a default :class:`WordNoising` instance. kwargs (dict, optional): arguments to initialize the default :class:`WordNoising` instance given by *noiser*. """ self.src_dataset = src_dataset self.src_dict = src_dict self.seed = seed self.noiser = ( noiser if noiser is not None else noising_class( dictionary=src_dict, **kwargs, ) ) self.sizes = src_dataset.sizes def __getitem__(self, index): """ Returns a single noisy sample. Multiple samples are fed to the collater create a noising dataset batch. """ src_tokens = self.src_dataset[index] src_lengths = torch.LongTensor([len(src_tokens)]) src_tokens = src_tokens.unsqueeze(0) # Transpose src tokens to fit expected shape of x in noising function # (batch size, sequence length) -> (sequence length, batch size) src_tokens_t = torch.t(src_tokens) with data_utils.numpy_seed(self.seed + index): noisy_src_tokens = self.noiser.noising(src_tokens_t, src_lengths) # Transpose back to expected src_tokens format # (sequence length, 1) -> (1, sequence length) noisy_src_tokens = torch.t(noisy_src_tokens) return noisy_src_tokens[0] def __len__(self): """ The length of the noising dataset is the length of src. """ return len(self.src_dataset) @property def supports_prefetch(self): return self.src_dataset.supports_prefetch def prefetch(self, indices): if self.src_dataset.supports_prefetch: self.src_dataset.prefetch(indices)
EXA-1-master
exa/libraries/fairseq/fairseq/data/noising.py
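An illustrative run of UnsupervisedMTNoising from the file above on a single toy sentence, not part of the dump. The Dictionary, the BPE-style tokens and all probabilities are made up; the input is (T x B) with an appended EOS, and the combined noiser returns only the noised token tensor.

import torch
from fairseq.data import Dictionary
from fairseq.data.noising import UnsupervisedMTNoising

d = Dictionary()
for w in ["how", "are", "y@@", "ou", "today"]:
    d.add_symbol(w)

noiser = UnsupervisedMTNoising(
    dictionary=d,
    max_word_shuffle_distance=3,
    word_dropout_prob=0.1,
    word_blanking_prob=0.1,
)

tokens = d.encode_line("how are y@@ ou today", add_if_not_exist=False).long()
x = tokens.unsqueeze(1)                  # (T, B) with B = 1
lengths = torch.LongTensor([x.size(0)])
noisy = noiser.noising(x, lengths)       # shuffled / dropped / blanked copy, still (T', 1)
print(noisy.squeeze(1))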
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json import logging import os import random from pathlib import Path import numpy as np import torch import torch.utils.data from . import data_utils from fairseq.data.fairseq_dataset import FairseqDataset F0_FRAME_SPACE = 0.005 # sec logger = logging.getLogger(__name__) class ExpressiveCodeDataConfig(object): def __init__(self, json_path): with open(json_path, "r") as f: self.config = json.load(f) self._manifests = self.config["manifests"] @property def manifests(self): return self._manifests @property def n_units(self): return self.config["n_units"] @property def sampling_rate(self): return self.config["sampling_rate"] @property def code_hop_size(self): return self.config["code_hop_size"] @property def f0_stats(self): """pre-computed f0 statistics path""" return self.config.get("f0_stats", None) @property def f0_vq_type(self): """naive or precomp""" return self.config["f0_vq_type"] @property def f0_vq_name(self): return self.config["f0_vq_name"] def get_f0_vq_naive_quantizer(self, log, norm_mean, norm_std): key = "log" if log else "linear" if norm_mean and norm_std: key += "_mean_std_norm" elif norm_mean: key += "_mean_norm" else: key += "_none_norm" return self.config["f0_vq_naive_quantizer"][key] @property def f0_vq_n_units(self): return self.config["f0_vq_n_units"] @property def multispkr(self): """how to parse speaker label from audio path""" return self.config.get("multispkr", None) def get_f0(audio, rate=16000): try: import amfm_decompy.basic_tools as basic import amfm_decompy.pYAAPT as pYAAPT from librosa.util import normalize except ImportError: raise "Please install amfm_decompy (`pip install AMFM-decompy`) and librosa (`pip install librosa`)." 
assert audio.ndim == 1 frame_length = 20.0 # ms to_pad = int(frame_length / 1000 * rate) // 2 audio = normalize(audio) * 0.95 audio = np.pad(audio, (to_pad, to_pad), "constant", constant_values=0) audio = basic.SignalObj(audio, rate) pitch = pYAAPT.yaapt( audio, frame_length=frame_length, frame_space=F0_FRAME_SPACE * 1000, nccf_thresh1=0.25, tda_frame_length=25.0, ) f0 = pitch.samp_values return f0 def interpolate_f0(f0): try: from scipy.interpolate import interp1d except ImportError: raise "Please install scipy (`pip install scipy`)" orig_t = np.arange(f0.shape[0]) f0_interp = f0[:] ii = f0_interp != 0 if ii.sum() > 1: f0_interp = interp1d( orig_t[ii], f0_interp[ii], bounds_error=False, kind="linear", fill_value=0 )(orig_t) f0_interp = torch.Tensor(f0_interp).type_as(f0).to(f0.device) return f0_interp def naive_quantize(x, edges): bin_idx = (x.view(-1, 1) > edges.view(1, -1)).long().sum(dim=1) return bin_idx def load_wav(full_path): try: import soundfile as sf except ImportError: raise "Please install soundfile (`pip install SoundFile`)" data, sampling_rate = sf.read(full_path) return data, sampling_rate def parse_code(code_str, dictionary, append_eos): code, duration = torch.unique_consecutive( torch.ShortTensor(list(map(int, code_str.split()))), return_counts=True ) code = " ".join(map(str, code.tolist())) code = dictionary.encode_line(code, append_eos).short() if append_eos: duration = torch.cat((duration, duration.new_zeros((1,))), dim=0) # eos duration = duration.short() return code, duration def parse_manifest(manifest, dictionary): audio_files = [] codes = [] durations = [] speakers = [] with open(manifest) as info: for line in info.readlines(): sample = eval(line.strip()) if "cpc_km100" in sample: k = "cpc_km100" elif "hubert_km100" in sample: k = "hubert_km100" elif "phone" in sample: k = "phone" else: assert False, "unknown format" code = sample[k] code, duration = parse_code(code, dictionary, append_eos=True) codes.append(code) durations.append(duration) audio_files.append(sample["audio"]) speakers.append(sample.get("speaker", None)) return audio_files, codes, durations, speakers def parse_speaker(path, method): if type(path) == str: path = Path(path) if method == "parent_name": return path.parent.name elif method == "parent_parent_name": return path.parent.parent.name elif method == "_": return path.name.split("_")[0] elif method == "single": return "A" elif callable(method): return method(path) else: raise NotImplementedError() def get_f0_by_filename(filename, tgt_sampling_rate): audio, sampling_rate = load_wav(filename) if sampling_rate != tgt_sampling_rate: raise ValueError( "{} SR doesn't match target {} SR".format(sampling_rate, tgt_sampling_rate) ) # compute un-interpolated f0, and use Ann's interp in __getitem__ if set f0 = get_f0(audio, rate=tgt_sampling_rate) f0 = torch.from_numpy(f0.astype(np.float32)) return f0 def align_f0_to_durations(f0, durations, f0_code_ratio, tol=1): code_len = durations.sum() targ_len = int(f0_code_ratio * code_len) diff = f0.size(0) - targ_len assert abs(diff) <= tol, ( f"Cannot subsample F0: |{f0.size(0)} - {f0_code_ratio}*{code_len}|" f" > {tol} (dur=\n{durations})" ) if diff > 0: f0 = f0[:targ_len] elif diff < 0: f0 = torch.cat((f0, f0.new_full((-diff,), f0[-1])), 0) f0_offset = 0.0 seg_f0s = [] for dur in durations: f0_dur = dur.item() * f0_code_ratio seg_f0 = f0[int(f0_offset) : int(f0_offset + f0_dur)] seg_f0 = seg_f0[seg_f0 != 0] if len(seg_f0) == 0: seg_f0 = torch.tensor(0).type(seg_f0.type()) else: seg_f0 = seg_f0.mean() 
        seg_f0s.append(seg_f0)
        f0_offset += f0_dur

    assert int(f0_offset) == f0.size(0), f"{f0_offset} {f0.size()} {durations.sum()}"
    return torch.tensor(seg_f0s)


class Paddings(object):
    def __init__(self, code_val, dur_val=0, f0_val=-2.0):
        self.code = code_val
        self.dur = dur_val
        self.f0 = f0_val


class Shifts(object):
    def __init__(self, shifts_str, pads):
        self._shifts = list(map(int, shifts_str.split(",")))
        assert len(self._shifts) == 2, self._shifts
        assert all(s >= 0 for s in self._shifts)
        self.extra_length = max(s for s in self._shifts)
        self.pads = pads

    @property
    def dur(self):
        return self._shifts[0]

    @property
    def f0(self):
        return self._shifts[1]

    @staticmethod
    def shift_one(seq, left_pad_num, right_pad_num, pad):
        assert seq.ndim == 1
        bos = seq.new_full((left_pad_num,), pad)
        eos = seq.new_full((right_pad_num,), pad)
        seq = torch.cat([bos, seq, eos])
        mask = torch.ones_like(seq).bool()
        mask[left_pad_num : len(seq) - right_pad_num] = 0
        return seq, mask

    def __call__(self, code, dur, f0):
        if self.extra_length == 0:
            code_mask = torch.zeros_like(code).bool()
            dur_mask = torch.zeros_like(dur).bool()
            f0_mask = torch.zeros_like(f0).bool()
            return code, code_mask, dur, dur_mask, f0, f0_mask

        code, code_mask = self.shift_one(code, 0, self.extra_length, self.pads.code)
        dur, dur_mask = self.shift_one(
            dur, self.dur, self.extra_length - self.dur, self.pads.dur
        )
        f0, f0_mask = self.shift_one(
            f0, self.f0, self.extra_length - self.f0, self.pads.f0
        )
        return code, code_mask, dur, dur_mask, f0, f0_mask


class CodeDataset(FairseqDataset):
    def __init__(
        self,
        manifest,
        dictionary,
        dur_dictionary,
        f0_dictionary,
        config,
        discrete_dur,
        discrete_f0,
        log_f0,
        normalize_f0_mean,
        normalize_f0_std,
        interpolate_f0,
        return_filename=False,
        strip_filename=True,
        shifts="0,0",
        return_continuous_f0=False,
    ):
        random.seed(1234)
        self.dictionary = dictionary
        self.dur_dictionary = dur_dictionary
        self.f0_dictionary = f0_dictionary
        self.config = config

        # duration config
        self.discrete_dur = discrete_dur

        # pitch config
        self.discrete_f0 = discrete_f0
        self.log_f0 = log_f0
        self.normalize_f0_mean = normalize_f0_mean
        self.normalize_f0_std = normalize_f0_std
        self.interpolate_f0 = interpolate_f0

        self.return_filename = return_filename
        self.strip_filename = strip_filename
        self.f0_code_ratio = config.code_hop_size / (
            config.sampling_rate * F0_FRAME_SPACE
        )

        # use lazy loading to avoid sharing file handlers across workers
        self.manifest = manifest
        self._codes = None
        self._durs = None
        self._f0s = None
        with open(f"{manifest}.leng.txt", "r") as f:
            lengs = [int(line.rstrip()) for line in f]
            edges = np.cumsum([0] + lengs)
            self.starts, self.ends = edges[:-1], edges[1:]
        with open(f"{manifest}.path.txt", "r") as f:
            self.file_names = [line.rstrip() for line in f]
        logger.info(f"num entries: {len(self.starts)}")

        if os.path.exists(f"{manifest}.f0_stat.pt"):
            self.f0_stats = torch.load(f"{manifest}.f0_stat.pt")
        elif config.f0_stats:
            self.f0_stats = torch.load(config.f0_stats)

        self.multispkr = config.multispkr
        if config.multispkr:
            with open(f"{manifest}.speaker.txt", "r") as f:
                self.spkrs = [line.rstrip() for line in f]
            self.id_to_spkr = sorted(self.spkrs)
            self.spkr_to_id = {k: v for v, k in enumerate(self.id_to_spkr)}

        self.pads = Paddings(
            dictionary.pad(),
            0,  # use 0 for duration padding
            f0_dictionary.pad() if discrete_f0 else -5.0,
        )
        self.shifts = Shifts(shifts, pads=self.pads)
        self.return_continuous_f0 = return_continuous_f0

    def get_data_handlers(self):
        logging.info(f"loading data for {self.manifest}")
        self._codes = np.load(f"{self.manifest}.code.npy", mmap_mode="r")
        self._durs = np.load(f"{self.manifest}.dur.npy", mmap_mode="r")
        if self.discrete_f0:
            if self.config.f0_vq_type == "precomp":
                self._f0s = np.load(
                    f"{self.manifest}.{self.config.f0_vq_name}.npy", mmap_mode="r"
                )
            elif self.config.f0_vq_type == "naive":
                self._f0s = np.load(f"{self.manifest}.f0.npy", mmap_mode="r")
                quantizers_path = self.config.get_f0_vq_naive_quantizer(
                    self.log_f0, self.normalize_f0_mean, self.normalize_f0_std
                )
                quantizers = torch.load(quantizers_path)
                n_units = self.config.f0_vq_n_units
                self._f0_quantizer = torch.from_numpy(quantizers[n_units])
            else:
                raise ValueError(f"f0_vq_type {self.config.f0_vq_type} not supported")
        else:
            self._f0s = np.load(f"{self.manifest}.f0.npy", mmap_mode="r")

    def preprocess_f0(self, f0, stats):
        """
        1. interpolate
        2. log transform (keep unvoiced frame 0)
        """
        # TODO: change this to be dependent on config for naive quantizer
        f0 = f0.clone()
        if self.interpolate_f0:
            f0 = interpolate_f0(f0)

        mask = f0 != 0  # only process voiced frames
        if self.log_f0:
            f0[mask] = f0[mask].log()
        if self.normalize_f0_mean:
            mean = stats["logf0_mean"] if self.log_f0 else stats["f0_mean"]
            f0[mask] = f0[mask] - mean
        if self.normalize_f0_std:
            std = stats["logf0_std"] if self.log_f0 else stats["f0_std"]
            f0[mask] = f0[mask] / std
        return f0

    def _get_raw_item(self, index):
        start, end = self.starts[index], self.ends[index]
        if self._codes is None:
            self.get_data_handlers()
        code = torch.from_numpy(np.array(self._codes[start:end])).long()
        dur = torch.from_numpy(np.array(self._durs[start:end]))
        f0 = torch.from_numpy(np.array(self._f0s[start:end]))
        return code, dur, f0

    def __getitem__(self, index):
        code, dur, f0 = self._get_raw_item(index)
        code = torch.cat([code.new([self.dictionary.bos()]), code])

        # use 0 for eos and bos
        dur = torch.cat([dur.new([0]), dur])
        if self.discrete_dur:
            dur = self.dur_dictionary.encode_line(
                " ".join(map(str, dur.tolist())), append_eos=False
            ).long()
        else:
            dur = dur.float()

        # TODO: find a more elegant approach
        raw_f0 = None
        if self.discrete_f0:
            if self.config.f0_vq_type == "precomp":
                f0 = self.f0_dictionary.encode_line(
                    " ".join(map(str, f0.tolist())), append_eos=False
                ).long()
            else:
                f0 = f0.float()
                f0 = self.preprocess_f0(f0, self.f0_stats[self.spkrs[index]])
                if self.return_continuous_f0:
                    raw_f0 = f0
                    raw_f0 = torch.cat(
                        [raw_f0.new([self.f0_dictionary.bos()]), raw_f0]
                    )
                f0 = naive_quantize(f0, self._f0_quantizer)
            f0 = torch.cat([f0.new([self.f0_dictionary.bos()]), f0])
        else:
            f0 = f0.float()
            if self.multispkr:
                f0 = self.preprocess_f0(f0, self.f0_stats[self.spkrs[index]])
            else:
                f0 = self.preprocess_f0(f0, self.f0_stats)
            f0 = torch.cat([f0.new([0]), f0])

        if raw_f0 is not None:
            *_, raw_f0, raw_f0_mask = self.shifts(code, dur, raw_f0)
        else:
            raw_f0_mask = None
        code, code_mask, dur, dur_mask, f0, f0_mask = self.shifts(code, dur, f0)
        if raw_f0_mask is not None:
            assert (raw_f0_mask == f0_mask).all()

        # is a padded frame if either input or output is padded
        feats = {
            "source": code[:-1],
            "target": code[1:],
            "mask": code_mask[1:].logical_or(code_mask[:-1]),
            "dur_source": dur[:-1],
            "dur_target": dur[1:],
            "dur_mask": dur_mask[1:].logical_or(dur_mask[:-1]),
            "f0_source": f0[:-1],
            "f0_target": f0[1:],
            "f0_mask": f0_mask[1:].logical_or(f0_mask[:-1]),
        }

        if raw_f0 is not None:
            feats["raw_f0"] = raw_f0[1:]

        if self.return_filename:
            fname = self.file_names[index]
            feats["filename"] = (
                fname if not self.strip_filename else Path(fname).with_suffix("").name
            )
        return feats

    def __len__(self):
        return len(self.starts)

    def size(self, index):
        return self.ends[index] - self.starts[index] + self.shifts.extra_length

    def num_tokens(self, index):
        return self.size(index)

    def collater(self, samples):
        pad_idx, eos_idx = self.dictionary.pad(), self.dictionary.eos()
        if len(samples) == 0:
            return {}

        src_tokens = data_utils.collate_tokens(
            [s["source"] for s in samples], pad_idx, eos_idx, left_pad=False
        )
        tgt_tokens = data_utils.collate_tokens(
            [s["target"] for s in samples],
            pad_idx=pad_idx,
            eos_idx=pad_idx,  # appending padding, eos is there already
            left_pad=False,
        )
        src_durs, tgt_durs = [
            data_utils.collate_tokens(
                [s[k] for s in samples],
                pad_idx=self.pads.dur,
                eos_idx=self.pads.dur,
                left_pad=False,
            )
            for k in ["dur_source", "dur_target"]
        ]
        src_f0s, tgt_f0s = [
            data_utils.collate_tokens(
                [s[k] for s in samples],
                pad_idx=self.pads.f0,
                eos_idx=self.pads.f0,
                left_pad=False,
            )
            for k in ["f0_source", "f0_target"]
        ]
        mask, dur_mask, f0_mask = [
            data_utils.collate_tokens(
                [s[k] for s in samples],
                pad_idx=1,
                eos_idx=1,
                left_pad=False,
            )
            for k in ["mask", "dur_mask", "f0_mask"]
        ]

        src_lengths = torch.LongTensor([s["source"].numel() for s in samples])
        n_tokens = sum(len(s["source"]) for s in samples)

        result = {
            "nsentences": len(samples),
            "ntokens": n_tokens,
            "net_input": {
                "src_tokens": src_tokens,
                "src_lengths": src_lengths,
                "dur_src": src_durs,
                "f0_src": src_f0s,
            },
            "target": tgt_tokens,
            "dur_target": tgt_durs,
            "f0_target": tgt_f0s,
            "mask": mask,
            "dur_mask": dur_mask,
            "f0_mask": f0_mask,
        }

        if "filename" in samples[0]:
            result["filename"] = [s["filename"] for s in samples]

        # TODO: move this hack into the inference dataset
        if "prefix" in samples[0]:
            result["prefix"] = [s["prefix"] for s in samples]

        if "raw_f0" in samples[0]:
            raw_f0s = data_utils.collate_tokens(
                [s["raw_f0"] for s in samples],
                pad_idx=self.pads.f0,
                eos_idx=self.pads.f0,
                left_pad=False,
            )
            result["raw_f0"] = raw_f0s

        return result
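

if __name__ == "__main__":
    # Illustrative sketch only, not part of the original fairseq module: it
    # exercises the Paddings/Shifts helpers above on made-up toy tensors to
    # show how the duration and F0 streams are delayed relative to the code
    # stream. All values below are invented for the demo.
    toy_pads = Paddings(code_val=1, dur_val=0, f0_val=-2.0)
    toy_shifts = Shifts("1,2", pads=toy_pads)  # delay dur by 1 slot, f0 by 2

    code = torch.tensor([5, 6, 7])
    dur = torch.tensor([1, 2, 3])
    f0 = torch.tensor([0.10, 0.20, 0.30])

    code, code_mask, dur, dur_mask, f0, f0_mask = toy_shifts(code, dur, f0)
    # Every stream now has length 3 + extra_length == 5: code is padded on the
    # right only, dur is shifted right by one slot, f0 by two, and each mask
    # flags the padded positions so losses can ignore them.
    print(code.tolist(), code_mask.tolist())
    print(dur.tolist(), dur_mask.tolist())
    print(f0.tolist(), f0_mask.tolist())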
EXA-1-master
exa/libraries/fairseq/fairseq/data/codedataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np

from fairseq.data import data_utils

from . import BaseWrapperDataset


class TruncateDataset(BaseWrapperDataset):
    """Truncate a sequence by returning the first truncation_length tokens"""

    def __init__(self, dataset, truncation_length):
        super().__init__(dataset)
        assert truncation_length is not None
        self.truncation_length = truncation_length
        self.dataset = dataset

    def __getitem__(self, index):
        item = self.dataset[index]
        item_len = item.size(0)
        if item_len > self.truncation_length:
            item = item[: self.truncation_length]
        return item

    @property
    def sizes(self):
        return np.minimum(self.dataset.sizes, self.truncation_length)

    def __len__(self):
        return len(self.dataset)


class RandomCropDataset(TruncateDataset):
    """Truncate a sequence by returning a random crop of truncation_length tokens"""

    def __init__(self, dataset, truncation_length, seed=1):
        super().__init__(dataset, truncation_length)
        self.seed = seed
        self.epoch = 0

    @property
    def can_reuse_epoch_itr_across_epochs(self):
        return True  # only the crop changes, not item sizes

    def set_epoch(self, epoch, **unused):
        super().set_epoch(epoch)
        self.epoch = epoch

    def __getitem__(self, index):
        with data_utils.numpy_seed(self.seed, self.epoch, index):
            item = self.dataset[index]
            item_len = item.size(0)
            excess = item_len - self.truncation_length
            if excess > 0:
                start_idx = np.random.randint(0, excess)
                item = item[start_idx : start_idx + self.truncation_length]
            return item


def maybe_shorten_dataset(
    dataset,
    split,
    shorten_data_split_list,
    shorten_method,
    tokens_per_sample,
    seed,
):
    truncate_split = (
        split in shorten_data_split_list.split(",")
        or len(shorten_data_split_list) == 0
    )
    if shorten_method == "truncate" and truncate_split:
        dataset = TruncateDataset(dataset, tokens_per_sample)
    elif shorten_method == "random_crop" and truncate_split:
        dataset = RandomCropDataset(dataset, tokens_per_sample, seed)
    return dataset
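

if __name__ == "__main__":
    # Illustrative sketch only, not part of the original module: a minimal
    # in-memory dataset shows how TruncateDataset keeps the first N tokens
    # while RandomCropDataset takes a random N-token window. ToyDataset and
    # its contents are invented for this demo; real callers pass a fairseq
    # dataset instead.
    import torch

    class ToyDataset:
        def __init__(self, items):
            self.items = items
            self.sizes = np.array([len(t) for t in items])

        def __getitem__(self, index):
            return self.items[index]

        def __len__(self):
            return len(self.items)

    toy = ToyDataset([torch.arange(10), torch.arange(3)])
    print(TruncateDataset(toy, 5)[0].tolist())  # first 5 tokens: [0, 1, 2, 3, 4]
    print(RandomCropDataset(toy, 5, seed=1)[0].tolist())  # random 5-token window
    print(TruncateDataset(toy, 5)[1].tolist())  # short items pass through unchanged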
EXA-1-master
exa/libraries/fairseq/fairseq/data/shorten_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import contextlib
import logging

import numpy as np

from fairseq.data.data_utils import numpy_seed

from . import BaseWrapperDataset

logger = logging.getLogger(__name__)


class SubsampleDataset(BaseWrapperDataset):
    """Subsamples a given dataset by a specified ratio. Subsampling is done on the number of examples

    Args:
        dataset (~torch.utils.data.Dataset): dataset to subsample
        size_ratio(float): the ratio to subsample to. must be between 0 and 1 (exclusive)
    """

    def __init__(self, dataset, size_ratio, shuffle=False, seed=None):
        super().__init__(dataset)
        assert size_ratio < 1
        self.actual_size = np.ceil(len(dataset) * size_ratio).astype(int)
        with numpy_seed(seed) if seed is not None else contextlib.ExitStack():
            self.indices = np.random.choice(
                list(range(len(self.dataset))), self.actual_size, replace=False
            )
        self.shuffle = shuffle
        logger.info(
            "subsampled dataset from {} to {} (ratio={})".format(
                len(self.dataset), self.actual_size, size_ratio
            )
        )

    def __getitem__(self, index):
        return self.dataset[self.indices[index]]

    def __len__(self):
        return self.actual_size

    def collater(self, samples):
        return self.dataset.collater(samples)

    @property
    def sizes(self):
        return self.dataset.sizes[self.indices]

    @property
    def name(self):
        return self.dataset.name

    def num_tokens(self, index):
        return self.dataset.num_tokens(self.indices[index])

    def size(self, index):
        return self.dataset.size(self.indices[index])

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        order.append(self.sizes)
        return np.lexsort(order)

    def prefetch(self, indices):
        self.dataset.prefetch(self.indices[indices])
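

if __name__ == "__main__":
    # Illustrative sketch only, not part of the original module: subsample a
    # tiny in-memory dataset to half its size. ToyDataset and its contents are
    # invented for this demo; real callers pass any fairseq dataset exposing
    # `sizes`, `num_tokens`, `size`, and `collater` for the wrapper to
    # delegate to.
    import torch

    class ToyDataset:
        def __init__(self, items):
            self.items = items
            self.sizes = np.array([len(t) for t in items])

        def __getitem__(self, index):
            return self.items[index]

        def __len__(self):
            return len(self.items)

        def num_tokens(self, index):
            return self.sizes[index]

        def size(self, index):
            return self.sizes[index]

        def collater(self, samples):
            return samples

    toy = ToyDataset([torch.arange(n) for n in (3, 5, 7, 9)])
    sub = SubsampleDataset(toy, size_ratio=0.5, seed=0)
    print(len(sub))  # ceil(4 * 0.5) == 2 examples survive
    print([sub[i].tolist() for i in range(len(sub))])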
EXA-1-master
exa/libraries/fairseq/fairseq/data/subsample_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np

from . import BaseWrapperDataset


class SortDataset(BaseWrapperDataset):
    def __init__(self, dataset, sort_order):
        super().__init__(dataset)
        if not isinstance(sort_order, (list, tuple)):
            sort_order = [sort_order]
        self.sort_order = sort_order

        assert all(len(so) == len(dataset) for so in sort_order)

    def ordered_indices(self):
        return np.lexsort(self.sort_order)
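

if __name__ == "__main__":
    # Illustrative sketch only, not part of the original module: np.lexsort
    # sorts by the *last* key first, so passing [tie_break, sizes] yields
    # indices ordered primarily by length, with ties broken by the first key.
    # The toy dataset and keys below are invented for the demo.
    sizes = np.array([7, 3, 5, 3])
    tie_break = np.array([0, 1, 2, 3])
    sorted_ds = SortDataset(list("abcd"), sort_order=[tie_break, sizes])
    print(sorted_ds.ordered_indices())  # [1 3 2 0]: shortest items first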
EXA-1-master
exa/libraries/fairseq/fairseq/data/sort_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from enum import Enum


class TextCompressionLevel(Enum):
    none = 0
    low = 1
    high = 2


class TextCompressor(object):
    def __init__(
        self, level: TextCompressionLevel, max_input_byte_length: int = 2**16
    ):
        self.level = level
        self.max_input_length = max_input_byte_length

    def compress(self, text: str) -> bytes:
        if self.level == TextCompressionLevel.low:
            import zlib

            # zlib: built-in, fast
            return zlib.compress(text.encode(), level=0)
        elif self.level == TextCompressionLevel.high:
            try:
                import unishox2

                # unishox2: optimized for short text but slower
            except ImportError:
                raise ImportError(
                    "Please install unishox2 for the text compression feature: "
                    "pip install unishox2-py3"
                )
            assert len(text.encode()) <= self.max_input_length
            return unishox2.compress(text)[0]
        else:
            return text.encode()

    def decompress(self, compressed: bytes) -> str:
        if self.level == TextCompressionLevel.low:
            import zlib

            return zlib.decompress(compressed).decode()
        elif self.level == TextCompressionLevel.high:
            try:
                import unishox2
            except ImportError:
                raise ImportError(
                    "Please install unishox2 for the text compression feature: "
                    "pip install unishox2-py3"
                )
            return unishox2.decompress(compressed, self.max_input_length)
        else:
            return compressed.decode()
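

if __name__ == "__main__":
    # Illustrative sketch only, not part of the original module: round-trip a
    # string through each compression level. The sample text is invented; the
    # `high` level needs the optional unishox2-py3 package, so it is guarded.
    sample = "fairseq stores transcripts as bytes to keep dataset objects compact"

    # note: `low` calls zlib with level=0 above, which stores the payload
    # uncompressed, so it mainly buys a uniform bytes interface.
    for level in (TextCompressionLevel.none, TextCompressionLevel.low):
        tc = TextCompressor(level)
        packed = tc.compress(sample)
        assert tc.decompress(packed) == sample
        print(level.name, len(packed), "bytes")

    try:
        tc = TextCompressor(TextCompressionLevel.high)
        packed = tc.compress(sample)
        assert tc.decompress(packed) == sample
        print("high", len(packed), "bytes")
    except ImportError:
        print("high level skipped: unishox2-py3 is not installed")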
EXA-1-master
exa/libraries/fairseq/fairseq/data/text_compressor.py